source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
_channel.py | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invocation-side implementation of gRPC Python."""
import logging
import sys
import threading
import time
import grpc
from grpc import _common
from grpc import _grpcio_metadata
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
_LOGGER = logging.getLogger(__name__)

_USER_AGENT = 'grpc-python/{}'.format(_grpcio_metadata.__version__)

_EMPTY_FLAGS = 0

# For each RPC arity, the set of operation types that will be outstanding on
# the completion queue once the call starts; _RPCState.due is seeded from
# these and drained as events arrive.
_UNARY_UNARY_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.send_message,
    cygrpc.OperationType.send_close_from_client,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_message,
    cygrpc.OperationType.receive_status_on_client,
)
_UNARY_STREAM_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.send_message,
    cygrpc.OperationType.send_close_from_client,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_status_on_client,
)
_STREAM_UNARY_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_message,
    cygrpc.OperationType.receive_status_on_client,
)
_STREAM_STREAM_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_status_on_client,
)

_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
    'Exception calling channel subscription callback!')

# repr() templates for terminated _Rendezvous objects.
_OK_RENDEZVOUS_REPR_FORMAT = ('<_Rendezvous of RPC that terminated with:\n'
                              '\tstatus = {}\n'
                              '\tdetails = "{}"\n'
                              '>')
_NON_OK_RENDEZVOUS_REPR_FORMAT = ('<_Rendezvous of RPC that terminated with:\n'
                                  '\tstatus = {}\n'
                                  '\tdetails = "{}"\n'
                                  '\tdebug_error_string = "{}"\n'
                                  '>')
def _deadline(timeout):
return None if timeout is None else time.time() + timeout
def _unknown_code_details(unknown_cygrpc_code, details):
return 'Server sent unknown code {} and details "{}"'.format(
unknown_cygrpc_code, details)
def _wait_once_until(condition, until):
    """Wait on `condition` once, bounded by the absolute deadline `until`.

    Raises grpc.FutureTimeoutError if `until` has already passed.
    """
    if until is None:
        condition.wait()
        return
    remaining = until - time.time()
    if remaining < 0:
        raise grpc.FutureTimeoutError()
    condition.wait(timeout=remaining)
class _RPCState(object):
    """Mutable client-side state of one RPC, guarded by `self.condition`."""

    def __init__(self, due, initial_metadata, trailing_metadata, code, details):
        # All fields below are read and written under this condition's lock.
        self.condition = threading.Condition()
        # The cygrpc.OperationType objects representing events due from the RPC's
        # completion queue.
        self.due = set(due)
        self.initial_metadata = initial_metadata
        self.response = None
        self.trailing_metadata = trailing_metadata
        self.code = code
        self.details = details
        self.debug_error_string = None
        # The semantics of grpc.Future.cancel and grpc.Future.cancelled are
        # slightly wonky, so they have to be tracked separately from the rest of the
        # result of the RPC. This field tracks whether cancellation was requested
        # prior to termination of the RPC.
        self.cancelled = False
        self.callbacks = []
        # Fork epoch at creation; compared against the current epoch to detect
        # that a fork happened while this RPC was in flight.
        self.fork_epoch = cygrpc.get_fork_epoch()

    def reset_postfork_child(self):
        # A condition/lock must not be reused across fork in the child.
        self.condition = threading.Condition()
def _abort(state, code, details):
if state.code is None:
state.code = code
state.details = details
if state.initial_metadata is None:
state.initial_metadata = ()
state.trailing_metadata = ()
def _handle_event(event, state, response_deserializer):
    """Fold one completion-queue event's batch operations into `state`.

    Caller must hold state.condition. Returns the done-callbacks to invoke
    (outside the lock) when this event carried the terminal status; returns
    an empty list otherwise.
    """
    callbacks = []
    for batch_operation in event.batch_operations:
        operation_type = batch_operation.type()
        state.due.remove(operation_type)
        if operation_type == cygrpc.OperationType.receive_initial_metadata:
            state.initial_metadata = batch_operation.initial_metadata()
        elif operation_type == cygrpc.OperationType.receive_message:
            serialized_response = batch_operation.message()
            if serialized_response is not None:
                response = _common.deserialize(serialized_response,
                                               response_deserializer)
                if response is None:
                    details = 'Exception deserializing response!'
                    _abort(state, grpc.StatusCode.INTERNAL, details)
                else:
                    state.response = response
        elif operation_type == cygrpc.OperationType.receive_status_on_client:
            state.trailing_metadata = batch_operation.trailing_metadata()
            if state.code is None:
                code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get(
                    batch_operation.code())
                if code is None:
                    state.code = grpc.StatusCode.UNKNOWN
                    # Report the raw cygrpc code in the details. (The
                    # original passed the just-computed `code`, which is
                    # always None on this branch.)
                    state.details = _unknown_code_details(
                        batch_operation.code(), batch_operation.details())
                else:
                    state.code = code
                    state.details = batch_operation.details()
                state.debug_error_string = batch_operation.error_string()
            callbacks.extend(state.callbacks)
            state.callbacks = None
    return callbacks
def _event_handler(state, response_deserializer):
    """Create the tag callback that integrates call events into `state`.

    The returned callable reports True when the RPC has no operations left
    due and no fork intervened since the RPC started, which lets the channel
    spin thread retire the call.
    """

    def handle_event(event):
        with state.condition:
            callbacks = _handle_event(event, state, response_deserializer)
            state.condition.notify_all()
            done = not state.due
        # User callbacks run outside the lock to avoid re-entrancy deadlocks.
        for callback in callbacks:
            callback()
        return done and state.fork_epoch >= cygrpc.get_fork_epoch()

    return handle_event
#pylint: disable=too-many-statements
def _consume_request_iterator(request_iterator, state, call, request_serializer,
                              event_handler):
    """Drain `request_iterator` on a daemon thread, sending each message.

    Serializes each request, issues a send-message operation, waits for it to
    complete, and finally sends close-from-client when the iterator ends.
    Aborts the RPC on iteration or serialization failure.
    """
    # With fork support on, waits must wake periodically so a fork is not
    # blocked indefinitely by this thread.
    if cygrpc.is_fork_support_enabled():
        condition_wait_timeout = 1.0
    else:
        condition_wait_timeout = None

    def consume_request_iterator():  # pylint: disable=too-many-branches
        while True:
            return_from_user_request_generator_invoked = False
            try:
                # The thread may die in user-code. Do not block fork for this.
                cygrpc.enter_user_request_generator()
                request = next(request_iterator)
            except StopIteration:
                break
            except Exception:  # pylint: disable=broad-except
                cygrpc.return_from_user_request_generator()
                return_from_user_request_generator_invoked = True
                code = grpc.StatusCode.UNKNOWN
                details = 'Exception iterating requests!'
                _LOGGER.exception(details)
                call.cancel(_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
                            details)
                _abort(state, code, details)
                return
            finally:
                # Guard against double-invocation on the exception path above.
                if not return_from_user_request_generator_invoked:
                    cygrpc.return_from_user_request_generator()
            serialized_request = _common.serialize(request, request_serializer)
            with state.condition:
                if state.code is None and not state.cancelled:
                    if serialized_request is None:
                        code = grpc.StatusCode.INTERNAL
                        details = 'Exception serializing request!'
                        call.cancel(
                            _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
                            details)
                        _abort(state, code, details)
                        return
                    else:
                        operations = (cygrpc.SendMessageOperation(
                            serialized_request, _EMPTY_FLAGS),)
                        operating = call.operate(operations, event_handler)
                        if operating:
                            state.due.add(cygrpc.OperationType.send_message)
                        else:
                            return
                        # Wait for the send to complete before pulling the
                        # next request from the iterator.
                        while True:
                            state.condition.wait(condition_wait_timeout)
                            cygrpc.block_if_fork_in_progress(state)
                            if state.code is None:
                                if cygrpc.OperationType.send_message not in state.due:
                                    break
                            else:
                                return
                else:
                    return
        with state.condition:
            if state.code is None:
                operations = (
                    cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),)
                operating = call.operate(operations, event_handler)
                if operating:
                    state.due.add(cygrpc.OperationType.send_close_from_client)

    consumption_thread = cygrpc.ForkManagedThread(
        target=consume_request_iterator)
    consumption_thread.setDaemon(True)
    consumption_thread.start()
class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call):
    """Client-side RPC handle acting as exception, future, and call at once.

    All mutable state lives in the shared _RPCState; every accessor takes
    state.condition to observe or await a consistent view of the RPC.
    """

    def __init__(self, state, call, response_deserializer, deadline):
        super(_Rendezvous, self).__init__()
        self._state = state
        self._call = call
        self._response_deserializer = response_deserializer
        self._deadline = deadline

    def cancel(self):
        """Request local cancellation of a still-running RPC.

        Always returns False (termination is not synchronous here).
        """
        with self._state.condition:
            if self._state.code is None:
                code = grpc.StatusCode.CANCELLED
                details = 'Locally cancelled by application!'
                self._call.cancel(
                    _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code], details)
                self._state.cancelled = True
                _abort(self._state, code, details)
                self._state.condition.notify_all()
        return False

    def cancelled(self):
        with self._state.condition:
            return self._state.cancelled

    def running(self):
        with self._state.condition:
            return self._state.code is None

    def done(self):
        with self._state.condition:
            return self._state.code is not None

    def result(self, timeout=None):
        """Block until termination; return the response or raise."""
        until = None if timeout is None else time.time() + timeout
        with self._state.condition:
            while True:
                if self._state.code is None:
                    _wait_once_until(self._state.condition, until)
                elif self._state.code is grpc.StatusCode.OK:
                    return self._state.response
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    # This object is itself the RpcError describing failure.
                    raise self

    def exception(self, timeout=None):
        """Block until termination; return the RPC's error or None."""
        until = None if timeout is None else time.time() + timeout
        with self._state.condition:
            while True:
                if self._state.code is None:
                    _wait_once_until(self._state.condition, until)
                elif self._state.code is grpc.StatusCode.OK:
                    return None
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    return self

    def traceback(self, timeout=None):
        """Block until termination; return a traceback for failure or None."""
        until = None if timeout is None else time.time() + timeout
        with self._state.condition:
            while True:
                if self._state.code is None:
                    _wait_once_until(self._state.condition, until)
                elif self._state.code is grpc.StatusCode.OK:
                    return None
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    # Raise and catch self to synthesize a traceback object.
                    try:
                        raise self
                    except grpc.RpcError:
                        return sys.exc_info()[2]

    def add_done_callback(self, fn):
        with self._state.condition:
            if self._state.code is None:
                self._state.callbacks.append(lambda: fn(self))
                return
        # Already terminated: invoke immediately, outside the lock.
        fn(self)

    def _next(self):
        """Fetch the next streamed response message, blocking as needed."""
        with self._state.condition:
            if self._state.code is None:
                event_handler = _event_handler(self._state,
                                               self._response_deserializer)
                operating = self._call.operate(
                    (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
                    event_handler)
                if operating:
                    self._state.due.add(cygrpc.OperationType.receive_message)
            elif self._state.code is grpc.StatusCode.OK:
                raise StopIteration()
            else:
                raise self
            while True:
                self._state.condition.wait()
                if self._state.response is not None:
                    response = self._state.response
                    self._state.response = None
                    return response
                elif cygrpc.OperationType.receive_message not in self._state.due:
                    # No message pending and none due: the stream ended.
                    if self._state.code is grpc.StatusCode.OK:
                        raise StopIteration()
                    elif self._state.code is not None:
                        raise self

    def __iter__(self):
        return self

    def __next__(self):
        return self._next()

    def next(self):
        # Python 2 iterator protocol.
        return self._next()

    def is_active(self):
        with self._state.condition:
            return self._state.code is None

    def time_remaining(self):
        """Seconds until the deadline, floored at 0; None if no deadline."""
        if self._deadline is None:
            return None
        else:
            return max(self._deadline - time.time(), 0)

    def add_callback(self, callback):
        """Register a termination callback; False if already terminated."""
        with self._state.condition:
            if self._state.callbacks is None:
                return False
            else:
                self._state.callbacks.append(callback)
                return True

    def initial_metadata(self):
        with self._state.condition:
            while self._state.initial_metadata is None:
                self._state.condition.wait()
            return self._state.initial_metadata

    def trailing_metadata(self):
        with self._state.condition:
            while self._state.trailing_metadata is None:
                self._state.condition.wait()
            return self._state.trailing_metadata

    def code(self):
        with self._state.condition:
            while self._state.code is None:
                self._state.condition.wait()
            return self._state.code

    def details(self):
        with self._state.condition:
            while self._state.details is None:
                self._state.condition.wait()
            return _common.decode(self._state.details)

    def debug_error_string(self):
        with self._state.condition:
            while self._state.debug_error_string is None:
                self._state.condition.wait()
            return _common.decode(self._state.debug_error_string)

    def _repr(self):
        with self._state.condition:
            if self._state.code is None:
                return '<_Rendezvous object of in-flight RPC>'
            elif self._state.code is grpc.StatusCode.OK:
                return _OK_RENDEZVOUS_REPR_FORMAT.format(
                    self._state.code, self._state.details)
            else:
                return _NON_OK_RENDEZVOUS_REPR_FORMAT.format(
                    self._state.code, self._state.details,
                    self._state.debug_error_string)

    def __repr__(self):
        return self._repr()

    def __str__(self):
        return self._repr()

    def __del__(self):
        # Cancel an RPC whose handle was dropped while still in flight.
        with self._state.condition:
            if self._state.code is None:
                self._state.code = grpc.StatusCode.CANCELLED
                self._state.details = 'Cancelled upon garbage collection!'
                self._state.cancelled = True
                self._call.cancel(
                    _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[self._state.code],
                    self._state.details)
                self._state.condition.notify_all()
def _start_unary_request(request, timeout, request_serializer):
    """Serialize a unary request.

    Returns (deadline, serialized_request, None) on success, or
    (deadline, None, rendezvous) carrying an INTERNAL-status rendezvous when
    serialization failed.
    """
    deadline = _deadline(timeout)
    serialized_request = _common.serialize(request, request_serializer)
    if serialized_request is None:
        state = _RPCState((), (), (), grpc.StatusCode.INTERNAL,
                          'Exception serializing request!')
        rendezvous = _Rendezvous(state, None, None, deadline)
        return deadline, None, rendezvous
    else:
        return deadline, serialized_request, None
def _end_unary_response_blocking(state, call, with_call, deadline):
    """Return the unary response (optionally paired with its call) or raise.

    Raises a _Rendezvous for any non-OK terminal status.
    """
    if state.code is grpc.StatusCode.OK:
        if with_call:
            rendezvous = _Rendezvous(state, call, None, deadline)
            return state.response, rendezvous
        else:
            return state.response
    else:
        raise _Rendezvous(state, None, None, deadline)
def _stream_unary_invocation_operationses(metadata, initial_metadata_flags):
    """Build the operation batches used to invoke a stream-unary RPC.

    Initial-metadata receipt is its own batch, separate from the
    message/status batch.
    """
    return (
        (
            cygrpc.SendInitialMetadataOperation(metadata,
                                                initial_metadata_flags),
            cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
            cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
        ),
        (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
    )
def _stream_unary_invocation_operationses_and_tags(metadata,
                                                   initial_metadata_flags):
    """Pair each stream-unary operation batch with a None tag.

    This is the shape expected by cygrpc segregated calls.
    """
    operationses = _stream_unary_invocation_operationses(
        metadata, initial_metadata_flags)
    return tuple((operations, None) for operations in operationses)
class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
    """Affords invoking a unary-unary RPC from client-side."""
    # pylint: disable=too-many-arguments

    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_context()

    def _prepare(self, request, timeout, metadata, wait_for_ready):
        """Serialize the request and assemble state plus operations.

        Returns (state, operations, deadline, None) on success or
        (None, None, None, rendezvous) on serialization failure.
        """
        deadline, serialized_request, rendezvous = _start_unary_request(
            request, timeout, self._request_serializer)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        if serialized_request is None:
            return None, None, None, rendezvous
        else:
            state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
            operations = (
                cygrpc.SendInitialMetadataOperation(metadata,
                                                    initial_metadata_flags),
                cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS),
                cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
            )
            return state, operations, deadline, None

    def _blocking(self, request, timeout, metadata, credentials,
                  wait_for_ready):
        """Run the RPC to completion on the calling thread."""
        state, operations, deadline, rendezvous = self._prepare(
            request, timeout, metadata, wait_for_ready)
        if state is None:
            raise rendezvous  # pylint: disable-msg=raising-bad-type
        else:
            # A segregated call is driven here rather than by the channel
            # spin thread.
            call = self._channel.segregated_call(
                cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
                self._method, None, deadline, metadata, None
                if credentials is None else credentials._credentials, ((
                    operations,
                    None,
                ),), self._context)
            event = call.next_event()
            _handle_event(event, state, self._response_deserializer)
            return state, call

    def __call__(self,
                 request,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None):
        state, call, = self._blocking(request, timeout, metadata, credentials,
                                      wait_for_ready)
        return _end_unary_response_blocking(state, call, False, None)

    def with_call(self,
                  request,
                  timeout=None,
                  metadata=None,
                  credentials=None,
                  wait_for_ready=None):
        state, call, = self._blocking(request, timeout, metadata, credentials,
                                      wait_for_ready)
        return _end_unary_response_blocking(state, call, True, None)

    def future(self,
               request,
               timeout=None,
               metadata=None,
               credentials=None,
               wait_for_ready=None):
        """Start the RPC asynchronously; return a _Rendezvous future."""
        state, operations, deadline, rendezvous = self._prepare(
            request, timeout, metadata, wait_for_ready)
        if state is None:
            raise rendezvous  # pylint: disable-msg=raising-bad-type
        else:
            event_handler = _event_handler(state, self._response_deserializer)
            call = self._managed_call(
                cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
                self._method, None, deadline, metadata, None
                if credentials is None else credentials._credentials,
                (operations,), event_handler, self._context)
            return _Rendezvous(state, call, self._response_deserializer,
                               deadline)
class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
    """Affords invoking a unary-stream RPC from client-side."""
    # pylint: disable=too-many-arguments

    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_context()

    def __call__(self,
                 request,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None):
        """Start the RPC; return a _Rendezvous iterator over responses."""
        deadline, serialized_request, rendezvous = _start_unary_request(
            request, timeout, self._request_serializer)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        if serialized_request is None:
            raise rendezvous  # pylint: disable-msg=raising-bad-type
        else:
            state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
            operationses = (
                (
                    cygrpc.SendInitialMetadataOperation(metadata,
                                                        initial_metadata_flags),
                    cygrpc.SendMessageOperation(serialized_request,
                                                _EMPTY_FLAGS),
                    cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
                    cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
                ),
                (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
            )
            event_handler = _event_handler(state, self._response_deserializer)
            call = self._managed_call(
                cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
                self._method, None, deadline, metadata, None
                if credentials is None else credentials._credentials,
                operationses, event_handler, self._context)
            return _Rendezvous(state, call, self._response_deserializer,
                               deadline)
class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
    """Affords invoking a stream-unary RPC from client-side."""
    # pylint: disable=too-many-arguments

    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_context()

    def _blocking(self, request_iterator, timeout, metadata, credentials,
                  wait_for_ready):
        """Run the RPC to completion, draining events on this thread."""
        deadline = _deadline(timeout)
        state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        call = self._channel.segregated_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, deadline, metadata, None
            if credentials is None else credentials._credentials,
            _stream_unary_invocation_operationses_and_tags(
                metadata, initial_metadata_flags), self._context)
        # Requests are sent by a background thread; no event handler is
        # passed because this thread drains the events below.
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer, None)
        while True:
            event = call.next_event()
            with state.condition:
                _handle_event(event, state, self._response_deserializer)
                state.condition.notify_all()
                if not state.due:
                    break
        return state, call

    def __call__(self,
                 request_iterator,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None):
        state, call, = self._blocking(request_iterator, timeout, metadata,
                                      credentials, wait_for_ready)
        return _end_unary_response_blocking(state, call, False, None)

    def with_call(self,
                  request_iterator,
                  timeout=None,
                  metadata=None,
                  credentials=None,
                  wait_for_ready=None):
        state, call, = self._blocking(request_iterator, timeout, metadata,
                                      credentials, wait_for_ready)
        return _end_unary_response_blocking(state, call, True, None)

    def future(self,
               request_iterator,
               timeout=None,
               metadata=None,
               credentials=None,
               wait_for_ready=None):
        """Start the RPC asynchronously; return a _Rendezvous future."""
        deadline = _deadline(timeout)
        state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
        event_handler = _event_handler(state, self._response_deserializer)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        call = self._managed_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, deadline, metadata, None
            if credentials is None else credentials._credentials,
            _stream_unary_invocation_operationses(
                metadata, initial_metadata_flags), event_handler, self._context)
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer, event_handler)
        return _Rendezvous(state, call, self._response_deserializer, deadline)
class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
    """Affords invoking a stream-stream RPC from client-side."""
    # pylint: disable=too-many-arguments

    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_context()

    def __call__(self,
                 request_iterator,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None):
        """Start the RPC; return a _Rendezvous iterator over responses."""
        deadline = _deadline(timeout)
        state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        operationses = (
            (
                cygrpc.SendInitialMetadataOperation(metadata,
                                                    initial_metadata_flags),
                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
            ),
            (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
        )
        event_handler = _event_handler(state, self._response_deserializer)
        call = self._managed_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, deadline, metadata, None
            if credentials is None else credentials._credentials, operationses,
            event_handler, self._context)
        # Requests are sent concurrently by a background thread.
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer, event_handler)
        return _Rendezvous(state, call, self._response_deserializer, deadline)
class _InitialMetadataFlags(int):
    """Stores immutable initial metadata flags"""

    def __new__(cls, value=_EMPTY_FLAGS):
        # Mask off any bits outside those cygrpc recognizes.
        value &= cygrpc.InitialMetadataFlags.used_mask
        return super(_InitialMetadataFlags, cls).__new__(cls, value)

    def with_wait_for_ready(self, wait_for_ready):
        """Return flags with wait_for_ready set or cleared.

        None leaves the flags untouched; True/False also mark the setting as
        explicitly chosen by the caller.
        """
        if wait_for_ready is not None:
            if wait_for_ready:
                return self.__class__(self | cygrpc.InitialMetadataFlags.wait_for_ready | \
                    cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set)
            elif not wait_for_ready:
                return self.__class__(self & ~cygrpc.InitialMetadataFlags.wait_for_ready | \
                    cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set)
        return self
class _ChannelCallState(object):
def __init__(self, channel):
self.lock = threading.Lock()
self.channel = channel
self.managed_calls = 0
self.threading = False
def reset_postfork_child(self):
self.managed_calls = 0
def _run_channel_spin_thread(state):
    """Start the daemon thread that drains the channel's call events."""

    def channel_spin():
        while True:
            cygrpc.block_if_fork_in_progress(state)
            event = state.channel.next_call_event()
            if event.completion_type == cygrpc.CompletionType.queue_timeout:
                continue
            call_completed = event.tag(event)
            if call_completed:
                with state.lock:
                    state.managed_calls -= 1
                    if state.managed_calls == 0:
                        # No managed calls remain; exit. A new spin thread is
                        # started when the next managed call is created.
                        return

    channel_spin_thread = cygrpc.ForkManagedThread(target=channel_spin)
    channel_spin_thread.setDaemon(True)
    channel_spin_thread.start()
def _channel_managed_call_management(state):
    """Return a factory for managed calls tracked by `state`."""

    # pylint: disable=too-many-arguments
    def create(flags, method, host, deadline, metadata, credentials,
               operationses, event_handler, context):
        """Creates a cygrpc.IntegratedCall.

        Args:
          flags: An integer bitfield of call flags.
          method: The RPC method.
          host: A host string for the created call.
          deadline: A float to be the deadline of the created call or None if
            the call is to have an infinite deadline.
          metadata: The metadata for the call or None.
          credentials: A cygrpc.CallCredentials or None.
          operationses: An iterable of iterables of cygrpc.Operations to be
            started on the call.
          event_handler: A behavior to call to handle the events resultant from
            the operations on the call.
          context: Context object for distributed tracing.
        Returns:
          A cygrpc.IntegratedCall with which to conduct an RPC.
        """
        operationses_and_tags = tuple((
            operations,
            event_handler,
        ) for operations in operationses)
        with state.lock:
            call = state.channel.integrated_call(flags, method, host, deadline,
                                                 metadata, credentials,
                                                 operationses_and_tags, context)
            if state.managed_calls == 0:
                # First managed call: start the spin thread that drains events.
                state.managed_calls = 1
                _run_channel_spin_thread(state)
            else:
                state.managed_calls += 1
            return call

    return create
class _ChannelConnectivityState(object):
def __init__(self, channel):
self.lock = threading.RLock()
self.channel = channel
self.polling = False
self.connectivity = None
self.try_to_connect = False
self.callbacks_and_connectivities = []
self.delivering = False
def reset_postfork_child(self):
self.polling = False
self.connectivity = None
self.try_to_connect = False
self.callbacks_and_connectivities = []
self.delivering = False
def _deliveries(state):
callbacks_needing_update = []
for callback_and_connectivity in state.callbacks_and_connectivities:
callback, callback_connectivity, = callback_and_connectivity
if callback_connectivity is not state.connectivity:
callbacks_needing_update.append(callback)
callback_and_connectivity[1] = state.connectivity
return callbacks_needing_update
def _deliver(state, initial_connectivity, initial_callbacks):
    """Invoke connectivity callbacks until no deliveries remain pending."""
    connectivity = initial_connectivity
    callbacks = initial_callbacks
    while True:
        for callback in callbacks:
            cygrpc.block_if_fork_in_progress(state)
            # Exceptions raised by user callbacks are logged, not propagated.
            callable_util.call_logging_exceptions(
                callback, _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE,
                connectivity)
        with state.lock:
            callbacks = _deliveries(state)
            if callbacks:
                connectivity = state.connectivity
            else:
                state.delivering = False
                return
def _spawn_delivery(state, callbacks):
    """Run _deliver on a fresh thread and mark `state` as delivering."""
    delivering_thread = cygrpc.ForkManagedThread(
        target=_deliver, args=(
            state,
            state.connectivity,
            callbacks,
        ))
    delivering_thread.start()
    state.delivering = True
# NOTE(https://github.com/grpc/grpc/issues/3064): We'd rather not poll.
def _poll_connectivity(state, channel, initial_try_to_connect):
    """Poll channel connectivity and fan changes out to subscribers."""
    try_to_connect = initial_try_to_connect
    connectivity = channel.check_connectivity_state(try_to_connect)
    with state.lock:
        state.connectivity = (
            _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
                connectivity])
        callbacks = tuple(callback
                          for callback, unused_but_known_to_be_none_connectivity
                          in state.callbacks_and_connectivities)
        for callback_and_connectivity in state.callbacks_and_connectivities:
            callback_and_connectivity[1] = state.connectivity
        if callbacks:
            _spawn_delivery(state, callbacks)
    while True:
        # Wake at least every 200ms so fork and unsubscription are noticed.
        event = channel.watch_connectivity_state(connectivity,
                                                 time.time() + 0.2)
        cygrpc.block_if_fork_in_progress(state)
        with state.lock:
            if not state.callbacks_and_connectivities and not state.try_to_connect:
                # Nobody subscribed and no connect requested: stop polling
                # until the next subscription starts a new thread.
                state.polling = False
                state.connectivity = None
                break
            try_to_connect = state.try_to_connect
            state.try_to_connect = False
        if event.success or try_to_connect:
            connectivity = channel.check_connectivity_state(try_to_connect)
            with state.lock:
                state.connectivity = (
                    _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
                        connectivity])
                if not state.delivering:
                    callbacks = _deliveries(state)
                    if callbacks:
                        _spawn_delivery(state, callbacks)
def _moot(state):
with state.lock:
del state.callbacks_and_connectivities[:]
def _subscribe(state, callback, try_to_connect):
    """Register a connectivity callback, starting polling/delivery as needed."""
    with state.lock:
        if not state.callbacks_and_connectivities and not state.polling:
            # First subscriber: start the connectivity polling thread.
            polling_thread = cygrpc.ForkManagedThread(
                target=_poll_connectivity,
                args=(state, state.channel, bool(try_to_connect)))
            polling_thread.setDaemon(True)
            polling_thread.start()
            state.polling = True
            state.callbacks_and_connectivities.append([callback, None])
        elif not state.delivering and state.connectivity is not None:
            # Connectivity already known and no delivery in flight: deliver
            # the current value to this subscriber immediately.
            _spawn_delivery(state, (callback,))
            state.try_to_connect |= bool(try_to_connect)
            state.callbacks_and_connectivities.append(
                [callback, state.connectivity])
        else:
            # A delivery is in flight or connectivity unknown; the polling
            # loop will pick this subscriber up.
            state.try_to_connect |= bool(try_to_connect)
            state.callbacks_and_connectivities.append([callback, None])
def _unsubscribe(state, callback):
with state.lock:
for index, (subscribed_callback, unused_connectivity) in enumerate(
state.callbacks_and_connectivities):
if callback == subscribed_callback:
state.callbacks_and_connectivities.pop(index)
break
def _options(options):
    """Append gRPC Python's user-agent channel arg to the user's options."""
    user_agent_option = (
        cygrpc.ChannelArgKey.primary_user_agent_string,
        _USER_AGENT,
    )
    return list(options) + [user_agent_option]
class Channel(grpc.Channel):
    """A cygrpc.Channel-backed implementation of grpc.Channel."""

    def __init__(self, target, options, credentials):
        """Constructor.

        Args:
          target: The target to which to connect.
          options: Configuration options for the channel.
          credentials: A cygrpc.ChannelCredentials or None.
        """
        self._channel = cygrpc.Channel(
            _common.encode(target), _options(options), credentials)
        self._call_state = _ChannelCallState(self._channel)
        self._connectivity_state = _ChannelConnectivityState(self._channel)
        # Register so fork handling can close this channel in the child.
        cygrpc.fork_register_channel(self)

    def subscribe(self, callback, try_to_connect=None):
        _subscribe(self._connectivity_state, callback, try_to_connect)

    def unsubscribe(self, callback):
        _unsubscribe(self._connectivity_state, callback)

    def unary_unary(self,
                    method,
                    request_serializer=None,
                    response_deserializer=None):
        return _UnaryUnaryMultiCallable(
            self._channel, _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)

    def unary_stream(self,
                     method,
                     request_serializer=None,
                     response_deserializer=None):
        return _UnaryStreamMultiCallable(
            self._channel, _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)

    def stream_unary(self,
                     method,
                     request_serializer=None,
                     response_deserializer=None):
        return _StreamUnaryMultiCallable(
            self._channel, _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)

    def stream_stream(self,
                      method,
                      request_serializer=None,
                      response_deserializer=None):
        return _StreamStreamMultiCallable(
            self._channel, _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)

    def _close(self):
        self._channel.close(cygrpc.StatusCode.cancelled, 'Channel closed!')
        # Drop connectivity subscriptions; no further deliveries will occur.
        _moot(self._connectivity_state)

    def _close_on_fork(self):
        self._channel.close_on_fork(cygrpc.StatusCode.cancelled,
                                    'Channel closed due to fork')
        _moot(self._connectivity_state)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._close()
        return False

    def close(self):
        self._close()

    def __del__(self):
        # TODO(https://github.com/grpc/grpc/issues/12531): Several releases
        # after 1.12 (1.16 or thereabouts?) add a "self._channel.close" call
        # here (or more likely, call self._close() here). We don't do this today
        # because many valid use cases today allow the channel to be deleted
        # immediately after stubs are created. After a sufficient period of time
        # has passed for all users to be trusted to hang out to their channels
        # for as long as they are in use and to close them after using them,
        # then deletion of this grpc._channel.Channel instance can be made to
        # effect closure of the underlying cygrpc.Channel instance.
        if cygrpc is not None:  # Globals may have already been collected.
            cygrpc.fork_unregister_channel(self)
        # This prevents the failed-at-initializing object removal from failing.
        # Though the __init__ failed, the removal will still trigger __del__.
        if _moot is not None and hasattr(self, '_connectivity_state'):
            _moot(self._connectivity_state)
|
dlapi.py | import json
import sys
import os
import logging
import asyncio
from threading import Thread
from time import sleep
from flask import Flask, abort, request, Response, send_from_directory
from flask_restful import Resource, Api
from flask_jsonpify import jsonify
from flask_cors import CORS
import youtube_dl as yt
import sys
import glob
import shutil
# Users allowed to request downloads.
USERS = [
    "omni",
    "omnibus"
]
HOST = "omnijunk.xyz"  # hostname/interface the API binds to
PORT = 66  # NOTE(review): ports < 1024 require root on unix -- confirm intended
MAINDIR = "/scripts/ytapi"  # directory downloaded mp4 files are served from
# flask base
app = Flask(__name__)
api = Api(app)
CORS(app)
def ytdl(vid):
    """Download the given YouTube video (bare id or URL) as mp4 into CWD.

    Blocks until youtube_dl finishes; sleeps one second afterwards so the
    file is settled on disk before callers glob for it.
    """
    ydl_opts = {
        'format': 'mp4',
    }
    urls = [_normalize_url(vid)]
    with yt.YoutubeDL(ydl_opts) as ydl:
        ydl.download(urls)
    print("done downloading.")
    sleep(1)


def _normalize_url(vid):
    """Return a canonical https://youtu.be/<id> URL for a raw id or URL."""
    if "https://youtu.be/" in vid:
        return vid
    if "youtube.com/watch?v=" in vid:
        # Strip the long-form watch URL down to the bare id, then rebuild the
        # short link. The original rebuilt from the *unstripped* value
        # (str(vid) instead of the stripped modid), producing a doubled URL,
        # and its elif pattern never matched "www." URLs at all.
        video_id = vid.replace("https://www.youtube.com/watch?v=", "") \
                      .replace("https://youtube.com/watch?v=", "")
        return "https://youtu.be/" + str(video_id)
    return "https://youtu.be/" + str(vid)
class base(Resource):
    """Single REST endpoint.

    GET with query args `id` (video), `u` (user) and `dl` (y/n) downloads
    the video for an authorized user, then either streams the file back
    (dl=y) or just acknowledges. POST is not supported.
    """

    def post(self):
        # POST is intentionally unsupported.
        return {"RESPONSE": 405}

    def get(self):
        vid = request.args['id']
        user = request.args['u']
        conf = request.args['dl']
        # Both a video id and a user are required. The original used `or`,
        # which accepted requests with only one of the two present.
        if vid != "" and user != "":
            if user in USERS:
                # The download is run on a thread but joined immediately,
                # so the request still blocks until it completes.
                thread = Thread(target=ytdl, args=(vid,))
                thread.start()
                thread.join()
                files = glob.glob("*" + str(vid) + ".mp4")
                print(files)
                if not files:
                    # Download produced no file; avoid the IndexError the
                    # original raised here.
                    return {"RESPONSE": 500}
                filename = files[0]
                if conf == "y":
                    return send_from_directory(MAINDIR, filename, as_attachment=True)
                else:
                    return {"RESPONSE": 200}
            else:
                return {"RESPONSE": 401}
        else:
            return {"RESPONSE": 400}
if __name__ == '__main__':
    import logging
    logging.basicConfig(filename='DB_Server.log', level=logging.ERROR)
    # Clear the terminal in a platform-appropriate way.
    if sys.platform == "linux":
        os.system('clear')
    elif sys.platform == "win32":
        os.system('cls')
    # Announce the port unconditionally: the original placed this print in
    # the `else` of the platform check, so it never ran on linux/win32.
    print("Now running on port " + str(PORT))
    api.add_resource(base, '/')  # send raw request data to database
    app.run(host=HOST, port=PORT, debug=False)
|
deadlock.py | import threading
import time
def main():
    """Spawn two threads that take the same pair of locks in opposite
    order -- the classic deadlock recipe this demo module exists to show."""
    first = threading.Lock()
    second = threading.Lock()
    workers = [
        threading.Thread(target=foo, args=(first, second)),
        threading.Thread(target=foo, args=(second, first)),
    ]
    for worker in workers:
        worker.start()
def foo(lock1, lock2):
    """Acquire *lock1*, linger long enough for the sibling thread to grab
    the other lock, then try for *lock2* (deadlock-prone ordering)."""
    with lock1:
        time.sleep(1)
        with lock2:
            pass
if __name__ == '__main__':
    # NOTE: running this module directly will usually hang -- that is the
    # point of the demo.
    main()
|
run.py | # coding=utf-8
# author: Lan_zhijiang
# description: 启动ALR_Cloud的脚本
import sys
import threading
import argparse
import json
import socket
from main_system.log import AlrCloudLog
from network_communicator.connection_manager.websocket_server import AlrCloudWebsocketServer
sys.path.append("./network_communicator/connection_manager/")
import http_server
class AlrCloudRunInit():
    """Bootstrap helper for ALR_Cloud: loads settings, records the host IP
    back into the settings file, and starts the HTTP/websocket servers."""

    def __init__(self):
        self.basic_setting = json.load(open("./setting/cloud_setting/basic_setting.json", encoding="utf-8"))
        self.log = AlrCloudLog()
        self.ws_server = AlrCloudWebsocketServer(self.log)

        # update host ip and persist it, then reload so basic_setting
        # reflects exactly what is on disk
        self.basic_setting["hostIp"] = self.get_ip()
        json.dump(self.basic_setting, open("./setting/cloud_setting/basic_setting.json", "w", encoding="utf-8"))
        self.basic_setting = json.load(open("./setting/cloud_setting/basic_setting.json", encoding="utf-8"))

    def get_ip(self):
        """Return the local IP used to reach the internet.

        Connecting a UDP socket sends no traffic; it only selects a route.
        The socket is created *outside* the try block -- in the original, a
        socket() failure made the finally clause raise NameError on `s`.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect(('8.8.8.8', 80))
            ip = s.getsockname()[0]
        finally:
            s.close()
        return ip

    def run(self):
        """
        Start ALR_Cloud: print the banner, then launch the HTTP and
        websocket servers on concurrent threads.
        :return:
        """
        print("""
        ##################################
         Hadream-AutoLearningRobotCloud
             Author: Lan_zhijiang
        ##################################
        Github:
         https://github.com/AutoLearningRobotHadream/ALR_Cloud
        E-mail:
         lanzhijiang@foxmail.com
        """)
        self.log.add_log(1, "### New ALR_Cloud Log ###", is_print=False, is_period=False)
        self.log.add_log(1, "RunInit: Start ALR_Cloud")

        # start HTTP + WEBSOCKET servers on concurrent threads
        self.log.add_log(1, "RunInit: Start http and websocket server...")
        http_server_thread = threading.Thread(target=http_server.run_flask, args=(self.log,))
        websocket_server_thread = threading.Thread(target=self.ws_server.run_websocket_server, args=())
        http_server_thread.start()
        websocket_server_thread.start()
acri = AlrCloudRunInit()
# Command-line mode selection; only "normal" is implemented so far.
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--mode", default="normal")
args = parser.parse_args()
if args.mode == "normal":
    acri.run()
else:
    print("It's still not finished...")
|
tasks_plugin.py | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import hashlib
import random
from threading import Thread
# ============= enthought library imports =======================
from envisage.extension_point import ExtensionPoint
from envisage.ui.tasks.action.exit_action import ExitAction
from envisage.ui.tasks.task_extension import TaskExtension
from envisage.ui.tasks.tasks_plugin import TasksPlugin
from pyface.action.group import Group
from pyface.confirmation_dialog import confirm
from pyface.constant import NO
from pyface.tasks.action.dock_pane_toggle_group import DockPaneToggleGroup
from pyface.tasks.action.schema import SMenu, SGroup
from pyface.tasks.action.schema_addition import SchemaAddition
from traits.api import List, Tuple, HasTraits, Password
from traitsui.api import View, Item
from pychron.envisage.resources import icon
from pychron.envisage.tasks.actions import ToggleFullWindowAction, EditInitializationAction, EditTaskExtensionsAction, \
GenericFindAction, GenericSaveAsAction, GenericSaveAction, ShareSettingsAction, ApplySettingsAction, CloseAction, \
CloseOthersAction, OpenAdditionalWindow, MinimizeAction, ResetLayoutAction, PositionAction, IssueAction, NoteAction, \
AboutAction, DocumentationAction, ChangeLogAction, RestartAction, StartupTestsAction
from pychron.envisage.tasks.base_plugin import BasePlugin
from pychron.envisage.tasks.base_task import WindowGroup
from pychron.envisage.tasks.preferences import GeneralPreferencesPane, BrowserPreferencesPane
from pychron.globals import globalv
from pychron.paths import paths
class PychronTasksPlugin(BasePlugin):
    """Core Pychron tasks plugin.

    Contributes preference panes, task extensions and help tips to the
    envisage application, and performs startup work (write file defaults,
    record username, show a random tip, start background processes).
    """
    id = 'pychron.tasks.plugin'
    name = 'Tasks'
    # Contributed to envisage extension points.
    preferences_panes = List(
        contributes_to='envisage.ui.tasks.preferences_panes')
    task_extensions = List(contributes_to='envisage.ui.tasks.task_extensions')
    # Extension points other plugins contribute to.
    actions = ExtensionPoint(List, id='pychron.actions')
    file_defaults = ExtensionPoint(List(Tuple), id='pychron.plugin.file_defaults')
    help_tips = ExtensionPoint(List, id='pychron.plugin.help_tips')
    available_task_extensions = ExtensionPoint(List, id='pychron.available_task_extensions')
    my_tips = List(contributes_to='pychron.plugin.help_tips')
    background_processes = ExtensionPoint(List, id='pychron.background_processes')
    def start(self):
        """Plugin startup hook called by the envisage framework."""
        self.info('Writing plugin file defaults')
        paths.write_file_defaults(self.file_defaults)
        self._set_user()
        self._random_tip()
        self._start_background_processes()
    def _start_background_processes(self):
        # Deliberately disabled: the early return makes everything below
        # unreachable until it is removed.
        self.info('starting background processes disabled')
        return
        for i, p in enumerate(self.background_processes):
            # Each process is either (name, callable) or a bare callable.
            if isinstance(p, tuple):
                name, func = p
            else:
                func = p
                name = 'Background{:02n}'.format(i)
            if hasattr(func, '__call__'):
                t = Thread(target=func, name=name)
                t.setDaemon(True)
                t.start()
    def _set_user(self):
        """Persist the global username into application preferences."""
        self.application.preferences.set('pychron.general.username', globalv.username)
        self.application.preferences.save()
    def _random_tip(self):
        # Show one random help tip at startup when both the global flag and
        # the user preference allow it.
        if globalv.random_tip_enabled and self.application.get_boolean_preference('pychron.general.show_random_tip'):
            from pychron.envisage.tasks.tip_view import TipView
            t = random.choice(self.help_tips)
            tv = TipView(text=t)
            tv.edit_traits()
    def _my_tips_default(self):
        # This plugin's own contributions to the help-tips extension point.
        return ["Use <b>Help>What's New</b> to view the official ChangeLog for the current version",
                'Turn Off Random Tip two ways:<br><b>1. Preferences>General></b> Uncheck "Random Tip".</b><br>'
                '<b>2.</b> Set the flag <i>random_tip_enabled</i> to False in the initialization file',
                'Use <b>Window/Reset Layout</b> to change the current window back to its default "Look"',
                'Submit bugs or issues to the developers manually using <b>Help/Add Request/Report Bug</b>',
                'The current version of Pychron contains over 156K lines of code in 1676 files',
                'If menu actions are missing first check that the desired "Plugin" is enabled using <b>Help/Edit '
                'Initialization</b>. If "Plugin" is enabled, check that the desired action is enabled using '
                '<b>Help/Edit UI</b>.']
    def _preferences_panes_default(self):
        return [GeneralPreferencesPane, BrowserPreferencesPane]
    def _task_extensions_default(self):
        actions = [SchemaAddition(factory=EditInitializationAction,
                                  id='edit_plugins',
                                  path='MenuBar/help.menu')]
        return [TaskExtension(actions=actions)]
# class mPreferencesAction(PreferencesAction):
# image = icon('preferences-desktop')
class ConfirmApplicationExit(HasTraits):
    """Modal password prompt shown to 'dev' users on application exit."""

    # Password entered by the user in the dialog.
    pwd = Password

    def validate(self):
        """Always accept.

        The original sha1-vs-globalv.dev_pwd check sat unreachable after an
        early `return True` (and was Python-2 style: sha1 on a str). It is
        kept below as a comment for whoever re-enables it.
        """
        return True
        # return hashlib.sha1(self.pwd.encode()).hexdigest() == globalv.dev_pwd

    def traits_view(self):
        v = View(Item('pwd', label='Password'),
                 buttons=['OK', 'Cancel'])
        return v
class mExitAction(ExitAction):
    """Exit action with confirmation.

    Dev users (when dev_confirm_exit is set) must pass the
    ConfirmApplicationExit dialog; everyone else may get a simple
    'confirm quit' prompt controlled by preferences.
    """
    def perform(self, event):
        app = event.task.window.application
        if globalv.username == 'dev' and globalv.dev_confirm_exit:
            window = event.task.window
            dialog = ConfirmApplicationExit()
            ui = dialog.edit_traits(parent=window.control, kind='livemodal')
            # Abort exit if the dialog was cancelled or validation failed.
            if not ui.result or not dialog.validate():
                return
        else:
            prefs = app.preferences
            if prefs.get('pychron.general.confirm_quit'):
                ret = confirm(None, 'Are you sure you want to Quit?')
                if ret == NO:
                    return
        app.exit(force=True)
class myTasksPlugin(TasksPlugin):
    """Customized envisage TasksPlugin.

    Builds the application's menu bar (File/Edit/View/Tools/Window/Help)
    via schema additions, and supplies a custom preferences dialog.
    """
    def _my_task_extensions_default(self):
        from pyface.tasks.action.api import SchemaAddition
        from envisage.ui.tasks.action.preferences_action import PreferencesGroup
        # Each nested factory builds one top-level menu.
        def edit_menu():
            return SMenu(GenericFindAction(), id='edit.menu', name='&Edit')
        def file_menu():
            return SMenu(SGroup(id='Open'),
                         SGroup(id='New'),
                         SGroup(
                             GenericSaveAsAction(),
                             GenericSaveAction(),
                             id='Save'),
                         mExitAction(),
                         PreferencesGroup(),
                         id='file.menu', name='&File')
        def tools_menu():
            return SMenu(ShareSettingsAction(),
                         ApplySettingsAction(),
                         id='tools.menu', name='Tools')
        def window_menu():
            return SMenu(WindowGroup(),
                         Group(CloseAction(),
                               CloseOthersAction(),
                               id='Close'),
                         OpenAdditionalWindow(),
                         Group(MinimizeAction(),
                               ResetLayoutAction(),
                               PositionAction()),
                         ToggleFullWindowAction(),
                         # SplitEditorAction(),
                         id='window.menu',
                         name='Window')
        def help_menu():
            return SMenu(IssueAction(),
                         NoteAction(),
                         AboutAction(),
                         DocumentationAction(),
                         ChangeLogAction(),
                         RestartAction(),
                         # KeyBindingsAction(),
                         # SwitchUserAction(),
                         StartupTestsAction(),
                         EditTaskExtensionsAction(),
                         id='help.menu',
                         name='&Help')
        def view_menu():
            return SMenu(id='view.menu', name='&View')
        # Menu ordering is controlled by absolute_position and
        # before/after anchors -- keep these consistent when editing.
        actions = [
            SchemaAddition(id='DockPaneToggleGroup',
                           factory=DockPaneToggleGroup,
                           path='MenuBar/view.menu'),
            SchemaAddition(path='MenuBar',
                           factory=file_menu,
                           absolute_position='first'),
            SchemaAddition(path='MenuBar',
                           after='file.menu',
                           before='view.menu',
                           factory=edit_menu),
            SchemaAddition(path='MenuBar',
                           after='edit.menu',
                           factory=view_menu),
            SchemaAddition(path='MenuBar',
                           factory=tools_menu,
                           absolute_position='last'),
            SchemaAddition(path='MenuBar',
                           factory=window_menu,
                           absolute_position='last'),
            SchemaAddition(path='MenuBar',
                           factory=help_menu,
                           absolute_position='last'),
        ]
        return [TaskExtension(actions=actions)]
    def _create_preferences_dialog_service(self):
        """Build the preferences dialog, instantiating each contributed
        preference pane bound to the dialog."""
        from .preferences_dialog import PreferencesDialog
        dialog = PreferencesDialog(application=self.application)
        dialog.trait_set(categories=self.preferences_categories,
                         panes=[factory(dialog=dialog)
                                for factory in self.preferences_panes])
        return dialog
# ============= EOF =============================================
|
notify.py | #!/usr/bin/env python3
# _*_ coding:utf-8 _*_
import base64
import hashlib
import hmac
import json
import os
import re
import threading
import time
import urllib.parse
import requests
# 原先的 print 函数和主线程的锁
# Keep a handle to the builtin print and guard it with a module-level lock.
_print = print
mutex = threading.Lock()


def print(text, *args, **kw):
    """Thread-safe drop-in replacement for the builtin ``print``.

    Serializes output so messages from concurrent notification threads do
    not interleave.
    """
    mutex.acquire()
    try:
        _print(text, *args, **kw)
    finally:
        mutex.release()
# Notification services: each key enables one push channel when set.
# fmt: off
push_config = {
    'HITOKOTO': False,  # enable "hitokoto" (append a random sentence)
    'BARK_PUSH': '',  # bark server address or device key, e.g. https://api.day.app/DxHcxxxxxRxxxxxxcm/
    'BARK_ARCHIVE': '',  # whether bark pushes are archived
    'BARK_GROUP': '',  # bark push group
    'BARK_SOUND': '',  # bark push sound
    'BARK_ICON': '',  # bark push icon
    'CONSOLE': True,  # also print to the console
    'DD_BOT_SECRET': '',  # DingTalk bot DD_BOT_SECRET
    'DD_BOT_TOKEN': '',  # DingTalk bot DD_BOT_TOKEN
    'FSKEY': '',  # Feishu (Lark) bot FSKEY
    'GOBOT_URL': '',  # go-cqhttp
    # push to a personal QQ: http://127.0.0.1/send_private_msg
    # push to a group:       http://127.0.0.1/send_group_msg
    'GOBOT_QQ': '',  # go-cqhttp target group or user
    # when GOBOT_URL ends with /send_private_msg, use user_id=<personal QQ>
    # when it ends with /send_group_msg, use group_id=<QQ group>
    'GOBOT_TOKEN': '',  # go-cqhttp access_token
    'GOTIFY_URL': '',  # gotify address, e.g. https://push.example.de:8080
    'GOTIFY_TOKEN': '',  # gotify application message token
    'GOTIFY_PRIORITY': 0,  # message priority, defaults to 0
    'IGOT_PUSH_KEY': '',  # iGot aggregated push IGOT_PUSH_KEY
    'PUSH_KEY': '',  # ServerChan PUSH_KEY, legacy and Turbo keys both supported
    'PUSH_PLUS_TOKEN': '',  # push+ WeChat user token
    'PUSH_PLUS_USER': '',  # push+ WeChat group code
    'QMSG_KEY': '',  # Qmsg QMSG_KEY
    'QMSG_TYPE': '',  # Qmsg QMSG_TYPE
    'QYWX_AM': '',  # WeCom (enterprise WeChat) application
    'QYWX_KEY': '',  # WeCom bot webhook key
    'TG_BOT_TOKEN': '',  # Telegram bot TG_BOT_TOKEN, e.g. 1407203283:AAG9rt-6RDaaX0HBLZQq0laNOh898iFYaRQ
    'TG_USER_ID': '',  # Telegram TG_USER_ID, e.g. 1434078534
    'TG_API_HOST': '',  # Telegram proxy API host
    'TG_PROXY_AUTH': '',  # Telegram proxy auth credentials
    'TG_PROXY_HOST': '',  # Telegram proxy TG_PROXY_HOST
    'TG_PROXY_PORT': '',  # Telegram proxy TG_PROXY_PORT
}
notify_function = []
# fmt: on

# Panel variables / GitHub Actions environment variables take precedence
# over the defaults defined above.
for k in push_config:
    v = os.getenv(k)
    if v:
        push_config[k] = v
def bark(title: str, content: str) -> None:
    """
    Push a message via Bark.

    BARK_PUSH may be a full server URL or a bare device key (then the
    public api.day.app server is used). Optional BARK_* settings are
    appended as query parameters.
    """
    if not push_config.get("BARK_PUSH"):
        print("bark 服务的 BARK_PUSH 未设置!!\n取消推送")
        return
    print("bark 服务启动")
    if push_config.get("BARK_PUSH").startswith("http"):
        url = f'{push_config.get("BARK_PUSH")}/{urllib.parse.quote_plus(title)}/{urllib.parse.quote_plus(content)}'
    else:
        url = f'https://api.day.app/{push_config.get("BARK_PUSH")}/{urllib.parse.quote_plus(title)}/{urllib.parse.quote_plus(content)}'
    # Map our config keys onto the Bark API query-parameter names.
    bark_params = {
        "BARK_ARCHIVE": "isArchive",
        "BARK_GROUP": "group",
        "BARK_SOUND": "sound",
        "BARK_ICON": "icon",
    }
    params = ""
    # Only non-empty, recognized BARK_* settings become query params.
    for pair in filter(
        lambda pairs: pairs[0].startswith("BARK_")
        and pairs[0] != "BARK_PUSH"
        and pairs[1]
        and bark_params.get(pairs[0]),
        push_config.items(),
    ):
        params += f"{bark_params.get(pair[0])}={pair[1]}&"
    if params:
        url = url + "?" + params.rstrip("&")
    response = requests.get(url).json()
    if response["code"] == 200:
        print("bark 推送成功!")
    else:
        print("bark 推送失败!")
def console(title: str, content: str) -> None:
    """Push the message to the console (stdout)."""
    print("{}\n\n{}".format(title, content))
def dingding_bot(title: str, content: str) -> None:
    """
    Push a message via a DingTalk group robot.

    Signs the request as required by the DingTalk webhook API:
    sign = urlencode(base64(hmac_sha256(secret, "<timestamp>\n<secret>"))).
    """
    if not push_config.get("DD_BOT_SECRET") or not push_config.get("DD_BOT_TOKEN"):
        print("钉钉机器人 服务的 DD_BOT_SECRET 或者 DD_BOT_TOKEN 未设置!!\n取消推送")
        return
    print("钉钉机器人 服务启动")
    timestamp = str(round(time.time() * 1000))
    secret_enc = push_config.get("DD_BOT_SECRET").encode("utf-8")
    string_to_sign = "{}\n{}".format(timestamp, push_config.get("DD_BOT_SECRET"))
    string_to_sign_enc = string_to_sign.encode("utf-8")
    hmac_code = hmac.new(
        secret_enc, string_to_sign_enc, digestmod=hashlib.sha256
    ).digest()
    sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))
    # The query string had been mojibake-damaged ("×tamp"); the DingTalk
    # API requires the literal "&timestamp=" parameter.
    url = f'https://oapi.dingtalk.com/robot/send?access_token={push_config.get("DD_BOT_TOKEN")}&timestamp={timestamp}&sign={sign}'
    headers = {"Content-Type": "application/json;charset=utf-8"}
    data = {"msgtype": "text", "text": {"content": f"{title}\n\n{content}"}}
    response = requests.post(
        url=url, data=json.dumps(data), headers=headers, timeout=15
    ).json()
    if not response["errcode"]:
        print("钉钉机器人 推送成功!")
    else:
        print("钉钉机器人 推送失败!")
def feishu_bot(title: str, content: str) -> None:
    """
    Push a message via a Feishu (Lark) custom bot webhook.
    """
    if not push_config.get("FSKEY"):
        print("飞书 服务的 FSKEY 未设置!!\n取消推送")
        return
    print("飞书 服务启动")
    url = f'https://open.feishu.cn/open-apis/bot/v2/hook/{push_config.get("FSKEY")}'
    data = {"msg_type": "text", "content": {"text": f"{title}\n\n{content}"}}
    response = requests.post(url, data=json.dumps(data)).json()
    # Feishu reports success with StatusCode == 0.
    if response.get("StatusCode") == 0:
        print("飞书 推送成功!")
    else:
        print("飞书 推送失败!错误信息如下:\n", response)
def go_cqhttp(title: str, content: str) -> None:
    """
    Push a message via go-cqhttp.

    GOBOT_QQ must already contain the "user_id=..." or "group_id=..."
    query fragment matching the endpoint in GOBOT_URL.
    """
    if not push_config.get("GOBOT_URL") or not push_config.get("GOBOT_QQ"):
        print("go-cqhttp 服务的 GOBOT_URL 或 GOBOT_QQ 未设置!!\n取消推送")
        return
    print("go-cqhttp 服务启动")
    url = f'{push_config.get("GOBOT_URL")}?access_token={push_config.get("GOBOT_TOKEN")}&{push_config.get("GOBOT_QQ")}&message=标题:{title}\n内容:{content}'
    response = requests.get(url).json()
    if response["status"] == "ok":
        print("go-cqhttp 推送成功!")
    else:
        print("go-cqhttp 推送失败!")
def gotify(title: str, content: str) -> None:
    """
    Push a message via a Gotify server.
    """
    if not push_config.get("GOTIFY_URL") or not push_config.get("GOTIFY_TOKEN"):
        print("gotify 服务的 GOTIFY_URL 或 GOTIFY_TOKEN 未设置!!\n取消推送")
        return
    print("gotify 服务启动")
    url = f'{push_config.get("GOTIFY_URL")}/message?token={push_config.get("GOTIFY_TOKEN")}'
    data = {
        "title": title,
        "message": content,
        "priority": push_config.get("GOTIFY_PRIORITY"),
    }
    response = requests.post(url, data=data).json()
    # A successful POST returns the created message, which carries an id.
    if response.get("id"):
        print("gotify 推送成功!")
    else:
        print("gotify 推送失败!")
def iGot(title: str, content: str) -> None:
    """
    Push a message via the iGot aggregated push service.
    """
    if not push_config.get("IGOT_PUSH_KEY"):
        print("iGot 服务的 IGOT_PUSH_KEY 未设置!!\n取消推送")
        return
    print("iGot 服务启动")
    url = f'https://push.hellyw.com/{push_config.get("IGOT_PUSH_KEY")}'
    data = {"title": title, "content": content}
    headers = {"Content-Type": "application/x-www-form-urlencoded"}
    response = requests.post(url, data=data, headers=headers).json()
    # ret == 0 indicates success per the iGot API.
    if response["ret"] == 0:
        print("iGot 推送成功!")
    else:
        print(f'iGot 推送失败!{response["errMsg"]}')
def serverJ(title: str, content: str) -> None:
    """
    Push a message via ServerChan (serverJ).

    Supports both Turbo keys (containing "SCT", sctapi.ftqq.com) and the
    legacy keys (sc.ftqq.com).
    """
    if not push_config.get("PUSH_KEY"):
        print("serverJ 服务的 PUSH_KEY 未设置!!\n取消推送")
        return
    print("serverJ 服务启动")
    data = {"text": title, "desp": content.replace("\n", "\n\n")}
    # str.index() raises ValueError when the substring is absent (it never
    # returns -1), so the original `.index("SCT") != -1` crashed on legacy
    # keys. A containment test is the correct check.
    if "SCT" in push_config.get("PUSH_KEY"):
        url = f'https://sctapi.ftqq.com/{push_config.get("PUSH_KEY")}.send'
    else:
        # The legacy URL also contained a stray "$" (a JS-template leftover).
        url = f'https://sc.ftqq.com/{push_config.get("PUSH_KEY")}.send'
    response = requests.post(url, data=data).json()
    if response.get("errno") == 0 or response.get("code") == 0:
        print("serverJ 推送成功!")
    else:
        print(f'serverJ 推送失败!错误码:{response["message"]}')
def pushplus_bot(title: str, content: str) -> None:
    """
    Push a message via push+ (pushplus).

    Tries the current endpoint first and falls back to the old hxtrip
    domain when the first request is rejected.
    """
    if not push_config.get("PUSH_PLUS_TOKEN"):
        print("PUSHPLUS 服务的 PUSH_PLUS_TOKEN 未设置!!\n取消推送")
        return
    print("PUSHPLUS 服务启动")
    url = "http://www.pushplus.plus/send"
    data = {
        "token": push_config.get("PUSH_PLUS_TOKEN"),
        "title": title,
        "content": content,
        "topic": push_config.get("PUSH_PLUS_USER"),  # optional group code
    }
    body = json.dumps(data).encode(encoding="utf-8")
    headers = {"Content-Type": "application/json"}
    response = requests.post(url=url, data=body, headers=headers).json()
    if response["code"] == 200:
        print("PUSHPLUS 推送成功!")
    else:
        # Fallback: retry against the legacy hxtrip endpoint.
        url_old = "http://pushplus.hxtrip.com/send"
        headers["Accept"] = "application/json"
        response = requests.post(url=url_old, data=body, headers=headers).json()
        if response["code"] == 200:
            print("PUSHPLUS(hxtrip) 推送成功!")
        else:
            print("PUSHPLUS 推送失败!")
def qmsg_bot(title: str, content: str) -> None:
    """
    Push a message via Qmsg.
    """
    if not push_config.get("QMSG_KEY") or not push_config.get("QMSG_TYPE"):
        print("qmsg 的 QMSG_KEY 或者 QMSG_TYPE 未设置!!\n取消推送")
        return
    print("qmsg 服务启动")
    url = f'https://qmsg.zendee.cn/{push_config.get("QMSG_TYPE")}/{push_config.get("QMSG_KEY")}'
    # Qmsg treats "----" specially, so it is collapsed before sending.
    payload = {"msg": f'{title}\n\n{content.replace("----", "-")}'.encode("utf-8")}
    response = requests.post(url=url, params=payload).json()
    if response["code"] == 0:
        print("qmsg 推送成功!")
    else:
        print(f'qmsg 推送失败!{response["reason"]}')
def wecom_app(title: str, content: str) -> None:
    """
    Push a message through a WeCom (enterprise WeChat) application.

    QYWX_AM is a comma-separated string:
    corpid,corpsecret,touser,agentid[,media_id]
    """
    if not push_config.get("QYWX_AM"):
        print("QYWX_AM 未设置!!\n取消推送")
        return
    QYWX_AM_AY = re.split(",", push_config.get("QYWX_AM"))
    # QYWX_AM must have 4 or 5 fields. The original chained comparison
    # `4 < len(...) > 5` only rejected lists longer than 5, so too-short
    # values slipped through and crashed at the indexing below.
    if not 4 <= len(QYWX_AM_AY) <= 5:
        print("QYWX_AM 设置错误!!\n取消推送")
        return
    print("企业微信 APP 服务启动")
    corpid = QYWX_AM_AY[0]
    corpsecret = QYWX_AM_AY[1]
    touser = QYWX_AM_AY[2]
    agentid = QYWX_AM_AY[3]
    # media_id is optional; without it messages go out as plain text.
    try:
        media_id = QYWX_AM_AY[4]
    except IndexError:
        media_id = ""
    wx = WeCom(corpid, corpsecret, agentid)
    if not media_id:
        message = title + "\n\n" + content
        response = wx.send_text(message, touser)
    else:
        response = wx.send_mpnews(title, content, media_id, touser)
    if response == "ok":
        print("企业微信推送成功!")
    else:
        print("企业微信推送失败!错误信息如下:\n", response)
class WeCom:
    """Minimal WeCom (enterprise WeChat) API client used by wecom_app."""
    def __init__(self, corpid, corpsecret, agentid):
        # Credentials for the WeCom application.
        self.CORPID = corpid
        self.CORPSECRET = corpsecret
        self.AGENTID = agentid
    def get_access_token(self):
        """Fetch a fresh access token (one HTTP call per send; not cached)."""
        url = "https://qyapi.weixin.qq.com/cgi-bin/gettoken"
        values = {
            "corpid": self.CORPID,
            "corpsecret": self.CORPSECRET,
        }
        req = requests.post(url, params=values)
        data = json.loads(req.text)
        return data["access_token"]
    def send_text(self, message, touser="@all"):
        """Send a plain-text message; returns the API's errmsg string."""
        send_url = (
            "https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token="
            + self.get_access_token()
        )
        send_values = {
            "touser": touser,
            "msgtype": "text",
            "agentid": self.AGENTID,
            "text": {"content": message},
            "safe": "0",
        }
        send_msges = bytes(json.dumps(send_values), "utf-8")
        respone = requests.post(send_url, send_msges)
        respone = respone.json()
        return respone["errmsg"]
    def send_mpnews(self, title, message, media_id, touser="@all"):
        """Send a rich "mpnews" article with a thumbnail; returns errmsg."""
        send_url = (
            "https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token="
            + self.get_access_token()
        )
        send_values = {
            "touser": touser,
            "msgtype": "mpnews",
            "agentid": self.AGENTID,
            "mpnews": {
                "articles": [
                    {
                        "title": title,
                        "thumb_media_id": media_id,
                        "author": "Author",
                        "content_source_url": "",
                        "content": message.replace("\n", "<br/>"),
                        "digest": message,
                    }
                ]
            },
        }
        send_msges = bytes(json.dumps(send_values), "utf-8")
        respone = requests.post(send_url, send_msges)
        respone = respone.json()
        return respone["errmsg"]
def wecom_bot(title: str, content: str) -> None:
    """
    Push a message via a WeCom (enterprise WeChat) group bot webhook.
    """
    if not push_config.get("QYWX_KEY"):
        print("企业微信机器人 服务的 QYWX_KEY 未设置!!\n取消推送")
        return
    print("企业微信机器人服务启动")
    url = f"https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key={push_config.get('QYWX_KEY')}"
    headers = {"Content-Type": "application/json;charset=utf-8"}
    data = {"msgtype": "text", "text": {"content": f"{title}\n\n{content}"}}
    response = requests.post(
        url=url, data=json.dumps(data), headers=headers, timeout=15
    ).json()
    if response["errcode"] == 0:
        print("企业微信机器人推送成功!")
    else:
        print("企业微信机器人推送失败!")
def telegram_bot(title: str, content: str) -> None:
    """
    Push a message via a Telegram bot.

    Supports an alternative API host (TG_API_HOST) and an optional HTTP
    proxy (TG_PROXY_HOST/PORT with optional TG_PROXY_AUTH credentials).
    """
    if not push_config.get("TG_BOT_TOKEN") or not push_config.get("TG_USER_ID"):
        print("tg 服务的 bot_token 或者 user_id 未设置!!\n取消推送")
        return
    print("tg 服务启动")
    if push_config.get("TG_API_HOST"):
        url = f"https://{push_config.get('TG_API_HOST')}/bot{push_config.get('TG_BOT_TOKEN')}/sendMessage"
    else:
        url = (
            f"https://api.telegram.org/bot{push_config.get('TG_BOT_TOKEN')}/sendMessage"
        )
    headers = {"Content-Type": "application/x-www-form-urlencoded"}
    payload = {
        "chat_id": str(push_config.get("TG_USER_ID")),
        "text": f"{title}\n\n{content}",
        "disable_web_page_preview": "true",
    }
    proxies = None
    if push_config.get("TG_PROXY_HOST") and push_config.get("TG_PROXY_PORT"):
        # Embed the auth credentials into the host once (user:pass@host).
        if push_config.get("TG_PROXY_AUTH") is not None and "@" not in push_config.get(
            "TG_PROXY_HOST"
        ):
            push_config["TG_PROXY_HOST"] = (
                push_config.get("TG_PROXY_AUTH")
                + "@"
                + push_config.get("TG_PROXY_HOST")
            )
        proxyStr = "http://{}:{}".format(
            push_config.get("TG_PROXY_HOST"), push_config.get("TG_PROXY_PORT")
        )
        proxies = {"http": proxyStr, "https": proxyStr}
    response = requests.post(
        url=url, headers=headers, params=payload, proxies=proxies
    ).json()
    if response["ok"]:
        print("tg 推送成功!")
    else:
        print("tg 推送失败!")
def one() -> str:
    """
    Fetch one random "hitokoto" sentence from the public API.
    :return: the sentence followed by its attribution.
    """
    url = "https://v1.hitokoto.cn/"
    res = requests.get(url).json()
    return res["hitokoto"] + " ----" + res["from"]
# Register every push channel whose required configuration is present.
if push_config.get("BARK_PUSH"):
    notify_function.append(bark)
if push_config.get("CONSOLE"):
    notify_function.append(console)
if push_config.get("DD_BOT_TOKEN") and push_config.get("DD_BOT_SECRET"):
    notify_function.append(dingding_bot)
if push_config.get("FSKEY"):
    notify_function.append(feishu_bot)
if push_config.get("GOBOT_URL") and push_config.get("GOBOT_QQ"):
    notify_function.append(go_cqhttp)
if push_config.get("GOTIFY_URL") and push_config.get("GOTIFY_TOKEN"):
    notify_function.append(gotify)
if push_config.get("IGOT_PUSH_KEY"):
    notify_function.append(iGot)
if push_config.get("PUSH_KEY"):
    notify_function.append(serverJ)
if push_config.get("PUSH_PLUS_TOKEN"):
    notify_function.append(pushplus_bot)
if push_config.get("QMSG_KEY") and push_config.get("QMSG_TYPE"):
    notify_function.append(qmsg_bot)
if push_config.get("QYWX_AM"):
    notify_function.append(wecom_app)
if push_config.get("QYWX_KEY"):
    notify_function.append(wecom_bot)
if push_config.get("TG_BOT_TOKEN") and push_config.get("TG_USER_ID"):
    notify_function.append(telegram_bot)
def send(title: str, content: str) -> None:
    """Dispatch *content* to every configured notification channel on
    parallel threads and wait for all of them to finish."""
    if not content:
        print(f"{title} 推送内容为空!")
        return
    # Append a random "hitokoto" sentence only when the feature is on; the
    # original unconditionally appended "\n\n" even when it was disabled.
    if push_config.get("HITOKOTO"):
        content += "\n\n" + one()
    ts = [
        threading.Thread(target=mode, args=(title, content), name=mode.__name__)
        for mode in notify_function
    ]
    [t.start() for t in ts]
    [t.join() for t in ts]
def main():
    # Smoke test: push a fixed title/content through every channel.
    send("title", "content")
if __name__ == "__main__":
    main()
|
middleware.py | import socket
import os
import multiprocessing as mp
import time
import json
print(os.getpid())
def listToString(s):
    """Join the elements of *s* into a single space-separated string."""
    # Idiomatic join; the original bound " " to a needless temporary.
    return " ".join(s)
def node_status_check(node_id):
    """Ping node *node_id* on TCP port 10000+node_id and record liveness and
    age in the shared node_status / node_age manager lists.

    NOTE(review): Python 2 syntax ("except socket.error, e") -- this module
    only runs under Python 2.
    """
    # Nodes 4 and above live on a remote host; 1-3 are local.
    if node_id>=4:
        IP = '3.16.164.181'
    else:
        IP= '0.0.0.0'
    port = (10000) + node_id
    print("Status check initiated...................................")
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.settimeout(3.0)
    try:
        client.connect((IP, port))
    except socket.error, e:
        #print("Socket error: ", e)
        # Unreachable node: mark it dead with age 0.
        node_status[node_id-1] = 0
        node_age[node_id-1] = 0
        return
    status_check_message = '{"activity":"node_status_check","checked_node_id":'+str(node_id)+'}'
    client.send(status_check_message)
    from_server = client.recv(4096)
    rcvd_mssg = json.loads(from_server)
    node_status[node_id-1] = 1
    node_age[node_id-1] = rcvd_mssg["age"]
    client.close()
    print('My port is :', port, 'the age of the pinged node is: ', node_age[node_id-1], ' seconds')
    #print(node_age)
#print(node_age)
def group_update(node_id, elected_leader_id):
    """Send the elected leader id and the current node-status vector to
    node *node_id* (Python 2 only -- see except syntax)."""
    if node_id>=4:
        IP = '3.16.164.181'
    else:
        IP= '0.0.0.0'
    print("Group update initiated...................................")
    port = (10000) + node_id
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.settimeout(3.0)
    try:
        client.connect((IP, port))
    except socket.error, e:
        #print("Socket error: ", e)
        # Node unreachable: mark dead and skip the update.
        node_status[node_id-1] = 0
        node_age[node_id-1] = 0
        return
    # Serialize the shared status list as a space-separated string.
    status_mssg = ' '.join([str(elem) for elem in node_status])
    #print(status_mssg)
    send_update_message = '{"activity":"group_update","leader_node_id":'+str(elected_leader_id)+',"node_status":"'+status_mssg+'"}'
    client.send(send_update_message)
    from_server = client.recv(4096)
    print(from_server)
    #node_status[node_id-1] = True
    #node_age[node_id-1] = rcvd_mssg["age"]
    client.close()
    #print('My port is :', port, 'I got the message: ', node_age[node_id-1])
def write_request(elected_leader_id):
    """Drain the shared request_array by forwarding each pending write to
    the leader node; returns the concatenated per-write responses."""
    global request_array
    response_array = []
    print('Response array: ', response_array)
    if elected_leader_id>=4:
        IP = '3.16.164.181'
    else:
        IP= '0.0.0.0'
    print("Write request initiated...................................")
    port = (10000) + elected_leader_id
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.settimeout(3.0)
    try:
        client.connect((IP, port))
    except socket.error, e:
        print("Socket error: ", e)
        return "Could not connect with leader"
    # One round trip per queued write; successfully sent writes are popped.
    while len(request_array)>0:
        send_write_message = '{"activity":"write_request","value_to_be_written":"'+ request_array[0]+'"}'
        client.send(send_write_message)
        from_server = client.recv(4096)
        rcvd_mssg = from_server
        print('Received write message udpate statue from Leader node: ', rcvd_mssg)
        response_array.append(str(rcvd_mssg))
        request_array.pop(0)
    #node_status[node_id-1] = True
    #node_age[node_id-1] = rcvd_mssg["age"]
    client.close()
    return_response_array = listToString(response_array)
    return return_response_array
    #print('My port is :', port, 'I got the message: ', node_age[node_id-1])
def read_request(elected_leader_id):
    """Ask the leader node for the current value; returns the raw reply
    bytes or an error string when the leader is unreachable."""
    if elected_leader_id>=4:
        IP = '3.16.164.181'
    else:
        IP= '0.0.0.0'
    print("Read request initiated...................................")
    port = (10000) + elected_leader_id
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.settimeout(3.0)
    try:
        client.connect((IP, port))
    except socket.error, e:
        print("Socket error: ", e)
        return "Could not connect with leader"
    send_write_message = '{"activity":"read_request"}'
    client.send(send_write_message)
    from_server = client.recv(4096)
    rcvd_mssg = from_server
    print('Read result: ', rcvd_mssg)
    #node_status[node_id-1] = True
    #node_age[node_id-1] = rcvd_mssg["age"]
    client.close()
    return rcvd_mssg
    #print('My port is :', port, 'I got the message: ', node_age[node_id-1])
def listen_to_new_node(node_id):
    """Server loop on port 50000+node_id: when a new node pings, reply with
    the current leader id and node-status vector. Runs forever."""
    print("Listening to any new nodes.............. ")
    port = (50000) + node_id
    serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    serv.bind(('0.0.0.0', port))
    print('Listening for new node at port : ', port)
    serv.listen(5)
    while True:
        conn, addr = serv.accept()
        from_new_node = ''
        # Read until the peer closes its sending side.
        while True:
            data = conn.recv(4096)
            if not data: break
            from_new_node += data
        print('Received message from new node')
        if from_new_node=='ping_new_node':
            status_mssg = ' '.join([str(elem) for elem in node_status])
            #print(status_mssg)
            send_update_message = '{"activity":"group_update","leader_node_id":'+str(leader_node_id[0])+',"node_status":"'+status_mssg+'"}'
            conn.send(send_update_message)
        conn.close()
        print('Connection closed at listeners end (listener to leader)....................................')
def listen_to_user(user_id):
    """Server loop on port 8000+user_id: accept user read/write requests and
    proxy them to the current leader. Runs forever."""
    print("Listening to users.............. ")
    port = (8000) + user_id
    serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    serv.bind(('0.0.0.0', port))
    print('Listening for new node at port : ', port)
    serv.listen(5)
    while True:
        conn, addr = serv.accept()
        from_user = ''
        # Read until the peer closes its sending side.
        while True:
            data = conn.recv(4096)
            if not data: break
            from_user += data
        rcvd_mssg = from_user
        print('Received message from user')
        rcvd_mssg = json.loads(from_user)
        if rcvd_mssg["activity"]=="user_write_request":
            print('Write request received at Group_man from user...........................................')
            print('Current leader is ........', leader_node_id[0])
            # Queue the value, then flush the queue to the leader.
            request_array.append(rcvd_mssg["value_to_be_written"])
            print(request_array)
            #consistency_state_of_write = write_request(leader_node_id[0], rcvd_mssg["value_to_be_written"])
            consistency_state_of_write = write_request(leader_node_id[0])
            conn.send(consistency_state_of_write)
        if rcvd_mssg["activity"]=="user_read_request":
            print('Write request received at Group_man from user...........................................')
            print('Current leader is ........', leader_node_id[0])
            read_result = read_request(leader_node_id[0])
            conn.send(read_result)
        conn.shutdown(1)
        conn.close()
        print('Connection closed at group_man with the user...................................')
# Cross-process shared state (multiprocessing manager proxies).
manager = mp.Manager()
node_age = manager.list([0]*5)       # reported uptime of each node
node_status = manager.list([0]*5)    # 1 = alive, 0 = dead
leader_node_id = manager.list([0])   # single-element list: current leader
request_array = manager.list()       # queued user write values
#node_age = [0,0,0]
status_check_procs = []
group_update_procs = []
listen_new_node_procs = []
listen_user_procs = []
nodes = [1,2,3,4,5]
users = [1,2]
# One long-lived listener process per user port.
for i in users:
    #print i
    p = mp.Process(target=listen_to_user, args=(i,))
    listen_user_procs.append(p)
    p.start()
    #p.join()
    #time.sleep(2)
# One long-lived listener process per node port.
for i in nodes:
    #print i
    p = mp.Process(target=listen_to_new_node, args=(i,))
    listen_new_node_procs.append(p)
    p.start()
    #p.join()
    #time.sleep(2)
try:
    # Main loop: periodically health-check all nodes, elect the oldest
    # alive node as leader, and broadcast the result.
    while True:
        #print("Staring new loop...............................................................................")
        time.sleep(20)
        for i in nodes:
            #print i
            p = mp.Process(target=node_status_check, args=(i,))
            status_check_procs.append(p)
            p.start()
            p.join()
            #time.sleep(2)
        time.sleep(5)
        #leader election
        print(node_status)
        print(node_age)
        # Leader = the alive node with the greatest age; 0 means no leader.
        if sum(node_age)==0:
            leader_node_id[0] = 0
        else:
            leader_node_id[0] = node_age.index(max(node_age))+1
        print('The leader elected is Node ', leader_node_id[0])
        for j in nodes:
            #print i
            #if node_status[j]==1:
            p = mp.Process(target=group_update, args=(j,leader_node_id[0]))
            group_update_procs.append(p)
            p.start()
            p.join()
        #print("Ending new loop.....xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
except KeyboardInterrupt:
    pass
#############
#7000:10004,10006:65000/tcp |
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal, QPoint
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget, QToolButton)
import electrum_grlc as electrum
from electrum_grlc.gui import messages
from electrum_grlc import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest, lnutil)
from electrum_grlc.bitcoin import COIN, is_address
from electrum_grlc.plugin import run_hook, BasePlugin
from electrum_grlc.i18n import _
from electrum_grlc.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates,
AddTransactionException, BITCOIN_BIP21_URI_SCHEME,
InvoiceError, parse_max_spend)
from electrum_grlc.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum_grlc.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum_grlc.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum_grlc.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx, CannotCPFP)
from electrum_grlc.version import ELECTRUM_VERSION
from electrum_grlc.network import (Network, TxBroadcastError, BestEffortRequestFailed,
UntrustedServerReturnedError, NetworkException)
from electrum_grlc.exchange_rate import FxThread
from electrum_grlc.simple_config import SimpleConfig
from electrum_grlc.logging import Logger
from electrum_grlc.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum_grlc.lnaddr import lndecode, LnInvoiceException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit, SizedFreezableLineEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
getOpenFileName, getSaveFileName, BlockingWaitingDialog)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
from .rbf_dialog import BumpFeeDialog, DSCancelDialog
from .qrreader import scan_qrcode
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QToolButton):
    # note: this class has a custom stylesheet applied in stylesheet_patcher.py
    """Flat 25px icon button for the status bar.

    Invokes *func* both on click and on Return/Enter key presses."""

    def __init__(self, icon, tooltip, func):
        QToolButton.__init__(self)
        self.func = func
        self.setText('')
        self.setIcon(icon)
        self.setIconSize(QSize(25,25))
        self.setMaximumWidth(25)
        self.setToolTip(tooltip)
        self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
        self.setAutoRaise(True)
        self.setCursor(QCursor(Qt.PointingHandCursor))
        self.clicked.connect(self.onPress)

    def onPress(self, checked=False):
        '''Drops the unwanted PyQt5 "checked" argument'''
        self.func()

    def keyPressEvent(self, e):
        # Treat Return/Enter exactly like a click.
        if e.key() in (Qt.Key_Return, Qt.Key_Enter):
            self.func()
def protected(func):
    '''Password request wrapper. The password is passed to the function
    as the 'password' named argument. "None" indicates either an
    unencrypted wallet, or the user cancelled the password request.
    An empty input is passed as the empty string.'''
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's __name__/__doc__ for debugging and hooks
    def request_password(self, *args, **kwargs):
        parent = self.top_level_window()
        password = None
        # Keep prompting until the password verifies, or the wallet needs none.
        while self.wallet.has_keystore_encryption():
            password = self.password_dialog(parent=parent)
            if password is None:
                # User cancelled password input
                return
            try:
                self.wallet.check_password(password)
                break
            except Exception as e:
                self.show_error(str(e), parent=parent)
                continue
        kwargs['password'] = password
        return func(self, *args, **kwargs)
    return request_password
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
    """Main Qt window for one open wallet."""

    # Qt signals used to marshal events from worker threads onto the GUI thread.
    payment_request_ok_signal = pyqtSignal()
    payment_request_error_signal = pyqtSignal()
    network_signal = pyqtSignal(str, object)
    #ln_payment_attempt_signal = pyqtSignal(str)
    alias_received_signal = pyqtSignal()
    computing_privkeys_signal = pyqtSignal()
    show_privkeys_signal = pyqtSignal()
    show_error_signal = pyqtSignal(str)

    # BIP70 payment request currently being processed, if any.
    payment_request: Optional[paymentrequest.PaymentRequest]
    def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
        """Build the window for *wallet*: state, tabs, menus, shortcuts,
        network-event wiring, and the optional update-check thread."""
        QMainWindow.__init__(self)
        self.gui_object = gui_object
        self.config = config = gui_object.config # type: SimpleConfig
        self.gui_thread = gui_object.gui_thread
        assert wallet, "no wallet"
        self.wallet = wallet
        if wallet.has_lightning():
            self.wallet.config.set_key('show_channels_tab', True)
        Exception_Hook.maybe_setup(config=self.config, wallet=self.wallet)
        self.network = gui_object.daemon.network # type: Network
        self.fx = gui_object.daemon.fx # type: FxThread
        self.contacts = wallet.contacts
        self.tray = gui_object.tray
        self.app = gui_object.app
        # --- mutable window state ---
        self._cleaned_up = False
        self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
        self.payto_URI = None
        self.checking_accounts = False
        self.qr_window = None
        self.pluginsdialog = None
        self.showing_cert_mismatch_error = False
        self.tl_windows = []
        self.pending_invoice = None
        Logger.__init__(self)
        # Queue of incoming txs waiting to be shown as tray notifications.
        self.tx_notification_queue = queue.Queue()
        self.tx_notification_last_time = 0
        self.create_status_bar()
        self.need_update = threading.Event()
        self.completions = QStringListModel()
        coincontrol_sb = self.create_coincontrol_statusbar()
        # --- tabs ---
        self.tabs = tabs = QTabWidget(self)
        self.send_tab = self.create_send_tab()
        self.receive_tab = self.create_receive_tab()
        self.addresses_tab = self.create_addresses_tab()
        self.utxo_tab = self.create_utxo_tab()
        self.console_tab = self.create_console_tab()
        self.contacts_tab = self.create_contacts_tab()
        self.channels_tab = self.create_channels_tab()
        tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
        tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
        tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
        def add_optional_tab(tabs, tab, icon, description, name):
            # Record the tab's metadata so toggle_tab() can re-insert it later.
            tab.tab_icon = icon
            tab.tab_description = description
            tab.tab_pos = len(tabs)
            tab.tab_name = name
            if self.config.get('show_{}_tab'.format(name), False):
                tabs.addTab(tab, icon, description.replace("&", ""))
        add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
        add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
        add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
        add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
        add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
        tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        # --- central layout ---
        central_widget = QScrollArea()
        vbox = QVBoxLayout(central_widget)
        vbox.setContentsMargins(0, 0, 0, 0)
        vbox.addWidget(tabs)
        vbox.addWidget(coincontrol_sb)
        self.setCentralWidget(central_widget)
        self.setMinimumWidth(640)
        self.setMinimumHeight(400)
        if self.config.get("is_maximized"):
            self.showMaximized()
        self.setWindowIcon(read_QIcon("electrum-grlc.png"))
        self.init_menubar()
        # --- keyboard shortcuts (weak proxy avoids keeping tabs alive) ---
        wrtabs = weakref.proxy(tabs)
        QShortcut(QKeySequence("Ctrl+W"), self, self.close)
        QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
        QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
        QShortcut(QKeySequence("F5"), self, self.update_wallet)
        QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
        QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
        for i in range(wrtabs.count()):
            # i=i default binds the loop variable (late-binding closure fix).
            QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
        self.payment_request_ok_signal.connect(self.payment_request_ok)
        self.payment_request_error_signal.connect(self.payment_request_error)
        self.show_error_signal.connect(self.show_error)
        self.history_list.setFocus(True)
        # network callbacks
        if self.network:
            self.network_signal.connect(self.on_network_qt)
            interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                         'new_transaction', 'status',
                         'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
                         'on_history', 'channel', 'channels_updated',
                         'payment_failed', 'payment_succeeded',
                         'invoice_status', 'request_status', 'ln_gossip_sync_progress',
                         'cert_mismatch', 'gossip_db_loaded']
            # To avoid leaking references to "self" that prevent the
            # window from being GC-ed when closed, callbacks should be
            # methods of this class only, and specifically not be
            # partials, lambdas or methods of subobjects. Hence...
            util.register_callback(self.on_network, interests)
            # set initial message
            self.console.showMessage(self.network.banner)
        # update fee slider in case we missed the callback
        #self.fee_slider.update()
        self.load_wallet(wallet)
        gui_object.timer.timeout.connect(self.timer_actions)
        self.fetch_alias()
        # If the option hasn't been set yet
        if config.get('check_updates') is None:
            choice = self.question(title="Electrum-GRLC - " + _("Enable update check"),
                                   msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
                                       _("Would you like to be notified when there is a newer version of Electrum available?"))
            config.set_key('check_updates', bool(choice), save=True)
        self._update_check_thread = None
        if config.get('check_updates', False):
            # The references to both the thread and the window need to be stored somewhere
            # to prevent GC from getting in our way.
            def on_version_received(v):
                if UpdateCheck.is_newer(v):
                    self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
                    self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
                    self.update_check_button.show()
            self._update_check_thread = UpdateCheckThread()
            self._update_check_thread.checked.connect(on_version_received)
            self._update_check_thread.start()
def run_coroutine_from_thread(self, coro, on_result=None):
def task():
try:
f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
r = f.result()
if on_result:
on_result(r)
except Exception as e:
self.logger.exception("exception in coro scheduled via window.wallet")
self.show_error_signal.emit(str(e))
self.wallet.thread.add(task)
    def on_fx_history(self):
        """Refresh fiat columns after new historical exchange rates arrive."""
        self.history_model.refresh('fx_history')
        self.address_list.update()
    def on_fx_quotes(self):
        """Refresh status bar and amount edits after fresh fiat spot quotes."""
        self.update_status()
        # Refresh edits with the new rate
        edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
        edit.textEdited.emit(edit.text())
        edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
        edit.textEdited.emit(edit.text())
        # History tab needs updating if it used spot
        if self.fx.history_used_spot:
            self.history_model.refresh('fx_quotes')
        self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
    def push_top_level_window(self, window):
        '''Used for e.g. tx dialog box to ensure new dialogs are appropriately
        parented. This used to be done by explicitly providing the parent
        window, but that isn't something hardware wallet prompts know.'''
        self.tl_windows.append(window)
    def pop_top_level_window(self, window):
        """Remove *window* from the stack maintained by push_top_level_window."""
        self.tl_windows.remove(window)
    def top_level_window(self, test_func=None):
        '''Do the right thing in the presence of tx dialog windows'''
        # Prefer the most recently pushed dialog, unless test_func rejects it.
        override = self.tl_windows[-1] if self.tl_windows else None
        if override and test_func and not test_func(override):
            override = None  # only override if ok for test_func
        return self.top_level_window_recurse(override, test_func)
    def diagnostic_name(self):
        """Name used by Logger to tag this window's log lines."""
        #return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
        return self.wallet.diagnostic_name()
    def is_hidden(self):
        """Return True if the window is minimized or not visible."""
        return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
    def bring_to_top(self):
        """Show the window and raise it above other windows."""
        self.show()
        self.raise_()
    def on_error(self, exc_info):
        """Central error handler for background tasks.

        *exc_info* is a (type, value, traceback) tuple. User cancellations
        are ignored; user-facing exceptions are shown as-is; anything else
        is logged and shown as repr."""
        e = exc_info[1]
        if isinstance(e, UserCancelled):
            pass
        elif isinstance(e, UserFacingException):
            self.show_error(str(e))
        else:
            # TODO would be nice if we just sent these to the crash reporter...
            # anything we don't want to send there, we should explicitly catch
            # send_exception_to_crash_reporter(e)
            try:
                self.logger.error("on_error", exc_info=exc_info)
            except OSError:
                pass # see #4418
            self.show_error(repr(e))
    def on_network(self, event, *args):
        """Network-thread callback: forward the event to the GUI thread."""
        # Handle in GUI thread
        self.network_signal.emit(event, args)
    def on_network_qt(self, event, args=None):
        """Dispatch a network event on the GUI thread.

        Events that carry a wallet argument are filtered so only events for
        this window's wallet are acted on."""
        # Handle a network message in the GUI thread
        # note: all windows get events from all wallets!
        if event == 'wallet_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.need_update.set()
        elif event == 'network_updated':
            self.gui_object.network_updated_signal_obj.network_updated_signal \
                .emit(event, args)
            self.network_signal.emit('status', None)
        elif event == 'blockchain_updated':
            # to update number of confirmations in history
            self.need_update.set()
        elif event == 'new_transaction':
            wallet, tx = args
            if wallet == self.wallet:
                self.tx_notification_queue.put(tx)
        elif event == 'on_quotes':
            self.on_fx_quotes()
        elif event == 'on_history':
            self.on_fx_history()
        elif event == 'gossip_db_loaded':
            self.channels_list.gossip_db_loaded.emit(*args)
        elif event == 'channels_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.channels_list.update_rows.emit(*args)
        elif event == 'channel':
            wallet = args[0]
            if wallet == self.wallet:
                self.channels_list.update_single_row.emit(*args)
                self.update_status()
        elif event == 'request_status':
            self.on_request_status(*args)
        elif event == 'invoice_status':
            self.on_invoice_status(*args)
        elif event == 'payment_succeeded':
            wallet = args[0]
            if wallet == self.wallet:
                self.on_payment_succeeded(*args)
        elif event == 'payment_failed':
            wallet = args[0]
            if wallet == self.wallet:
                self.on_payment_failed(*args)
        elif event == 'status':
            self.update_status()
        elif event == 'banner':
            self.console.showMessage(args[0])
        elif event == 'verified':
            wallet, tx_hash, tx_mined_status = args
            if wallet == self.wallet:
                self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
        elif event == 'fee':
            # fee estimates changed; nothing to refresh here
            pass
        elif event == 'fee_histogram':
            self.history_model.on_fee_histogram()
        elif event == 'ln_gossip_sync_progress':
            self.update_lightning_icon()
        elif event == 'cert_mismatch':
            self.show_cert_mismatch_error()
        else:
            self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
    def close_wallet(self):
        """Log the wallet path and let plugins react to the wallet closing."""
        if self.wallet:
            self.logger.info(f'close_wallet {self.wallet.storage.path}')
        run_hook('close_wallet', self.wallet)
    @profiler
    def load_wallet(self, wallet: Abstract_Wallet):
        """Attach *wallet* to this window and refresh all wallet-derived UI."""
        wallet.thread = TaskThread(self, self.on_error)
        self.update_recently_visited(wallet.storage.path)
        if wallet.has_lightning():
            util.trigger_callback('channels_updated', wallet)
        self.need_update.set()
        # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
        # update menus
        self.seed_menu.setEnabled(self.wallet.has_seed())
        self.update_lock_icon()
        self.update_buttons_on_seed()
        self.update_console()
        self.clear_receive_tab()
        self.request_list.update()
        self.channels_list.update()
        self.tabs.show()
        self.init_geometry()
        # Start hidden to the tray if the user asked for it.
        if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
            self.hide()
        else:
            self.show()
        self.watching_only_changed()
        run_hook('load_wallet', wallet, self)
        try:
            wallet.try_detecting_internal_addresses_corruption()
        except InternalAddressCorruption as e:
            # Warn the user and report: wallet file may be damaged.
            self.show_error(str(e))
            send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum-LTC"
if constants.net.TESTNET:
name += " " + constants.net.NET_NAME.capitalize()
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
    def warn_if_watching_only(self):
        """Pop a warning dialog if the wallet cannot spend (watching-only)."""
        if self.wallet.is_watching_only():
            msg = ' '.join([
                _("This wallet is watching-only."),
                _("This means you will not be able to spend garlicoin with it."),
                _("Make sure you own the seed phrase or the private keys, before you request garlicoin to be sent to this wallet.")
            ])
            self.show_warning(msg, title=_('Watch-only wallet'))
    def warn_if_testnet(self):
        """Show a one-time warning dialog when running on testnet, with a
        "don't show again" checkbox persisted to the config."""
        if not constants.net.TESTNET:
            return
        # user might have opted out already
        if self.config.get('dont_show_testnet_warning', False):
            return
        # only show once per process lifecycle
        if getattr(self.gui_object, '_warned_testnet', False):
            return
        self.gui_object._warned_testnet = True
        msg = ''.join([
            _("You are in testnet mode."), ' ',
            _("Testnet coins are worthless."), '\n',
            _("Testnet is separate from the main Garlicoin network. It is used for testing.")
        ])
        cb = QCheckBox(_("Don't show this again."))
        cb_checked = False
        def on_cb(x):
            # record checkbox state; read after the dialog closes
            nonlocal cb_checked
            cb_checked = x == Qt.Checked
        cb.stateChanged.connect(on_cb)
        self.show_warning(msg, title=_('Testnet'), checkbox=cb)
        if cb_checked:
            self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
    def backup_wallet(self):
        """Show the backup dialog and copy the wallet file to the configured
        backup directory.

        Returns True on success, False if the dialog was cancelled, and
        None when the directory is unconfigured or the copy failed."""
        d = WindowModalDialog(self, _("File Backup"))
        vbox = QVBoxLayout(d)
        grid = QGridLayout()
        backup_help = ""
        backup_dir = self.config.get('backup_dir')
        backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
        msg = _('Please select a backup directory')
        if self.wallet.has_lightning() and self.wallet.lnworker.channels:
            msg += '\n\n' + ' '.join([
                _("Note that lightning channels will be converted to channel backups."),
                _("You cannot use channel backups to perform lightning payments."),
                _("Channel backups can only be used to request your channels to be closed.")
            ])
        self.backup_dir_e = QPushButton(backup_dir)
        self.backup_dir_e.clicked.connect(self.select_backup_dir)
        grid.addWidget(backup_dir_label, 1, 0)
        grid.addWidget(self.backup_dir_e, 1, 1)
        vbox.addLayout(grid)
        vbox.addWidget(WWLabel(msg))
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            return False
        # Re-read: the user may have changed it via select_backup_dir.
        backup_dir = self.config.get_backup_dir()
        if backup_dir is None:
            self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not configured"))
            return
        try:
            new_path = self.wallet.save_backup(backup_dir)
        except BaseException as reason:
            self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
            return
        msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
        self.show_message(msg, title=_("Wallet backup created"))
        return True
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
    def get_wallet_folder(self):
        """Return the directory containing the current wallet file."""
        return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
    def init_menubar(self):
        """Build the whole menu bar (File / Wallet / View / Tools / Help)."""
        menubar = QMenuBar()
        # --- File menu ---
        file_menu = menubar.addMenu(_("&File"))
        self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
        file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
        file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
        file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
        file_menu.addAction(_("Delete"), self.remove_wallet)
        file_menu.addSeparator()
        file_menu.addAction(_("&Quit"), self.close)
        # --- Wallet menu ---
        wallet_menu = menubar.addMenu(_("&Wallet"))
        wallet_menu.addAction(_("&Information"), self.show_wallet_info)
        wallet_menu.addSeparator()
        # Keep references: these entries are enabled/hidden dynamically
        # by watching_only_changed() and load_wallet().
        self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
        self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
        self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
        self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
        self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
        self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
        self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
        wallet_menu.addSeparator()
        addresses_menu = wallet_menu.addMenu(_("&Addresses"))
        addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
        labels_menu = wallet_menu.addMenu(_("&Labels"))
        labels_menu.addAction(_("&Import"), self.do_import_labels)
        labels_menu.addAction(_("&Export"), self.do_export_labels)
        history_menu = wallet_menu.addMenu(_("&History"))
        history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
        history_menu.addAction(_("&Summary"), self.history_list.show_summary)
        history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
        history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
        contacts_menu = wallet_menu.addMenu(_("Contacts"))
        contacts_menu.addAction(_("&New"), self.new_contact_dialog)
        contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
        contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
        invoices_menu = wallet_menu.addMenu(_("Invoices"))
        invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
        invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
        requests_menu = wallet_menu.addMenu(_("Requests"))
        requests_menu.addAction(_("Import"), lambda: self.import_requests())
        requests_menu.addAction(_("Export"), lambda: self.export_requests())
        wallet_menu.addSeparator()
        wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
        # --- View menu (toggles for the optional tabs) ---
        def add_toggle_action(view_menu, tab):
            is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
            item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
            tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
        view_menu = menubar.addMenu(_("&View"))
        add_toggle_action(view_menu, self.addresses_tab)
        add_toggle_action(view_menu, self.utxo_tab)
        add_toggle_action(view_menu, self.channels_tab)
        add_toggle_action(view_menu, self.contacts_tab)
        add_toggle_action(view_menu, self.console_tab)
        # --- Tools menu ---
        tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu
        preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction
        if sys.platform == 'darwin':
            # "Settings"/"Preferences" are all reserved keywords in macOS.
            # preferences_action will get picked up based on name (and put into a standardized location,
            # and given a standard reserved hotkey)
            # Hence, this menu item will be at a "uniform location re macOS processes"
            preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences
            # Add another preferences item, to also have a "uniform location for Electrum between different OSes"
            tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
        tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
        if self.network and self.network.local_watchtower:
            tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog)
        tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
        tools_menu.addSeparator()
        tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
        tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
        tools_menu.addSeparator()
        paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
        raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
        raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
        raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
        raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
        raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
        self.raw_transaction_menu = raw_transaction_menu
        run_hook('init_menubar_tools', self, tools_menu)
        # --- Help menu ---
        help_menu = menubar.addMenu(_("&Help"))
        help_menu.addAction(_("&About"), self.show_about)
        help_menu.addAction(_("&Check for updates"), self.show_update_check)
        help_menu.addAction(_("&Official website"), lambda: webopen("https://garlicoin.io"))
        help_menu.addSeparator()
        help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
        help_menu.addAction(_("&Report Bug"), self.show_report_bug)
        help_menu.addSeparator()
        help_menu.addAction(_("&Donate to server"), self.donate_to_server)
        self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('garlicoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
    def show_about(self):
        """Show the About dialog with version and project blurb."""
        QMessageBox.about(self, "Electrum-GRLC",
                          (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
                           _("Electrum's focus is speed, with low resource usage and simplifying Garlicoin.") + " " +
                           _("You do not need to perform regular backups, because your wallet can be "
                             "recovered from a secret phrase that you can memorize or write on paper.") + " " +
                           _("Startup times are instant because it operates in conjunction with high-performance "
                             "servers that handle the most complicated parts of the Garlicoin system.") + "\n\n" +
                           _("Uses icons from the Icons8 icon pack (icons8.com).")))
    def show_update_check(self, version=None):
        """Open the update-check dialog; the reference is kept on gui_object
        so the dialog is not garbage-collected while open."""
        self.gui_object._update_check = UpdateCheck(latest_version=version)
    def show_report_bug(self):
        """Show instructions for reporting bugs on the issue tracker."""
        msg = ' '.join([
            _("Please report any bugs as issues on github:<br/>"),
            f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
            _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
            _("Try to explain not only what the bug is, but how it occurs.")
        ])
        self.show_message(msg, title="Electrum-GRLC - " + _("Reporting Bugs"), rich_text=True)
    def notify_transactions(self):
        """Drain the tx-notification queue and show desktop notifications.

        Skipped while the wallet is syncing, and rate-limited to at most one
        notification batch every 20 seconds. Three or more queued transactions
        are collapsed into a single summary notification.
        """
        if self.tx_notification_queue.qsize() == 0:
            return
        if not self.wallet.up_to_date:
            return  # no notifications while syncing
        now = time.time()
        rate_limit = 20  # seconds
        if self.tx_notification_last_time + rate_limit > now:
            return
        self.tx_notification_last_time = now
        self.logger.info("Notifying GUI about new transactions")
        txns = []
        # drain the whole queue without blocking
        while True:
            try:
                txns.append(self.tx_notification_queue.get_nowait())
            except queue.Empty:
                break
        # Combine the transactions if there are at least three
        if len(txns) >= 3:
            total_amount = 0
            for tx in txns:
                tx_wallet_delta = self.wallet.get_wallet_delta(tx)
                # skip txs that do not affect this wallet's balance
                if not tx_wallet_delta.is_relevant:
                    continue
                total_amount += tx_wallet_delta.delta
            self.notify(_("{} new transactions: Total amount received in the new transactions {}")
                        .format(len(txns), self.format_amount_and_units(total_amount)))
        else:
            for tx in txns:
                tx_wallet_delta = self.wallet.get_wallet_delta(tx)
                if not tx_wallet_delta.is_relevant:
                    continue
                self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum-GRLC", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum-GRLC", message, QSystemTrayIcon.Information, 20000)
    def timer_actions(self):
        """Periodic housekeeping driven by a timer; runs on the GUI thread."""
        self.request_list.refresh_status()
        # Note this runs in the GUI thread
        if self.need_update.is_set():
            self.need_update.clear()
            self.update_wallet()
        elif not self.wallet.up_to_date:
            # this updates "synchronizing" progress
            self.update_status()
        # resolve aliases
        # FIXME this is a blocking network call that has a timeout of 5 sec
        self.payto_e.resolve()
        self.notify_transactions()
def format_amount(self, amount_sat, is_diff=False, whitespaces=False) -> str:
"""Formats amount as string, converting to desired unit.
E.g. 500_000 -> '0.005'
"""
return self.config.format_amount(amount_sat, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount_sat, *, timestamp: int = None) -> str:
"""Returns string with both bitcoin and fiat amounts, in desired units.
E.g. 500_000 -> '0.005 BTC (191.42 EUR)'
"""
text = self.config.format_amount_and_units(amount_sat)
fiat = self.fx.format_amount_and_units(amount_sat, timestamp=timestamp) if self.fx else None
if text and fiat:
text += f' ({fiat})'
return text
def format_fiat_and_units(self, amount_sat) -> str:
"""Returns string of FX fiat amount, in desired units.
E.g. 500_000 -> '191.42 EUR'
"""
return self.fx.format_amount_and_units(amount_sat) if self.fx else ''
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
    def connect_fields(self, window, btc_e, fiat_e, fee_e):
        """Keep a coin amount edit and a fiat amount edit in sync via the FX rate.

        The `follows` flag on each edit guards against signal re-entrancy:
        while one edit is being programmatically updated from the other, its
        textChanged handler must not write back.
        """
        def edit_changed(edit):
            if edit.follows:
                # this change was made programmatically by the sibling edit
                return
            edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
            fiat_e.is_last_edited = (edit == fiat_e)
            amount = edit.get_amount()
            rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
            if rate.is_nan() or amount is None:
                # no usable rate or empty amount: blank the sibling field
                if edit is fiat_e:
                    btc_e.setText("")
                    if fee_e:
                        fee_e.setText("")
                else:
                    fiat_e.setText("")
            else:
                if edit is fiat_e:
                    btc_e.follows = True
                    btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                    btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    btc_e.follows = False
                    if fee_e:
                        window.update_fee()
                else:
                    fiat_e.follows = True
                    fiat_e.setText(self.fx.ccy_amount_str(
                        amount * Decimal(rate) / COIN, False))
                    fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    fiat_e.follows = False
        btc_e.follows = False
        fiat_e.follows = False
        fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
        btc_e.textChanged.connect(partial(edit_changed, btc_e))
        fiat_e.is_last_edited = False
    def update_status(self):
        """Refresh the status-bar text, tray tooltip and connection icon.

        Branches on: offline / connected-but-syncing / lagging server /
        fully synced (shows balances) / not connected.
        """
        if not self.wallet:
            return
        if self.network is None:
            text = _("Offline")
            icon = read_QIcon("status_disconnected.png")
        elif self.network.is_connected():
            server_height = self.network.get_server_height()
            server_lag = self.network.get_local_height() - server_height
            fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
            # Server height can be 0 after switching to a new server
            # until we get a headers subscription request response.
            # Display the synchronizing message in that case.
            if not self.wallet.up_to_date or server_height == 0:
                num_sent, num_answered = self.wallet.get_history_sync_state_details()
                text = ("{} ({}/{})"
                        .format(_("Synchronizing..."), num_answered, num_sent))
                icon = read_QIcon("status_waiting.png")
            elif server_lag > 1:
                text = _("Server is lagging ({} blocks)").format(server_lag)
                icon = read_QIcon("status_lagging%s.png"%fork_str)
            else:
                # fully synced: show confirmed/unconfirmed/unmatured balances
                c, u, x = self.wallet.get_balance()
                text = _("Balance") + ": %s "%(self.format_amount_and_units(c))
                if u:
                    text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
                if x:
                    text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
                if self.wallet.has_lightning():
                    l = self.wallet.lnworker.get_balance()
                    text += u'    \U000026a1 %s'%(self.format_amount_and_units(l).strip())
                # append fiat balance and price
                if self.fx.is_enabled():
                    text += self.fx.get_fiat_status_text(c + u + x,
                        self.base_unit(), self.get_decimal_point()) or ''
                if not self.network.proxy:
                    icon = read_QIcon("status_connected%s.png"%fork_str)
                else:
                    icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
        else:
            if self.network.proxy:
                text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
            else:
                text = _("Not connected")
            icon = read_QIcon("status_disconnected.png")
        if self.tray:
            # append the wallet name to the tray tooltip
            self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
        self.balance_label.setText(text)
        if self.status_button:
            self.status_button.setIcon(icon)
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
tab = self.create_list_tab(l, toolbar)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return tab
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
    def create_receive_tab(self):
        """Build and return the Receive tab widget.

        Lays out description/amount/expiry inputs, the New Address / Lightning
        buttons, and a tab widget showing the resulting address, request text
        and QR code above the receive-queue list.
        """
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.receive_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)
        self.receive_message_e = SizedFreezableLineEdit(width=700)
        grid.addWidget(QLabel(_('Description')), 0, 0)
        grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
        self.receive_message_e.textChanged.connect(self.update_receive_qr)
        self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
        grid.addWidget(QLabel(_('Requested amount')), 1, 0)
        grid.addWidget(self.receive_amount_e, 1, 1)
        self.receive_amount_e.textChanged.connect(self.update_receive_qr)
        self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
        if not self.fx or not self.fx.is_enabled():
            self.fiat_receive_e.setVisible(False)
        grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
        self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
        # NOTE(review): amount_e/fiat_send_e are Send-tab widgets; this assumes
        # the Send tab was created before this one -- verify call order.
        self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
        self.expires_combo = QComboBox()
        evl = sorted(pr_expiration_values.items())
        evl_keys = [i[0] for i in evl]
        evl_values = [i[1] for i in evl]
        default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
        try:
            i = evl_keys.index(default_expiry)
        except ValueError:
            # configured expiry not among the presets; fall back to the first
            i = 0
        self.expires_combo.addItems(evl_values)
        self.expires_combo.setCurrentIndex(i)
        def on_expiry(i):
            # persist the chosen expiry preset
            self.config.set_key('request_expiry', evl_keys[i])
        self.expires_combo.currentIndexChanged.connect(on_expiry)
        msg = ''.join([
            _('Expiration date of your request.'), ' ',
            _('This information is seen by the recipient if you send them a signed payment request.'),
            '\n\n',
            _('For on-chain requests, the address gets reserved until expiration. After that, it might get reused.'), ' ',
            _('The garlicoin address never expires and will always be part of this electrum wallet.'), ' ',
            _('You can reuse a garlicoin address any number of times but it is not good for your privacy.'),
            '\n\n',
            _('For Lightning requests, payments will not be accepted after the expiration.'),
        ])
        grid.addWidget(HelpLabel(_('Expires after') + ' (?)', msg), 2, 0)
        grid.addWidget(self.expires_combo, 2, 1)
        # read-only label shown (instead of the combo) for existing requests
        self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(1)
        self.expires_label.setFocusPolicy(Qt.NoFocus)
        self.expires_label.hide()
        grid.addWidget(self.expires_label, 2, 1)
        self.clear_invoice_button = QPushButton(_('Clear'))
        self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
        self.create_invoice_button = QPushButton(_('New Address'))
        self.create_invoice_button.setIcon(read_QIcon("garlicoin.png"))
        self.create_invoice_button.setToolTip('Create on-chain request')
        self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
        self.receive_buttons = buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.clear_invoice_button)
        buttons.addWidget(self.create_invoice_button)
        if self.wallet.has_lightning():
            self.create_lightning_invoice_button = QPushButton(_('Lightning'))
            self.create_lightning_invoice_button.setToolTip('Create lightning request')
            self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
            self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
            buttons.addWidget(self.create_lightning_invoice_button)
        grid.addLayout(buttons, 4, 0, 1, -1)
        # right-hand side: request text, QR and address views
        self.receive_payreq_e = ButtonsTextEdit()
        self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
        self.receive_payreq_e.addCopyButton(self.app)
        self.receive_payreq_e.setReadOnly(True)
        self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
        self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
        self.receive_qr = QRCodeWidget(fixedSize=220)
        self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
        self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
        self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
        self.receive_address_e = ButtonsTextEdit()
        self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
        self.receive_address_e.addCopyButton(self.app)
        self.receive_address_e.setReadOnly(True)
        self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
        qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
        qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
        self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
        self.receive_requests_label = QLabel(_('Receive queue'))
        from .request_list import RequestList
        self.request_list = RequestList(self)
        receive_tabs = QTabWidget()
        receive_tabs.addTab(self.receive_address_e, _('Address'))
        receive_tabs.addTab(self.receive_payreq_e, _('Request'))
        receive_tabs.addTab(self.receive_qr, _('QR Code'))
        receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
        receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
        # keep the layout from jumping when the tabs are hidden
        receive_tabs_sp = receive_tabs.sizePolicy()
        receive_tabs_sp.setRetainSizeWhenHidden(True)
        receive_tabs.setSizePolicy(receive_tabs_sp)
        def maybe_hide_receive_tabs():
            # only show the tabs once there is an actual request to display
            receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
        self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
        maybe_hide_receive_tabs()
        # layout
        vbox_g = QVBoxLayout()
        vbox_g.addLayout(grid)
        vbox_g.addStretch()
        hbox = QHBoxLayout()
        hbox.addLayout(vbox_g)
        hbox.addStretch()
        hbox.addWidget(receive_tabs)
        w = QWidget()
        w.searchable_list = self.request_list
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.receive_requests_label)
        vbox.addWidget(self.request_list)
        vbox.setStretchFactor(self.request_list, 1000)
        return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
    def sign_payment_request(self, addr):
        """Sign the payment request for *addr* with the configured alias key.

        Only applies when an alias is configured and resolved, and its address
        belongs to this wallet; prompts for the wallet password if needed.
        """
        alias = self.config.get('alias')
        if alias and self.alias_info:
            alias_addr, alias_name, validated = self.alias_info
            if alias_addr:
                if self.wallet.is_mine(alias_addr):
                    msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                    password = None
                    if self.wallet.has_keystore_encryption():
                        password = self.password_dialog(msg)
                        if not password:
                            # user cancelled the password prompt
                            return
                    try:
                        self.wallet.sign_payment_request(addr, alias, alias_addr, password)
                    except Exception as e:
                        self.show_error(repr(e))
                        return
                else:
                    # alias address is not ours; cannot sign
                    return
    def create_invoice(self, is_lightning: bool):
        """Create a receive request (Lightning or on-chain) from the tab inputs.

        On success: refreshes/selects the request in the list, clears the
        input fields, and copies the invoice/address to the clipboard.
        """
        amount = self.receive_amount_e.get_amount()
        message = self.receive_message_e.text()
        expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
        try:
            if is_lightning:
                if not self.wallet.lnworker.channels:
                    self.show_error(_("You need to open a Lightning channel first."))
                    return
                # TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
                key = self.wallet.lnworker.add_request(amount, message, expiry)
            else:
                key = self.create_bitcoin_request(amount, message, expiry)
                if not key:
                    # user declined address reuse / gap-limit warning
                    return
                self.address_list.update()
        except InvoiceError as e:
            self.show_error(_('Error creating payment request') + ':\n' + str(e))
            return
        assert key is not None
        self.request_list.update()
        self.request_list.select_key(key)
        # clear request fields
        self.receive_amount_e.setText('')
        self.receive_message_e.setText('')
        # copy to clipboard
        r = self.wallet.get_request(key)
        content = r.invoice if r.is_lightning() else r.get_address()
        title = _('Invoice') if is_lightning else _('Address')
        self.do_copy(content, title=title)
    def create_bitcoin_request(self, amount: int, message: str, expiration: int) -> Optional[str]:
        """Create an on-chain payment request; return its address key, or None.

        Returns None when the user declines the address-reuse or
        beyond-gap-limit warning dialogs.
        """
        addr = self.wallet.get_unused_address()
        if addr is None:
            if not self.wallet.is_deterministic():  # imported wallet
                msg = [
                    _('No more addresses in your wallet.'), ' ',
                    _('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
                    _('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
                    _('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
                   ]
                if not self.question(''.join(msg)):
                    return
                addr = self.wallet.get_receiving_address()
            else:  # deterministic wallet
                if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
                    return
                addr = self.wallet.create_new_address(False)
        req = self.wallet.make_payment_request(addr, amount, message, expiration)
        try:
            self.wallet.add_payment_request(req)
        except Exception as e:
            self.logger.exception('Error adding payment request')
            self.show_error(_('Error adding payment request') + ':\n' + repr(e))
        else:
            # silently sign the request with the configured alias, if any
            self.sign_payment_request(addr)
        return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
    def toggle_qr_window(self):
        """Show/hide the detached QR window, preserving its last geometry."""
        from . import qrwindow
        if not self.qr_window:
            # first use: create the window lazily
            self.qr_window = qrwindow.QR_Window(self)
            self.qr_window.setVisible(True)
            self.qr_window_geometry = self.qr_window.geometry()
        else:
            if not self.qr_window.isVisible():
                self.qr_window.setVisible(True)
                # restore where the user last placed it
                self.qr_window.setGeometry(self.qr_window_geometry)
            else:
                # remember placement before hiding
                self.qr_window_geometry = self.qr_window.geometry()
                self.qr_window.setVisible(False)
        self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
    def create_send_tab(self):
        """Build and return the Send tab widget.

        Lays out pay-to / description / amount inputs, the Max / Save / Pay /
        Clear buttons, and the send-queue (invoice) list below them.
        """
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.send_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)
        from .paytoedit import PayToEdit
        self.amount_e = BTCAmountEdit(self.get_decimal_point)
        self.payto_e = PayToEdit(self)
        self.payto_e.addPasteButton(self.app)
        msg = (_("Recipient of the funds.") + "\n\n"
               + _("You may enter a Garlicoin address, a label from your list of contacts "
                   "(a list of completions will be proposed), "
                   "or an alias (email-like address that forwards to a Garlicoin address)") + ". "
               + _("Lightning invoices are also supported.") + "\n\n"
               + _("You can also pay to many outputs in a single transaction, "
                   "specifying one output per line.") + "\n" + _("Format: address, amount") + "\n"
               + _("To set the amount to 'max', use the '!' special character.") + "\n"
               + _("Integers weights can also be used in conjunction with '!', "
                   "e.g. set one amount to '2!' and another to '3!' to split your coins 40-60."))
        payto_label = HelpLabel(_('Pay to'), msg)
        grid.addWidget(payto_label, 1, 0)
        grid.addWidget(self.payto_e, 1, 1, 1, -1)
        # autocompletion from the contact list (populated by update_completions)
        completer = QCompleter()
        completer.setCaseSensitivity(False)
        self.payto_e.set_completer(completer)
        completer.setModel(self.completions)
        msg = _('Description of the transaction (not mandatory).') + '\n\n'\
              + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
        description_label = HelpLabel(_('Description'), msg)
        grid.addWidget(description_label, 2, 0)
        self.message_e = SizedFreezableLineEdit(width=700)
        grid.addWidget(self.message_e, 2, 1, 1, -1)
        msg = (_('The amount to be received by the recipient.') + ' '
               + _('Fees are paid by the sender.') + '\n\n'
               + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' '
               + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n'
               + _('Keyboard shortcut: type "!" to send all your coins.'))
        amount_label = HelpLabel(_('Amount'), msg)
        grid.addWidget(amount_label, 3, 0)
        grid.addWidget(self.amount_e, 3, 1)
        self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
        if not self.fx or not self.fx.is_enabled():
            self.fiat_send_e.setVisible(False)
        grid.addWidget(self.fiat_send_e, 3, 2)
        # keep fiat edit frozen in lockstep with the coin amount edit
        self.amount_e.frozen.connect(
            lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
        self.max_button = EnterButton(_("Max"), self.spend_max)
        self.max_button.setFixedWidth(100)
        self.max_button.setCheckable(True)
        grid.addWidget(self.max_button, 3, 3)
        self.save_button = EnterButton(_("Save"), self.do_save_invoice)
        self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
        self.clear_button = EnterButton(_("Clear"), self.do_clear)
        buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.clear_button)
        buttons.addWidget(self.save_button)
        buttons.addWidget(self.send_button)
        grid.addLayout(buttons, 6, 1, 1, 4)
        self.amount_e.shortcut.connect(self.spend_max)
        def reset_max(text):
            # manual edits invalidate the "Max" selection
            self.max_button.setChecked(False)
            enable = not bool(text) and not self.amount_e.isReadOnly()
            #self.max_button.setEnabled(enable)
        self.amount_e.textEdited.connect(reset_max)
        self.fiat_send_e.textEdited.connect(reset_max)
        self.set_onchain(False)
        self.invoices_label = QLabel(_('Send queue'))
        from .invoice_list import InvoiceList
        self.invoice_list = InvoiceList(self)
        vbox0 = QVBoxLayout()
        vbox0.addLayout(grid)
        hbox = QHBoxLayout()
        hbox.addLayout(vbox0)
        hbox.addStretch(1)
        w = QWidget()
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.invoices_label)
        vbox.addWidget(self.invoice_list)
        vbox.setStretchFactor(self.invoice_list, 1000)
        w.searchable_list = self.invoice_list
        # let plugins (e.g. exchange rate) extend the grid
        run_hook('create_send_tab', grid)
        return w
    def spend_max(self):
        """Handle the 'Max' button: fill the amount field with the spendable maximum.

        Builds a trial transaction to learn the mining fee, then shows a
        tooltip breaking down mining fee, plugin (2fa) fee and frozen coins.
        """
        if run_hook('abort_send', self):
            return
        outputs = self.payto_e.get_outputs(True)
        if not outputs:
            return
        make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
            coins=self.get_coins(),
            outputs=outputs,
            fee=fee_est,
            is_sweep=False)
        try:
            try:
                tx = make_tx(None)
            except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
                # Check if we had enough funds excluding fees,
                # if so, still provide opportunity to set lower fees.
                tx = make_tx(0)
        except NotEnoughFunds as e:
            # even with zero fee there is not enough; report and bail out
            self.max_button.setChecked(False)
            text = self.get_text_not_enough_funds_mentioning_frozen()
            self.show_error(text)
            return
        self.max_button.setChecked(True)
        amount = tx.output_value()
        __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
        amount_after_all_fees = amount - x_fee_amount
        self.amount_e.setAmount(amount_after_all_fees)
        # show tooltip explaining max amount
        mining_fee = tx.get_fee()
        mining_fee_str = self.format_amount_and_units(mining_fee)
        msg = _("Mining fee: {} (can be adjusted on next screen)").format(mining_fee_str)
        if x_fee_amount:
            twofactor_fee_str = self.format_amount_and_units(x_fee_amount)
            msg += "\n" + _("2fa fee: {} (for the next batch of transactions)").format(twofactor_fee_str)
        frozen_bal = self.get_frozen_balance_str()
        if frozen_bal:
            msg += "\n" + _("Some coins are frozen: {} (can be unfrozen in the Addresses or in the Coins tab)").format(frozen_bal)
        QToolTip.showText(self.max_button.mapToGlobal(QPoint(0, 0)), msg)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
    @protected
    def protect(self, func, args, password):
        # `password` is injected by the `@protected` decorator (defined
        # elsewhere in this file; presumably it prompts the user when the
        # wallet is encrypted -- confirm there). We forward it, after the
        # original positional args, to *func*.
        return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Garlicoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
    def check_send_tab_payto_line_and_show_errors(self) -> bool:
        """Returns whether there are errors.
        Also shows error dialog to user if so.

        Checks, in order: payment-request expiry, 'Pay to' parse errors,
        and unvalidated (non-DNSSEC) aliases (the user may accept those).
        """
        pr = self.payment_request
        if pr:
            if pr.has_expired():
                self.show_error(_('Payment request has expired'))
                return True
        if not pr:
            errors = self.payto_e.get_errors()
            if errors:
                if len(errors) == 1 and not errors[0].is_multiline:
                    # single-line failure: show the offending line inline
                    err = errors[0]
                    self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
                                      f"{err.line_content[:40]}...\n\n"
                                      f"{err.exc!r}")
                else:
                    self.show_warning(_("Invalid Lines found:") + "\n\n" +
                                      '\n'.join([_("Line #") +
                                                 f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
                                                 for err in errors]))
                return True
            if self.payto_e.is_alias and self.payto_e.validated is False:
                # alias resolved but failed DNSSEC validation; let the user decide
                alias = self.payto_e.toPlainText()
                msg = _('WARNING: the alias "{}" could not be validated via an additional '
                        'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
                msg += _('Do you wish to continue?')
                if not self.question(msg):
                    return True
        return False  # no errors
    def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
        """Confirm with the user, then pay a BOLT11 invoice on the wallet thread.

        Raises if *amount_msat* is missing (amountless invoices must be filled
        in by the caller first).
        """
        if amount_msat is None:
            raise Exception("missing amount for LN invoice")
        amount_sat = Decimal(amount_msat) / 1000
        # FIXME this is currently lying to user as we truncate to satoshis
        msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
        if not self.question(msg):
            return
        # the invoice is saved before the (asynchronous) payment attempt
        self.save_pending_invoice()
        def task():
            # runs on the wallet thread; blocks on the asyncio coroutine
            coro = self.wallet.lnworker.pay_invoice(invoice, amount_msat=amount_msat, attempts=LN_NUM_PAYMENT_ATTEMPTS)
            fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
            return fut.result()
        self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
if wallet != self.wallet:
return
req = self.wallet.receive_requests.get(key)
if req is None:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
else:
self.request_list.update_item(key, req)
def on_invoice_status(self, wallet, key):
if wallet != self.wallet:
return
invoice = self.wallet.get_invoice(key)
if invoice is None:
return
status = self.wallet.get_invoice_status(invoice)
if status == PR_PAID:
self.invoice_list.update()
else:
self.invoice_list.update_item(key, invoice)
def on_payment_succeeded(self, wallet, key):
description = self.wallet.get_label(key)
self.notify(_('Payment succeeded') + '\n\n' + description)
self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
    def read_invoice(self):
        """Build an invoice object from the Send tab, or None on error.

        Lightning mode parses the BOLT11 invoice (filling in the amount field
        for amountless invoices); on-chain mode builds a wallet invoice from
        the parsed outputs.
        """
        if self.check_send_tab_payto_line_and_show_errors():
            return
        try:
            if not self._is_onchain:
                invoice_str = self.payto_e.lightning_invoice
                if not invoice_str:
                    return
                if not self.wallet.has_lightning():
                    self.show_error(_('Lightning is disabled'))
                    return
                invoice = LNInvoice.from_bech32(invoice_str)
                if invoice.get_amount_msat() is None:
                    # amountless invoice: take the amount from the GUI field
                    amount_sat = self.amount_e.get_amount()
                    if amount_sat:
                        invoice.amount_msat = int(amount_sat * 1000)
                    else:
                        self.show_error(_('No amount'))
                        return
                return invoice
            else:
                outputs = self.read_outputs()
                if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
                    return
                message = self.message_e.text()
                return self.wallet.create_invoice(
                    outputs=outputs,
                    message=message,
                    pr=self.payment_request,
                    URI=self.payto_URI)
        except InvoiceError as e:
            self.show_error(_('Error creating payment') + ':\n' + str(e))
def do_save_invoice(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.save_pending_invoice()
def save_pending_invoice(self):
if not self.pending_invoice:
return
self.do_clear()
self.wallet.save_invoice(self.pending_invoice)
self.invoice_list.update()
self.pending_invoice = None
def do_pay(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.do_pay_invoice(self.pending_invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice: 'Invoice'):
if invoice.type == PR_TYPE_LN:
assert isinstance(invoice, LNInvoice)
self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
elif invoice.type == PR_TYPE_ONCHAIN:
assert isinstance(invoice, OnchainInvoice)
self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
else:
raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected that.
"""
return self.utxo_list.get_spend_list()
def get_text_not_enough_funds_mentioning_frozen(self) -> str:
text = _("Not enough funds")
frozen_str = self.get_frozen_balance_str()
if frozen_str:
text += " ({} {})".format(
frozen_str, _("are frozen")
)
return text
def get_frozen_balance_str(self) -> Optional[str]:
frozen_bal = sum(self.wallet.get_frozen_balance())
if not frozen_bal:
return None
return self.format_amount_and_units(frozen_bal)
    def pay_onchain_dialog(
            self, inputs: Sequence[PartialTxInput],
            outputs: List[PartialTxOutput], *,
            external_keypairs=None) -> None:
        """Run the confirm/preview flow for an on-chain payment and sign it.

        *external_keypairs* being set means a sweep (signing with keys not in
        the wallet).
        """
        # trustedcoin requires this
        if run_hook('abort_send', self):
            return
        is_sweep = bool(external_keypairs)
        make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
            coins=inputs,
            outputs=outputs,
            fee=fee_est,
            is_sweep=is_sweep)
        output_values = [x.value for x in outputs]
        # any '!'-style (max) output makes the total displayed as '!'
        if any(parse_max_spend(outval) for outval in output_values):
            output_value = '!'
        else:
            output_value = sum(output_values)
        conf_dlg = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
        if conf_dlg.not_enough_funds:
            # Check if we had enough funds excluding fees,
            # if so, still provide opportunity to set lower fees.
            if not conf_dlg.have_enough_funds_assuming_zero_fees():
                text = self.get_text_not_enough_funds_mentioning_frozen()
                self.show_message(text)
                return
        # shortcut to advanced preview (after "enough funds" check!)
        if self.config.get('advanced_preview'):
            preview_dlg = PreviewTxDialog(
                window=self,
                make_tx=make_tx,
                external_keypairs=external_keypairs,
                output_value=output_value)
            preview_dlg.show()
            return
        cancelled, is_send, password, tx = conf_dlg.run()
        if cancelled:
            return
        if is_send:
            # user chose "send": save the invoice, sign, then broadcast
            self.save_pending_invoice()
            def sign_done(success):
                if success:
                    self.broadcast_or_show(tx)
            self.sign_tx_with_password(tx, callback=sign_done, password=password,
                                       external_keypairs=external_keypairs)
        else:
            # user chose "preview" from the confirm dialog
            preview_dlg = PreviewTxDialog(
                window=self,
                make_tx=make_tx,
                external_keypairs=external_keypairs,
                output_value=output_value)
            preview_dlg.show()
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
    @protected
    def sign_tx(self, tx, *, callback, external_keypairs, password):
        # `password` is injected by the `@protected` decorator (defined
        # elsewhere in this file; presumably prompts the user -- confirm there),
        # then forwarded to the threaded signer below.
        self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
    def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
        '''Sign the transaction in a separate thread. When done, calls
        the callback with a success code of True or False.
        '''
        def on_success(result):
            callback(True)
        def on_failure(exc_info):
            self.on_error(exc_info)
            callback(False)
        # a plugin (e.g. trustedcoin) may wrap the success callback via this hook
        on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
        if external_keypairs:
            # can sign directly
            task = partial(tx.sign, external_keypairs)
        else:
            task = partial(self.wallet.sign_transaction, tx, password)
        msg = _('Signing transaction...')
        WaitingDialog(self, msg, task, on_success, on_failure)
    def broadcast_transaction(self, tx: Transaction):
        """Broadcast *tx* on a background thread behind a waiting dialog.

        If a BIP70 payment request is pending, its expiry is checked first and,
        after a successful broadcast, a payment ACK is exchanged with the
        merchant.
        """
        def broadcast_thread():
            # non-GUI thread
            pr = self.payment_request
            if pr and pr.has_expired():
                self.payment_request = None
                return False, _("Invoice has expired")
            try:
                self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
            except TxBroadcastError as e:
                return False, e.get_message_for_gui()
            except BestEffortRequestFailed as e:
                return False, repr(e)
            # success
            txid = tx.txid()
            if pr:
                self.payment_request = None
                refund_address = self.wallet.get_receiving_address()
                coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
                fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
                ack_status, ack_msg = fut.result(timeout=20)
                self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
            return True, txid
        # Capture current TL window; override might be removed on return
        parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
        def broadcast_done(result):
            # GUI thread
            if result:
                success, msg = result
                if success:
                    parent.show_message(_('Payment sent.') + '\n' + msg)
                    self.invoice_list.update()
                else:
                    msg = msg or ''
                    parent.show_error(msg)
        WaitingDialog(self, _('Broadcasting transaction...'),
                      broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, *, funding_sat, node_id):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(
coins=coins,
funding_sat=funding_sat,
node_id=node_id,
fee_est=fee_est)
return make_tx
    def open_channel(self, connect_str, funding_sat, push_amt):
        """Open a Lightning channel: parse the peer, confirm fees, then run the
        channel-establishment flow in a background thread."""
        try:
            node_id, rest = extract_nodeid(connect_str)
        except ConnStringFormatError as e:
            self.show_error(str(e))
            return
        if self.wallet.lnworker.has_conflicting_backup_with(node_id):
            msg = messages.MGS_CONFLICTING_BACKUP_INSTANCE
            if not self.question(msg):
                return
        # use ConfirmTxDialog
        # we need to know the fee before we broadcast, because the txid is required
        make_tx = self.mktx_for_open_channel(funding_sat=funding_sat, node_id=node_id)
        d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
        # disable preview button because the user must not broadcast tx before establishment_flow
        d.preview_button.setEnabled(False)
        cancelled, is_send, password, funding_tx = d.run()
        if not is_send:
            return
        if cancelled:
            return
        # read funding_sat from tx; converts '!' to int value
        funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
        def task():
            return self.wallet.lnworker.open_channel(
                connect_str=connect_str,
                funding_tx=funding_tx,
                funding_sat=funding_sat,
                push_amt_sat=push_amt,
                password=password)
        def on_failure(exc_info):
            type_, e, traceback = exc_info
            self.show_error(_('Could not open channel: {}').format(repr(e)))
        WaitingDialog(self, _('Opening channel...'), task, self.on_open_channel_success, on_failure)
def on_open_channel_success(self, args):
chan, funding_tx = args
lnworker = self.wallet.lnworker
if not chan.has_onchain_backup():
backup_dir = self.config.get_backup_dir()
if backup_dir is not None:
self.show_message(_(f'Your wallet backup has been updated in {backup_dir}'))
else:
data = lnworker.export_channel_backup(chan.channel_id)
help_text = _(messages.MSG_CREATED_NON_RECOVERABLE_CHANNEL)
self.show_qrcode(
data, _('Save channel backup'),
help_text=help_text,
show_copy_text_btn=True)
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
self.show_transaction(funding_tx)
else:
self.show_message(message)
    def query_choice(self, msg, choices):
        """Modal dialog asking the user to pick one of *choices*.

        Returns the selected index, or None if the dialog is dismissed.
        Needed by QtHandler for hardware wallets.
        """
        dialog = WindowModalDialog(self.top_level_window())
        clayout = ChoicesLayout(msg, choices)
        vbox = QVBoxLayout(dialog)
        vbox.addLayout(clayout.layout())
        vbox.addLayout(Buttons(OkButton(dialog)))
        if not dialog.exec_():
            return None
        return clayout.selected_index()
    def lock_amount(self, b: bool) -> None:
        """Freeze/unfreeze the amount field; Max is only usable when unlocked."""
        self.amount_e.setFrozen(b)
        self.max_button.setEnabled(not b)
    def prepare_for_payment_request(self):
        """Freeze the send tab while a BIP70 payment request is being fetched."""
        self.show_send_tab()
        self.payto_e.is_pr = True
        for e in [self.payto_e, self.message_e]:
            e.setFrozen(True)
        self.lock_amount(True)
        self.payto_e.setText(_("please wait..."))
        return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
    def payment_request_ok(self):
        """Fill the send tab from a verified BIP70 payment request (GUI thread)."""
        pr = self.payment_request
        if not pr:
            return
        key = pr.get_id()
        invoice = self.wallet.get_invoice(key)
        # already-paid requests are rejected up front
        if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
            self.show_message("invoice already paid")
            self.do_clear()
            self.payment_request = None
            return
        self.payto_e.is_pr = True
        # green styling for a live request, "expired" styling otherwise
        if not pr.has_expired():
            self.payto_e.setGreen()
        else:
            self.payto_e.setExpired()
        self.payto_e.setText(pr.get_requestor())
        self.amount_e.setAmount(pr.get_amount())
        self.message_e.setText(pr.get_memo())
        # signal to set fee
        self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
    def on_pr(self, request: 'paymentrequest.PaymentRequest'):
        """Callback invoked with a fetched BIP70 payment request.

        The outcome is dispatched via Qt signals — NOTE(review): the signal
        indirection suggests this may fire outside the GUI thread; confirm at
        the call site.
        """
        self.set_onchain(True)
        self.payment_request = request
        if self.payment_request.verify(self.contacts):
            self.payment_request_ok_signal.emit()
        else:
            self.payment_request_error_signal.emit()
    def set_ln_invoice(self, invoice: str):
        """Parse ln invoice, and prepare the send tab for it."""
        try:
            lnaddr = lndecode(invoice)
        except LnInvoiceException as e:
            self.show_error(_("Error parsing Lightning invoice") + f":\n{e}")
            return
        pubkey = bh2u(lnaddr.pubkey.serialize())
        # tag 'd' carries the invoice description; the for/else falls through
        # to '' when no such tag exists
        for k,v in lnaddr.tags:
            if k == 'd':
                description = v
                break
        else:
            description = ''
        self.payto_e.setFrozen(True)
        self.payto_e.setText(pubkey)
        self.payto_e.lightning_invoice = invoice
        self.message_e.setText(description)
        if lnaddr.get_amount_sat() is not None:
            self.amount_e.setAmount(lnaddr.get_amount_sat())
        self.set_onchain(False)
    def set_onchain(self, b):
        """Record whether the send tab is in on-chain mode; Max applies on-chain only."""
        self._is_onchain = b
        self.max_button.setEnabled(b)
    def set_bip21(self, text: str):
        """Parse a BIP21 URI and fill the send tab from it.

        If the URI carries a payment-request reference ('r', or 'name'+'sig'),
        the request is fetched asynchronously via on_pr instead.
        """
        try:
            out = util.parse_URI(text, self.on_pr)
        except InvalidBitcoinURI as e:
            self.show_error(_("Error parsing URI") + f":\n{e}")
            return
        self.payto_URI = out
        r = out.get('r')
        sig = out.get('sig')
        name = out.get('name')
        if r or (name and sig):
            self.prepare_for_payment_request()
            return
        address = out.get('address')
        amount = out.get('amount')
        label = out.get('label')
        message = out.get('message')
        # use label as description (not BIP21 compliant)
        if label and not message:
            message = label
        if address:
            self.payto_e.setText(address)
        if message:
            self.message_e.setText(message)
        if amount:
            self.amount_e.setAmount(amount)
def pay_to_URI(self, text: str):
if not text:
return
# first interpret as lightning invoice
bolt11_invoice = maybe_extract_bolt11_invoice(text)
if bolt11_invoice:
self.set_ln_invoice(bolt11_invoice)
else:
self.set_bip21(text)
# update fiat amount
self.amount_e.textEdited.emit("")
self.show_send_tab()
    def do_clear(self):
        """Reset the send tab to its blank, unfrozen state; hook runs last."""
        self.max_button.setChecked(False)
        self.payment_request = None
        self.payto_URI = None
        self.payto_e.is_pr = False
        self.set_onchain(False)
        for e in [self.payto_e, self.message_e, self.amount_e]:
            e.setText('')
            e.setFrozen(False)
        self.update_status()
        run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
utxos_str = {utxo.prevout.to_str() for utxo in utxos}
self.wallet.set_frozen_state_of_coins(utxos_str, freeze)
self.utxo_list.update()
    def create_list_tab(self, l, toolbar=None):
        """Wrap list view *l* (plus optional toolbar layout) in a tab widget.

        The returned widget's `searchable_list` attribute is what do_search()
        uses to filter the visible tab.
        """
        w = QWidget()
        w.searchable_list = l
        vbox = QVBoxLayout()
        w.setLayout(vbox)
        #vbox.setContentsMargins(0, 0, 0, 0)
        #vbox.setSpacing(0)
        if toolbar:
            vbox.addLayout(toolbar)
        vbox.addWidget(l)
        return w
    def create_addresses_tab(self):
        """Build the Addresses tab, restoring the toolbar's saved visibility."""
        from .address_list import AddressList
        self.address_list = l = AddressList(self)
        toolbar = l.create_toolbar(self.config)
        tab = self.create_list_tab(l, toolbar)
        toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
        l.show_toolbar(toolbar_shown)
        return tab
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
    def remove_address(self, addr):
        """Ask for confirmation, then delete *addr* from the wallet."""
        if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
            return
        try:
            self.wallet.delete_address(addr)
        except UserFacingException as e:
            self.show_error(str(e))
        else:
            self.need_update.set()  # history, addresses, coins
            self.clear_receive_tab()
    def paytomany(self):
        """Switch the send tab to multi-output ("pay to many") mode and explain the format."""
        self.show_send_tab()
        self.payto_e.paytomany()
        msg = '\n'.join([
            _('Enter a list of outputs in the \'Pay to\' field.'),
            _('One output per line.'),
            _('Format: address, amount'),
            _('You may load a CSV file using the file icon.')
        ])
        self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
    def set_contact(self, label, address):
        """Add or update a contact; returns True on success, False for a bad address."""
        if not is_address(address):
            self.show_error(_('Invalid Address'))
            self.contact_list.update()  # Displays original unchanged value
            return False
        self.contacts[address] = ('address', label)
        self.contact_list.update()
        self.history_list.update()
        self.update_completions()
        return True
    def delete_contacts(self, labels):
        """Confirm, then remove the given contact keys and refresh dependent views."""
        if not self.question(_("Remove {} from your list of contacts?")
                             .format(" + ".join(labels))):
            return
        for label in labels:
            self.contacts.pop(label)
        self.history_list.update()
        self.contact_list.update()
        self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value)+ self.base_unit(), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = getSaveFileName(
parent=self,
title=_("Save invoice to file"),
filename=name,
filter="*.bip70",
config=self.config,
)
if not fn:
return
with open(fn, 'wb') as f:
data = f.write(pr.raw)
self.show_message(_('BIP70 invoice saved as {}').format(fn))
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
payhash_e.addCopyButton(self.app)
payhash_e.setReadOnly(True)
vbox.addWidget(payhash_e)
grid.addWidget(payhash_e, 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit(config=self.config)
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d),))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
    def update_console(self):
        """(Re)populate the console namespace with wallet/network objects and
        wrapped CLI commands."""
        console = self.console
        console.history = self.wallet.db.get("qt-console-history", [])
        console.history_index = len(console.history)
        console.updateNamespace({
            'wallet': self.wallet,
            'network': self.network,
            'plugins': self.gui_object.plugins,
            'window': self,
            'config': self.config,
            'electrum': electrum,
            'daemon': self.gui_object.daemon,
            'util': util,
            'bitcoin': bitcoin,
            'lnutil': lnutil,
        })
        c = commands.Commands(
            config=self.config,
            daemon=self.gui_object.daemon,
            network=self.network,
            callback=lambda: self.console.set_json(True))
        methods = {}
        def mkfunc(f, method):
            # bind `method` per iteration; the wallet and a password prompt
            # are injected into every command invocation
            return lambda *args, **kwargs: f(method,
                                             args,
                                             self.password_dialog,
                                             **{**kwargs, 'wallet': self.wallet})
        for m in dir(c):
            if m[0]=='_' or m in ['network','wallet','config','daemon']: continue
            methods[m] = mkfunc(c._run, m)
        console.updateNamespace(methods)
    def create_status_bar(self):
        """Assemble the main window status bar: balance label, search box,
        update notice, and the password/preferences/seed/lightning/network buttons."""
        sb = QStatusBar()
        sb.setFixedHeight(35)
        self.balance_label = QLabel("Loading wallet...")
        self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
        sb.addWidget(self.balance_label)
        self.search_box = QLineEdit()
        self.search_box.textChanged.connect(self.do_search)
        self.search_box.hide()
        sb.addPermanentWidget(self.search_box)
        # hidden until an update becomes available
        self.update_check_button = QPushButton("")
        self.update_check_button.setFlat(True)
        self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
        self.update_check_button.setIcon(read_QIcon("update.png"))
        self.update_check_button.hide()
        sb.addPermanentWidget(self.update_check_button)
        self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog)
        sb.addPermanentWidget(self.password_button)
        sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog))
        self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog)
        sb.addPermanentWidget(self.seed_button)
        self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
        sb.addPermanentWidget(self.lightning_button)
        self.update_lightning_icon()
        # network button only exists when running with a network (not offline)
        self.status_button = None
        if self.network:
            self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
            sb.addPermanentWidget(self.status_button)
        run_hook('create_status_bar', sb)
        self.setStatusBar(sb)
    def create_coincontrol_statusbar(self):
        """Build the (initially hidden) coin-control status bar with a Reset button."""
        self.coincontrol_sb = sb = QStatusBar()
        sb.setSizeGripEnabled(False)
        #sb.setFixedHeight(3 * char_width_in_lineedit())
        sb.setStyleSheet('QStatusBar::item {border: None;} '
                         + ColorScheme.GREEN.as_stylesheet(True))
        self.coincontrol_label = QLabel()
        self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
        self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        sb.addWidget(self.coincontrol_label)
        # Reset clears the coin-control selection in the UTXO list
        clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
        clear_cc_button.setStyleSheet("margin-right: 5px;")
        sb.addPermanentWidget(clear_cc_button)
        sb.setVisible(False)
        return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
    def update_lightning_icon(self):
        """Refresh the status-bar lightning button with gossip sync progress."""
        # hidden when the wallet has no lightning, or when running without
        # a network / without a channel_db (e.g. trampoline-only)
        if not self.wallet.has_lightning():
            self.lightning_button.setVisible(False)
            return
        if self.network is None or self.network.channel_db is None:
            self.lightning_button.setVisible(False)
            return
        self.lightning_button.setVisible(True)
        cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
        # self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
        progress_str = "??%"
        if progress_percent is not None:
            progress_str = f"{progress_percent}%"
        if progress_percent and progress_percent >= 100:
            self.lightning_button.setMaximumWidth(25)
            self.lightning_button.setText('')
            self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
        else:
            self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit())
            self.lightning_button.setText(progress_str)
            self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
                                               "Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
    def change_password_dialog(self):
        """Run the change-password flow, with a hardware-wallet specific dialog
        when storage is encrypted with an xpub-derived password."""
        from electrum_grlc.storage import StorageEncryptionVersion
        if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
            # hardware wallet: the storage password is derived from the device,
            # not typed by the user
            from .password_dialog import ChangePasswordDialogForHW
            d = ChangePasswordDialogForHW(self, self.wallet)
            ok, encrypt_file = d.run()
            if not ok:
                return
            try:
                hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
            except UserCancelled:
                return
            except BaseException as e:
                self.logger.exception('')
                self.show_error(repr(e))
                return
            old_password = hw_dev_pw if self.wallet.has_password() else None
            new_password = hw_dev_pw if encrypt_file else None
        else:
            from .password_dialog import ChangePasswordDialogForSW
            d = ChangePasswordDialogForSW(self, self.wallet)
            ok, old_password, new_password, encrypt_file = d.run()
            if not ok:
                return
        try:
            self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
        except InvalidPassword as e:
            self.show_error(str(e))
            return
        except BaseException:
            self.logger.exception('Failed to update password')
            self.show_error(_('Failed to update password'))
            return
        msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
        self.show_message(msg, title=_("Success"))
        self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
    def new_contact_dialog(self):
        """Modal dialog to enter a new contact (address + name); saves via set_contact."""
        d = WindowModalDialog(self, _("New Contact"))
        vbox = QVBoxLayout(d)
        vbox.addWidget(QLabel(_('New Contact') + ':'))
        grid = QGridLayout()
        line1 = QLineEdit()
        line1.setFixedWidth(32 * char_width_in_lineedit())
        line2 = QLineEdit()
        line2.setFixedWidth(32 * char_width_in_lineedit())
        grid.addWidget(QLabel(_("Address")), 1, 0)
        grid.addWidget(line1, 1, 1)
        grid.addWidget(QLabel(_("Name")), 2, 0)
        grid.addWidget(line2, 2, 1)
        vbox.addLayout(grid)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if d.exec_():
            # set_contact(label, address): line2 is the name, line1 the address
            self.set_contact(line2.text(), line1.text())
    def init_lightning_dialog(self, dialog):
        """Ask the user to create lightning keys, warning when channels would
        not be recoverable from seed for this wallet type."""
        assert not self.wallet.has_lightning()
        if self.wallet.can_have_deterministic_lightning():
            msg = _(
                "Lightning is not enabled because this wallet was created with an old version of Electrum. "
                "Create lightning keys?")
        else:
            msg = _(
                "Warning: this wallet type does not support channel recovery from seed. "
                "You will need to backup your wallet everytime you create a new wallet. "
                "Create lightning keys?")
        if self.question(msg):
            self._init_lightning_dialog(dialog=dialog)
    @protected
    def _init_lightning_dialog(self, *, dialog, password):
        """Create lightning keys (password supplied by @protected), closing *dialog* first."""
        dialog.close()
        self.wallet.init_lightning(password=password)
        self.update_lightning_icon()
        self.show_message(_('Lightning keys have been initialized.'))
    def show_wallet_info(self):
        """Modal "Wallet Information" dialog: type, script type, seed/keystore
        info, lightning status, and per-keystore master public keys."""
        dialog = WindowModalDialog(self, _("Wallet Information"))
        dialog.setMinimumSize(800, 100)
        vbox = QVBoxLayout()
        wallet_type = self.wallet.db.get('wallet_type', '')
        if self.wallet.is_watching_only():
            wallet_type += ' [{}]'.format(_('watching-only'))
        seed_available = _('False')
        if self.wallet.has_seed():
            seed_available = _('True')
            ks = self.wallet.keystore
            assert isinstance(ks, keystore.Deterministic_KeyStore)
            seed_available += f" ({ks.get_seed_type()})"
        keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
        grid = QGridLayout()
        basename = os.path.basename(self.wallet.storage.path)
        grid.addWidget(WWLabel(_("Wallet name")+ ':'), 0, 0)
        grid.addWidget(WWLabel(basename), 0, 1)
        grid.addWidget(WWLabel(_("Wallet type")+ ':'), 1, 0)
        grid.addWidget(WWLabel(wallet_type), 1, 1)
        grid.addWidget(WWLabel(_("Script type")+ ':'), 2, 0)
        grid.addWidget(WWLabel(self.wallet.txin_type), 2, 1)
        grid.addWidget(WWLabel(_("Seed available") + ':'), 3, 0)
        grid.addWidget(WWLabel(str(seed_available)), 3, 1)
        # a single keystore type is shown inline; multisig cosigners are
        # listed in the per-keystore section below instead
        if len(keystore_types) <= 1:
            grid.addWidget(WWLabel(_("Keystore type") + ':'), 4, 0)
            ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
            grid.addWidget(WWLabel(ks_type), 4, 1)
        # lightning
        grid.addWidget(WWLabel(_('Lightning') + ':'), 5, 0)
        from .util import IconLabel
        if self.wallet.has_lightning():
            if self.wallet.lnworker.has_deterministic_node_id():
                grid.addWidget(WWLabel(_('Enabled')), 5, 1)
            else:
                # channels not recoverable from seed: warn, with a reason that
                # depends on whether the seed type could ever support it
                label = IconLabel(text='Enabled, non-recoverable channels')
                label.setIcon(read_QIcon('nocloud'))
                grid.addWidget(label, 5, 1)
                if self.wallet.db.get('seed_type') == 'segwit':
                    msg = _("Your channels cannot be recovered from seed, because they were created with an old version of Electrum. "
                            "This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
                            "If you want this wallet to have recoverable channels, you must close your existing channels and restore this wallet from seed")
                else:
                    msg = _("Your channels cannot be recovered from seed. "
                            "This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
                            "If you want to have recoverable channels, you must create a new wallet with an Electrum seed")
                grid.addWidget(HelpButton(msg), 5, 3)
            grid.addWidget(WWLabel(_('Lightning Node ID:')), 7, 0)
            # TODO: ButtonsLineEdit should have a addQrButton method
            nodeid_text = self.wallet.lnworker.node_keypair.pubkey.hex()
            nodeid_e = ButtonsLineEdit(nodeid_text)
            qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
            nodeid_e.addButton(qr_icon, lambda: self.show_qrcode(nodeid_text, _("Node ID")), _("Show QR Code"))
            nodeid_e.addCopyButton(self.app)
            nodeid_e.setReadOnly(True)
            nodeid_e.setFont(QFont(MONOSPACE_FONT))
            grid.addWidget(nodeid_e, 8, 0, 1, 4)
        else:
            if self.wallet.can_have_lightning():
                grid.addWidget(WWLabel('Not enabled'), 5, 1)
                button = QPushButton(_("Enable"))
                button.pressed.connect(lambda: self.init_lightning_dialog(dialog))
                grid.addWidget(button, 5, 3)
            else:
                grid.addWidget(WWLabel(_("Not available for this wallet.")), 5, 1)
                grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
        vbox.addLayout(grid)
        labels_clayout = None
        if self.wallet.is_deterministic():
            # one stacked page per keystore, with a chooser when there are several
            keystores = self.wallet.get_keystores()
            ks_stack = QStackedWidget()
            def select_ks(index):
                ks_stack.setCurrentIndex(index)
            # only show the combobox in case multiple accounts are available
            if len(keystores) > 1:
                def label(idx, ks):
                    if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
                        return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
                    else:
                        return _("keystore") + f' {idx+1}'
                labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
                on_click = lambda clayout: select_ks(clayout.selected_index())
                labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
                vbox.addLayout(labels_clayout.layout())
            for ks in keystores:
                ks_w = QWidget()
                ks_vbox = QVBoxLayout()
                ks_vbox.setContentsMargins(0, 0, 0, 0)
                ks_w.setLayout(ks_vbox)
                mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
                mpk_text.setMaximumHeight(150)
                mpk_text.addCopyButton(self.app)
                run_hook('show_xpub_button', mpk_text, ks)
                der_path_hbox = QHBoxLayout()
                der_path_hbox.setContentsMargins(0, 0, 0, 0)
                der_path_hbox.addWidget(WWLabel(_("Derivation path") + ':'))
                der_path_text = WWLabel(ks.get_derivation_prefix() or _("unknown"))
                der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
                der_path_hbox.addWidget(der_path_text)
                der_path_hbox.addStretch()
                ks_vbox.addWidget(WWLabel(_("Master Public Key")))
                ks_vbox.addWidget(mpk_text)
                ks_vbox.addLayout(der_path_hbox)
                ks_stack.addWidget(ks_w)
            select_ks(0)
            vbox.addWidget(ks_stack)
        vbox.addStretch(1)
        btn_export_info = run_hook('wallet_info_buttons', self, dialog)
        btn_close = CloseButton(dialog)
        btns = Buttons(btn_export_info, btn_close)
        vbox.addLayout(btns)
        dialog.setLayout(vbox)
        dialog.exec_()
    def remove_wallet(self):
        """Ask for confirmation, then delete the wallet file."""
        if self.question('\n'.join([
                _('Delete wallet file?'),
                "%s"%self.wallet.storage.path,
                _('If your wallet contains funds, make sure you have saved its seed.')])):
            self._delete_wallet()
    @protected
    def _delete_wallet(self, password):
        """Delete the wallet file via the daemon (password prompt handled by
        @protected), then close this window."""
        wallet_path = self.wallet.storage.path
        basename = os.path.basename(wallet_path)
        r = self.gui_object.daemon.delete_wallet(wallet_path)
        self.close()
        if r:
            self.show_error(_("Wallet removed: {}").format(basename))
        else:
            self.show_error(_("Wallet file not found: {}").format(basename))
    @protected
    def show_seed_dialog(self, password):
        """Show the wallet seed (and passphrase) after a password prompt."""
        if not self.wallet.has_seed():
            self.show_message(_('This wallet has no seed'))
            return
        keystore = self.wallet.get_keystore()
        try:
            seed = keystore.get_seed(password)
            passphrase = keystore.get_passphrase(password)
        except BaseException as e:
            self.show_error(repr(e))
            return
        from .seed_dialog import SeedDialog
        d = SeedDialog(self, seed, passphrase, config=self.config)
        d.exec_()
    def show_qrcode(self, data, title = _("QR code"), parent=None, *,
                    help_text=None, show_copy_text_btn=False):
        """Show *data* as a QR code in a modal dialog; no-op for empty data."""
        if not data:
            return
        d = QRDialog(
            data=data,
            parent=parent or self,
            title=title,
            help_text=help_text,
            show_copy_text_btn=show_copy_text_btn,
            config=self.config,
        )
        d.exec_()
    @protected
    def show_private_key(self, address, password):
        """Show the private key for *address* in a modal dialog (password-gated)."""
        if not address:
            return
        try:
            pk = self.wallet.export_private_key(address, password)
        except Exception as e:
            self.logger.exception('')
            self.show_message(repr(e))
            return
        xtype = bitcoin.deserialize_privkey(pk)[0]
        d = WindowModalDialog(self, _("Private key"))
        d.setMinimumSize(600, 150)
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel(_("Address") + ': ' + address))
        vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
        vbox.addWidget(QLabel(_("Private key") + ':'))
        keys_e = ShowQRTextEdit(text=pk, config=self.config)
        keys_e.addCopyButton(self.app)
        vbox.addWidget(keys_e)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.setLayout(vbox)
        d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
    @protected
    def do_sign(self, address, message, signature, password):
        """Sign *message* with the key for *address*; writes base64 into the
        signature widget. Arguments are the dialog's Qt widgets."""
        address = address.text().strip()
        message = message.toPlainText().strip()
        if not bitcoin.is_address(address):
            self.show_message(_('Invalid Garlicoin address.'))
            return
        if self.wallet.is_watching_only():
            self.show_message(_('This is a watching-only wallet.'))
            return
        if not self.wallet.is_mine(address):
            self.show_message(_('Address not in wallet.'))
            return
        txin_type = self.wallet.get_txin_type(address)
        # message signing only defined for address types with a unique pubkey
        if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
            self.show_message(_('Cannot sign messages with this type of address:') + \
                              ' ' + txin_type + '\n\n' + self.msg_sign)
            return
        task = partial(self.wallet.sign_message, address, message, password)
        def show_signed_message(sig):
            try:
                signature.setText(base64.b64encode(sig).decode('ascii'))
            except RuntimeError:
                # (signature) wrapped C/C++ object has been deleted
                pass
        self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Garlicoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
    def sign_verify_message(self, address=''):
        """Modal sign/verify-message dialog; *address* pre-fills the address field."""
        d = WindowModalDialog(self, _('Sign/verify Message'))
        d.setMinimumSize(610, 290)
        layout = QGridLayout(d)
        message_e = QTextEdit()
        message_e.setAcceptRichText(False)
        layout.addWidget(QLabel(_('Message')), 1, 0)
        layout.addWidget(message_e, 1, 1)
        layout.setRowStretch(2,3)
        address_e = QLineEdit()
        address_e.setText(address)
        layout.addWidget(QLabel(_('Address')), 2, 0)
        layout.addWidget(address_e, 2, 1)
        signature_e = QTextEdit()
        signature_e.setAcceptRichText(False)
        layout.addWidget(QLabel(_('Signature')), 3, 0)
        layout.addWidget(signature_e, 3, 1)
        layout.setRowStretch(3,1)
        hbox = QHBoxLayout()
        b = QPushButton(_("Sign"))
        b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
        hbox.addWidget(b)
        b = QPushButton(_("Verify"))
        b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
        hbox.addWidget(b)
        b = QPushButton(_("Close"))
        b.clicked.connect(d.accept)
        hbox.addWidget(b)
        layout.addLayout(hbox, 4, 1)
        d.exec_()
    @protected
    def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
        """Decrypt the ciphertext widget's contents on the wallet thread and
        place the plaintext in the message widget."""
        if self.wallet.is_watching_only():
            self.show_message(_('This is a watching-only wallet.'))
            return
        cyphertext = encrypted_e.toPlainText()
        task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
        def setText(text):
            try:
                message_e.setText(text.decode('utf-8'))
            except RuntimeError:
                # (message_e) wrapped C/C++ object has been deleted
                pass
        self.wallet.thread.add(task, on_success=setText)
    def do_encrypt(self, message_e, pubkey_e, encrypted_e):
        """ECIES-encrypt the message widget's text to the given public key and
        place the result in the encrypted widget."""
        message = message_e.toPlainText()
        message = message.encode('utf-8')
        try:
            public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
        except BaseException as e:
            self.logger.exception('Invalid Public key')
            self.show_warning(_('Invalid Public key'))
            return
        encrypted = public_key.encrypt_message(message)
        encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
    """Open a modal dialog to encrypt/decrypt a message with a wallet key.

    address: optional address whose public key pre-fills the pubkey field.
    Actual work is delegated to self.do_encrypt / self.do_decrypt.
    """
    d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
    d.setMinimumSize(610, 490)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    message_e.setAcceptRichText(False)  # plaintext widget; rich text would alter the message bytes
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    pubkey_e = QLineEdit()
    if address:
        pubkey = self.wallet.get_public_key(address)
        pubkey_e.setText(pubkey)
    layout.addWidget(QLabel(_('Public key')), 2, 0)
    layout.addWidget(pubkey_e, 2, 1)
    encrypted_e = QTextEdit()
    encrypted_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Encrypted')), 3, 0)
    layout.addWidget(encrypted_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    b = QPushButton(_("Encrypt"))
    b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Decrypt"))
    b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()  # modal; blocks until closed
def password_dialog(self, msg=None, parent=None):
    """Prompt the user for a password and return the dialog's result."""
    from .password_dialog import PasswordDialog
    parent = parent or self  # default to this window as the dialog parent
    return PasswordDialog(parent, msg).run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
    """Parse serialized transaction text/bytes; on failure show a critical
    error dialog and return None."""
    from electrum_grlc.transaction import tx_from_any
    try:
        parsed_tx = tx_from_any(data)
    except BaseException as exc:
        self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(exc))
        return None
    return parsed_tx
def import_channel_backup(self, encrypted: str):
    """After user confirmation, hand the encrypted channel-backup blob to
    the Lightning worker; shows an error dialog on failure."""
    if not self.question('Import channel backup?'):
        return
    try:
        self.wallet.lnworker.import_channel_backup(encrypted)
    except Exception as exc:
        self.show_error("failed to import backup" + '\n' + str(exc))
        return
def read_tx_from_qrcode(self):
    """Scan a QR code and dispatch on its payload: BIP21 URI -> payment
    flow, 'channel_backup:' -> backup import, otherwise treat the data as
    a serialized transaction and display it."""
    def cb(success: bool, error: str, data):
        # invoked by scan_qrcode once scanning finishes
        if not success:
            if error:
                self.show_error(error)
            return
        if not data:
            return
        # if the user scanned a bitcoin URI
        if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
            self.pay_to_URI(data)
            return
        if data.lower().startswith('channel_backup:'):
            self.import_channel_backup(data)
            return
        # else if the user scanned an offline signed tx
        tx = self.tx_from_text(data)
        if not tx:
            return
        self.show_transaction(tx)
    scan_qrcode(parent=self.top_level_window(), config=self.config, callback=cb)
def read_tx_from_file(self) -> Optional[Transaction]:
    """Let the user pick a transaction file and parse it.

    Returns the parsed transaction, or None on cancel/error (an error
    dialog is shown on read failure).
    """
    fileName = getOpenFileName(
        parent=self,
        title=_("Select your transaction file"),
        filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
        config=self.config,
    )
    if not fileName:
        return
    try:
        # binary mode: the file may contain raw tx bytes rather than text
        with open(fileName, "rb") as f:
            file_content = f.read()  # type: Union[str, bytes]
    except (ValueError, IOError, os.error) as reason:
        self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
                           title=_("Unable to read file or no transaction found"))
        return
    return self.tx_from_text(file_content)
def do_process_from_text(self):
    """Ask the user to paste a raw transaction; show it if it parses."""
    raw = text_dialog(
        parent=self,
        title=_('Input raw transaction'),
        header_layout=_("Transaction:"),
        ok_label=_("Load transaction"),
        config=self.config,
    )
    if not raw:
        return
    parsed = self.tx_from_text(raw)
    if parsed:
        self.show_transaction(parsed)
def do_process_from_text_channel_backup(self):
    """Ask the user to paste a channel backup; import it if it carries
    the expected 'channel_backup:' prefix."""
    pasted = text_dialog(
        parent=self,
        title=_('Input channel backup'),
        header_layout=_("Channel Backup:"),
        ok_label=_("Load backup"),
        config=self.config,
    )
    if not pasted:
        return
    if pasted.startswith('channel_backup:'):
        self.import_channel_backup(pasted)
def do_process_from_file(self):
    """Load a transaction from a user-chosen file and display it."""
    loaded = self.read_tx_from_file()
    if loaded:
        self.show_transaction(loaded)
def do_process_from_txid(self):
    """Prompt for a txid, fetch the raw transaction over the network, and
    display it."""
    from electrum_grlc import transaction
    txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
    if ok and txid:
        txid = str(txid).strip()
        raw_tx = self._fetch_tx_from_network(txid)
        if not raw_tx:
            # error dialog already shown by _fetch_tx_from_network
            return
        tx = transaction.Transaction(raw_tx)
        self.show_transaction(tx)
def _fetch_tx_from_network(self, txid: str) -> Optional[str]:
    """Fetch the raw transaction for *txid* from the network.

    Blocks the calling thread for up to 10 seconds. Returns the raw tx,
    or None (after showing a message) when offline or on failure.
    """
    if not self.network:
        self.show_message(_("You are offline."))
        return
    try:
        raw_tx = self.network.run_from_another_thread(
            self.network.get_transaction(txid, timeout=10))
    except UntrustedServerReturnedError as e:
        # server gave a malformed/untrusted answer: show its sanitized message
        self.logger.info(f"Error getting transaction from network: {repr(e)}")
        self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
        return
    except Exception as e:
        self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
        return
    return raw_tx
@protected
def export_privkeys_dialog(self, password):
    """Modal dialog that derives and exports ALL wallet private keys.

    Keys are derived one-by-one on a background thread and progress is
    reported to the dialog via Qt signals; once complete, the user can
    export them to a CSV or JSON file.
    # NOTE(review): @protected (defined elsewhere) presumably supplies
    # *password* after prompting — confirm at its definition.
    """
    if self.wallet.is_watching_only():
        self.show_message(_("This is a watching-only wallet"))
        return
    if isinstance(self.wallet, Multisig_Wallet):
        # a single cosigner's keys are not a usable backup of a multisig wallet
        self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                          _('It cannot be "backed up" by simply exporting these private keys.'))
    d = WindowModalDialog(self, _('Private keys'))
    d.setMinimumSize(980, 300)
    vbox = QVBoxLayout(d)
    msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                          _("Exposing a single private key can compromise your entire wallet!"),
                          _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
    vbox.addWidget(QLabel(msg))
    e = QTextEdit()
    e.setReadOnly(True)
    vbox.addWidget(e)
    defaultname = 'electrum-grlc-private-keys.csv'
    select_msg = _('Select file to export your private keys to')
    hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
    vbox.addLayout(hbox)
    b = OkButton(d, _('Export'))
    b.setEnabled(False)  # enabled only once all keys are derived
    vbox.addLayout(Buttons(CancelButton(d), b))
    private_keys = {}
    addresses = self.wallet.get_addresses()
    done = False
    cancelled = False
    def privkeys_thread():
        # background worker: derive one key at a time; the sleep keeps the
        # GUI responsive and throttles signal emission
        for addr in addresses:
            time.sleep(0.1)
            if done or cancelled:
                break
            privkey = self.wallet.export_private_key(addr, password)
            private_keys[addr] = privkey
            self.computing_privkeys_signal.emit()
        if not cancelled:
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.emit()
    def show_privkeys():
        # runs on the GUI thread once all keys are available
        s = "\n".join(map(lambda x: x[0] + "\t"+ x[1], private_keys.items()))
        e.setText(s)
        b.setEnabled(True)
        self.show_privkeys_signal.disconnect()
        nonlocal done
        done = True
    def on_dialog_closed(*args):
        nonlocal done
        nonlocal cancelled
        if not done:
            # user closed the dialog early: stop the worker, detach signals
            cancelled = True
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.disconnect()
    self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
    self.show_privkeys_signal.connect(show_privkeys)
    d.finished.connect(on_dialog_closed)
    threading.Thread(target=privkeys_thread).start()
    if not d.exec_():
        # dialog cancelled
        done = True
        return
    filename = filename_e.text()
    if not filename:
        return
    try:
        self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
    except (IOError, os.error) as reason:
        txt = "\n".join([
            _("Electrum was unable to produce a private key-export."),
            str(reason)
        ])
        self.show_critical(txt, title=_("Unable to create csv"))
    except Exception as e:
        self.show_message(repr(e))
        return
    self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
    """Write the address -> private key mapping *pklist* to *fileName*.

    The file is chmod'ed to 0o600 right after creation so the exported
    secrets are owner-readable only.  CSV output right-aligns the address
    into a 34-character column; otherwise pretty-printed JSON is written.
    """
    with open(fileName, "w+") as out:
        os.chmod(fileName, 0o600)
        if not is_csv:
            out.write(json.dumps(pklist, indent = 4))
            return
        writer = csv.writer(out)
        writer.writerow(["address", "private_key"])
        for address, secret in pklist.items():
            writer.writerow(["%34s" % address, secret])
def do_import_labels(self):
    """Import wallet labels via the shared metadata-import dialog."""
    def on_import():
        # refresh views so imported labels become visible
        self.need_update.set()
    import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
def do_export_labels(self):
    """Export wallet labels via the shared metadata-export dialog."""
    export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
    """Import invoices and refresh the invoice list on success."""
    import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
    """Export invoices via the shared metadata-export dialog."""
    export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def import_requests(self):
    """Import payment requests and refresh the request list on success."""
    import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
    """Export payment requests via the shared metadata-export dialog."""
    export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
    """Import contacts and refresh the contact list on success."""
    import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
    """Export contacts via the shared metadata-export dialog."""
    export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
    """Dialog to sweep funds from raw private keys into a wallet address.

    Validates the keys and destination live; on confirm, fetches the
    keys' UTXOs over the network and opens the send dialog.
    """
    d = WindowModalDialog(self, title=_('Sweep private keys'))
    d.setMinimumSize(600, 300)
    vbox = QVBoxLayout(d)
    hbox_top = QHBoxLayout()
    hbox_top.addWidget(QLabel(_("Enter private keys:")))
    hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    vbox.addLayout(hbox_top)
    keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
    keys_e.setTabChangesFocus(True)
    vbox.addWidget(keys_e)
    # destination candidates: prefer unused addresses, fall back gracefully
    addresses = self.wallet.get_unused_addresses()
    if not addresses:
        try:
            addresses = self.wallet.get_receiving_addresses()
        except AttributeError:
            addresses = self.wallet.get_addresses()
    h, address_e = address_field(addresses)
    vbox.addLayout(h)
    vbox.addStretch(1)
    button = OkButton(d, _('Sweep'))
    vbox.addLayout(Buttons(CancelButton(d), button))
    button.setEnabled(False)  # enabled only once address and keys validate
    def get_address():
        # return the destination address, or None if it is not valid
        addr = str(address_e.text()).strip()
        if bitcoin.is_address(addr):
            return addr
    def get_pk(*, raise_on_error=False):
        text = str(keys_e.toPlainText())
        return keystore.get_private_keys(text, raise_on_error=raise_on_error)
    def on_edit():
        # live-validate both fields; surface key parse errors via tooltip
        valid_privkeys = False
        try:
            valid_privkeys = get_pk(raise_on_error=True) is not None
        except Exception as e:
            button.setToolTip(f'{_("Error")}: {repr(e)}')
        else:
            button.setToolTip('')
        button.setEnabled(get_address() is not None and valid_privkeys)
    on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
    keys_e.textChanged.connect(on_edit)
    address_e.textChanged.connect(on_edit)
    address_e.textChanged.connect(on_address)
    on_address(str(address_e.text()))
    if not d.exec_():
        return
    # user pressed "sweep"
    addr = get_address()
    try:
        self.wallet.check_address_for_corruption(addr)
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        raise
    privkeys = get_pk()
    def on_success(result):
        coins, keypairs = result
        # '!' means "send max": sweep the full value to the destination
        outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
        self.warn_if_watching_only()
        self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
    def on_failure(exc_info):
        self.on_error(exc_info)
    msg = _('Preparing sweep transaction...')
    task = lambda: self.network.run_from_another_thread(
        sweep_preparations(privkeys, self.network))
    WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
    """Prompt for a whitespace-separated list of entries, feed them to
    *func*, and report which entries were accepted or rejected.

    func: callable taking the list of entries and returning a
    (good_inputs, bad_inputs) pair, where bad_inputs is an iterable of
    (entry, error_message) tuples.
    """
    text = text_dialog(
        parent=self,
        title=title,
        header_layout=header_layout,
        ok_label=_('Import'),
        allow_multi=True,
        config=self.config,
    )
    if not text:
        return
    keys = str(text).split()
    good_inputs, bad_inputs = func(keys)
    if good_inputs:
        msg = '\n'.join(good_inputs[:10])
        if len(good_inputs) > 10: msg += '\n...'
        self.show_message(_("The following addresses were added")
                          + f' ({len(good_inputs)}):\n' + msg)
    if bad_inputs:
        # fix: the per-entry loop variable previously shadowed `msg`
        msg = "\n".join(f"{key[:10]}... ({err})" for key, err in bad_inputs[:10])
        if len(bad_inputs) > 10: msg += '\n...'
        self.show_error(_("The following inputs could not be imported")
                        + f' ({len(bad_inputs)}):\n' + msg)
    self.address_list.update()
    self.history_list.update()
def import_addresses(self):
    """Prompt for addresses to add to a watch-only/importable wallet."""
    if not self.wallet.can_import_address():
        return
    title, msg = _('Import addresses'), _("Enter addresses")+':'
    self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
    """Prompt for WIF private keys and import them into the wallet.

    # NOTE(review): @protected (defined elsewhere) presumably supplies
    # *password* after prompting — confirm at its definition.
    """
    if not self.wallet.can_import_privkey():
        return
    title = _('Import private keys')
    header_layout = QHBoxLayout()
    header_layout.addWidget(QLabel(_("Enter private keys")+':'))
    header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
    """Show/hide the fiat amount fields and refresh fiat-dependent views
    after a fiat-exchange settings change."""
    b = self.fx and self.fx.is_enabled()
    self.fiat_send_e.setVisible(b)
    self.fiat_receive_e.setVisible(b)
    self.history_list.update()
    self.address_list.refresh_headers()  # fiat column header may (dis)appear
    self.address_list.update()
    self.update_status()
def settings_dialog(self):
    """Open the modal settings dialog, then apply post-close side effects
    (fx refresh, plugin hook) and warn if a restart is required."""
    from .settings_dialog import SettingsDialog
    d = SettingsDialog(self, self.config)
    # let the dialog reflect alias resolution results while it is open
    self.alias_received_signal.connect(d.set_alias_color)
    d.exec_()
    self.alias_received_signal.disconnect(d.set_alias_color)
    if self.fx:
        self.fx.trigger_update()
    run_hook('close_settings_dialog')
    if d.need_restart:
        self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
    """Qt close hook: tear down the window, then accept the close.

    Note that closeEvent is NOT called if the user quits with Ctrl-C.
    """
    self.clean_up()
    event.accept()
def clean_up(self):
    """Tear down the window: stop background threads, persist window
    geometry and console history, close child windows, and detach from
    the gui_object.  Idempotent via the _cleaned_up flag."""
    if self._cleaned_up:
        return
    self._cleaned_up = True
    if self.wallet.thread:
        self.wallet.thread.stop()
        self.wallet.thread = None
    util.unregister_callback(self.on_network)
    self.config.set_key("is_maximized", self.isMaximized())
    if not self.isMaximized():
        # remember un-maximized position/size for the next start
        g = self.geometry()
        self.wallet.db.put("winpos-qt", [g.left(),g.top(),
                                         g.width(),g.height()])
    self.wallet.db.put("qt-console-history", self.console.history[-50:])
    if self.qr_window:
        self.qr_window.close()
    self.close_wallet()
    if self._update_check_thread:
        self._update_check_thread.exit()
        self._update_check_thread.wait()
    if self.tray:
        self.tray = None
    self.gui_object.timer.timeout.disconnect(self.timer_actions)
    self.gui_object.close_window(self)
def plugins_dialog(self):
    """Modal dialog listing available plugins with enable/disable
    checkboxes and, where applicable, per-plugin settings widgets."""
    self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
    plugins = self.gui_object.plugins
    vbox = QVBoxLayout(d)
    # plugins
    scroll = QScrollArea()
    scroll.setEnabled(True)
    scroll.setWidgetResizable(True)
    scroll.setMinimumSize(400,250)
    vbox.addWidget(scroll)
    w = QWidget()
    scroll.setWidget(w)
    w.setMinimumHeight(plugins.count() * 35)
    grid = QGridLayout()
    grid.setColumnStretch(0,1)
    w.setLayout(grid)
    settings_widgets = {}
    def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
        # keep the settings widget in column 1 in sync with enabled state
        widget = settings_widgets.get(name)  # type: Optional[QWidget]
        if widget and not p:
            # plugin got disabled, rm widget
            grid.removeWidget(widget)
            widget.setParent(None)
            settings_widgets.pop(name)
        elif widget is None and p and p.requires_settings() and p.is_enabled():
            # plugin got enabled, add widget
            widget = settings_widgets[name] = p.settings_widget(d)
            grid.addWidget(widget, i, 1)
    def do_toggle(cb, name, i):
        p = plugins.toggle(name)
        cb.setChecked(bool(p))
        enable_settings_widget(p, name, i)
    # note: all enabled plugins will receive this hook:
    run_hook('init_qt', self.gui_object)
    for i, descr in enumerate(plugins.descriptions.values()):
        full_name = descr['__name__']
        prefix, _separator, name = full_name.rpartition('.')
        p = plugins.get(name)
        if descr.get('registers_keystore'):
            # keystore (hardware wallet) plugins are managed elsewhere
            continue
        try:
            cb = QCheckBox(descr['fullname'])
            plugin_is_loaded = p is not None
            cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                          or plugin_is_loaded and p.can_user_disable())
            cb.setEnabled(cb_enabled)
            cb.setChecked(plugin_is_loaded and p.is_enabled())
            grid.addWidget(cb, i, 0)
            enable_settings_widget(p, name, i)
            cb.clicked.connect(partial(do_toggle, cb, name, i))
            msg = descr['description']
            if descr.get('requires'):
                msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
            grid.addWidget(HelpButton(msg), i, 2)
        except Exception:
            # one broken plugin description must not break the whole dialog
            self.logger.exception(f"cannot display plugin {name}")
    grid.setRowStretch(len(plugins.descriptions.values()), 1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.exec_()
def cpfp_dialog(self, parent_tx: Transaction) -> None:
    """Open the Child-Pays-For-Parent dialog for *parent_tx* and, if
    confirmed, build and show the fee-bumping child transaction."""
    new_tx = self.wallet.cpfp(parent_tx, 0)  # fee=0 probe just to measure the child's size
    total_size = parent_tx.estimated_size() + new_tx.estimated_size()
    parent_txid = parent_tx.txid()
    assert parent_txid
    parent_fee = self.wallet.get_tx_fee(parent_txid)
    if parent_fee is None:
        self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
        return
    d = WindowModalDialog(self, _('Child Pays for Parent'))
    vbox = QVBoxLayout(d)
    msg = _(
        "A CPFP is a transaction that sends an unconfirmed output back to "
        "yourself, with a high fee. The goal is to have miners confirm "
        "the parent transaction in order to get the fee attached to the "
        "child transaction.")
    vbox.addWidget(WWLabel(msg))
    msg2 = _("The proposed fee is computed using your "
             "fee/kB settings, applied to the total size of both child and "
             "parent transactions. After you broadcast a CPFP transaction, "
             "it is normal to see a new unconfirmed transaction in your history.")
    vbox.addWidget(WWLabel(msg2))
    grid = QGridLayout()
    grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
    grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
    max_fee = new_tx.output_value()  # at most the child's entire input value can go to fee
    grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
    grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
    output_amount = QLabel('')
    grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
    grid.addWidget(output_amount, 2, 1)
    fee_e = BTCAmountEdit(self.get_decimal_point)
    combined_fee = QLabel('')
    combined_feerate = QLabel('')
    def on_fee_edit(x):
        # recompute displayed output amount and combined fee/feerate
        fee_for_child = fee_e.get_amount()
        if fee_for_child is None:
            return
        out_amt = max_fee - fee_for_child
        out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
        output_amount.setText(out_amt_str)
        comb_fee = parent_fee + fee_for_child
        comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
        combined_fee.setText(comb_fee_str)
        comb_feerate = comb_fee / total_size * 1000
        comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
        combined_feerate.setText(comb_feerate_str)
    fee_e.textChanged.connect(on_fee_edit)
    def get_child_fee_from_total_feerate(fee_per_kb: Optional[int]) -> Optional[int]:
        # child fee = target combined fee minus what the parent already pays,
        # clamped to [total_size, max_fee]
        if fee_per_kb is None:
            return None
        fee = fee_per_kb * total_size / 1000 - parent_fee
        fee = round(fee)
        fee = min(max_fee, fee)
        fee = max(total_size, fee)  # pay at least 1 sat/byte for combined size
        return fee
    suggested_feerate = self.config.fee_per_kb()
    fee = get_child_fee_from_total_feerate(suggested_feerate)
    fee_e.setAmount(fee)
    grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
    grid.addWidget(fee_e, 3, 1)
    def on_rate(dyn, pos, fee_rate):
        fee = get_child_fee_from_total_feerate(fee_rate)
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    fee_combo = FeeComboBox(fee_slider)
    fee_slider.update()
    grid.addWidget(fee_slider, 4, 1)
    grid.addWidget(fee_combo, 4, 2)
    grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
    grid.addWidget(combined_fee, 5, 1)
    grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
    grid.addWidget(combined_feerate, 6, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    fee = fee_e.get_amount()
    if fee is None:
        return  # fee left empty, treat is as "cancel"
    if fee > max_fee:
        self.show_error(_('Max fee exceeded'))
        return
    try:
        new_tx = self.wallet.cpfp(parent_tx, fee)
    except CannotCPFP as e:
        self.show_error(str(e))
        return
    self.show_transaction(new_tx)
def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool:
    """Populate *tx* with input/output metadata from the wallet and the
    network, blocking the GUI with a waiting dialog meanwhile.

    Returns whether successful; shows an error dialog on network failure.
    """
    # note side-effect: tx is being mutated
    assert isinstance(tx, PartialTransaction)
    try:
        # note: this might download input utxos over network
        BlockingWaitingDialog(
            self,
            _("Adding info to tx, from wallet and network..."),
            lambda: tx.add_info_from_wallet(self.wallet, ignore_network_issues=False),
        )
    except NetworkException as e:
        self.show_error(repr(e))
        return False
    return True
def bump_fee_dialog(self, tx: Transaction):
    """Open the fee-bump (RBF) dialog for *tx*."""
    original_txid = tx.txid()
    if not isinstance(tx, PartialTransaction):
        tx = PartialTransaction.from_tx(tx)
    if not self._add_info_to_tx_from_wallet_and_network(tx):
        return
    BumpFeeDialog(main_window=self, tx=tx, txid=original_txid).run()
def dscancel_dialog(self, tx: Transaction):
    """Open the double-spend/cancel dialog for *tx*."""
    original_txid = tx.txid()
    if not isinstance(tx, PartialTransaction):
        tx = PartialTransaction.from_tx(tx)
    if not self._add_info_to_tx_from_wallet_and_network(tx):
        return
    DSCancelDialog(main_window=self, tx=tx, txid=original_txid).run()
def save_transaction_into_wallet(self, tx: Transaction):
    """Add *tx* to the wallet history as an offline transaction.

    Returns True on success, False if it conflicts with history or
    cannot be added (an error dialog is shown in those cases).
    """
    win = self.top_level_window()
    try:
        if not self.wallet.add_transaction(tx):
            win.show_error(_("Transaction could not be saved.") + "\n" +
                           _("It conflicts with current history."))
            return False
    except AddTransactionException as e:
        win.show_error(e)
        return False
    else:
        self.wallet.save_db()
        # need to update at least: history_list, utxo_list, address_list
        self.need_update.set()
        msg = (_("Transaction added to wallet history.") + '\n\n' +
               _("Note: this is an offline transaction, if you want the network "
                 "to see it, you need to broadcast it."))
        win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
        return True
def show_cert_mismatch_error(self):
    """Show the --serverfingerprint certificate-mismatch error once,
    then close this window."""
    if self.showing_cert_mismatch_error:
        return  # dialog already up; avoid stacking duplicates
    self.showing_cert_mismatch_error = True
    self.show_critical(title=_("Certificate mismatch"),
                       msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
                       _("Electrum will now exit."))
    self.showing_cert_mismatch_error = False
    self.close()
|
flask_wrapper.py | from flask import Flask, jsonify, request
from threading import Thread
from .server import Server
from .errors import HoistExistsError
from .error import Error
#from .proxy.proxy import Proxy
from typing import Callable
from .version import __version__
# Pre-made status/control page served at GET /hoist by add_hoist().
# add_hoist() substitutes '{{ version }}' (and '{{ serverUrl }}') via
# str.replace; note the template contains no '{{ serverUrl }}' placeholder —
# the embedded JS reads window.location.href instead, so that replace is a
# no-op as written.
HTML: str = '''
<!DOCTYPE html>
<html lang="en">
<head>
<title>Hoist V{{ version }}</title>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<link
rel="stylesheet"
href="https://maxcdn.bootstrapcdn.com/bootstrap/4.5.2/css/bootstrap.min.css"
/>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.5.2/js/bootstrap.min.js"></script>
</head>
<body>
<style text="text/css">
.nav-link {
font-size: 18px;
color: black;
}
.nav-link:hover {
color: #32cd32;
}
</style>
<nav class="navbar navbar-expand-sm bg-light justify-content-center">
<ul class="navbar-nav">
<li class="nav-item">
<a class="nav-link" href="https://github.com/ZeroIntensity/Hoist"
>GitHub</a
>
</li>
<li class="nav-item">
<a class="nav-link" href="https://pypi.org/project/hoist3">PyPI</a>
</li>
<li class="nav-item">
<a class="nav-link" href="https://discord.gg/W9QwbpbUbJ">Discord</a>
</li>
</ul>
</nav>
<br />
<div class="container-fluid text-center" style="margin-top: 10%">
<h3 class="display-4" style="font-size: 60px">Hoist V{{ version }}</h3>
<p style="font-size: 20px">App running successfully!</p>
</div>
<script type="text/javascript">
const serverUrl = window.location.href;
var auth = "";
async function httpPost(url) {
return await fetch(url, {
method: "post",
headers: {
Accept: "application/json",
"Content-Type": "application/json",
},
}).then(response => {
return response.json();
});
}
async function clicked() {
const input = document.getElementById("message").value;
const url = `${serverUrl}/send?msg=${input}&auth=${auth}`;
var resp = httpPost(url, input);
resp.then(json => {
var element = document.getElementById("response");
if (json.hasOwnProperty("ERROR")) {
element.innerHTML = `<div class="container">
<div class="alert alert-danger alert-dismissible">
<button type="button" class="close" data-dismiss="alert">×</button>
<strong>Error</strong> Server responded with error "${json["ERROR"]}"
</div></div>
`;
} else {
element.innerHTML = `
<div class="container">
<div class="alert alert-success alert-dismissible">
<button type="button" class="close" data-dismiss="alert">×</button>
<strong>Response</strong> ${json["RESPONSE"]}
</div></div>
`;
}
});
return false;
}
</script>
</body>
<div class="container">
<p style="font-size: 20px">Send Message To Server</p>
<form>
<div class="form-group">
<input
type="message"
class="form-control"
placeholder="Enter message..."
name="message"
id="message"
/>
</div>
<button
onclick="clicked(); return false;"
type="submit"
class="btn btn-success"
>
Send
</button>
</form>
<div class="container" style="margin-top: 4%" id="response"></div>
</div>
</html>
'''
class FlaskWrapper:
    """Wrapper for Flask: creates apps, mounts hoist endpoints, runs them."""

    @staticmethod
    def make_server() -> Flask:
        """Generate a flask server."""
        app: Flask = Flask(__name__)
        return app

    def add_hoist(self, app: Flask, handle_errors: bool = True, auth: list = None, premade_pages: bool = True) -> Flask:
        """Function for setting up hoist on an app.

        auth: list of accepted auth tokens; defaults to [""] (no token).
        Raises HoistExistsError if hoist was already set up on *app*.
        """
        if auth is None:
            # default resolved here to avoid a shared mutable default argument
            auth = [""]
        if hasattr(app, 'HOIST_INTERNALSERVER'):
            raise HoistExistsError('hoist is already set up on app')
        app.HOIST_INTERNALSERVER = Server(app, handle_errors)

        @app.route('/hoist/send', methods=['POST'])
        def hoist_send() -> str:
            return self.get_response(app, auth, app.HOIST_INTERNALSERVER._received, 'msg')

        if premade_pages:
            @app.route('/hoist', methods=['POST', 'GET'])
            def hoist_home() -> str:
                if request.method == 'POST':
                    return jsonify({'RESPONSE': f'Version {__version__}'})
                # done with html instead of flask.render_template so i dont have to touch the apps template_folder property
                html = HTML.replace('{{ version }}', __version__).replace('{{ serverUrl }}', request.base_url)
                return html

        return app

    @staticmethod
    def get_response(app: Flask, auth: list, callback: Callable, argument: str) -> str:
        """Function for getting the response of a request.

        Returns a JSON response; error cases return (response, status) pairs
        (401 unauthorized, 500 callback failure, or the Error's own code).
        """
        ARG: str = request.args.get(argument)
        TOKEN = request.args.get('auth')
        if TOKEN not in auth:
            return jsonify({'ERROR': 'unauthorized'}), 401
        resp, success = callback(ARG)
        if isinstance(resp, Error):
            return jsonify({'ERROR': resp._message}), resp._code
        if not success:
            return jsonify({'ERROR': resp}), 500
        else:
            return jsonify({'RESPONSE': resp})

    def add_proxy(self, app: Flask, handle_errors: bool = True, auth: list = None) -> Flask:
        """Function for setting up a hoist proxy on an app.

        Not implemented yet; unconditionally raises NotImplementedError.
        """
        # fix: must raise NotImplementedError — raising the NotImplemented
        # singleton is itself a TypeError at runtime.
        raise NotImplementedError('proxys are not yet supported')
        # unreachable until proxies are implemented (HoistProxy import is
        # commented out at module top):
        if auth is None:
            auth = [""]
        if hasattr(app, 'HOIST_INTERNALPROXY'):
            raise HoistExistsError('hoist is already set up on app')
        app.HOIST_INTERNALPROXY = HoistProxy(app, handle_errors)

        @app.route('/hoist/proxy/connect', methods=['POST'])
        def hoist_proxy_connect() -> str:
            return self.get_response(app, auth, app.HOIST_INTERNALPROXY._connect, 'data')

        @app.route('/hoist/proxy/disconnect', methods=['POST'])
        def hoist_proxy_disconnect() -> str:
            return self.get_response(app, auth, app.HOIST_INTERNALPROXY._disconnect, 'data')

        return app

    @staticmethod
    def run_server(app: Flask, ip: str, port: int) -> Flask:
        """Function for running a flask app (blocking)."""
        app.run(ip, port)
        return app

    def thread_server(self, app: Flask, ip: str, port: int) -> Flask:
        """Function for running a flask app on a background thread."""
        server: Thread = Thread(target = self.run_server, args = (app, ip, port))
        server.start()
        return app
gridmodule.py | #!/usr/bin/env python3
# The MIT License (MIT)
#
# Copyright (c) 2018 École Polytechnique Fédérale de Lausanne (EPFL)
# Author: Jagdish P. Achara
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from argparse import ArgumentParser
from csv import DictWriter
from logging import basicConfig, getLogger, INFO
from os import path
from socket import socket, AF_INET, SOCK_DGRAM
from sys import stdout, exit, exc_info
from numpy import maximum, absolute, angle
from datetime import datetime
from gridapi import GridAPI
from multiprocessing import Process, Manager
from singlephasegrid import SinglePhaseGrid
from snippets import load_json_file, load_json_data, dump_json_data, \
dump_api
from timeit import default_timer as timer
from csv import reader, QUOTE_NONNUMERIC
# Root logging config: everything at INFO and above goes to stdout.
basicConfig(stream=stdout, level=INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = getLogger('grid.module')
# Buffer size constant — usage is outside this view (presumably the UDP
# socket recv size for the grid API); TODO confirm where it is consumed.
BUFFER_LIMIT = 20000
def extract_state(grid):
    """Extract the state from a grid.

    Parameters
    ----------
    grid : SinglePhaseGrid
        Grid to extract the state from.

    Returns
    -------
    state : dict
        Keys 'P', 'Q' (per-bus power, slack bus first), 'Vm', 'Va'
        (voltage magnitude and angle in degrees per bus) and
        'LineCurrents' (per line, the larger of the two directions).
    """
    grid.computeSlackPower()
    # Per-bus active/reactive power, with the slack bus prepended at index 0.
    active = [grid.slackPower[0]] + list(grid.pqBusesP)
    reactive = [grid.slackPower[1]] + list(grid.pqBusesQ)
    grid.computeCurrents()
    # Report each line's worst-case current magnitude across both directions.
    line_currents = maximum(absolute(grid.forwardCurrents),
                            absolute(grid.backwardCurrents)).tolist()
    # Convert rectangular (real, imag) voltages to magnitude / angle (degrees).
    magnitudes = []
    angles = []
    for re_part, im_part in zip(grid.realV, grid.imagV):
        v = complex(re_part, im_part)
        magnitudes.append(absolute(v))
        angles.append(angle(v, deg=True))
    return {
        'P': active,
        'Q': reactive,
        'Vm': magnitudes,
        'Va': angles,
        'LineCurrents': line_currents
    }
def log_generator(state_queue, log_path):
    """Write logs to CSV files, and update it whenever the state is changed.

    Runs forever: blocks on *state_queue* and, for every state received,
    appends one row per bus to grid_bus.csv and one row per line to
    grid_line.csv.

    Parameters
    ----------
    state_queue : multiprocessing.Queue
        Queue from which states (dicts as produced by ``extract_state``,
        plus a 'Ts' timestamp) are read.
    log_path : path_like
        Relative path to which to write the bus and line log.
    """
    log_path_bus = path.join(log_path, 'grid_bus.csv')
    log_path_line = path.join(log_path, 'grid_line.csv')
    # buffering=1 -> line-buffered, so rows become visible to readers promptly.
    # Fix: the original called DictWriter.close(), which does not exist
    # (csv writers have no close method); the underlying files are now
    # managed by the `with` block instead.
    with open(log_path_bus, 'w', buffering=1, newline='') as log_file_bus, \
            open(log_path_line, 'w', buffering=1, newline='') as log_file_line:
        log_writer_bus = DictWriter(
            log_file_bus, ('Timestamp', 'BusIndex', 'P', 'Q', 'Vm', 'Va')
        )
        log_writer_bus.writeheader()
        log_writer_line = DictWriter(
            log_file_line, ('Timestamp', 'Line #', 'LineCurrent')
        )
        log_writer_line.writeheader()
        while True:
            # Retrieve the state from the queue (blocks until one arrives).
            state = state_queue.get()
            # All per-bus vectors must describe the same number of buses.
            assert len({
                len(state['P']), len(state['Q']),
                len(state['Vm']), len(state['Va'])
            }) == 1
            row = {'Timestamp': state['Ts']}
            for index, (P, Q, Vm, Va) in enumerate(
                    zip(state['P'], state['Q'], state['Vm'], state['Va'])
            ):
                # Write the state of the current bus.
                row.update({
                    'BusIndex': index,
                    'P': P,
                    'Q': Q,
                    'Vm': Vm,
                    'Va': Va
                })
                log_writer_bus.writerow(row)
            row = {'Timestamp': state['Ts']}
            for index, LineCurrent in enumerate(state['LineCurrents']):
                # Write the state of the current line.
                row.update({
                    'Line #': index,
                    'LineCurrent': LineCurrent
                })
                log_writer_line.writerow(row)
def update_handler(state, message_queue, state_queue, *args, **kwargs):
    """Handle messages that update the grid, i.e., implement a setpoint.

    Initializes the grid, then loops forever: drains pending setpoint
    messages, optionally reads the slack-bus voltage from a trace, runs a
    load-flow, and publishes the resulting state.

    Parameters
    ----------
    state : multiprocessing.manager.dict
        Shared dict that stores the state of the grid.
    message_queue : multiprocessing.manager.Queue
        Queue in which the main process stores messages.
    state_queue : multiprocessing.manager.Queue
        Queue in which the updated state will be put.

    Raises
    ------
    error : IOError
        Could not open the trace
    error : ValueError
        Wrong or missing value in the trace
    """
    # Parameters for slack voltage from trace
    use_trace = args[0]['slack_voltage']['use_trace']
    # Load trace
    if use_trace:
        trace_file_path = args[0]['slack_voltage']['trace_file_path']
        try:
            with open(trace_file_path, 'r') as f:
                reader_ = reader(f, quoting=QUOTE_NONNUMERIC)
                slack_voltage = list(reader_)
        except IOError as e:
            logger.error("Could not open {}: {}".format(trace_file_path, e))
            return 1
        except ValueError as e:
            logger.error("ValueError, wrong or missing value in {}: {}".format(trace_file_path, e))
            return 1
        except Exception:
            # BUG FIX: the original passed exc_info()[0] as a %-style format
            # argument to a message with no placeholder, so the exception
            # type was silently dropped.  Log the full traceback instead.
            logger.error("Unexpected error", exc_info=True)
            raise
        # Normalize the trace timestamps so the trace starts at t = 0.
        slack_voltage_first_ts = slack_voltage[0][0]
        for i in range(0, len(slack_voltage)):
            slack_voltage[i][0] = slack_voltage[i][0] - slack_voltage_first_ts
        found = False
        end_trace_reach = False
        ptr_ID = -1
        slack_voltage_real = slack_voltage[0][1]
        slack_voltage_imaginary = slack_voltage[0][2]
    else:
        slack_voltage_real = args[0]['slack_voltage']['voltage_real']
        slack_voltage_imaginary = args[0]['slack_voltage']['voltage_imaginary']
    # Initialize the grid with zero injections at every PQ bus.
    grid = SinglePhaseGrid(*args, **kwargs)
    grid.update([0] * (grid.no_buses - 1), [0] * (grid.no_buses - 1), slack_voltage_real, slack_voltage_imaginary)
    state.update(extract_state(grid))
    logger.info("Initial state: {}".format(state))
    state_log = state.copy()
    state_log['Ts'] = datetime.now()
    state_queue.put(state_log)
    reference_time = timer()
    # Process and update messages one by one, and perform load-flow analysis.
    while True:
        msg_qsize = message_queue.qsize()
        if msg_qsize == 0:
            # NOTE(review): busy-wait; burns a CPU core while idle.  A
            # blocking get() or short sleep would be kinder -- confirm the
            # latency requirements before changing.
            continue
        logger.info("Queue size in update_handler: {}".format(msg_qsize))
        # Drain the pending messages; the last setpoint per bus wins.
        index_with_updates = {}
        for i in range(msg_qsize):
            msg, _ = message_queue.get()
            bus_index = int(msg['bus_index'])
            Pd, Qd = float(msg['P']), float(msg['Q'])
            index_with_updates[bus_index] = Pd, Qd
        # Construct P, Q lists for doing LF with a single update at all
        # buses; buses without a fresh setpoint keep their previous P/Q.
        Pd = []
        Qd = []
        for i in range(1, grid.no_buses):
            if i in index_with_updates:
                Pd_new = index_with_updates[i][0]
                Qd_new = index_with_updates[i][1]
            else:
                Pd_new = grid.pqBusesP[i - 1]
                Qd_new = grid.pqBusesQ[i - 1]
            Pd.append(Pd_new)
            Qd.append(Qd_new)
        # Get voltage from trace (or use default value) for slack bus.
        if use_trace:
            found = False
            current_time = timer() - reference_time
            delta = abs(current_time - slack_voltage[ptr_ID][0])
            # Advance ptr_ID to the trace sample closest to current_time.
            while (not found and not end_trace_reach):
                # Reached the end of the trace: keep the last element.
                if (ptr_ID + 1) >= len(slack_voltage):
                    end_trace_reach = True
                    # take the last entry
                    ptr_ID = -1
                else:
                    next_delta = abs(current_time - slack_voltage[ptr_ID + 1][0])
                    # the closest value of current_time is at ptr_ID
                    if next_delta > delta:
                        found = True
                    else:
                        delta = next_delta
                        ptr_ID = ptr_ID + 1
            slack_voltage_real = slack_voltage[ptr_ID][1]
            slack_voltage_imaginary = slack_voltage[ptr_ID][2]
        logger.info("Update grid with P, Q ({}, {}) and slack voltage ({}, {}i)".format(Pd, Qd, slack_voltage_real, slack_voltage_imaginary))
        initial_time = datetime.now()
        grid.update(Pd, Qd, slack_voltage_real, slack_voltage_imaginary)  # positive power is generation in grid model except slack bus power.
        logger.info("LF took {} ms".
                    format((datetime.now() - initial_time).total_seconds() * 1e3))
        state.update(extract_state(grid))
        logger.info("Put state onto queue: {}".format(state))
        state_log = state.copy()
        state_log['Ts'] = datetime.now()
        state_queue.put(state_log)
def main():
    """Entry point: parse args, spawn grid worker processes, serve UDP requests.

    Returns
    -------
    int
        Exit code (0); the serve loop itself is infinite, so this is only
        reached if the loop is broken externally.
    """
    # Parse the arguments.
    parser = ArgumentParser(
        description="Grid module that facilitates the operation of the grid."
    )
    parser.add_argument("config_path",
                        help="Path to the JSON config file for the grid",
                        nargs='?')
    parser.add_argument("log_path",
                        help="Path to the log directory for the grid",
                        nargs='?')
    parser.add_argument("grid_module_ip",
                        help="T-RECS (private) IP of the grid module where \
it listens for the messages from T-RECS resource models.",
                        nargs='?')
    parser.add_argument("grid_module_port",
                        help="Port where grid module listens for the \
messages from T-RECS resource models.",
                        nargs='?')
    parser.add_argument("--api_path",
                        help="Path to which the GridAPI will be pickled",
                        default='grid_api.pickle')
    args = parser.parse_args()
    # Load the configuration file.
    config = load_json_file(args.config_path, logger)
    # Inform the GridAPI of the grid module's address.
    api = GridAPI(args.grid_module_ip, int(args.grid_module_port))
    dump_api(api, args.api_path)
    kwargs = {'api_path': args.api_path}
    # Initialize a multiprocessing manager.
    with Manager() as manager:
        # Shared memory for the state.
        state = manager.dict()
        # Socket to listen for incoming messages.
        sock = socket(AF_INET, SOCK_DGRAM)
        sock.bind((args.grid_module_ip, int(args.grid_module_port)))
        # Handle update messages in a dedicated child process.
        message_queue = manager.Queue()
        state_queue = manager.Queue()
        Process(target=update_handler,
                args=(state, message_queue, state_queue, config['grid']),
                kwargs=kwargs).start()
        # Log generation in a second child process.
        Process(target=log_generator, args=(state_queue, args.log_path)).start()
        # Wait for the child process to initialize the grid.
        # NOTE(review): busy-wait; acceptable only for a short startup window.
        while not state:
            continue
        while True:
            # The socket listens for messages that ask it to provide its
            # state, or implement a new setpoint.
            data, addr = sock.recvfrom(BUFFER_LIMIT)
            message = load_json_data(data)
            logger.info("Received message from {}: {}".format(addr, message))
            try:
                if message['type'] == 'request':
                    reply = {key: value for key, value in state.items()}
                    logger.info("Send state to {}: {}".format(addr, reply))
                    sock.sendto(dump_json_data(reply), addr)
                elif message['type'] == 'implement_setpoint':
                    logger.info("Implement setpoint: {}".format(message))
                    message_queue.put((message, datetime.now()))
                    logger.info("Queue size: {}".format(message_queue.qsize()))
                else:
                    # FIX: logger.warn is a deprecated alias of warning.
                    logger.warning(
                        "Unknown message type: {}".format(message['type']))
            except Exception as e:
                logger.warning("Bad message: {}".format(e))
    return 0
# Run the grid module as a standalone script.
if __name__ == '__main__':
    exit(main())
|
functions.py | from docker.errors import APIError, NotFound
from requests import ConnectionError
import docker, threading, os, logging
def create_client(use_local_socket=False):
    """Build a DockerClient configured from the environment.

    When *use_local_socket* is true, the local Unix socket is used instead
    of any DOCKER_BASE_URL override.
    """
    cfg = DockerConfig(use_local_socket)
    return docker.DockerClient(base_url=cfg.base_url(),
                               version=cfg.version(),
                               timeout=cfg.timeout())
def docker_run_settings(docker_client, docker_settings):
    """Run a new container described by *docker_settings*.

    Port bindings are coerced to tuples in place, as docker-py expects.
    Returns whatever ``containers.run`` returns.
    """
    if 'ports' in docker_settings:
        ports = docker_settings['ports']
        for mapping in list(ports):
            ports[mapping] = tuple(ports[mapping])
    return docker_client.containers.run(**docker_settings)
def docker_start_settings(docker_client, docker_settings):
    """Start the named container, creating and running it if it is absent."""
    log = logging.getLogger(__name__)
    name = docker_settings['name']
    log.info('Starting container: {}'.format(name))
    matches = docker_client.containers.list(all=True, sparse=True,
                                            filters={'name': name})
    if not matches:
        log.info('Container not found, creating')
        docker_run_settings(docker_client, docker_settings)
        return True
    log.info('Found container')
    docker_client.containers.get(name).start()
    log.info('Started')
    return True
def docker_stop_settings(docker_client, docker_settings):
    """Stop the named container and wait until it is no longer running.

    Returns True when a container was found and stopped, False otherwise.
    """
    name = docker_settings['name']
    matches = docker_client.containers.list(all=True, sparse=True,
                                            filters={'name': name})
    if not matches:
        return False
    container = docker_client.containers.get(name)
    container.stop(timeout=60)
    # Allow a little longer than the stop timeout for the state change.
    container.wait(timeout=70, condition='not-running')
    return True
def docker_restart_settings(docker_client, docker_settings):
    """Restart the named container; create and run it if it is absent."""
    name = docker_settings['name']
    existing = docker_client.containers.list(all=True, sparse=True,
                                             filters={'name': name})
    if existing:
        docker_client.containers.get(name).restart(timeout=60)
    else:
        docker_run_settings(docker_client, docker_settings)
    return True
def docker_remove_settings(docker_client, use_local_socket, docker_settings):
    """Force-remove the named container, blocking until it is gone.

    A watcher thread (with its own client) waits for the 'removed' state
    while this thread issues the remove, so the removal event cannot be
    missed.

    NOTE(review): this returns False even after a successful removal, which
    is inconsistent with the True/False convention of the sibling helpers --
    confirm whether callers rely on the current value before changing it.
    """
    logger = logging.getLogger(__name__)
    container_name = docker_settings['name']
    logger.info('Finding container: {container_name}'.format(**locals()))
    if len(docker_client.containers.list(all=True, sparse=True, filters= {'name':container_name})) > 0:
        container = docker_client.containers.get(container_name)
        if container:
            logger.info('Found container')
            # 'removed' is set by the watcher once the container is gone;
            # 'wait_ready' signals that the watcher is already waiting.
            removed = threading.Event()
            wait_ready = threading.Event()
            t = threading.Thread(target=wait_container_status, args = (create_client(use_local_socket), docker_settings,'removed',wait_ready,removed,10))
            t.daemon = True
            t.start()
            # Do not remove before the watcher is in place, or the removal
            # event could be missed entirely.
            wait_ready.wait()
            container.remove(force=True)
            logger.info('Waiting...')
            removed.wait()
            logger.info('Removed container')
    return False
def container_get_status(docker_client, docker_settings):
    """Return the container's status string, or 'CONTAINER_NOT_FOUND'."""
    try:
        found = find_container(docker_client, docker_settings['name'])
    except NotFound:
        return 'CONTAINER_NOT_FOUND'
    return found.status
def docker_logs_settings(docker_client, docker_settings):
    """Return the last 10 log lines of the named container.

    Returns the string 'Container not found' when the container is absent.
    """
    try:
        target = find_container(docker_client, docker_settings['name'])
    except NotFound:
        return 'Container not found'
    return target.logs(stdout=True, stderr=True, tail=10)
def wait_container_status(docker_client, docker_settings, condition, wait_ready, status_achieved, timeout=10, max_tries=1, tries=0):
    """Block until the named container reaches *condition*, then set the event.

    Parameters
    ----------
    docker_client : docker.DockerClient
        Client used to look the container up and wait on it.
    docker_settings : dict
        Settings dict; only 'name' is used here.
    condition : str
        Docker wait condition, e.g. 'not-running' or 'removed'.
    wait_ready : threading.Event or None
        Set as soon as this function is actually waiting, so the caller can
        trigger the state change without racing the watcher.
    status_achieved : threading.Event
        Set when the condition is reached.  Must not be None.
    timeout : int
        Per-attempt wait timeout in seconds.
    max_tries : int
        How many times to retry after a dropped connection.
    tries : int
        Internal retry counter; callers should leave it at 0.
    """
    if not status_achieved:
        raise ValueError('status_achieved must contain a value')
    container_name = docker_settings['name']
    try:
        container = find_container(docker_client, container_name)
        if wait_ready and not wait_ready.is_set():
            wait_ready.set()
        container.wait(timeout=timeout, condition=condition)
        status_achieved.set()
    except ConnectionError as ex:
        if tries < max_tries:
            tries += 1
            # BUG FIX: the original retry call omitted max_tries, so a
            # caller's retry budget was silently reset to the default of 1.
            wait_container_status(docker_client, docker_settings, condition,
                                  wait_ready, status_achieved, timeout,
                                  max_tries=max_tries, tries=tries)
        else:
            raise ex
    except NotFound as ex:
        # A removed container is reported as NotFound once it is gone.
        if condition == 'removed':
            status_achieved.set()
        else:
            raise ex
def find_container(docker_client, container_name):
    """Return the container named *container_name*; raise NotFound if absent."""
    log = logging.getLogger(__name__)
    log.info('Finding container: {}'.format(container_name))
    matches = docker_client.containers.list(all=True, sparse=True,
                                            filters={'name': container_name})
    if not matches:
        raise NotFound('No containers with name {} available.'.format(container_name))
    return docker_client.containers.get(container_name)
class DockerConfig():
    """Resolve docker client settings from the environment.

    Each accessor prefers a non-empty environment variable and falls back
    to a sensible default.
    """

    def __init__(self, use_local_socket):
        # When true, DOCKER_BASE_URL is ignored and the Unix socket is used.
        self.use_local_socket = use_local_socket

    def base_url(self):
        """Return DOCKER_BASE_URL unless the local socket was requested."""
        if not self.use_local_socket and os.environ.get('DOCKER_BASE_URL'):
            return os.environ['DOCKER_BASE_URL']
        return 'unix:///var/run/docker.sock'

    def version(self):
        """Return DOCKER_VERSION, defaulting to 'auto'."""
        return os.environ.get('DOCKER_VERSION') or 'auto'

    def timeout(self):
        """Return DOCKER_TIMEOUT in seconds as an int, defaulting to 120."""
        value = os.environ.get('DOCKER_TIMEOUT')
        return int(value) if value else 120
wifireconnect.py | # wificontrol code is placed under the GPL license.
# Written by Denis Chagin (denis.chagin@emlid.com)
# Copyright (c) 2016, Emlid Limited
# All rights reserved.
# If you are interested in using wificontrol code as a part of a
# closed source project, please contact Emlid Limited (info@emlid.com).
# This file is part of wificontrol.
# wificontrol is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# wificontrol is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with wificontrol. If not, see <http://www.gnu.org/licenses/>.
import dbus
import signal
import threading
import logging
from daemon_tree import DaemonTreeSvr
from wificontrol import WiFiControl
logging.basicConfig()
logger = logging.getLogger(__name__)
WORKER_NAME = 'wifi_reconnect'
class ReconnectWorker(object):
    """Background worker that keeps trying to reconnect to a given SSID."""

    # Seconds between reconnection attempts.
    TIMEOUT = 10

    def __init__(self):
        self.manager = WiFiControl()
        self.interrupt = threading.Event()
        self.worker = None

    def start_reconnection(self, ssid):
        """Spawn the reconnect loop unless one is already running."""
        if self.worker is not None:
            return
        self.worker = threading.Thread(target=self._reconnect, args=(ssid,))
        self.worker.daemon = True
        self.worker.start()

    def _reconnect(self, ssid):
        """Scan periodically and connect when *ssid* becomes visible."""
        self.interrupt.clear()
        self.interrupt.wait(self.TIMEOUT)
        while not self.interrupt.is_set():
            try:
                self.manager.scan()
                visible = [net['ssid'] for net in self.manager.get_scan_results()]
                if ssid in visible:
                    self.manager.start_connecting({'ssid': ssid},
                                                  callback=self._callback)
            except dbus.exceptions.DBusException as error:
                logger.error(error)
            self.interrupt.wait(self.TIMEOUT)

    def _callback(self, result=None):
        """Stop retrying once a connection attempt reports success."""
        if result:
            self.interrupt.set()

    def stop_reconnection(self):
        """Signal the loop to stop and wait for the thread to finish."""
        self.interrupt.set()
        if self.worker:
            self.worker.join()
            self.worker = None
def main():
    """Run the reconnect worker behind a DaemonTreeSvr RPC endpoint."""
    def handler(signum, frame):
        # Graceful shutdown on SIGINT/SIGTERM.
        reconnect_worker.stop_reconnection()
        reconnect_svr.cancel()

    reconnect_worker = ReconnectWorker()
    reconnect_svr = DaemonTreeSvr(name=WORKER_NAME)
    # Expose both operations over the daemon tree server.
    for method in (reconnect_worker.start_reconnection,
                   reconnect_worker.stop_reconnection):
        reconnect_svr.register(method)
    signal.signal(signal.SIGINT, handler)
    signal.signal(signal.SIGTERM, handler)
    reconnect_svr.run()
    reconnect_svr.shutdown()
# Entry point when run as a script.
if __name__ == '__main__':
    main()
|
test_tls.py | import datetime as dt
import multiprocessing as mp
import os
import random
import socket
import struct
import sys
import time
from functools import partial
try:
from contextlib import suppress
except ImportError:
# Python 2.7
from contextlib2 import suppress
import pytest
import mbedtls.hash as hashlib
from mbedtls.exceptions import TLSError
from mbedtls.pk import RSA, ECC
from mbedtls.x509 import BasicConstraints, CRT, CSR
from mbedtls.tls import _DTLSCookie as DTLSCookie
from mbedtls.tls import *
try:
FileNotFoundError
except NameError:
# Python 2.7
FileNotFoundError = OSError
def block(callback, *args, **kwargs):
    """Invoke *callback* repeatedly until it completes without raising
    WantReadError/WantWriteError; give up after sys.getrecursionlimit()
    failed attempts."""
    failures = 0
    while failures != sys.getrecursionlimit():
        with suppress(WantReadError, WantWriteError):
            return callback(*args, **kwargs)
        failures += 1
    raise RuntimeError("maximum recursion depth exceeded.")
class Chain:
    """Shared pytest fixtures building a 3-level certificate chain:
    root CA -> intermediate CA -> end entity (RSA keys, SHA-256)."""

    @pytest.fixture(scope="class")
    def now(self):
        # Single timestamp reused by every certificate in the class.
        return dt.datetime.utcnow()

    @pytest.fixture(scope="class")
    def digestmod(self):
        return hashlib.sha256

    @pytest.fixture(scope="class")
    def ca0_key(self):
        ca0_key = RSA()
        ca0_key.generate()
        return ca0_key

    @pytest.fixture(scope="class")
    def ca1_key(self):
        ca1_key = RSA()
        ca1_key.generate()
        return ca1_key

    @pytest.fixture(scope="class")
    def ee0_key(self):
        ee0_key = RSA()
        ee0_key.generate()
        return ee0_key

    @pytest.fixture(scope="class")
    def ca0_crt(self, ca0_key, digestmod, now):
        # Self-signed root certificate (CA:TRUE, no path-length limit).
        ca0_csr = CSR.new(ca0_key, "CN=Trusted CA", digestmod())
        return CRT.selfsign(
            ca0_csr,
            ca0_key,
            not_before=now,
            not_after=now + dt.timedelta(days=90),
            serial_number=0x123456,
            basic_constraints=BasicConstraints(True, -1),
        )

    @pytest.fixture(scope="class")
    def ca1_crt(self, ca1_key, ca0_crt, ca0_key, digestmod, now):
        # Intermediate CA certificate signed by the root.
        ca1_csr = CSR.new(ca1_key, "CN=Intermediate CA", digestmod())
        return ca0_crt.sign(
            ca1_csr,
            ca0_key,
            now,
            now + dt.timedelta(days=90),
            0x234567,
            basic_constraints=BasicConstraints(True, -1),
        )

    @pytest.fixture(scope="class")
    def ee0_crt(self, ee0_key, ca1_crt, ca1_key, digestmod, now):
        # Leaf certificate signed by the intermediate CA.
        ee0_csr = CSR.new(ee0_key, "CN=End Entity", digestmod())
        return ca1_crt.sign(
            ee0_csr, ca1_key, now, now + dt.timedelta(days=90), 0x345678
        )
class TestTrustStore(Chain):
    """Container semantics of TrustStore (system store plus additions)."""

    @pytest.fixture
    def store(self):
        return TrustStore.system()

    def test_eq(self, store):
        # Copy-construction yields an equal but distinct object.
        other = TrustStore(store)
        assert store is not other
        assert store == other

    def test_bool(self, store):
        assert not TrustStore()
        assert store

    def test_len(self, store):
        assert len(store) != 0

    def test_iter(self, store):
        assert store[0] != store[1]
        for n, crt in enumerate(store, start=1):
            assert crt in store
        assert n == len(store)

    def test_add_existing_certificate(self, store):
        # Adding a duplicate must be a no-op.
        length = len(store)
        store.add(store[0])
        assert len(store) == length

    def test_add_new_certificate(self, store, ca0_crt):
        length = len(store)
        store.add(ca0_crt)
        assert len(store) == length + 1
class TestDTLSCookie:
    """Tests for the DTLS hello-verify cookie helper."""

    @pytest.fixture
    def cookie(self):
        return DTLSCookie()

    def test_generate_does_not_raise(self, cookie):
        cookie.generate()

    def test_timeout(self, cookie):
        # Default is 60 seconds and the attribute is writable.
        assert cookie.timeout == 60
        cookie.timeout = 1000
        assert cookie.timeout == 1000
class _TestBaseConfiguration(Chain):
    """Configuration tests shared by TLS and DTLS; subclasses provide conf."""

    @pytest.fixture
    def conf(self):
        raise NotImplementedError

    @pytest.mark.parametrize("validate", [True, False])
    def test_set_validate_certificates(self, conf, validate):
        conf_ = conf.update(validate_certificates=validate)
        assert conf_.validate_certificates is validate

    @pytest.mark.parametrize("chain", [((), None), None])
    def test_set_certificate_chain(
        self, conf, chain, ee0_crt, ca1_crt, ee0_key
    ):
        # None means "use a real chain built from the class fixtures".
        if chain is None:
            chain = (ee0_crt, ca1_crt), ee0_key
        conf_ = conf.update(certificate_chain=chain)
        assert conf_.certificate_chain == chain

    @pytest.mark.parametrize("ciphers", [ciphers_available()])
    def test_set_ciphers(self, conf, ciphers):
        conf_ = conf.update(ciphers=ciphers)
        assert conf_.ciphers == ciphers

    @pytest.mark.parametrize(
        "inner_protocols",
        [[], (), [NextProtocol.H2, NextProtocol.H2C], [b"h2", b"h2c", b"ftp"]],
    )
    def test_set_inner_protocols(self, conf, inner_protocols):
        # Raw bytes must be normalized to NextProtocol members.
        conf_ = conf.update(inner_protocols=inner_protocols)
        assert conf_.inner_protocols == tuple(
            NextProtocol(_) for _ in inner_protocols
        )

    @pytest.mark.parametrize("store", [TrustStore.system()])
    def test_trust_store(self, conf, store):
        conf_ = conf.update(trust_store=store)
        assert store
        assert conf_.trust_store == store

    @pytest.mark.parametrize("callback", [None])
    def test_set_sni_callback(self, conf, callback):
        assert conf.sni_callback is None
class TestTLSConfiguration(_TestBaseConfiguration):
    """TLS-specific configuration tests (protocol version bounds)."""

    @pytest.fixture
    def conf(self):
        return TLSConfiguration()

    @pytest.mark.parametrize("version", TLSVersion)
    def test_lowest_supported_version(self, conf, version):
        conf_ = conf.update(lowest_supported_version=version)
        assert conf_.lowest_supported_version is version

    @pytest.mark.parametrize("version", TLSVersion)
    def test_highest_supported_version(self, conf, version):
        conf_ = conf.update(highest_supported_version=version)
        assert conf_.highest_supported_version is version
class TestDTLSConfiguration(_TestBaseConfiguration):
    """DTLS-specific configuration tests (version bounds, anti-replay)."""

    @pytest.fixture
    def conf(self):
        return DTLSConfiguration()

    @pytest.mark.parametrize("version", DTLSVersion)
    def test_lowest_supported_version(self, conf, version):
        conf_ = conf.update(lowest_supported_version=version)
        assert conf_.lowest_supported_version is version

    @pytest.mark.parametrize("version", DTLSVersion)
    def test_highest_supported_version(self, conf, version):
        conf_ = conf.update(highest_supported_version=version)
        assert conf_.highest_supported_version is version

    @pytest.mark.parametrize("anti_replay", [True, False])
    def test_set_anti_replay(self, conf, anti_replay):
        # Anti-replay defaults to enabled for DTLS.
        assert conf.anti_replay is True
        conf_ = conf.update(anti_replay=anti_replay)
        assert conf_.anti_replay is anti_replay
class TestBaseContext:
    """Behaviour common to client and server contexts before any handshake."""

    @pytest.fixture(params=[Purpose.SERVER_AUTH, Purpose.CLIENT_AUTH])
    def purpose(self, request):
        return request.param

    @pytest.fixture(params=[TLSConfiguration, DTLSConfiguration])
    def conf(self, request):
        return request.param()

    @pytest.fixture(params=[ServerContext, ClientContext])
    def context(self, conf, request):
        cls = request.param
        return cls(conf)

    def test_get_configuration(self, context, conf):
        assert conf
        assert context.configuration is conf

    def test_selected_npn_protocol(self, context):
        # No protocol has been negotiated before a handshake.
        assert context._selected_npn_protocol() is None

    def test_cipher(self, context):
        assert context._cipher() is None

    def test_get_channel_binding(self, context):
        assert context._get_channel_binding() is None

    # def test_negotiated_tls_version(self, context):
    #     assert context._negotiated_tls_version() is TLSVersion.SSLv3
class TestClientContext(TestBaseContext):
    """Client-context construction and hostname propagation."""

    @pytest.fixture(params=[None, "hostname", "localhost"])
    def hostname(self, request):
        return request.param

    @pytest.fixture
    def context(self, conf, hostname):
        return ClientContext(conf)

    def test_context(self, context):
        assert isinstance(context, ClientContext)

    def test_hostname(self, context, hostname):
        # wrap_buffers records the server hostname on the context.
        _ = context.wrap_buffers(hostname)
        assert context._hostname == hostname

    def test_wrap_buffers(self, context):
        assert isinstance(context.wrap_buffers(None), TLSWrappedBuffer)
class TestServerContext(TestBaseContext):
    """Server-context construction and buffer wrapping."""

    @pytest.fixture
    def context(self, conf):
        return ServerContext(conf)

    def test_context(self, context):
        assert isinstance(context, ServerContext)

    def test_wrap_buffers(self, context):
        assert isinstance(context.wrap_buffers(), TLSWrappedBuffer)
class _TestCommunicationBase(Chain):
    """End-to-end echo tests over a wrapped socket pair; subclasses choose
    the transport (TCP/TLS vs UDP/DTLS) and the configurations."""

    # Payload that tells the echo server to shut down.
    CLOSE_MESSAGE = b"bye"

    @pytest.fixture(scope="class")
    def version(self):
        raise NotImplementedError

    @pytest.fixture(scope="class")
    def srv_conf(self):
        raise NotImplementedError

    @pytest.fixture(scope="class")
    def cli_conf(self):
        raise NotImplementedError

    @pytest.fixture
    def step(self):
        raise NotImplementedError

    def echo(self, sock):
        raise NotImplementedError

    @pytest.fixture(params=[False])
    def buffer(self, request, randbytes):
        # NOTE(review): `randbytes` and `request.node.rep_call` come from
        # conftest.py (fixture + report hook) and are not visible here.
        buffer = randbytes(5 * 16 * 1024)
        yield buffer
        if request.node.rep_call.failed and request.param:
            # Dump the payload of a failed run for post-mortem debugging.
            with open(
                "/tmp/dump.%s" % dt.datetime.utcnow().isoformat(), "wb"
            ) as dump:
                dump.write(buffer)

    @pytest.fixture
    def address(self):
        # Randomize the port to avoid collisions between test runs.
        random.seed(hash(sys.version) ^ int(time.time() * 1000000))
        return "127.0.0.1", random.randrange(60000, 65000)

    @pytest.fixture(scope="class")
    def trust_store(self, ca0_crt):
        store = TrustStore()
        store.add(ca0_crt)
        return store

    @pytest.fixture
    def server(self, srv_conf, address, version):
        ctx = ServerContext(srv_conf)
        sock = ctx.wrap_socket(socket.socket(socket.AF_INET, self.proto))
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(address)
        if self.proto == socket.SOCK_STREAM:
            sock.listen(1)
        # Serve the echo loop in a separate process.
        runner = mp.Process(target=self.echo, args=(sock,))
        runner.start()
        yield sock
        runner.join(0.1)
        with suppress(OSError):
            sock.close()
        runner.terminate()

    @pytest.fixture
    def client(self, server, cli_conf, address):
        ctx = ClientContext(cli_conf)
        sock = ctx.wrap_socket(
            socket.socket(socket.AF_INET, self.proto),
            server_hostname="End Entity",
        )
        sock.connect(address)
        block(sock.do_handshake)
        yield sock
        # Ask the echo server to stop, then close our side.
        with suppress(OSError):
            block(sock.send, self.CLOSE_MESSAGE)
        sock.close()

    def test_srv_conf(self, srv_conf, ca1_crt, ee0_crt, ee0_key, trust_store):
        assert srv_conf.trust_store == trust_store
        assert srv_conf.certificate_chain[0] == (ee0_crt, ca1_crt)
        assert srv_conf.certificate_chain[1] == ee0_key
        assert srv_conf.certificate_chain == ((ee0_crt, ca1_crt), ee0_key)

    def test_cli_conf(self, cli_conf, trust_store):
        assert cli_conf.trust_store == trust_store
        assert cli_conf.validate_certificates == True
class TestTLSCommunication(_TestCommunicationBase):
    """Echo tests over TCP + TLS for each supported protocol version."""

    proto = socket.SOCK_STREAM

    @pytest.fixture(
        scope="class",
        params=[TLSVersion.TLSv1, TLSVersion.TLSv1_1, TLSVersion.TLSv1_2],
    )
    def version(self, request):
        return request.param

    @pytest.fixture(scope="class")
    def srv_conf(
        self, version, ca0_crt, ca1_crt, ee0_crt, ee0_key, trust_store
    ):
        return TLSConfiguration(
            trust_store=trust_store,
            certificate_chain=([ee0_crt, ca1_crt], ee0_key),
            lowest_supported_version=TLSVersion.MINIMUM_SUPPORTED,
            highest_supported_version=version,
            validate_certificates=False,
        )

    @pytest.fixture(scope="class")
    def cli_conf(self, version, trust_store):
        return TLSConfiguration(
            trust_store=trust_store,
            lowest_supported_version=TLSVersion.MINIMUM_SUPPORTED,
            highest_supported_version=version,
            validate_certificates=True,
        )

    @pytest.fixture(params=[1, 10, 100, 1000, 10000, 16384 - 1])
    def step(self, request):
        # Chunk sizes up to just below the maximum TLS record size.
        return request.param

    def echo(self, sock):
        conn, addr = sock.accept()
        block(conn.do_handshake)
        while True:
            data = block(conn.recv, 20 * 1024)
            if data == self.CLOSE_MESSAGE:
                break
            amt = block(conn.send, data)
            assert amt == len(data)

    def test_server_hostname_fails_verification(
        self, server, cli_conf, address
    ):
        # The handshake must fail when the certificate CN does not match.
        ctx = ClientContext(cli_conf)
        sock = ctx.wrap_socket(
            socket.socket(socket.AF_INET, self.proto),
            server_hostname="Wrong End Entity",
        )
        sock.connect(address)
        with pytest.raises(TLSError):
            block(sock.do_handshake)

    def test_client_server(self, client, buffer, step):
        # NOTE(review): `received` is never used in this test.
        received = bytearray()
        for idx in range(0, len(buffer), step):
            view = memoryview(buffer[idx : idx + step])
            amt = block(client.send, view)
            assert amt == len(view)
            assert block(client.recv, 20 * 1024) == view
class TestDTLSCommunication(_TestCommunicationBase):
    """Echo tests over UDP + DTLS, including the hello-verify handshake."""

    proto = socket.SOCK_DGRAM

    @pytest.fixture(scope="class", params=DTLSVersion)
    def version(self, request):
        return request.param

    @pytest.fixture(scope="class")
    def srv_conf(
        self, version, ca0_crt, ca1_crt, ee0_crt, ee0_key, trust_store
    ):
        return DTLSConfiguration(
            trust_store=trust_store,
            certificate_chain=([ee0_crt, ca1_crt], ee0_key),
            lowest_supported_version=TLSVersion.MINIMUM_SUPPORTED,
            highest_supported_version=version,
            validate_certificates=False,
        )

    @pytest.fixture(scope="class")
    def cli_conf(self, version, trust_store):
        return DTLSConfiguration(
            trust_store=trust_store,
            lowest_supported_version=TLSVersion.MINIMUM_SUPPORTED,
            highest_supported_version=version,
            validate_certificates=True,
        )

    @pytest.fixture(params=[10, 100, 1000, 5000])
    def step(self, request):
        return request.param

    def echo(self, sock):
        cli, addr = sock.accept()
        cli.setcookieparam(addr[0].encode("ascii"))
        # NOTE(review): `_tls` does not appear among this module's visible
        # imports; confirm it is defined elsewhere, otherwise this raises
        # NameError at runtime.
        with pytest.raises(_tls.HelloVerifyRequest):
            block(cli.do_handshake)
        # Second accept completes the cookie-verified handshake.
        cli, addr = cli.accept()
        cli.setcookieparam(addr[0].encode("ascii"))
        block(cli.do_handshake)
        while True:
            data = block(cli.recv, 4096)
            if data == self.CLOSE_MESSAGE:
                break
            # We must use `send()` instead of `sendto()` because the
            # DTLS socket is connected.
            amt = block(cli.send, data)
            assert amt == len(data)
|
generic_websocket.py | """
Module used as an interface to describe a generic websocket client
"""
import asyncio
import concurrent.futures
import websockets
import socket
import json
import time
from threading import Thread, Lock
from pyee import EventEmitter
from ..utils.custom_logger import CustomLogger
# websocket exceptions
from websockets.exceptions import ConnectionClosed
class AuthError(Exception):
    """Raised whenever there is a problem with the authentication packet."""
def is_json(myjson):
    """Return True if *myjson* parses as JSON, False otherwise."""
    try:
        # The parsed value itself is not needed (the original bound it to an
        # unused local); only whether parsing succeeds matters.
        json.loads(myjson)
    except ValueError:
        return False
    return True
class Socket():
    """Book-keeping wrapper around a single websocket connection."""

    def __init__(self, sId):
        # Underlying websockets connection; attached later via set_websocket.
        self.ws = None
        self.isConnected = False
        self.isAuthenticated = False
        # Integer id assigned by GenericWebsocket.
        self.id = sId
        # NOTE(review): this is a threading.Lock used inside an async def
        # (see send) and held across an await, which can block the event
        # loop under contention -- consider asyncio.Lock.  Confirm intent.
        self.lock = Lock()

    def set_connected(self):
        self.isConnected = True

    def set_disconnected(self):
        self.isConnected = False

    def set_authenticated(self):
        self.isAuthenticated = True

    def set_unauthenticated(self):
        self.isAuthenticated = False

    def set_websocket(self, ws):
        self.ws = ws

    async def send(self, data):
        # Serialize concurrent sends on the same connection.
        with self.lock:
            await self.ws.send(data)
def _start_event_worker():
    # Default EventEmitter factory: schedule handlers on the asyncio loop.
    return EventEmitter(scheduler=asyncio.ensure_future)
class GenericWebsocket:
    """
    Websocket object used to contain the base functionality of a websocket.
    Includes an event emitter and a standard websocket client.
    """

    logger = CustomLogger('BfxWebsocket', logLevel="DEBUG")

    def __init__(self, host, logLevel='INFO', max_retries=5, create_event_emitter=None):
        """
        Parameters
        ----------
        host : str
            Websocket URL to connect to.
        logLevel : str
            Log level applied to the shared class logger.
        max_retries : int
            Consecutive reconnection attempts before giving up.
        create_event_emitter : callable, optional
            Factory returning the event emitter; defaults to a pyee
            EventEmitter scheduled on the asyncio loop.
        """
        self.host = host
        self.logger.set_level(logLevel)
        self.ws = None
        self.max_retries = max_retries
        self.attempt_retry = True
        # Map of socket id -> Socket wrapper.
        self.sockets = {}
        # Start separate scheduler for the event emitter.
        create_ee = create_event_emitter or _start_event_worker
        self.events = create_ee()

    def run(self):
        """
        Start the websocket connection. This function spawns the initial
        socket thread and connection.
        """
        self._start_new_socket()

    def get_task_executable(self):
        """
        Get the run-indefinitely asyncio task for external scheduling.
        """
        return self._run_socket()

    def _start_new_socket(self, socketId=None):
        """Spawn a thread with its own event loop running _run_socket."""
        if not socketId:
            socketId = len(self.sockets)

        def start_loop(loop):
            asyncio.set_event_loop(loop)
            loop.run_until_complete(self._run_socket())

        worker_loop = asyncio.new_event_loop()
        worker = Thread(target=start_loop, args=(worker_loop,))
        worker.start()
        return socketId

    def _wait_for_socket(self, socket_id):
        """
        Block until the given socket connection is open.
        """
        while True:
            socket = self.sockets.get(socket_id, False)
            if socket:
                if socket.isConnected and socket.ws:
                    return
            time.sleep(0.01)

    def get_socket(self, socketId):
        return self.sockets[socketId]

    def get_authenticated_socket(self):
        """Return the first authenticated socket, or None."""
        for socketId in self.sockets:
            if self.sockets[socketId].isAuthenticated:
                return self.sockets[socketId]
        return None

    async def _run_socket(self):
        """Connect and pump messages, reconnecting up to max_retries times."""
        retries = 0
        sId = len(self.sockets)
        s = Socket(sId)
        self.sockets[sId] = s
        while retries < self.max_retries and self.attempt_retry:
            try:
                async with websockets.connect(self.host) as websocket:
                    self.sockets[sId].set_websocket(websocket)
                    self.sockets[sId].set_connected()
                    self.logger.info("Websocket connected to {}".format(self.host))
                    # Reset the budget after every successful connection.
                    retries = 0
                    while True:
                        # optimization - wait 0 seconds to force the async queue
                        # to be cleared before continuing
                        await asyncio.sleep(0)
                        message = await websocket.recv()
                        await self.on_message(sId, message)
            except (ConnectionClosed, socket.error) as e:
                self.sockets[sId].set_disconnected()
                if self.sockets[sId].isAuthenticated:
                    self.sockets[sId].set_unauthenticated()
                self._emit('disconnected')
                if (not self.attempt_retry):
                    return
                self.logger.error(str(e))
                retries += 1
                # wait 5 seconds before retrying
                self.logger.info("Waiting 5 seconds before retrying...")
                await asyncio.sleep(5)
                self.logger.info("Reconnect attempt {}/{}".format(retries, self.max_retries))
        self.logger.info("Unable to connect to websocket.")
        self._emit('stopped')

    async def stop(self):
        """
        Stop all websocket connections
        """
        self.attempt_retry = False
        for key, socket in self.sockets.items():
            await socket.ws.close()
        self._emit('done')

    def remove_all_listeners(self, event):
        """
        Remove all listeners from event emitter
        """
        self.events.remove_all_listeners(event)

    def on(self, event, func=None):
        """
        Add a new event to the event emitter; usable as a decorator when
        *func* is None.
        """
        if not func:
            return self.events.on(event)
        self.events.on(event, func)

    def once(self, event, func=None):
        """
        Add a new event to only fire once to the event emitter; usable as a
        decorator when *func* is None.
        """
        if not func:
            return self.events.once(event)
        self.events.once(event, func)

    def _emit(self, event, *args, **kwargs):
        self.events.emit(event, *args, **kwargs)

    async def on_error(self, error):
        """
        On websocket error print and fire event
        """
        self.logger.error(error)

    async def on_close(self):
        """
        This is used by the HF data server.
        """
        # BUG FIX: stop() is a coroutine; the original called it without
        # awaiting, so the shutdown never actually ran.
        await self.stop()

    async def on_open(self):
        """
        On websocket open; override in subclasses.
        """
        pass

    async def on_message(self, socket_id, message):
        """
        On websocket message; override in subclasses.

        BUG FIX: _run_socket invokes this hook as on_message(sId, message);
        the original base signature only accepted (self, message) and would
        raise TypeError if not overridden.
        """
        pass
|
agent_state_manager.py | #! /usr/bin/env python3
# _*_ coding:utf-8 _*_
import threading
import time
import logging
from app.server import STATE_UPDATE_INTERVAL, AgentState
from app.server import *
from app.lib import msg_bus, common_msg
logger = logging.getLogger("Server")
def singleton(cls):
    """Class decorator caching a single shared instance per decorated class.

    The constructor arguments of the first call win; later calls return the
    cached instance unchanged.
    """
    cache = {}

    def wrapper(*args, **kargs):
        if cls not in cache:
            cache[cls] = cls(*args, **kargs)
        return cache[cls]

    return wrapper
@singleton
class AgentStateMonitor(object):
    """
    Agent state monitor (singleton).

    Tracks the last heartbeat of every known agent and periodically
    re-classifies each agent as Online / Offline / Dead based on how
    long ago its heartbeat arrived.
    """
    def __init__(self):
        # agent identifier -> latest AgentState
        self.agent_state_dict = dict()
        self.__running = threading.Event()
        self.__state_monitor_thread = threading.Thread(target=self.__agents_state_monitor)
        # Refresh an agent's record whenever a heartbeat message arrives.
        msg_bus.add_msg_listener(common_msg.MSG_HEARTBEAT, self.add_new_agent_state)

    def add_new_agent_state(self, msg):
        """Record/refresh an agent's state from a heartbeat message."""
        state_data = msg.data
        agent_state = AgentState()
        agent_state.gen_from_json_obj(state_data)
        agent_state.timestamp = time.time()
        self.agent_state_dict[agent_state.agent_identifier] = agent_state

    def update_agent_state(self, agent_state):
        """Store *agent_state* and dump it to stdout (debug helper)."""
        self.agent_state_dict[agent_state.agent_identifier] = agent_state
        agent_state.print_state()

    def start_monitor(self):
        """Start the background monitor thread (safe to call repeatedly)."""
        self.__running.set()
        if not self.__state_monitor_thread.is_alive():
            # Bug fix: a Thread object can only be start()ed once, so the
            # original raised RuntimeError when restarting after
            # stop_monitor(); create a fresh thread instead.
            self.__state_monitor_thread = threading.Thread(target=self.__agents_state_monitor)
            self.__state_monitor_thread.daemon = True
            self.__state_monitor_thread.start()

    def stop_monitor(self):
        """Stop the monitor loop and forget all known agents."""
        self.__running.clear()
        self.agent_state_dict.clear()

    def __agents_state_monitor(self):
        # Bug fix: the loop used to test the Event object itself
        # ("while self.__running"), which is always truthy, so
        # stop_monitor() could never terminate it. Test is_set() instead.
        while self.__running.is_set():
            if len(self.agent_state_dict) > 0:
                for agent_state in list(self.agent_state_dict.values()):
                    new_state = self.__check_state(agent_state)
                    if new_state == "Dead":
                        logger.info("Agent {0} is dead.\nAgent {1} is removed.".format(
                            agent_state.agent_identifier,
                            agent_state.agent_identifier))
                        agent_state.state = new_state
                        common_msg.msg_agent_state_update.data = agent_state.gen_json_object()
                        msg_bus.send_msg(common_msg.msg_agent_state_update)
                        self.agent_state_dict.pop(agent_state.agent_identifier)
                    else:
                        agent_state.state = new_state
                        self.agent_state_dict[agent_state.agent_identifier] = agent_state
                        common_msg.msg_agent_state_update.data = agent_state.gen_json_object()
                        msg_bus.send_msg(common_msg.msg_agent_state_update)
                    time.sleep(1)
            time.sleep(STATE_UPDATE_INTERVAL)

    def __check_state(self, agent_state):
        """
        Classify an agent by the age of its most recent heartbeat.

        Online  : age <= 2 * STATE_UPDATE_INTERVAL
        Offline : 2x < age <= 5x
        Dead    : age > 5x

        :param agent_state: agent state object
        :return: "Online", "Offline" or "Dead"
        """
        last_time = time.time() - agent_state.timestamp
        if STATE_UPDATE_INTERVAL * 2.0 < last_time <= STATE_UPDATE_INTERVAL * 5.0:
            return "Offline"
        elif last_time > STATE_UPDATE_INTERVAL * 5.0:
            return "Dead"
        else:
            return "Online"
# Smoke test: start the monitor, inject one fake heartbeat through the
# message bus, then let the monitor run for 20 s so the Online ->
# Offline -> Dead transitions can be observed in the log.
if __name__ == '__main__':
    monitor = AgentStateMonitor()
    # Printing id() twice across runs would demonstrate the singleton.
    print(id(monitor))
    monitor.start_monitor()
    common_msg.msg_heartbeat.data = {
        "Name":"tste",
        "Address": ("127.0.0.1", 5555),
        "Timestamp": time.time(),
        "State": "Online"
    }
    msg_bus.send_msg(common_msg.msg_heartbeat)
    time.sleep(20)
|
torrent_downloader.py | #!/usr/bin/python3
import bencode
import sys
import signal
import itertools
import argparse
import dns.resolver
from socket import *
import hashlib
import urllib
import random
import struct
import binascii
from collections import namedtuple
from collections import OrderedDict
import urllib.request
import traceback
import codecs
import urllib.parse
import string
import time
import threading
import numpy as np
import os
import sched
lock = threading.Lock()
def msg_get_id(data):
    """Return the BitTorrent message id from a raw peer message.

    Message ids: 0 choke, 1 unchoke, 2 interested, 3 not interested,
    4 have, 5 bitfield, 6 request, 7 piece, 8 cancel.

    Falls back to 0 (choke) when *data* is too short to contain an id
    byte (e.g. a 4-byte keep-alive). Fix: the original bare ``except``
    is narrowed to ``struct.error`` so unrelated bugs are not hidden.
    """
    try:
        (msg_id,) = struct.unpack(">B", data[4:5])
        return msg_id
    except struct.error:
        return 0
def get_keep_alive():
    """Return the 4-byte keep-alive message (a zero length prefix)."""
    return b"\x00\x00\x00\x00"
def get_handshake_msg(info_hash, peer_id):
    """Build the 68-byte BitTorrent handshake.

    Layout (BEP 3): <1 byte pstrlen=19><19 byte "BitTorrent protocol">
    <8 reserved zero bytes><20 byte info hash><20 byte peer id>.

    Bug fix: the original ignored its *info_hash* argument and
    recomputed the hash from the module-level ``file_info`` global; it
    now uses the hex-encoded *info_hash* callers already pass in.
    """
    pstr = b"BitTorrent protocol"
    reserved = b"\x00" * 8
    return (bytes([19]) + pstr + reserved
            + binascii.unhexlify(info_hash) + peer_id.encode())
def make_piece_request(pieces, piece_length, size):
    """Build a 17-byte block "request" message (id 6).

    Wire layout:
        4 bytes  length prefix (13)
        1 byte   message id (6)
        4 bytes  piece index
        4 bytes  begin (offset within the piece)
        4 bytes  block length (16 KB, or the piece remainder)

    Picks the next piece from the shared ``piece_index`` table
    (0 = not requested, -1 = in flight, 1 = complete). When no piece is
    unrequested, an in-flight piece is re-requested and marked
    unrequested again (original endgame-ish behaviour preserved).

    Bug fix: the lock is now held via a context manager, so a failure
    in either ``index()`` lookup can no longer leave it permanently
    acquired.
    """
    length = 13
    msg_id = 6
    with lock:
        try:
            INDEX = piece_index.index(0)
            piece_index[INDEX] = -1
        except ValueError:
            # Everything is in flight: recycle one in-flight piece.
            INDEX = piece_index.index(-1)
            piece_index[INDEX] = 0
    begin_offset = block_offset[INDEX]
    remaining = piece_length - block_offset[INDEX]
    req_length = 16384 if remaining >= 16384 else remaining
    return struct.pack(">IBIII", length, msg_id, INDEX, begin_offset, req_length)
def write_into_file(name, data, piece_length):
    """Write the payload of a "piece" message (id 7) into the output
    file and update the shared download bookkeeping.

    *data* is the raw peer message: 4-byte length, 1-byte id, 4-byte
    piece index, 4-byte begin offset, then the block payload.

    Bug fixes:
    - The file was opened in append mode ('ab'), where every write
      lands at end-of-file regardless of seek(), scrambling the data.
      It is now opened 'r+b' (the main script pre-creates the file) so
      the block really lands at index * piece_length + offset.
    - The overflow check used to re-test against the already-updated
      offset with a second independent ``if``, double-counting the
      block near a piece boundary; it is now an ``else`` branch.
    - File handle and lock are managed by context managers, so errors
      cannot leak either.
    """
    global downloaded
    length, msg_id, index, offset = struct.unpack(">IBII", data[:13])
    payload = data[13:]
    with open(name, 'r+b') as f:
        f.seek(index * piece_length + offset)
        bytes_written = f.write(payload)
    downloaded = downloaded + bytes_written
    print(downloaded)
    with lock:
        if block_offset[index] + bytes_written <= piece_length:
            block_offset[index] = block_offset[index] + bytes_written
            if block_offset[index] == piece_length:
                piece_index[index] = 1
        else:
            # The block spills over into the next piece: cap this piece
            # and carry the surplus forward.
            extra = (block_offset[index] + bytes_written) - piece_length
            block_offset[index] = piece_length
            block_offset[index + 1] = extra
def make_intrested_request():
    """Return the "interested" message (id 2): length prefix 1 + id byte."""
    return b"\x00\x00\x00\x01\x02"
def make_choke():
    """Return the "choke" message (id 0): length prefix 1 + id byte."""
    return b"\x00\x00\x00\x01\x00"
def make_unchoke():
    """Return the "unchoke" message (id 1): length prefix 1 + id byte."""
    return b"\x00\x00\x00\x01\x01"
def send_piece(name, r_index, r_begin, r_length, piece_length):
    """Build a "piece" message (id 7) serving *r_length* bytes of piece
    *r_index* starting at offset *r_begin*, read from local file *name*.

    Bug fix: the original never closed the file handle; a context
    manager now guarantees it is released.
    """
    with open(name, 'rb') as f:
        f.seek(r_index * piece_length + r_begin)
        chunk = f.read(r_length)
    header = struct.pack(">IBII", len(chunk) + 9, 7, r_index, r_begin)
    return header + chunk
def connect_to_peers(ip_port, file_info, peer_id, pieces, piece_length, size, name):
    """One peer-wire session: handshake with the peer at *ip_port*,
    read its bitfield, exchange choke/interest state, serve incoming
    block requests and request/write blocks for ourselves until the
    download completes or the socket times out.

    NOTE(review): relies on the module-level globals ``info_hash`` and
    ``downloaded`` set up by the __main__ block — confirm thread-safety
    of ``downloaded`` before trusting the completion check.
    """
    p_choked = True  # peer is choked
    p_intrested = False  # peer is not intrested
    m_chocked = True  # i am chocked
    m_intrested = False  # i am not intrested
    block = b''
    clientSocket = socket(AF_INET, SOCK_STREAM)
    # ip_port is "a.b.c.d:port"
    u = ip_port.split(':')
    ip = u[0]
    port = int(u[1])
    # Expected size of the bitfield message: one bit per piece plus the
    # 5 header bytes (with a byte of slack).
    no_bytes = int(size / (piece_length * 8) + 6)
    try:
        clientSocket.settimeout(10)
        clientSocket.connect((ip, port))
        # 68-byte handshake in both directions.
        handshake_msg = get_handshake_msg(info_hash, peer_id)
        clientSocket.send(handshake_msg)
        time.sleep(1)
        handshake_response = clientSocket.recv(68)
        start = time.time()
        try:
            # Optional bitfield (id 5) announcing which pieces the peer has.
            bitField = clientSocket.recv(no_bytes)
            msg_type = msg_get_id(bitField)
            if msg_type == 5:
                # NOTE(review): np.fromstring is deprecated; np.frombuffer
                # is the drop-in replacement.
                x = np.fromstring(bitField, dtype=np.uint8)
                m = np.unpackbits(x)
                a = list(m)  # piece list
                # Skip the 5 header bytes (40 bits); the rest is the bitmap.
                bit_field = a[40:]
        except:
            print("Bitfield Not Sent By Peer")
            pass
        msg_type = 0  # choke
        try:
            first_msg = clientSocket.recv(5)
            msg_type = msg_get_id(first_msg)
        except:
            pass
        if msg_type != 1:  # CHOKE
            # send Intrested
            intrested_req = make_intrested_request()
            clientSocket.send(intrested_req)
            first_msg = clientSocket.recv(5)
            msg_type = msg_get_id(first_msg)
            if msg_type == 0:  # still choke
                return
        if msg_type == 1:  # UNCHOKE
            while True:
                clientSocket.settimeout(10)
                try:
                    m = clientSocket.recv(5)
                    if len(m) == 0:
                        n = -1  # no message from peer
                    else:
                        n = msg_get_id(m)
                    if n == 2:
                        # Peer became interested in us: unchoke it.
                        p_intrested = True
                        unchoke = make_unchoke()
                        clientSocket.send(unchoke)
                        p_choked = False
                    elif n == 3:
                        # Peer lost interest: choke it again.
                        p_intrested = False
                        choke = make_choke()
                        clientSocket.send(choke)
                        p_choked = True
                    elif n == 4:
                        # "have": the peer acquired one more piece.
                        p = clientSocket.recv(4)
                        have_piece_index = struct.unpack(">L", p)
                        # update in bitfiled
                        # request this piece
                    elif n == 6:
                        # Peer requests a block from us: serve it from disk.
                        try:
                            print("piece request has arrived")
                            p = clientSocket.recv(12)
                            print(m)
                            print(p)
                            r_index, r_begin, r_length = struct.unpack(">LLL", p)
                            print(r_index, r_begin, r_length )
                            r_piece = send_piece(name, r_index, r_begin, r_length, piece_length)
                            clientSocket.send(r_piece)
                            print("piece SentS")
                        except:
                            print("error :", traceback.format_exc())
                except:
                    # No control message arrived: use the idle slot to
                    # request a block for ourselves
                    # (16384 payload + 13 header = 16397 bytes).
                    piece_req = make_piece_request(pieces, piece_length, size)
                    clientSocket.send(piece_req)
                    time.sleep(3)
                    piece = clientSocket.recv(16397)
                    if (len(piece) == 16397):
                        if msg_get_id(piece) == 7:
                            write_into_file(name, piece, piece_length)
                            if downloaded == size:
                                print("File downloaded successfully")
                                return
                    else:
                        # Short read: give the peer time, drain the socket.
                        time.sleep(5)
                        clientSocket.recv(16397)
    except timeout:
        print("error :", traceback.format_exc())
        clientSocket.close()
        return
def get_peer_id():
    """Generate a 20-char peer id: 'MS001' + 15 random lowercase letters."""
    suffix = "".join(random.choice(string.ascii_lowercase) for _ in range(15))
    return "MS001" + suffix
def udp_transaction_id():
    """Return a random transaction id in [1, 65535] for UDP tracker packets."""
    return random.randint(1, 65535)
def download_file():
    """Placeholder; downloading is actually driven by the per-peer threads."""
def update_peers(trak_list, file_info, size, peer_id):
    """Re-announce to every tracker and return a deduplicated peer list.

    Waits two minutes first (the usual tracker re-announce interval),
    then queries each tracker URL in the nested announce-list.

    Bug fixes: the original iterated the module-level ``tracker_list``
    instead of the *trak_list* argument, and passed an undefined name
    ``j`` (instead of the inner-loop tracker URL) to
    ``connect_to_tracker``, raising NameError at runtime.
    """
    print("trying for new peers")
    time.sleep(120)
    collected = []
    for tier in trak_list:
        for tracker_url in tier:
            peers = connect_to_tracker(tracker_url, file_info, size, peer_id)
            collected.append(peers)
    # Drop failed trackers (None / empty), flatten the tiers, dedupe.
    non_empty = [p for p in collected if p]
    flattened = list(itertools.chain(*non_empty))
    return list(set(flattened))
def connect_to_tracker(hostname, file_info, size, peer_id):
    """Announce to one tracker URL (HTTP or UDP) and return the peers
    as a list of "ip:port" strings, or None on any failure.

    NOTE(review): reads the module-level counters ``uploaded``,
    ``downloaded`` and ``left`` maintained by the rest of the script.
    """
    host = hostname
    url_parts = urllib.parse.urlparse(hostname)
    port = url_parts.port
    print(port)
    protocol = url_parts.scheme
    url = url_parts.hostname
    try:
        # Resolve the tracker host; give up silently when DNS fails.
        address = getaddrinfo(url, port)
    except:
        return
    ip_list = []
    ip = address[0][4][0]
    ip_list.append(ip)
    info_hash = hashlib.sha1(bencode.bencode(file_info)).hexdigest()
    if protocol == "https":
        # HTTPS trackers are not supported.
        return
    if protocol == "http":
        for i in ip_list:
            if len(i) > 15:
                # Longer than a dotted-quad IPv4: IPv6 not supported.
                return
            clientSocket = socket(AF_INET, SOCK_STREAM)
            try:
                x = codecs.decode(info_hash, 'hex')
                headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3'}
                # Standard HTTP announce query (BEP 3).
                req_parameters = OrderedDict()
                req_parameters['info_hash'] = x
                req_parameters['peer_id'] = peer_id
                req_parameters['port'] = 6881
                req_parameters['uploaded'] = uploaded
                req_parameters['downloaded'] = downloaded
                req_parameters['left'] = left
                req_parameters['numwant'] = 20
                req_parameters['key'] = "5fdd3f95"
                req_parameters['compact'] = 1
                req_parameters['event'] = 'started'
                request = urllib.parse.urlencode(req_parameters)
                packet = hostname + '?' + request
                req = urllib.request.Request(packet, headers = headers)
                req1 = urllib.request.urlopen(req)
                http_resp = req1.read()
                print(http_resp)
                try :
                    http_response = bencode.decode(http_resp)
                except:
                    print("error :", traceback.format_exc())
                    clientSocket.close()
                    return
                # Compact peer list: 6 bytes per peer (4 IP + 2 port).
                http_peers = http_response['peers']
                peer_list = []
                n_http_peers = (len(http_peers)) // 6
                for x in range(n_http_peers):
                    m = struct.unpack(">BBBBH", http_peers[x*6:(6 + x*6)])
                    m = list(m)
                    r = str(m[0]) + '.' + str(m[1]) + '.' + str(m[2]) + '.' + str(m[3]) + ':' + str(m[4])
                    peer_list.append(r)
                clientSocket.close()
                return peer_list
            except:
                print("error :", traceback.format_exc())
                clientSocket.close()
                return
            # NOTE(review): unreachable — both the try and except paths
            # above return before this point.
            print("error :", traceback.format_exc())
            clientSocket.close()
    else:
        # Assume a UDP tracker (BEP 15): connect, announce, then scrape.
        for i in ip_list:
            clientSocket = socket(AF_INET, SOCK_DGRAM)
            s_port = clientSocket.getsockname()[1]
            """CONNECTION_ID REQUEST TO UDP TRACKER"""
            try:
                protocol_id = 0x41727101980
                transaction_id = udp_transaction_id()
                packet = struct.pack(">QLL", protocol_id, 0, transaction_id)
                # 0 is action to connect
                clientSocket.sendto(packet, (i, port))
                try:
                    clientSocket.settimeout(1)
                    res = clientSocket.recv(16)
                except timeout:
                    clientSocket.close()
                    return
                action, transaction_id_rec, connection_id = struct.unpack(">LLQ", res)
                """ANNOUNCE R and REQUEST TO UDP TRACKERS"""
                transaction_id = udp_transaction_id()
                peer_id_ = peer_id.encode('utf-8')
                packet_hashes = ""
                packet_hashes = bytearray(packet_hashes, 'utf-8') + binascii.unhexlify(info_hash)
                packet = struct.pack(">QLL20s20sQQQLLLlH", connection_id, 1, transaction_id, packet_hashes, peer_id_, 0, size, 0, 2, 0, 0, -1, s_port)
                # 1 is action to announce
                clientSocket.sendto(packet, (i, port)) #sending packet
                try:
                    clientSocket.settimeout(2)
                    res1 = clientSocket.recvfrom(1024) #receiving Response
                    # unpacking Response from UDP Tracker
                    action, transaction_id_rec, interval, leechers, seeders = struct.unpack(">LLLLL", res1[0][:20])
                    l = []
                    # Remaining payload: 6 bytes per peer (4 IP + 2 port).
                    peer_no = (len(res1[0]) - 20) / 6
                    n = int(peer_no)
                    for z in range(n):
                        t = struct.unpack(">BBBBH", res1[0][20+(z*6):26+(z*6)])
                        t = list(t)
                        ku = ".".join(map(str,t))
                        # Turn "a.b.c.d.port" into "a.b.c.d:port" by
                        # replacing only the LAST separator.
                        mux = ku[::-1].replace('.',':', 1)
                        l.append(mux[::-1])
                except timeout:
                    pass
                """SCRAPE REQUEST TO UDP TRACKERS"""
                #sending scrape request
                packet = struct.pack(">QLL", connection_id, 2, transaction_id) + packet_hashes
                clientSocket.sendto(packet, (i, port))
                torrent_details = {}
                try:
                    #receiving response for scrape request
                    clientSocket.settimeout(1)
                    res = clientSocket.recv(8 + 12* len(info_hash))
                except timeout:
                    clientSocket.close()
                    return
                action_scrape, transaction_id_rec, seeders, completed, leechers = struct.unpack(">LLLLL", res)
                torrent_details[info_hash] = (seeders, leechers, completed)
                #print("scrape Response :")
                #print("seeders, leechers, completed", torrent_details[info_hash])
                clientSocket.close()
                # NOTE(review): if the announce reply timed out above,
                # ``l`` is undefined here; the resulting NameError is
                # swallowed by the bare except below.
                return l
            except:
                pass
            clientSocket.close()
"""
Execution starts from Below
"""
#extracting file details ->trackers, info, and other
#created by
#info has name, length, piece length, private, pieces
#working on info
'''def keep_listening():
serverSocket = socket(AF_INET,SOCK_STREAM)
serverSocket.bind(('',6881))
serverSocket.listen(100)
while True:
Connected, addr = serverSocket.accept()
p_list.append(connectionSocket)
x = crea
connectionSocket.close()
server.close()
'''
def make_files(files_details):
    """Stub for multi-file allocation; currently only pauses briefly."""
    time.sleep(1)
# Entry point: parse the .torrent file named on the command line,
# announce to its trackers, then spawn one thread per discovered peer.
if __name__ == '__main__':
    file = open(sys.argv[1], "rb")
    MetaData = bencode.bdecode(file.read())
    file_info = MetaData["info"]
    name = file_info["name"]
    # NOTE(review): "global" at module level is a no-op.
    global pieces
    pieces = file_info["pieces"]
    # NOTE(review): pieces[0] is a single element of the pieces blob;
    # hashing its bencoding looks like leftover debugging code.
    pqr = pieces[0]
    xyz = hashlib.sha1(bencode.bencode(pqr)).hexdigest()
    mno = ""
    mno = bytearray(mno, 'utf-8') + binascii.unhexlify(xyz)
    # Start from scratch when a previous (partial) download exists.
    if os.path.isfile(name):
        os.remove(name)
    # Optional metadata fields: ignore whichever are missing.
    try:
        created_by = MetaData["created by"]
    except:
        pass
    try:
        date = MetaData["creation date"]
    except:
        pass
    try:
        # Multi-tracker torrents carry a tiered "announce-list".
        tracker_list = MetaData["announce-list"]
        announce = True
    except:
        tracker_list = MetaData["announce"]
        announce = False
    try:
        comments = MetaData["comment"]
        print("comment: " + comments)
    except:
        pass
    try:
        # NOTE(review): "d" is undefined here (probably meant MetaData);
        # the NameError is silently swallowed by the bare except.
        encoding = d["encoding"]
        print("encoding :" +str(encoding))
    except:
        pass
    try:
        private = file_info["private"]
    except:
        pass
    piece_length = file_info["piece length"]
    files_details = []
    if "files" in file_info.keys():
        # multiple file
        single_file = False
        files_details = []
        mul_file = file_info["files"]
        for i in range(len(mul_file)):
            file_path = mul_file[i]["path"]
            file_length = mul_file[i]['length']
            files_details.append([file_length, file_path])
    else:
        # single file
        single_file = True
        size = file_info["length"]
        files_details.append([size,[name]])
        file = open(name, "w")
        file.close()
    #print(files_details)
    #make_files(files_details, name)
    # Pre-create (truncate) the file the peer threads will write into.
    file = open(name, "w")
    file.close()
    print("Name: " + name)
    print("Piece Length: " + str(piece_length))
    print("tracker_list:")
    print(tracker_list)
    peer_id = get_peer_id()
    # Best-effort printing of optional metadata gathered above.
    try:
        print("created_by : " +created_by)
    except:
        pass
    try:
        print("comment: " + comments)
    except:
        pass
    try:
        print("encoding :" +str(encoding))
    except:
        pass
    try:
        print("private_file: " + private)
    except:
        pass
    size = 0
    for i in range(len(files_details)):
        print(files_details[i])
    # Total payload size across all files.
    for k in range(len(files_details)):
        x = files_details[k][0]
        size = x + size
    print("Size :" + str(size))
    no_of_pieces = int(size/piece_length)
    if no_of_pieces == 0:
        no_of_pieces = 1
    # Shared download bookkeeping used by the peer threads:
    # piece_index: 0 = new, -1 = in flight, 1 = done;
    # block_offset: bytes received so far per piece.
    global piece_index
    piece_index = [0 for i in range(no_of_pieces)]
    global block_offset
    block_offset = [0 for i in range(no_of_pieces)]
    global INDEX
    global downloaded
    downloaded = 0
    global left
    left = size
    global uploaded
    uploaded = 0
    info_hash = hashlib.sha1(bencode.bencode(file_info)).hexdigest()
    print(info_hash)
    packet_hashes = ""
    packet_hashes = bytearray(packet_hashes, 'utf-8') + binascii.unhexlify(info_hash)
    # Gather peers from every tracker tier (or the single announce URL).
    peer_list = []
    if announce:
        for i in tracker_list:
            for j in i:
                p = connect_to_tracker(j, file_info, size, peer_id)
                #print(p)
                peer_list.append(p)
    else :
        p = connect_to_tracker(tracker_list[0], file_info, size, peer_id)
        peer_list.append(p)
    # Flatten, drop failed trackers (None / empty) and deduplicate.
    peer_list1 = [i for i in peer_list if i]
    peeers = list(itertools.chain(*peer_list1))
    peers = list(set(peeers))
    print(len(peers))
    print(peers)
    # One wire-protocol session thread per peer.
    threads = []
    for i in peers:
        peer_connection = threading.Thread(target = connect_to_peers, args = (i, file_info, peer_id, pieces, piece_length, size, name))
        threads.append(peer_connection)
        peer_connection.start()
|
atrace_agent.py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import py_utils
import re
import sys
import threading
import zlib
from devil.android import device_utils
from devil.android.sdk import version_codes
from py_trace_event import trace_time as trace_time_module
from systrace import trace_result
from systrace import tracing_agents
from systrace import util
# Text that ADB sends, but does not need to be displayed to the user.
ADB_IGNORE_REGEXP = r'^capturing trace\.\.\. done|^capturing trace\.\.\.'
# The number of seconds to wait on output from ADB.
ADB_STDOUT_READ_TIMEOUT = 0.2
# The adb shell command to initiate a trace.
ATRACE_BASE_ARGS = ['atrace']
# If a custom list of categories is not specified, traces will include
# these categories (if available on the presentation.device).
DEFAULT_CATEGORIES = 'sched,freq,gfx,view,dalvik,webview,'\
'input,disk,am,wm,rs,binder_driver'
# The command to list trace categories.
LIST_CATEGORIES_ARGS = ATRACE_BASE_ARGS + ['--list_categories']
# Minimum number of seconds between displaying status updates.
MIN_TIME_BETWEEN_STATUS_UPDATES = 0.2
# ADB sends this text to indicate the beginning of the trace data.
TRACE_START_REGEXP = r'TRACE\:'
# Plain-text trace data should always start with this string.
TRACE_TEXT_HEADER = '# tracer'
_FIX_THREAD_IDS = True
_FIX_MISSING_TGIDS = True
_FIX_CIRCULAR_TRACES = True
def list_categories(config):
    """List the possible trace event categories.

    This function needs the tracing config since it needs to get the
    serial number of the device to send a command to.

    Args:
        config: Tracing config.
    """
    devutils = device_utils.DeviceUtils(config.device_serial_number)
    categories = devutils.RunShellCommand(
        LIST_CATEGORIES_ARGS, check_return=True)
    device_sdk_version = util.get_device_sdk_version()
    if device_sdk_version < version_codes.MARSHMALLOW:
        # work around platform bug where rs tag would corrupt trace until M(Api23)
        categories = [c for c in categories if not re.match(r'^\s*rs\s*-', c)]
    print '\n'.join(categories)
    # Some categories only show up with a rooted adbd.
    if not devutils.HasRoot():
        print '\nNOTE: more categories may be available with adb root\n'
def get_available_categories(config, device_sdk_version):
    """Gets the list of atrace categories available for tracing.

    Args:
        config: Tracing config.
        device_sdk_version: SDK version int of the device being queried.
    Returns:
        List of category-name strings.
    """
    device = device_utils.DeviceUtils(config.device_serial_number)
    raw_listing = device.RunShellCommand(
        LIST_CATEGORIES_ARGS, check_return=True)
    names = []
    for entry in raw_listing:
        names.append(entry.split('-')[0].strip())
    if device_sdk_version < version_codes.MARSHMALLOW:
        # Platform bug: the 'rs' tag corrupts traces before M (API 23).
        names = [n for n in names if n != 'rs']
    return names
def try_create_agent(config):
    """Create an Atrace agent.

    Args:
        config: Command line config.
    Returns:
        An AtraceAgent, or None when atrace does not apply: non-Android
        target, replaying from file, no categories requested, or the
        device SDK is too old.
    """
    if config.target != 'android':
        return None
    if config.from_file is not None:
        return None
    if not config.atrace_categories:
        return None
    # Check device SDK version.
    device_sdk_version = util.get_device_sdk_version()
    if device_sdk_version < version_codes.JELLY_BEAN_MR2:
        print ('Device SDK versions < 18 (Jellybean MR2) not supported.\n'
            'Your presentation.device SDK version is %d.' % device_sdk_version)
        return None
    return AtraceAgent(device_sdk_version)
def _construct_extra_atrace_args(config, categories):
"""Construct extra arguments (-a, -k, categories) for atrace command.
Args:
config: Tracing config.
"""
extra_args = []
if config.app_name is not None:
extra_args.extend(['-a', config.app_name])
if config.kfuncs is not None:
extra_args.extend(['-k', config.kfuncs])
extra_args.extend(categories)
return extra_args
def _construct_atrace_args(config, categories):
"""Builds the command used to invoke a trace process.
Returns:
A tuple where the first element is an array of command arguments, and
the second element is a boolean which will be true if the command will
stream trace data.
"""
atrace_args = ATRACE_BASE_ARGS[:]
if config.compress_trace_data:
atrace_args.extend(['-z'])
if (config.trace_time is not None) and (config.trace_time > 0):
atrace_args.extend(['-t', str(config.trace_time)])
if (config.trace_buf_size is not None) and (config.trace_buf_size > 0):
atrace_args.extend(['-b', str(config.trace_buf_size)])
elif 'sched' in categories:
# 'sched' is a high-volume tag, double the default buffer size
# to accommodate that
atrace_args.extend(['-b', '4096'])
extra_args = _construct_extra_atrace_args(config, categories)
atrace_args.extend(extra_args)
return atrace_args
class AtraceAgent(tracing_agents.TracingAgent):
    """Systrace agent that captures kernel/framework events via atrace
    on an Android device over adb, then post-processes the raw output."""

    def __init__(self, device_sdk_version):
        super(AtraceAgent, self).__init__()
        self._device_sdk_version = device_sdk_version
        self._adb = None
        self._trace_data = None
        self._tracer_args = None
        self._collection_thread = None
        self._device_utils = None
        self._device_serial_number = None
        self._config = None
        self._categories = None

    def __repr__(self):
        return 'atrace'

    @py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
    def StartAgentTracing(self, config, timeout=None):
        """Kicks off an asynchronous atrace capture on the device."""
        assert config.atrace_categories, 'Atrace categories are missing!'
        self._config = config
        self._categories = config.atrace_categories
        if isinstance(self._categories, list):
            self._categories = ','.join(self._categories)
        # Keep only categories the device supports; report the rest.
        avail_cats = get_available_categories(config, self._device_sdk_version)
        unavailable = [x for x in self._categories.split(',') if
                       x not in avail_cats]
        self._categories = [x for x in self._categories.split(',') if
                            x in avail_cats]
        if unavailable:
            print 'These categories are unavailable: ' + ' '.join(unavailable)
        self._device_utils = device_utils.DeviceUtils(config.device_serial_number)
        self._device_serial_number = config.device_serial_number
        self._tracer_args = _construct_atrace_args(config,
                                                   self._categories)
        self._device_utils.RunShellCommand(
            self._tracer_args + ['--async_start'], check_return=True)
        return True

    def _collect_and_preprocess(self):
        """Collects and preprocesses trace data.

        Stores results in self._trace_data.
        """
        trace_data = self._collect_trace_data()
        self._trace_data = self._preprocess_trace_data(trace_data)

    @py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
    def StopAgentTracing(self, timeout=None):
        """Stops tracing and starts collecting results.

        To synchronously retrieve the results after calling this function,
        call GetResults().
        """
        # Collection is slow (adb transfer + several regex passes), so
        # run it on a background thread and join in GetResults().
        self._collection_thread = threading.Thread(
            target=self._collect_and_preprocess)
        self._collection_thread.start()
        return True

    @py_utils.Timeout(tracing_agents.GET_RESULTS_TIMEOUT)
    def GetResults(self, timeout=None):
        """Waits for collection thread to finish and returns trace results."""
        self._collection_thread.join()
        self._collection_thread = None
        return trace_result.TraceResult('systemTraceEvents', self._trace_data)

    def SupportsExplicitClockSync(self):
        return True

    def RecordClockSyncMarker(self, sync_id, did_record_sync_marker_callback):
        """Records a clock sync marker.

        Args:
            sync_id: ID string for clock sync marker.
            did_record_sync_marker_callback: Called with the timestamp
                taken just before the marker write was issued.
        """
        cmd = 'echo trace_event_clock_sync: name=%s >' \
            ' /sys/kernel/debug/tracing/trace_marker' % sync_id
        with self._device_utils.adb.PersistentShell(
                self._device_serial_number) as shell:
            t1 = trace_time_module.Now()
            shell.RunCommand(cmd, close=True)
            did_record_sync_marker_callback(t1, sync_id)

    def _stop_trace(self):
        """Stops atrace.

        Note that prior to Api 23, --async-stop may not actually stop tracing.
        Thus, this uses a fallback method of running a zero-length synchronous
        trace if tracing is still on."""
        self._device_utils.RunShellCommand(
            self._tracer_args + ['--async_stop'], check_return=True)
        is_trace_enabled_file = '/sys/kernel/debug/tracing/tracing_on'
        if self._device_sdk_version < version_codes.MARSHMALLOW:
            if int(self._device_utils.ReadFile(is_trace_enabled_file)):
                # tracing was incorrectly left on, disable it
                self._device_utils.RunShellCommand(
                    self._tracer_args + ['-t 0'], check_return=True)

    def _collect_trace_data(self):
        """Reads the output from atrace and stops the trace."""
        dump_cmd = self._tracer_args + ['--async_dump']
        result = self._device_utils.RunShellCommand(
            dump_cmd, raw_output=True, check_return=True)
        # Everything before "TRACE:" is adb/atrace chatter, not data.
        data_start = re.search(TRACE_START_REGEXP, result)
        if data_start:
            data_start = data_start.end(0)
        else:
            raise IOError('Unable to get atrace data. Did you forget adb root?')
        output = re.sub(ADB_IGNORE_REGEXP, '', result[data_start:])
        self._stop_trace()
        return output

    def _preprocess_trace_data(self, trace_data):
        """Performs various processing on atrace data.

        Args:
            trace_data: The raw trace data.
        Returns:
            The processed trace data.
        """
        if trace_data:
            trace_data = strip_and_decompress_trace(trace_data)
        if not trace_data:
            print >> sys.stderr, ('No data was captured. Output file was not '
                                  'written.')
            sys.exit(1)
        if _FIX_THREAD_IDS:
            # Issue ps command to device and patch thread names
            # TODO(catapult:#3215): Migrate to device.GetPids()
            ps_dump = self._device_utils.RunShellCommand(
                'ps -T -o USER,TID,PPID,VSIZE,RSS,WCHAN,ADDR=PC,S,CMD || ps -t',
                shell=True, check_return=True)
            thread_names = extract_thread_list(ps_dump)
            trace_data = fix_thread_names(trace_data, thread_names)
        if _FIX_MISSING_TGIDS:
            # Issue printf command to device and patch tgids
            procfs_dump = self._device_utils.RunShellCommand(
                'printf "%s\n" /proc/[0-9]*/task/[0-9]*',
                shell=True, check_return=True)
            pid2_tgid = extract_tgids(procfs_dump)
            trace_data = fix_missing_tgids(trace_data, pid2_tgid)
        if _FIX_CIRCULAR_TRACES:
            trace_data = fix_circular_traces(trace_data)
        return trace_data
def extract_thread_list(trace_lines):
    """Parse `ps` output into a map of thread ids to thread names.

    Args:
        trace_lines: The text portion of the trace (`ps` header + rows).
    Returns:
        Dict mapping integer thread ids to thread names; empty when no
        header row starting with USER is found.
    """
    tid_to_name = {}
    # The header is the first line whose first column is 'USER'.
    header_idx = -1
    for idx, line in enumerate(trace_lines):
        fields = line.split()
        if len(fields) >= 8 and fields[0] == 'USER':
            header_idx = idx
            break
    if header_idx < 0:
        return tid_to_name
    for row in trace_lines[header_idx + 1:]:
        fields = row.split(None, 8)
        if len(fields) == 9:
            tid_to_name[int(fields[1])] = fields[8]
    return tid_to_name
def extract_tgids(trace_lines):
    """Build a tid -> tgid map from a /proc/<tgid>/task/<tid> listing.

    Args:
        trace_lines: Lines like '/proc/123/task/456'.
    Returns:
        Dict mapping each task id (str) to its thread-group id (str).
    """
    mapping = {}
    pattern = re.compile(r'^/proc/([0-9]+)/task/([0-9]+)')
    for line in trace_lines:
        hit = pattern.match(line)
        if hit:
            mapping[hit.group(2)] = hit.group(1)
    return mapping
def strip_and_decompress_trace(trace_data):
    """Fixes new-lines and decompresses trace data.

    Collapses the CR/LF sequences adb shell inserts, inflates the
    payload when it does not begin with the plain-text trace header,
    and trims leading blank lines.

    Args:
        trace_data: The trace data returned by atrace.
    Returns:
        The decompressed, newline-normalized trace data.
    """
    # adb shell turns every '\n' into '\r\n' ('\r\r\n' on Windows).
    if trace_data.startswith('\r\r\n'):
        trace_data = trace_data.replace('\r\r\n', '\n')
    elif trace_data.startswith('\r\n'):
        trace_data = trace_data.replace('\r\n', '\n')
    # Drop the single leading newline left over from the shell prompt.
    if trace_data[0] == '\n':
        trace_data = trace_data[1:]
    if not trace_data.startswith(TRACE_TEXT_HEADER):
        # No plain-text header: the payload must be deflate-compressed.
        trace_data = zlib.decompress(trace_data)
    # Enforce Unix line endings, then trim any remaining blank lines.
    trace_data = trace_data.replace('\r', '')
    return trace_data.lstrip('\n')
def fix_thread_names(trace_data, thread_names):
    """Replaces thread ids with their names.

    Args:
        trace_data: The atrace data.
        thread_names: A mapping of thread ids to thread names; updated
            in place as previously unseen names are encountered.
    Returns:
        The updated trace data.
    """
    def _rename(match):
        tid = int(match.group(2))
        if tid <= 0:
            return match.group(0)
        name = thread_names.get(tid)
        if name is None:
            name = match.group(1)
            if name == '<...>':
                # Unknown name: synthesize one from the tid.
                name = '<%d>' % tid
            thread_names[tid] = name
        return '%s-%s' % (name, match.group(2))

    # Matches e.g. "Binder_2-895" or
    # "com.google.android.inputmethod.latin-1078".
    return re.sub(r'^\s*(\S+)-(\d+)', _rename, trace_data,
                  flags=re.MULTILINE)
def fix_missing_tgids(trace_data, pid2_tgid):
    """Fills in '(-----)' TGID placeholders using the procfs mapping.

    Args:
        trace_data: The atrace data.
        pid2_tgid: Mapping of pid (str) to tgid (str).
    Returns:
        Trace data with known TGIDs substituted for '(-----)'.
    """
    def _fill(match):
        name, tid, tgid_field = match.groups()
        known = (int(tid) > 0 and name != '<idle>'
                 and tgid_field == '(-----)' and tid in pid2_tgid)
        if not known:
            return match.group(0)
        # "Binder_2-381 (-----)" becomes "Binder_2-381 ( 128)"
        return '%s-%s ( %s)' % (name, tid, pid2_tgid[tid])

    # Matches e.g. "Binder_2-895 (-----)".
    return re.sub(r'^\s*(\S+)-(\d+)\s+(\(\S+\))', _fill, trace_data,
                  flags=re.MULTILINE)
def fix_circular_traces(out):
    """Fix inconsistencies in traces due to circular buffering.

    Per-CPU ring buffers may overwrite the beginning of a slice while
    keeping its end. Throw away the prefix of the trace up to the LAST
    '#### CPU <n> buffer started ####' marker so every CPU's events are
    complete, while keeping the '#'-prefixed header block intact.

    Args:
        out: The data to fix.
    Returns:
        The updated trace data.
    """
    # The kernel emits these markers only when a circular buffer
    # overflowed; non-circular or non-overflowed traces have none.
    marker_re = re.compile(r'^#+ CPU \d+ buffer started', re.MULTILINE)
    last_marker = 0
    hit = marker_re.search(out, last_marker + 1)
    while hit:
        last_marker = hit.start()
        hit = marker_re.search(out, last_marker + 1)
    if last_marker > 0:
        # Keep the header to make the importer happy.
        header_end = re.search(r'^[^#]', out, re.MULTILINE).start()
        out = out[:header_end] + out[last_marker:]
    return out
class AtraceConfig(tracing_agents.TracingConfig):
    """Value object holding every atrace-specific tracing option
    (see add_options for the corresponding command-line flags)."""
    def __init__(self, atrace_categories, trace_buf_size, kfuncs,
                 app_name, compress_trace_data, from_file,
                 device_serial_number, trace_time, target):
        tracing_agents.TracingConfig.__init__(self)
        self.atrace_categories = atrace_categories
        self.trace_buf_size = trace_buf_size
        self.kfuncs = kfuncs
        self.app_name = app_name
        self.compress_trace_data = compress_trace_data
        self.from_file = from_file
        self.device_serial_number = device_serial_number
        self.trace_time = trace_time
        self.target = target
def add_options(parser):
    """Register atrace-specific command-line options on *parser*.

    Returns:
        The OptionGroup holding all atrace options.
    """
    group = optparse.OptionGroup(parser, 'Atrace options')
    group.add_option('--atrace-categories', dest='atrace_categories',
                     help='Select atrace categories with a comma-delimited '
                     'list, e.g. --atrace-categories=cat1,cat2,cat3')
    group.add_option('-k', '--ktrace', dest='kfuncs', action='store',
                     help='specify a comma-separated list of kernel functions '
                     'to trace')
    group.add_option('--no-compress', dest='compress_trace_data',
                     default=True, action='store_false',
                     help='Tell the presentation.device not to send the trace data in '
                     'compressed form.')
    group.add_option('-a', '--app', dest='app_name', default=None,
                     type='string', action='store',
                     help='enable application-level tracing for '
                     'comma-separated list of app cmdlines')
    group.add_option('--from-file', dest='from_file',
                     action='store', help='read the trace from a '
                     'file (compressed) rather than running a '
                     'live trace')
    return group
def get_config(options):
    """Build an AtraceConfig from parsed command-line *options*."""
    return AtraceConfig(
        options.atrace_categories,
        options.trace_buf_size,
        options.kfuncs,
        options.app_name,
        options.compress_trace_data,
        options.from_file,
        options.device_serial_number,
        options.trace_time,
        options.target)
|
scraper.py | # -*- coding: utf-8 -*-
"""
小米论坛爬虫
@author Jia Tan
"""
import io
import requests
import re
import threading
from datetime import datetime
# Subforums to crawl: (forum type id, display name).
sub_list = [('266', u'相册')]
# Moderator reply tags recognised in thread headers.
reply_type = [u'已收录', u'已答复', u'请补充', u'待讨论', u'确认解决']
# NOTE: consume() strips ALL whitespace from a page before matching, so
# the "<divclass=..." patterns below are intentional, not typos.
# regex to remove whitespaces
white_re = re.compile(r'\s+', re.U)
# regex to get page count
page_cnt_re = re.compile(r'class=\"last\">... ([0-9]+)', re.U)
# regex to get thread content
thread_re = re.compile(r'<divclass=\"avatarbox-info\">'
    r'<divclass=\"sub-tit\">(.+?)</div>'
    r'<divclass="sub-infos">(.+?)</div></div>', re.U)
# regex to get name of subsection
sub_name_re = re.compile(r'<em>.+?>(.+?)</a>\]</em>', re.U)
# regex to get thread title
thread_title_re = re.compile(r'class=\"sxst\">(.+?)</a>', re.U)
# regex to get view count
view_re = re.compile(r'<spanclass=\"number_d\">(\d+)</span>', re.U)
# regex to get reply count
reply_re = re.compile(r'<spanclass=\"number_d\"><ahref=.+?(\d+)</a></span>', re.U)
# regex to get url for thread page
thread_page_re = re.compile(r'</em><ahref=\"(.+?)\"onclick', re.U)
# regex to get post time for thread (two possible HTML shapes)
date_time_re = re.compile(ur'发表于 <span title=\"(\d+-\d+-\d+ \d+:\d+:\d+)\">|发表于 (\d+-\d+-\d+ \d+:\d+:\d+)</em>', re.U)
# global thread content queue (producer/consumer hand-off)
thread_queue = []
# global lock for thread queue
queue_lock = threading.Lock()
def get_content(url, max_retries=5):
    """Fetch *url* and return its body text.

    Returns '' when the resource is missing (404) or when the request keeps
    failing after *max_retries* attempts -- the previous version retried a
    non-200/non-404 response forever with no delay, hammering the server.
    """
    import time  # local import: keeps the module's import block untouched
    res = requests.get(url)
    attempts = 0
    while res.status_code != 200:
        if res.status_code == 404:
            print('404 not found: ' + url)
            return ''
        attempts += 1
        if attempts > max_retries:
            print('giving up after %d retries: %s' % (max_retries, url))
            return ''
        # back off briefly before retrying instead of spinning on the server
        time.sleep(1)
        res = requests.get(url)
    return res.text
def grep(s, *args):
    """Return a list with the first match of each regex in *args* against *s*.

    Raises IndexError when any of the patterns has no match in *s*.
    """
    return [re.findall(pattern, s)[0] for pattern in args]
def get_reply(s):
    """Return the first official-reply tag contained in *s*, or the
    no-reply marker when none of the known tags appears."""
    for tag in reply_type:
        if tag in s:
            return tag
    return u'无回复'
def produce():
    """Producer thread: fetch every listing page of every sub-forum in
    sub_list and push the raw HTML onto thread_queue.

    Appends an empty string as an end-of-stream sentinel so consume()
    knows when to stop."""
    for sub_str, sub_name in sub_list:
        first_page = get_content('http://www.miui.com/type-38-' + sub_str + '.html')
        if not first_page:
            # fetch failed (404 or network error): skip this sub-forum
            continue
        # total number of listing pages, read from the pager's "last" link
        page_cnt = int(re.search(page_cnt_re, first_page).group(1))
        print '%d pages for %s' % (page_cnt, sub_name)
        print 'page,'
        for page_num in range(1, page_cnt + 1):
            print '%d ' % page_num,
            page_url = 'http://www.miui.com/forum.php?mod=forumdisplay&fid=38&typeid=' \
                       + sub_str + '&filter=typeid&page=' + str(page_num)
            page = get_content(page_url)
            with queue_lock:
                thread_queue.append(page)
        print ''
    # empty-string sentinel: tells the consumer no more pages are coming
    with queue_lock:
        thread_queue.append('')
def consume(fout):
    """Consumer thread: pop listing pages off thread_queue, parse each thread
    entry, and append one CSV row per thread to *fout*.

    An empty-string page is the producer's end-of-stream sentinel; fout is
    closed when it is seen.
    """
    import time  # local import: keeps the module's import block untouched
    while True:
        page = None
        with queue_lock:
            if len(thread_queue) > 0:
                page = thread_queue.pop(0)
        if page is None:
            # queue is empty: yield the CPU instead of busy-spinning at 100%
            time.sleep(0.1)
            continue
        if not page:
            break  # '' sentinel -> producer finished
        # the extraction regexes assume all whitespace has been stripped
        page = re.sub(white_re, '', page)
        threads = re.findall(thread_re, page)
        for thread in threads:
            sub_name, thread_title, thread_url = grep(thread[0], sub_name_re, thread_title_re, thread_page_re)
            if_attach = u'是' if thread[0].find(u'附件') >= 0 else u'否'
            if_extra_points = u'是' if thread[0].find(u'加分') >= 0 else u'否'
            reply = get_reply(thread[0])
            thread_content = get_content('http://www.miui.com/' + thread_url)
            try:
                date_time_match = re.search(date_time_re, thread_content).groups()
            except AttributeError:
                # No post-time found: dump the page for debugging and skip this
                # thread. Previously execution fell through here and crashed on
                # the unbound date_time_match.
                with io.open('error_page.html', 'w', encoding='utf-8') as ferr:
                    ferr.write(thread_content)
                print('\nhttp://www.miui.com/' + thread_url)
                continue
            # date_time_re has two alternative groups; exactly one matched
            date_time = date_time_match[0] if date_time_match[0] else date_time_match[1]
            view_num, reply_num = grep(thread[1], view_re, reply_re)
            fout.write(','.join([date_time, sub_name, view_num, reply_num, reply,
                                 if_attach, if_extra_points, thread_title]) + '\n')
            fout.flush()
    fout.close()
if __name__ == "__main__":
fout_name = 'miui_' + '_'.join(map(lambda x: x[-1], sub_list)) + '_' + str(datetime.now().date()) + '.csv'
fout = io.open(fout_name, 'w', encoding='utf-8')
fout.write(u'发布日期,分类,浏览数,回复数,小米回复类型,是否有附件,是否被加分,标题\n')
producer = threading.Thread(target=produce)
consumer = threading.Thread(target=consume, args=(fout,))
producer.start()
consumer.start()
|
interactive.py | # Copyright (C) 2011 Jeff Forcier <jeff@bitprophet.org>
#
# This file is part of ssh.
#
# 'ssh' is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# 'ssh' is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with 'ssh'; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA.
import socket
import sys
# windows does not have termios...
# Feature-detect POSIX terminal control; the flag below selects between the
# posix and windows shell drivers in interactive_shell().
try:
    import termios
    import tty
    has_termios = True
except ImportError:
    has_termios = False
def interactive_shell(chan):
    """Run an interactive session over *chan*, using the terminal driver
    that matches the local platform (termios-based when available)."""
    driver = posix_shell if has_termios else windows_shell
    driver(chan)
def posix_shell(chan):
    """Interactive shell loop for POSIX terminals: put the local tty into raw
    mode and shuttle bytes between stdin and the channel *chan* until EOF."""
    import select
    # remember the terminal settings so they can be restored on exit
    oldtty = termios.tcgetattr(sys.stdin)
    try:
        tty.setraw(sys.stdin.fileno())
        tty.setcbreak(sys.stdin.fileno())
        chan.settimeout(0.0)  # make channel reads non-blocking
        while True:
            # block until the channel or local stdin has data ready
            r, w, e = select.select([chan, sys.stdin], [], [])
            if chan in r:
                try:
                    x = chan.recv(1024)
                    if len(x) == 0:
                        # remote side closed the channel
                        print '\r\n*** EOF\r\n',
                        break
                    sys.stdout.write(x)
                    sys.stdout.flush()
                except socket.timeout:
                    pass
            if sys.stdin in r:
                x = sys.stdin.read(1)
                if len(x) == 0:
                    # local stdin reached EOF
                    break
                chan.send(x)
    finally:
        # always restore the terminal settings saved above
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
# thanks to Mike Looijmans for this code
def windows_shell(chan):
    """Interactive shell loop for platforms without termios (Windows):
    a background thread copies remote output to stdout while the main
    thread forwards stdin to the channel, one character at a time."""
    import threading
    sys.stdout.write("Line-buffered terminal emulation. Press F6 or ^Z to send EOF.\r\n\r\n")
    def pump_remote(sock):
        # copy everything the server sends to stdout until the channel closes
        while True:
            received = sock.recv(256)
            if not received:
                sys.stdout.write('\r\n*** EOF ***\r\n\r\n')
                sys.stdout.flush()
                return
            sys.stdout.write(received)
            sys.stdout.flush()
    threading.Thread(target=pump_remote, args=(chan,)).start()
    try:
        while True:
            ch = sys.stdin.read(1)
            if not ch:
                break
            chan.send(ch)
    except EOFError:
        # user hit ^Z or F6
        pass
|
sum.py | # Ultradesu 2017
# Because the GIL doesn't allow truly simultaneous computation in threads,
# forks (processes) are used instead of threads to fully utilize system resources.
# This version is much slower than the equivalent C implementation.
import multiprocessing
import time
# calculating summ between start and end.
def sumumatoru_kun(start, end, number, return_dict):
    """Sum the decimal digit sums of every integer in [start, end].

    Stores the result under key *number* in *return_dict* (a shared dict
    when run as a worker process), prints a per-worker timing row, and
    returns the sum.
    """
    began = time.process_time()
    lo, hi = start, end
    total = 0
    for value in range(lo, hi + 1):
        remaining = value
        # peel off the decimal digits one at a time
        while remaining > 0:
            remaining, digit = divmod(remaining, 10)
            total += digit
    return_dict[number] = total
    elapsed_time = time.process_time() - began
    print(" %s\t|%10d\t|%10d\t|%10d\t|\t%6.2f" % (number, lo, hi, total, elapsed_time))
    return total
def sum():
    """Split the range [0, 1e9] across cpu_count()*2 worker processes,
    digit-sum each chunk with sumumatoru_kun, and print the combined total.

    Note: deliberately keeps the original name even though it shadows the
    builtin ``sum`` -- callers invoke it by this name.
    """
    range_start = 0
    range_end = 1000000000
    thread_count = multiprocessing.cpu_count() * 2
    chunk = int((range_end - range_start) / thread_count)
    print('Found %s CPUs. Going to spawn %s forks.' % (int(thread_count/2), thread_count))
    t = time.process_time()  # started but never read back; kept for parity
    print("proc #\t|\tfrom\t|\ttill\t|\tsumm\t| elapsed time per child")
    print("-"*80)
    # manager dict collects each worker's partial sum by worker index
    manager = multiprocessing.Manager()
    return_dict = manager.dict()
    jobs = []
    for idx in range(thread_count):
        seg_start = range_start + chunk * idx
        # the last worker absorbs any remainder of the division
        seg_end = range_end if idx == (thread_count - 1) else seg_start + chunk - 1
        worker = multiprocessing.Process(target=sumumatoru_kun,
                                         args=(seg_start, seg_end, idx, return_dict))
        jobs.append(worker)
        worker.start()
    for proc in jobs:
        proc.join()
    result = 0
    for partial in return_dict.values():
        result = result + partial
    print("\n\tFinal result is %d" % result)
if __name__ == '__main__':
    sum()
|
test_partition_20.py | import threading
import pytest
from base.partition_wrapper import ApiPartitionWrapper
from base.client_base import TestcaseBase
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from common.code_mapping import PartitionErrorMessage
prefix = "partition_"
class TestPartitionParams(TestcaseBase):
    """Test cases of the partition interface's parameter handling:
    names, descriptions, duplicate/invalid values."""
    @pytest.mark.tags(CaseLabel.L0)
    def test_partition_default(self):
        """
        target: verify create a partition
        method: create a partition
        expected: create successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        description = cf.gen_unique_str("desc_")
        self.init_partition_wrap(collection_w, partition_name,
                                 description=description,
                                 check_task=CheckTasks.check_partition_property,
                                 check_items={"name": partition_name, "description": description,
                                              "is_empty": True, "num_entities": 0}
                                 )
        # check that the partition has been created
        assert collection_w.has_partition(partition_name)[0]
    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("partition_name", [""])
    def test_partition_empty_name(self, partition_name):
        """
        target: verify create a partition with empty name
        method: create a partition with empty name
        expected: raise exception
        """
        # create a collection
        collection_w = self.init_collection_wrap()
        # create partition
        self.partition_wrap.init_partition(collection_w.collection, partition_name,
                                           check_task=CheckTasks.err_res,
                                           check_items={ct.err_code: 1, ct.err_msg: "Partition name should not be empty"})
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_empty_description(self):
        """
        target: verify create a partition with empty description
        method: create a partition with empty description
        expected: create successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # init partition
        partition_name = cf.gen_unique_str(prefix)
        description = ""
        self.init_partition_wrap(collection_w, partition_name,
                                 description=description,
                                 check_task=CheckTasks.check_partition_property,
                                 check_items={"name": partition_name, "description": description,
                                              "is_empty": True, "num_entities": 0}
                                 )
        # check that the partition has been created
        assert collection_w.has_partition(partition_name)[0]
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_max_description_length(self):
        """
        target: verify create a partition with 255 length name and 2048 length description
        method: create a partition with 255 length name and 2048 length description
        expected: create successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # init partition
        partition_name = cf.gen_str_by_length(255)
        description = cf.gen_str_by_length(2048)
        self.init_partition_wrap(collection_w, partition_name,
                                 description=description,
                                 check_task=CheckTasks.check_partition_property,
                                 check_items={"name": partition_name, "description": description,
                                              "is_empty": True}
                                 )
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_dup_name(self):
        """
        target: verify create partitions with duplicate names
        method: create partitions with duplicate names
        expected: 1. create successfully
                  2. the same partition returned with diff object ids
        """
        # create a collection
        collection_w = self.init_collection_wrap()
        # create two partitions
        partition_name = cf.gen_unique_str(prefix)
        description = cf.gen_unique_str()
        partition_w1 = self.init_partition_wrap(collection_w, partition_name, description)
        partition_w2 = self.init_partition_wrap(collection_w, partition_name, description)
        # public check func to be extracted
        # distinct client objects wrapping the same server-side partition
        assert id(partition_w1.partition) != id(partition_w2.partition)
        assert partition_w1.name == partition_w2.name
        assert partition_w1.description == partition_w2.description
    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("description", ct.get_invalid_strs)
    def test_partition_special_chars_description(self, description):
        """
        target: verify create a partition with special characters in description
        method: create a partition with special characters in description
        expected: create successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        self.init_partition_wrap(collection_w, partition_name,
                                 description=description,
                                 check_task=CheckTasks.check_partition_property,
                                 check_items={"name": partition_name, "description": description,
                                              "is_empty": True, "num_entities": 0}
                                 )
        assert collection_w.has_partition(partition_name)[0]
    @pytest.mark.tags(CaseLabel.L0)
    def test_partition_default_name(self):
        """
        target: verify create a partition with default name
        method: 1. get the _default partition
                2. create a partition with _default name
        expected: the same partition returned
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # check that the default partition exists
        assert collection_w.has_partition(ct.default_partition_name)[0]
        # check that can get the _default partition
        collection, _ = collection_w.partition(ct.default_partition_name)
        # check that init the _default partition object
        partition_w = self.init_partition_wrap(collection_w, ct.default_partition_name)
        assert collection.name == partition_w.name
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_max_length_name(self):
        """
        target: verify create a partition with max length(256) name
        method: create a partition with max length name
        expected: raise exception
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_str_by_length(256)
        # NOTE(review): string key 'err_msg' -- sibling tests use ct.err_msg;
        # confirm the response checker accepts both spellings
        self.partition_wrap.init_partition(collection_w.collection, partition_name,
                                           check_task=CheckTasks.err_res,
                                           check_items={ct.err_code: 1, 'err_msg': "is illegal"}
                                           )
    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("partition_name", ct.get_invalid_strs)
    def test_partition_invalid_name(self, partition_name):
        """
        target: verify create a partition with invalid name
        method: create a partition with invalid names
        expected: raise exception
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        self.partition_wrap.init_partition(collection_w.collection, partition_name,
                                           check_task=CheckTasks.err_res,
                                           check_items={ct.err_code: 1, 'err_msg': "is illegal"}
                                           )
        # TODO: need an error code issue #5144 and assert independently
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_none_collection(self):
        """
        target: verify create a partition with none collection
        method: create a partition with none collection
        expected: raise exception
        """
        # create partition with collection is None
        partition_name = cf.gen_unique_str(prefix)
        self.partition_wrap.init_partition(collection=None, name=partition_name,
                                           check_task=CheckTasks.err_res,
                                           check_items={ct.err_code: 1,
                                                        ct.err_msg: "must be pymilvus.Collection"})
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_drop(self):
        """
        target: verify drop a partition in one collection
        method: 1. create a partition in one collection
                2. drop the partition
        expected: drop successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        partition_w = self.init_partition_wrap(collection_w, partition_name)
        # check that the partition exists
        assert collection_w.has_partition(partition_name)[0]
        # drop partition
        partition_w.drop()
        # check that the partition not exists
        assert not collection_w.has_partition(partition_name)[0]
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_release(self):
        """
        target: verify release partition
        method: 1. create a collection and two partitions
                2. insert data into each partition
                3. flush and load the both partitions
                4. release partition1
                5. release partition1 twice
        expected: 1. the 1st partition is released
                  2. the 2nd partition is not released
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create two partitions
        partition_w1 = self.init_partition_wrap(collection_w)
        partition_w2 = self.init_partition_wrap(collection_w)
        # insert data to two partition
        partition_w1.insert(cf.gen_default_list_data())
        partition_w2.insert(cf.gen_default_list_data())
        # load two partitions
        partition_w1.load()
        partition_w2.load()
        # search two partitions
        search_vectors = cf.gen_vectors(1, ct.default_dim)
        res1, _ = partition_w1.search(data=search_vectors,
                                      anns_field=ct.default_float_vec_field_name,
                                      params={"nprobe": 32}, limit=1)
        res2, _ = partition_w2.search(data=search_vectors,
                                      anns_field=ct.default_float_vec_field_name,
                                      params={"nprobe": 32}, limit=1)
        assert len(res1) == 1 and len(res2) == 1
        # release the first partition
        partition_w1.release()
        # check result: searching the released partition must fail ...
        res1, _ = partition_w1.search(data=search_vectors,
                                      anns_field=ct.default_float_vec_field_name,
                                      params={"nprobe": 32}, limit=1,
                                      check_task=ct.CheckTasks.err_res,
                                      check_items={ct.err_code: 1, ct.err_msg: "partitions have been released"})
        # ... while the still-loaded second partition keeps working
        res2, _ = partition_w2.search(data=search_vectors,
                                      anns_field=ct.default_float_vec_field_name,
                                      params={"nprobe": 32}, limit=1)
        assert len(res2) == 1
    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("data", [cf.gen_default_dataframe_data(10),
                                      cf.gen_default_list_data(10),
                                      cf.gen_default_tuple_data(10)])
    def test_partition_insert(self, data):
        """
        target: verify insert entities multiple times
        method: 1. create a collection and a partition
                2. partition.insert(data)
                3. insert data again
        expected: insert data successfully
        """
        # must match the nb=10 used in the parametrized fixtures above
        nums = 10
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        partition_w = self.init_partition_wrap(collection_w, partition_name,
                                               check_task=CheckTasks.check_partition_property,
                                               check_items={"name": partition_name,
                                                            "is_empty": True, "num_entities": 0}
                                               )
        # insert data
        partition_w.insert(data)
        # self._connect().flush([collection_w.name])  # don't need flush for issue #5737
        assert not partition_w.is_empty
        assert partition_w.num_entities == nums
        # insert data
        partition_w.insert(data)
        # self._connect().flush([collection_w.name])
        assert not partition_w.is_empty
        assert partition_w.num_entities == (nums + nums)
class TestPartitionOperations(TestcaseBase):
    """Test cases of the partition interface in operations:
    create/drop/release/insert against live, dropped, and released state."""
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_dropped_collection(self):
        """
        target: verify create partition against a dropped collection
        method: 1. create collection1
                2. drop collection1
                3. create partition in collection1
        expected: raise exception
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # drop collection
        collection_w.drop()
        # create partition failed
        self.partition_wrap.init_partition(collection_w.collection, cf.gen_unique_str(prefix),
                                           check_task=CheckTasks.err_res,
                                           check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
    @pytest.mark.tags(CaseLabel.L2)
    def test_partition_same_name_in_diff_collections(self):
        """
        target: verify create partitions with same name in diff collections
        method: 1. create a partition in collection1
                2. create a partition in collection2
        expected: create successfully
        """
        # create two collections
        collection_w1 = self.init_collection_wrap()
        collection_w2 = self.init_collection_wrap()
        # create 2 partitions in 2 diff collections
        partition_name = cf.gen_unique_str(prefix)
        self.init_partition_wrap(collection_wrap=collection_w1, name=partition_name)
        self.init_partition_wrap(collection_wrap=collection_w2, name=partition_name)
        # check result
        assert collection_w1.has_partition(partition_name)[0]
        assert collection_w2.has_partition(partition_name)[0]
    @pytest.mark.tags(CaseLabel.L2)
    def test_partition_multi_partitions_in_collection(self):
        """
        target: verify create multiple partitions in one collection
        method: create multiple partitions in one collection
        expected: create successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        for _ in range(10):
            partition_name = cf.gen_unique_str(prefix)
            # create partition with different names and check the partition exists
            self.init_partition_wrap(collection_w, partition_name)
            assert collection_w.has_partition(partition_name)[0]
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.skip(reason="skip temporarily for debug")
    def test_partition_maximum_partitions(self):
        """
        target: verify create maximum partitions
        method: 1. create maximum partitions
                2. create one more partition
        expected: raise exception
        """
        threads_num = 8
        threads = []
        def create_partition(collection, threads_n):
            # each worker creates its 1/threads_n share of max_partition_num
            for _ in range(ct.max_partition_num // threads_n):
                name = cf.gen_unique_str(prefix)
                par_wrap = ApiPartitionWrapper()
                par_wrap.init_partition(collection, name, check_task=CheckTasks.check_nothing)
        collection_w = self.init_collection_wrap()
        for _ in range(threads_num):
            t = threading.Thread(target=create_partition, args=(collection_w.collection, threads_num))
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
        # one more partition beyond the limit must be rejected
        p_name = cf.gen_unique_str()
        self.partition_wrap.init_partition(
            collection_w.collection, p_name,
            check_task=CheckTasks.err_res,
            check_items={ct.err_code: 1,
                         ct.err_msg: "maximum partition's number should be limit to 4096"})
    @pytest.mark.tags(CaseLabel.L0)
    def test_partition_drop_default_partition(self):
        """
        target: verify drop the _default partition
        method: drop the _default partition
        expected: raise exception
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # get the default partition
        default_partition, _ = collection_w.partition(ct.default_partition_name)
        partition_w = self.init_partition_wrap(collection_w, ct.default_partition_name)
        assert default_partition.name == partition_w.name
        # verify that drop partition with error
        partition_w.drop(check_task=CheckTasks.err_res,
                         check_items={ct.err_code: 1, ct.err_msg: "default partition cannot be deleted"})
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_drop_partition_twice(self):
        """
        target: verify drop the same partition twice
        method: 1.create a partition with default schema
                2. drop the partition
                3. drop the same partition again
        expected: raise exception when 2nd time
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        partition_w = self.init_partition_wrap(collection_w, partition_name)
        collection_w.has_partition(partition_name)
        # drop partition
        partition_w.drop()
        assert not collection_w.has_partition(partition_name)[0]
        # verify that drop the partition again with exception
        partition_w.drop(check_task=CheckTasks.err_res,
                         check_items={ct.err_code: 1, ct.err_msg: PartitionErrorMessage.PartitionNotExist})
    @pytest.mark.tags(CaseLabel.L2)
    def test_partition_create_and_drop_multi_times(self):
        """
        target: verify create and drop for times
        method: 1.create a partition with default schema
                2. drop the partition
                3. loop #1 and #2 for times
        expected: create and drop successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # range for 5 times
        partition_name = cf.gen_unique_str(prefix)
        for i in range(5):
            # create partition and check that the partition exists
            partition_w = self.init_partition_wrap(collection_w, partition_name)
            assert collection_w.has_partition(partition_name)[0]
            # drop partition and check that the partition not exists
            partition_w.drop()
            assert not collection_w.has_partition(partition_name)[0]
    @pytest.mark.tags(CaseLabel.L2)
    def test_partition_drop_non_empty_partition(self):
        """
        target: verify drop a partition which has data inserted
        method: 1.create a partition with default schema
                2. insert some data
                3. drop the partition
        expected: drop successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        partition_w = self.init_partition_wrap(collection_w, partition_name)
        assert collection_w.has_partition(partition_name)[0]
        # insert data to partition
        partition_w.insert(cf.gen_default_dataframe_data())
        # drop partition
        partition_w.drop()
        assert not collection_w.has_partition(partition_name)[0]
    @pytest.mark.tags(CaseLabel.L2)
    # @pytest.mark.parametrize("flush", [True, False])
    @pytest.mark.parametrize("data", [cf.gen_default_list_data(nb=3000)])
    @pytest.mark.parametrize("index_param", cf.gen_simple_index())
    def test_partition_drop_indexed_partition(self, data, index_param):
        """
        target: verify drop an indexed partition
        method: 1. create a partition
                2. insert same data
                3. create an index
                4. flush or not flush (remove flush step for issue # 5837)
                5. drop the partition
        expected: drop successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        partition_w = self.init_partition_wrap(collection_w, partition_name)
        assert collection_w.has_partition(partition_name)[0]
        # insert data to partition
        ins_res, _ = partition_w.insert(data)
        assert len(ins_res.primary_keys) == len(data[0])
        # create index of collection
        collection_w.create_index(ct.default_float_vec_field_name, index_param)
        # # flush
        # if flush:
        #     self._connect().flush([collection_w.name])
        # drop partition
        partition_w.drop()
        assert not collection_w.has_partition(partition_name)[0]
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_release_empty_partition(self):
        """
        target: verify release an empty partition
        method: 1. create a partition
                2. release the partition
        expected: release successfully
        """
        # create partition
        partition_w = self.init_partition_wrap()
        assert partition_w.is_empty
        # release partition
        partition_w.release()
        # TODO: assert no more memory consumed
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_release_dropped_partition(self):
        """
        target: verify release a dropped partition
        method: 1. create a partition
                2. drop the partition
                3. release the partition
        expected: raise exception
        """
        # create partition
        partition_w = self.init_partition_wrap()
        # drop partition
        partition_w.drop()
        # release the dropped partition and check err response
        partition_w.release(check_task=CheckTasks.err_res,
                            check_items={ct.err_code: 1, ct.err_msg: PartitionErrorMessage.PartitionNotExist})
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_release_dropped_collection(self):
        """
        target: verify release a dropped collection
        method: 1. create a collection and partition
                2. drop the collection
                3. release the partition
        expected: raise exception
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        partition_w = self.init_partition_wrap(collection_w, partition_name)
        assert collection_w.has_partition(partition_name)[0]
        # drop collection
        collection_w.drop()
        # release the partition and check err response
        partition_w.release(check_task=CheckTasks.err_res,
                            check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_release_after_collection_released(self):
        """
        target: verify release a partition after the collection released
        method: 1. create a collection and partition
                2. insert some data
                3. release the collection
                4. release the partition
        expected: partition released successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        partition_w = self.init_partition_wrap(collection_w, partition_name)
        assert collection_w.has_partition(partition_name)[0]
        # insert data to partition
        data = cf.gen_default_list_data()
        partition_w.insert(data)
        assert partition_w.num_entities == len(data[0])
        assert collection_w.num_entities == len(data[0])
        # load partition
        partition_w.load()
        # search of partition
        search_vectors = cf.gen_vectors(1, ct.default_dim)
        res_1, _ = partition_w.search(data=search_vectors,
                                      anns_field=ct.default_float_vec_field_name,
                                      params={"nprobe": 32}, limit=1)
        assert len(res_1) == 1
        # release collection
        collection_w.release()
        # search of partition: releasing the collection unloads its partitions
        res_2, _ = partition_w.search(data=search_vectors,
                                      anns_field=ct.default_float_vec_field_name,
                                      params={"nprobe": 32}, limit=1,
                                      check_task=ct.CheckTasks.err_res,
                                      check_items={ct.err_code: 0,
                                                   ct.err_msg: "not loaded into memory"})
        # release partition
        partition_w.release()
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_insert_default_partition(self):
        """
        target: verify insert data into _default partition
        method: 1. create a collection
                2. insert some data into _default partition
        expected: insert successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # get the default partition
        partition_name = ct.default_partition_name
        assert collection_w.has_partition(partition_name)[0]
        partition_w = self.init_partition_wrap(collection_w, partition_name)
        # insert data to partition
        data = cf.gen_default_dataframe_data()
        partition_w.insert(data)
        # self._connect().flush([collection_w.name])
        assert partition_w.num_entities == len(data)
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_insert_dropped_partition(self):
        """
        target: verify insert data into a dropped partition
        method: 1. create a collection
                2. insert some data into a dropped partition
        expected: raise exception
        """
        # create partition
        partition_w = self.init_partition_wrap()
        # drop partition
        partition_w.drop()
        # insert data to partition
        partition_w.insert(cf.gen_default_dataframe_data(),
                           check_task=CheckTasks.err_res,
                           check_items={ct.err_code: 1, ct.err_msg: "Partition not exist"})
        # TODO: update the assert error
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_insert_dropped_collection(self):
        """
        target: verify insert data into a dropped collection
        method: 1. create a collection
                2. insert some data into a dropped collection
        expected: raise exception
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        partition_w = self.init_partition_wrap(collection_w, partition_name)
        assert collection_w.has_partition(partition_name)[0]
        # drop collection
        collection_w.drop()
        # insert data to partition
        partition_w.insert(cf.gen_default_dataframe_data(),
                           check_task=CheckTasks.err_res,
                           check_items={ct.err_code: 1, ct.err_msg: "None Type"})
    @pytest.mark.tags(CaseLabel.L2)
    def test_partition_insert_maximum_size_data(self):
        """
        target: verify insert maximum size data(256M?) a time
        method: 1. create a partition
                2. insert maximum size data
        expected: insert successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_w = self.init_partition_wrap(collection_w)
        # insert data to partition
        max_size = 100000  # TODO: clarify the max size of data
        ins_res, _ = partition_w.insert(cf.gen_default_dataframe_data(max_size), timeout=40)
        assert len(ins_res.primary_keys) == max_size
        # self._connect().flush([collection_w.name])
        assert partition_w.num_entities == max_size
    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("dim", [ct.default_dim - 1, ct.default_dim + 1])
    def test_partition_insert_mismatched_dimensions(self, dim):
        """
        target: verify insert data whose dim mismatches the collection schema
        method: 1. create a collection with default dim
                2. insert dismatch dim data
        expected: raise exception
        """
        # create partition
        partition_w = self.init_partition_wrap()
        data = cf.gen_default_list_data(nb=10, dim=dim)
        # insert data to partition
        partition_w.insert(data, check_task=CheckTasks.err_res,
                           check_items={ct.err_code: 1, ct.err_msg: "but entities field dim"})
    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("sync", [True, False])
    def test_partition_insert_sync(self, sync):
        """
        target: verify insert sync
        method: 1. create a partition
                2. insert data in sync
        expected: insert successfully
        """
        pass
|
PyV8.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import sys, os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
import json
except ImportError:
import simplejson as json
import _PyV8
__author__ = 'Flier Lu <flier.lu@gmail.com>'
__version__ = '1.0'
__all__ = ["JSError", "JSArray", "JSClass", "JSEngine", "JSContext", \
"JSStackTrace", "JSStackFrame", \
"JSExtension", "JSLocker", "JSUnlocker", "debugger", "profiler"]
class JSError(Exception):
    """Python exception that wraps a native _PyV8 error object and
    transparently delegates attribute access to it."""
    def __init__(self, impl):
        Exception.__init__(self)
        self._impl = impl
    def __str__(self):
        return str(self._impl)
    def __unicode__(self):
        return unicode(self._impl)
    def __getattribute__(self, name):
        # Prefer the wrapped implementation's attributes; fall back to the
        # exception object itself for anything the impl does not provide.
        fetch = super(JSError, self).__getattribute__
        wrapped = fetch("_impl")
        try:
            return getattr(wrapped, name)
        except AttributeError:
            return fetch(name)
# Register JSError as the Python-side class the native layer raises for V8
# errors, and re-export the native array/extension types under public names.
_PyV8._JSError._jsclass = JSError
JSArray = _PyV8.JSArray
JSExtension = _PyV8.JSExtension
class JSLocker(_PyV8.JSLocker):
    # Context-manager wrapper around the native V8 locker.  The lock must be
    # acquired while no JSContext is entered, and released only after the
    # context has been left; both directions are enforced below.
    def __enter__(self):
        self.enter()
        if JSContext.entered:
            # undo the enter before raising so the native lock is not leaked
            self.leave()
            raise RuntimeError("Lock should be acquired before enter the context")
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        if JSContext.entered:
            self.leave()
            raise RuntimeError("Lock should be released after leave the context")
        self.leave()
    def __nonzero__(self):
        # Python 2 truth protocol: True while the lock is held
        return self.entered()
class JSUnlocker(_PyV8.JSUnlocker):
    """Context manager that temporarily releases the V8 lock for the
    duration of the ``with`` block."""
    def __enter__(self):
        self.enter()
        return self
    def __exit__(self, exc_type, exc_val, tb):
        self.leave()
    def __nonzero__(self):
        # Python 2 truth protocol: True while the unlocker is active
        return self.entered()
class JSClass(object):
    """Mixin that gives Python objects the standard JavaScript ``Object``
    surface (toString, hasOwnProperty, getter/setter definition, ...) when
    they are exposed to V8."""
    def __getattr__(self, name):
        if name == 'constructor':
            return JSClassConstructor(self.__class__)
        raise AttributeError(name)
    def toString(self):
        "Returns a string representation of an object."
        return "[object %s]" % self.__class__.__name__
    def toLocaleString(self):
        "Returns a value as a string value appropriate to the host environment's current locale."
        return self.toString()
    def valueOf(self):
        "Returns the primitive value of the specified object."
        return self
    def hasOwnProperty(self, name):
        "Returns a Boolean value indicating whether an object has a property with the specified name."
        return hasattr(self, name)
    def isPrototypeOf(self, obj):
        "Returns a Boolean value indicating whether an object exists in the prototype chain of another object."
        raise NotImplementedError()
    def __defineGetter__(self, name, getter):
        "Binds an object's property to a function to be called when that property is looked up."
        # preserve an existing setter, if the property already exists
        if hasattr(type(self), name):
            setter = getattr(type(self), name).fset
        else:
            setter = None
        setattr(type(self), name, property(fget=getter, fset=setter))
    def __lookupGetter__(self, name):
        "Return the function bound as a getter to the specified property."
        # was: self.name.fget -- that read the literal attribute 'name'
        # instead of the property named by the argument
        return getattr(type(self), name).fget
    def __defineSetter__(self, name, setter):
        "Binds an object's property to a function to be called when an attempt is made to set that property."
        # preserve an existing getter, if the property already exists
        if hasattr(type(self), name):
            getter = getattr(type(self), name).fget
        else:
            getter = None
        setattr(type(self), name, property(fget=getter, fset=setter))
    def __lookupSetter__(self, name):
        "Return the function bound as a setter to the specified property."
        # was: self.name.fset -- same literal-attribute bug as __lookupGetter__
        return getattr(type(self), name).fset
class JSClassConstructor(JSClass):
def __init__(self, cls):
self.cls = cls
@property
def name(self):
return self.cls.__name__
def toString(self):
return "function %s() {\n [native code]\n}" % self.name
def __call__(self, *args, **kwds):
return self.cls(*args, **kwds)
class JSDebug(object):
class FrameData(object):
def __init__(self, frame, count, name, value):
self.frame = frame
self.count = count
self.name = name
self.value = value
def __len__(self):
return self.count(self.frame)
def __iter__(self):
for i in xrange(self.count(self.frame)):
yield (self.name(self.frame, i), self.value(self.frame, i))
class Frame(object):
def __init__(self, frame):
self.frame = frame
@property
def index(self):
return int(self.frame.index())
@property
def function(self):
return self.frame.func()
@property
def receiver(self):
return self.frame.receiver()
@property
def isConstructCall(self):
return bool(self.frame.isConstructCall())
@property
def isDebuggerFrame(self):
return bool(self.frame.isDebuggerFrame())
@property
def argumentCount(self):
return int(self.frame.argumentCount())
def argumentName(self, idx):
return str(self.frame.argumentName(idx))
def argumentValue(self, idx):
return self.frame.argumentValue(idx)
@property
def arguments(self):
return FrameData(self, self.argumentCount, self.argumentName, self.argumentValue)
@property
def localCount(self, idx):
return int(self.frame.localCount())
def localName(self, idx):
return str(self.frame.localName(idx))
def localValue(self, idx):
return self.frame.localValue(idx)
@property
def locals(self):
return FrameData(self, self.localCount, self.localName, self.localValue)
@property
def sourcePosition(self):
return self.frame.sourcePosition()
@property
def sourceLine(self):
return int(self.frame.sourceLine())
@property
def sourceColumn(self):
return int(self.frame.sourceColumn())
@property
def sourceLineText(self):
return str(self.frame.sourceLineText())
def evaluate(self, source, disable_break = True):
return self.frame.evaluate(source, disable_break)
@property
def invocationText(self):
return str(self.frame.invocationText())
@property
def sourceAndPositionText(self):
return str(self.frame.sourceAndPositionText())
@property
def localsText(self):
return str(self.frame.localsText())
def __str__(self):
return str(self.frame.toText())
class Frames(object):
def __init__(self, state):
self.state = state
def __len__(self):
return self.state.frameCount
def __iter__(self):
for i in xrange(self.state.frameCount):
yield self.state.frame(i)
class State(object):
def __init__(self, state):
self.state = state
@property
def frameCount(self):
return int(self.state.frameCount())
def frame(self, idx = None):
return JSDebug.Frame(self.state.frame(idx))
@property
def selectedFrame(self):
return int(self.state.selectedFrame())
@property
def frames(self):
return JSDebug.Frames(self)
def __repr__(self):
s = StringIO()
try:
for frame in self.frames:
s.write(str(frame))
return s.getvalue()
finally:
s.close()
class DebugEvent(object):
pass
class StateEvent(DebugEvent):
__state = None
@property
def state(self):
if not self.__state:
self.__state = JSDebug.State(self.event.executionState())
return self.__state
class BreakEvent(StateEvent):
type = _PyV8.JSDebugEvent.Break
def __init__(self, event):
self.event = event
class ExceptionEvent(StateEvent):
type = _PyV8.JSDebugEvent.Exception
def __init__(self, event):
self.event = event
class NewFunctionEvent(DebugEvent):
type = _PyV8.JSDebugEvent.NewFunction
def __init__(self, event):
self.event = event
class Script(object):
def __init__(self, script):
self.script = script
@property
def source(self):
return self.script.source()
@property
def id(self):
return self.script.id()
@property
def name(self):
return self.script.name()
@property
def lineOffset(self):
return self.script.lineOffset()
@property
def lineCount(self):
return self.script.lineCount()
@property
def columnOffset(self):
return self.script.columnOffset()
@property
def type(self):
return self.script.type()
def __repr__(self):
return "<%s script %s @ %d:%d> : '%s'" % (self.type, self.name,
self.lineOffset, self.columnOffset,
self.source)
class CompileEvent(StateEvent):
def __init__(self, event):
self.event = event
@property
def script(self):
if not hasattr(self, "_script"):
setattr(self, "_script", JSDebug.Script(self.event.script()))
return self._script
def __str__(self):
return str(self.script)
class BeforeCompileEvent(CompileEvent):
type = _PyV8.JSDebugEvent.BeforeCompile
def __init__(self, event):
JSDebug.CompileEvent.__init__(self, event)
def __repr__(self):
return "before compile script: %s\n%s" % (repr(self.script), repr(self.state))
class AfterCompileEvent(CompileEvent):
type = _PyV8.JSDebugEvent.AfterCompile
def __init__(self, event):
JSDebug.CompileEvent.__init__(self, event)
def __repr__(self):
return "after compile script: %s\n%s" % (repr(self.script), repr(self.state))
onMessage = None
onBreak = None
onException = None
onNewFunction = None
onBeforeCompile = None
onAfterCompile = None
def __init__(self):
self.seq = 0
def nextSeq(self):
seq = self.seq
self.seq += 1
return seq
def isEnabled(self):
return _PyV8.debug().enabled
def setEnabled(self, enable):
dbg = _PyV8.debug()
if enable:
dbg.onDebugEvent = self.onDebugEvent
dbg.onDebugMessage = self.onDebugMessage
dbg.onDispatchDebugMessages = self.onDispatchDebugMessages
else:
dbg.onDebugEvent = None
dbg.onDebugMessage = None
dbg.onDispatchDebugMessages = None
dbg.enabled = enable
enabled = property(isEnabled, setEnabled)
def onDebugMessage(self, msg):
if self.onMessage:
self.onMessage(json.loads(msg))
def onDebugEvent(self, type, evt):
if type == _PyV8.JSDebugEvent.Break:
if self.onBreak: self.onBreak(JSDebug.BreakEvent(evt))
elif type == _PyV8.JSDebugEvent.Exception:
if self.onException: self.onException(JSDebug.ExceptionEvent(evt))
elif type == _PyV8.JSDebugEvent.NewFunction:
if self.onNewFunction: self.onNewFunction(JSDebug.NewFunctionEvent(evt))
elif type == _PyV8.JSDebugEvent.BeforeCompile:
if self.onBeforeCompile: self.onBeforeCompile(JSDebug.BeforeCompileEvent(evt))
elif type == _PyV8.JSDebugEvent.AfterCompile:
if self.onAfterCompile: self.onAfterCompile(JSDebug.AfterCompileEvent(evt))
def onDispatchDebugMessages(self):
return True
def breakForDebug(self):
_PyV8.debug().debugBreak()
def breakForCommand(self):
_PyV8.debug().debugBreakForCommand()
def sendCommand(self, cmd, *args, **kwds):
request = json.dumps({
'seq': self.nextSeq(),
'type': 'request',
'command': cmd,
'arguments': kwds
})
_PyV8.debug().sendCommand(request)
return request
def debugContinue(self, action='next', steps=1):
return self.sendCommand('continue', stepaction=action)
def stepNext(self, steps=1):
"""Step to the next statement in the current function."""
return self.debugContinue(action='next', steps=steps)
def stepIn(self, steps=1):
"""Step into new functions invoked or the next statement in the current function."""
return self.debugContinue(action='in', steps=steps)
def stepOut(self, steps=1):
"""Step out of the current function."""
return self.debugContinue(action='out', steps=steps)
def stepMin(self, steps=1):
"""Perform a minimum step in the current function."""
return self.debugContinue(action='out', steps=steps)
debugger = JSDebug()
class JSProfiler(_PyV8.JSProfiler):
Modules = _PyV8.JSProfilerModules
@property
def logs(self):
pos = 0
while True:
size, buf = self.getLogLines(pos)
if size == 0:
break
for line in buf.split('\n'):
yield line
pos += size
profiler = JSProfiler()
class JSEngine(_PyV8.JSEngine):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
del self
JSStackTrace = _PyV8.JSStackTrace
JSStackTrace.Options = _PyV8.JSStackTraceOptions
JSStackFrame = _PyV8.JSStackFrame
class JSContext(_PyV8.JSContext):
def __init__(self, obj=None, extensions=[]):
if JSLocker.actived:
self.lock = JSLocker()
self.lock.enter()
_PyV8.JSContext.__init__(self, obj, extensions)
def __enter__(self):
self.enter()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.leave()
if hasattr(JSLocker, 'lock'):
self.lock.leave()
self.lock = None
del self
# contribute by marc boeker <http://code.google.com/u/marc.boeker/>
def convert(obj):
if type(obj) == _PyV8.JSArray:
return [convert(v) for v in obj]
if type(obj) == _PyV8.JSObject:
return dict([[str(k), convert(obj.__getattr__(str(k)))] for k in obj.__members__])
return obj
if hasattr(_PyV8, 'AstScope'):
class AST:
Scope = _PyV8.AstScope
Var = _PyV8.AstVariable
Node = _PyV8.AstNode
Statement = _PyV8.AstStatement
Expression = _PyV8.AstExpression
Expression.Context = _PyV8.AstExpressionContext
Breakable = _PyV8.AstBreakableStatement
Block = _PyV8.AstBlock
Declaration = _PyV8.AstDeclaration
Iteration = _PyV8.AstIterationStatement
DoWhile = _PyV8.AstDoWhileStatement
While = _PyV8.AstWhileStatement
For = _PyV8.AstForStatement
ForIn = _PyV8.AstForInStatement
ExpressionStatement = _PyV8.AstExpressionStatement
Continue = _PyV8.AstContinueStatement
Break = _PyV8.AstBreakStatement
Return = _PyV8.AstReturnStatement
WithEnter = _PyV8.AstWithEnterStatement
WithExit = _PyV8.AstWithExitStatement
Case = _PyV8.AstCaseClause
Switch = _PyV8.AstSwitchStatement
Try = _PyV8.AstTryStatement
TryCatch = _PyV8.AstTryCatchStatement
TryFinally = _PyV8.AstTryFinallyStatement
Debugger = _PyV8.AstDebuggerStatement
Empty = _PyV8.AstEmptyStatement
Literal = _PyV8.AstLiteral
MaterializedLiteral = _PyV8.AstMaterializedLiteral
Object = _PyV8.AstObjectLiteral
RegExp = _PyV8.AstRegExpLiteral
Array = _PyV8.AstArrayLiteral
CatchExtension = _PyV8.AstCatchExtensionObject
VarProxy = _PyV8.AstVariableProxy
Slot = _PyV8.AstSlot
Property = _PyV8.AstProperty
Call = _PyV8.AstCall
CallNew = _PyV8.AstCallNew
CallRuntime = _PyV8.AstCallRuntime
Op = _PyV8.AstOperation
UnaryOp = _PyV8.AstUnaryOperation
BinOp = _PyV8.AstBinaryOperation
CountOp = _PyV8.AstCountOperation
CompOp = _PyV8.AstCompareOperation
Conditional = _PyV8.AstConditional
Assignment = _PyV8.AstAssignment
Throw = _PyV8.AstThrow
Function = _PyV8.AstFunctionLiteral
SharedFunction = _PyV8.AstSharedFunctionInfoLiteral
This = _PyV8.AstThisFunction
__all__ += ['AST']
class PrettyPrint():
def __init__(self):
self.out = StringIO()
def onFunction(func):
print >>self.out, "function ", func.name, "(",
for i in range(func.scope.num_parameters):
if i > 0: print ", ",
print >>self.out, func.scope.parameter(i).name
print >>self.out, ")"
print >>self.out, "{"
print >>self.out, "}"
def __str__(self):
return self.out.getvalue()
import datetime
import unittest
import logging
import traceback
class TestContext(unittest.TestCase):
def testMultiNamespace(self):
self.assert_(not bool(JSContext.inContext))
self.assert_(not bool(JSContext.entered))
class Global(object):
name = "global"
g = Global()
with JSContext(g) as ctxt:
self.assert_(bool(JSContext.inContext))
self.assertEquals(g.name, str(JSContext.entered.locals.name))
self.assertEquals(g.name, str(JSContext.current.locals.name))
class Local(object):
name = "local"
l = Local()
with JSContext(l):
self.assert_(bool(JSContext.inContext))
self.assertEquals(l.name, str(JSContext.entered.locals.name))
self.assertEquals(l.name, str(JSContext.current.locals.name))
self.assert_(bool(JSContext.inContext))
self.assertEquals(g.name, str(JSContext.entered.locals.name))
self.assertEquals(g.name, str(JSContext.current.locals.name))
self.assert_(not bool(JSContext.entered))
self.assert_(not bool(JSContext.inContext))
def _testMultiContext(self):
# Create an environment
with JSContext() as ctxt0:
ctxt0.securityToken = "password"
global0 = ctxt0.locals
global0.custom = 1234
self.assertEquals(1234, int(global0.custom))
# Create an independent environment
with JSContext() as ctxt1:
ctxt1.securityToken = ctxt0.securityToken
global1 = ctxt1.locals
global1.custom = 1234
self.assertEquals(1234, int(global0.custom))
self.assertEquals(1234, int(global1.custom))
# Now create a new context with the old global
with JSContext(global1) as ctxt2:
ctxt2.securityToken = ctxt1.securityToken
self.assertRaises(AttributeError, int, global1.custom)
self.assertRaises(AttributeError, int, global2.custom)
def _testSecurityChecks(self):
with JSContext() as env1:
env1.securityToken = "foo"
# Create a function in env1.
env1.eval("spy=function(){return spy;}")
spy = env1.locals.spy
self.assert_(isinstance(spy, _PyV8.JSFunction))
# Create another function accessing global objects.
env1.eval("spy2=function(){return 123;}")
spy2 = env1.locals.spy2
self.assert_(isinstance(spy2, _PyV8.JSFunction))
# Switch to env2 in the same domain and invoke spy on env2.
env2 = JSContext()
env2.securityToken = "foo"
with env2:
result = spy.apply(env2.locals)
self.assert_(isinstance(result, _PyV8.JSFunction))
env2.securityToken = "bar"
# Call cross_domain_call, it should throw an exception
with env2:
self.assertRaises(JSError, spy2.apply, env2.locals)
def _testCrossDomainDelete(self):
with JSContext() as env1:
env2 = JSContext()
# Set to the same domain.
env1.securityToken = "foo"
env2.securityToken = "foo"
env1.locals.prop = 3
env2.locals.env1 = env1.locals
# Change env2 to a different domain and delete env1.prop.
#env2.securityToken = "bar"
self.assertEquals(3, int(env1.eval("prop")))
print env1.eval("env1")
with env2:
self.assertEquals(3, int(env2.eval("this.env1.prop")))
self.assertEquals("false", str(e.eval("delete env1.prop")))
# Check that env1.prop still exists.
self.assertEquals(3, int(env1.locals.prop))
class TestWrapper(unittest.TestCase):
def testObject(self):
with JSContext() as ctxt:
o = ctxt.eval("new Object()")
self.assert_(hash(o) > 0)
o1 = o.clone()
self.assertEquals(hash(o1), hash(o))
self.assert_(o != o1)
def testAutoConverter(self):
with JSContext() as ctxt:
ctxt.eval("""
var_i = 1;
var_f = 1.0;
var_s = "test";
var_b = true;
""")
vars = ctxt.locals
var_i = vars.var_i
self.assert_(var_i)
self.assertEquals(1, int(var_i))
var_f = vars.var_f
self.assert_(var_f)
self.assertEquals(1.0, float(vars.var_f))
var_s = vars.var_s
self.assert_(var_s)
self.assertEquals("test", str(vars.var_s))
var_b = vars.var_b
self.assert_(var_b)
self.assert_(bool(var_b))
attrs = dir(ctxt.locals)
self.assert_(attrs)
self.assert_("var_i" in attrs)
self.assert_("var_f" in attrs)
self.assert_("var_s" in attrs)
self.assert_("var_b" in attrs)
def testExactConverter(self):
class MyInteger(int, JSClass):
pass
class MyString(str, JSClass):
pass
class MyUnicode(unicode, JSClass):
pass
class MyDateTime(datetime.time, JSClass):
pass
class Global(JSClass):
var_bool = True
var_int = 1
var_float = 1.0
var_str = 'str'
var_unicode = u'unicode'
var_datetime = datetime.datetime.now()
var_date = datetime.date.today()
var_time = datetime.time()
var_myint = MyInteger()
var_mystr = MyString('mystr')
var_myunicode = MyUnicode('myunicode')
var_mytime = MyDateTime()
with JSContext(Global()) as ctxt:
typename = ctxt.eval("(function (name) { return this[name].constructor.name; })")
typeof = ctxt.eval("(function (name) { return typeof(this[name]); })")
self.assertEquals('Boolean', typename('var_bool'))
self.assertEquals('Number', typename('var_int'))
self.assertEquals('Number', typename('var_float'))
self.assertEquals('String', typename('var_str'))
self.assertEquals('String', typename('var_unicode'))
self.assertEquals('Date', typename('var_datetime'))
self.assertEquals('Date', typename('var_date'))
self.assertEquals('Date', typename('var_time'))
self.assertEquals('MyInteger', typename('var_myint'))
self.assertEquals('MyString', typename('var_mystr'))
self.assertEquals('MyUnicode', typename('var_myunicode'))
self.assertEquals('MyDateTime', typename('var_mytime'))
self.assertEquals('object', typeof('var_myint'))
self.assertEquals('object', typeof('var_mystr'))
self.assertEquals('object', typeof('var_myunicode'))
self.assertEquals('object', typeof('var_mytime'))
def testFunction(self):
with JSContext() as ctxt:
func = ctxt.eval("""
(function ()
{
function a()
{
return "abc";
}
return a();
})
""")
self.assertEquals("abc", str(func()))
self.assert_(func != None)
self.assertFalse(func == None)
func = ctxt.eval("(function test() {})")
self.assertEquals("test", func.name)
#TODO fix me, why the setter doesn't work?
func.name = "hello"
#self.assertEquals("hello", func.name)
def testCall(self):
class Hello(object):
def __call__(self, name):
return "hello " + name
class Global(JSClass):
hello = Hello()
with JSContext(Global()) as ctxt:
self.assertEquals("hello flier", ctxt.eval("hello('flier')"))
def testJSError(self):
with JSContext() as ctxt:
try:
ctxt.eval('throw "test"')
self.fail()
except:
self.assert_(JSError, sys.exc_type)
def testErrorInfo(self):
with JSContext() as ctxt:
with JSEngine() as engine:
try:
engine.compile("""
function hello()
{
throw Error("hello world");
}
hello();""", "test", 10, 10).run()
self.fail()
except JSError, e:
self.assert_(str(e).startswith('JSError: Error: hello world ( test @ 14 : 34 ) ->'))
self.assertEqual("Error", e.name)
self.assertEqual("hello world", e.message)
self.assertEqual("test", e.scriptName)
self.assertEqual(14, e.lineNum)
self.assertEqual(102, e.startPos)
self.assertEqual(103, e.endPos)
self.assertEqual(34, e.startCol)
self.assertEqual(35, e.endCol)
self.assertEqual('throw Error("hello world");', e.sourceLine.strip())
self.assertEqual('Error: hello world\n' +
' at Error (unknown source)\n' +
' at hello (test:14:35)\n' +
' at test:17:25', e.stackTrace)
def testStackTrace(self):
class Global(JSClass):
def GetCurrentStackTrace(self, limit):
return JSStackTrace.GetCurrentStackTrace(4, JSStackTrace.Options.Detailed)
with JSContext(Global()) as ctxt:
st = ctxt.eval("""
function a()
{
return GetCurrentStackTrace(10);
}
function b()
{
return eval("a()");
}
function c()
{
return new b();
}
c();""", "test")
self.assertEquals(4, len(st))
self.assertEquals("\tat a (test:4:28)\n\tat (eval)\n\tat b (test:8:28)\n\tat c (test:12:28)\n", str(st))
self.assertEquals("test.a (4:28)\n. (1:1) eval\ntest.b (8:28) constructor\ntest.c (12:28)",
"\n".join(["%s.%s (%d:%d)%s%s" % (
f.scriptName, f.funcName, f.lineNum, f.column,
' eval' if f.isEval else '',
' constructor' if f.isConstructor else '') for f in st]))
def testPythonException(self):
class Global(JSClass):
def raiseException(self):
raise RuntimeError("Hello")
with JSContext(Global()) as ctxt:
r = ctxt.eval("""
msg ="";
try
{
this.raiseException()
}
catch(e)
{
msg += "catch " + e + ";";
}
finally
{
msg += "finally";
}""")
self.assertEqual("catch Error: Hello;finally", str(ctxt.locals.msg))
def testExceptionMapping(self):
class Global(JSClass):
def raiseIndexError(self):
return [1, 2, 3][5]
def raiseAttributeError(self):
None.hello()
def raiseSyntaxError(self):
eval("???")
def raiseTypeError(self):
int(sys)
def raiseNotImplementedError(self):
raise NotImplementedError("Not support")
with JSContext(Global()) as ctxt:
ctxt.eval("try { this.raiseIndexError(); } catch (e) { msg = e; }")
self.assertEqual("RangeError: list index out of range", str(ctxt.locals.msg))
ctxt.eval("try { this.raiseAttributeError(); } catch (e) { msg = e; }")
self.assertEqual("ReferenceError: 'NoneType' object has no attribute 'hello'", str(ctxt.locals.msg))
ctxt.eval("try { this.raiseSyntaxError(); } catch (e) { msg = e; }")
self.assertEqual("SyntaxError: invalid syntax", str(ctxt.locals.msg))
ctxt.eval("try { this.raiseTypeError(); } catch (e) { msg = e; }")
self.assertEqual("TypeError: int() argument must be a string or a number, not 'module'", str(ctxt.locals.msg))
ctxt.eval("try { this.raiseNotImplementedError(); } catch (e) { msg = e; }")
self.assertEqual("Error: Not support", str(ctxt.locals.msg))
def testArray(self):
with JSContext() as ctxt:
array = ctxt.eval("""
var array = new Array();
for (i=0; i<10; i++)
{
array[i] = 10-i;
}
array;
""")
self.assert_(isinstance(array, _PyV8.JSArray))
self.assertEqual(10, len(array))
self.assert_(5 in array)
self.assertFalse(15 in array)
l = list(array)
self.assertEqual(10, len(l))
for i in xrange(10):
self.assertEqual(10-i, array[i])
self.assertEqual(10-i, l[i])
array[5] = 0
self.assertEqual(0, array[5])
del array[5]
self.assertRaises(IndexError, lambda: array[5])
ctxt.locals.array1 = JSArray(5)
ctxt.locals.array2 = JSArray([1, 2, 3, 4, 5])
for i in xrange(len(ctxt.locals.array2)):
ctxt.locals.array1[i] = ctxt.locals.array2[i] * 10
ctxt.eval("""
var sum = 0;
for (i=0; i<array1.length; i++)
sum += array1[i]
for (i=0; i<array2.length; i++)
sum += array2[i]
""")
self.assertEqual(165, ctxt.locals.sum)
ctxt.locals.array3 = [1, 2, 3, 4, 5]
self.assert_(ctxt.eval('array3[1] === 2'))
self.assert_(ctxt.eval('array3[9] === undefined'))
def testMultiDimArray(self):
with JSContext() as ctxt:
ret = ctxt.eval("""
({
'test': function(){
return [
[ 1, 'abla' ],
[ 2, 'ajkss' ],
]
}
})
""").test()
self.assertEquals([[1, 'abla'], [2, 'ajkss']], convert(ret))
def testLazyConstructor(self):
class Globals(JSClass):
def __init__(self):
self.array=JSArray([1,2,3])
with JSContext(Globals()) as ctxt:
self.assertEqual(2, ctxt.eval("""array[1]"""))
def testForEach(self):
class NamedClass(JSClass):
foo = 1
def __init__(self):
self.bar = 2
def gen(x):
yield 0
yield 1
yield 2
with JSContext() as ctxt:
func = ctxt.eval("""(function (k) {
var result = [];
for (var prop in k) {
result.push(prop);
}
return result;
})""")
self.assertEquals(["bar"], list(func(NamedClass())))
self.assertEquals(["0", "1", "2"], list(func([1, 2, 3])))
self.assertEquals(["1", "2", "3"], list(func({1:1, 2:2, 3:3})))
self.assertEquals(["0", "1", "2"], list(func(gen(3))))
def testDict(self):
import UserDict
with JSContext() as ctxt:
obj = ctxt.eval("var r = { 'a' : 1, 'b' : 2 }; r")
self.assertEqual(1, obj.a)
self.assertEqual(2, obj.b)
self.assertEqual({ 'a' : 1, 'b' : 2 }, dict(obj))
self.assertEqual({ 'a': 1,
'b': [1, 2, 3],
'c': { 'str' : 'goofy',
'float' : 1.234,
'obj' : { 'name': 'john doe' }},
'd': True,
'e': None },
convert(ctxt.eval("""var x =
{ a: 1,
b: [1, 2, 3],
c: { str: 'goofy',
float: 1.234,
obj: { name: 'john doe' }},
d: true,
e: null }; x""")))
def testDate(self):
with JSContext() as ctxt:
now1 = ctxt.eval("new Date();")
self.assert_(now1)
now2 = datetime.datetime.utcnow()
delta = now2 - now1 if now2 > now1 else now1 - now2
self.assert_(delta < datetime.timedelta(seconds=1))
func = ctxt.eval("(function (d) { return d.toString(); })")
now = datetime.datetime.now()
self.assert_(str(func(now)).startswith(now.strftime("%a %b %d %Y %H:%M:%S")))
def testUnicode(self):
with JSContext() as ctxt:
self.assertEquals(u"人", unicode(ctxt.eval("\"人\""), "utf-8"))
self.assertEquals(u"é", unicode(ctxt.eval("\"é\""), "utf-8"))
func = ctxt.eval("(function (msg) { return msg.length; })")
self.assertEquals(2, func(u"测试"))
def testClassicStyleObject(self):
class FileSystemWarpper:
@property
def cwd(self):
return os.getcwd()
class Global:
@property
def fs(self):
return FileSystemWarpper()
with JSContext(Global()) as ctxt:
self.assertEquals(os.getcwd(), ctxt.eval("fs.cwd"))
def testRefCount(self):
count = sys.getrefcount(None)
class Global(JSClass):
pass
with JSContext(Global()) as ctxt:
ctxt.eval("""
var none = null;
""")
self.assertEquals(count+1, sys.getrefcount(None))
ctxt.eval("""
var none = null;
""")
self.assertEquals(count+1, sys.getrefcount(None))
def testProperty(self):
class Global(JSClass):
def __init__(self, name):
self._name = name
def getname(self):
return self._name
def setname(self, name):
self._name = name
def delname(self):
self._name = 'deleted'
name = property(getname, setname, delname)
with JSContext(Global('world')) as ctxt:
self.assertEquals('world', ctxt.eval("name"))
self.assertEquals('flier', ctxt.eval("name = 'flier';"))
self.assertEquals('flier', ctxt.eval("name"))
self.assert_(ctxt.eval("delete name")) # FIXME
#self.assertEquals('deleted', ctxt.eval("name"))
ctxt.eval("__defineGetter__('name', function() { return 'fixed'; });")
self.assertEquals('fixed', ctxt.eval("name"))
def testDestructor(self):
import gc
owner = self
owner.deleted = False
class Hello(object):
def say(self):
pass
def __del__(self):
owner.deleted = True
def test():
with JSContext() as ctxt:
fn = ctxt.eval("(function (obj) { obj.say(); })")
obj = Hello()
self.assert_(2, sys.getrefcount(obj))
fn(obj)
self.assert_(3, sys.getrefcount(obj))
del obj
test()
self.assertFalse(owner.deleted)
JSEngine.collect()
gc.collect()
self.assert_(self.deleted)
def testNullInString(self):
with JSContext() as ctxt:
fn = ctxt.eval("(function (s) { return s; })")
self.assertEquals("hello \0 world", fn("hello \0 world"))
class TestMultithread(unittest.TestCase):
def testLocker(self):
self.assertFalse(JSLocker.actived)
self.assertFalse(JSLocker.locked)
with JSLocker() as outter_locker:
self.assertTrue(JSLocker.actived)
self.assertTrue(JSLocker.locked)
self.assertTrue(outter_locker)
with JSLocker() as inner_locker:
self.assertTrue(JSLocker.locked)
self.assertTrue(outter_locker)
self.assertTrue(inner_locker)
with JSUnlocker() as unlocker:
self.assertFalse(JSLocker.locked)
self.assertTrue(outter_locker)
self.assertTrue(inner_locker)
self.assertTrue(JSLocker.locked)
self.assertTrue(JSLocker.actived)
self.assertFalse(JSLocker.locked)
locker = JSLocker()
with JSContext():
self.assertRaises(RuntimeError, locker.__enter__)
self.assertRaises(RuntimeError, locker.__exit__, None, None, None)
del locker
def testMultiPythonThread(self):
import time, threading
class Global:
count = 0
started = threading.Event()
finished = threading.Semaphore(0)
def sleep(self, ms):
time.sleep(ms / 1000.0)
self.count += 1
g = Global()
def run():
with JSContext(g) as ctxt:
ctxt.eval("""
started.wait();
for (i=0; i<10; i++)
{
sleep(100);
}
finished.release();
""")
threading.Thread(target=run).start()
now = time.time()
self.assertEqual(0, g.count)
g.started.set()
g.finished.acquire()
self.assertEqual(10, g.count)
self.assert_((time.time() - now) >= 1)
def testMultiJavascriptThread(self):
import time, thread, threading
class Global:
result = []
def add(self, value):
with JSUnlocker() as unlocker:
time.sleep(0.1)
self.result.append(value)
g = Global()
def run():
with JSContext(g) as ctxt:
ctxt.eval("""
for (i=0; i<10; i++)
add(i);
""")
threads = [threading.Thread(target=run), threading.Thread(target=run)]
with JSLocker():
for t in threads: t.start()
for t in threads: t.join()
self.assertEqual(20, len(g.result))
def _testPreemptionJavascriptThreads(self):
import time, thread, threading
class Global:
result = []
def add(self, value):
# we use preemption scheduler to switch between threads
# so, just comment the JSUnlocker
#
# with JSUnlocker() as unlocker:
time.sleep(0.1)
self.result.append(value)
g = Global()
def run():
with JSContext(g) as ctxt:
ctxt.eval("""
for (i=0; i<10; i++)
add(i);
""")
threads = [threading.Thread(target=run), threading.Thread(target=run)]
with JSLocker() as locker:
JSLocker.startPreemption(100)
for t in threads: t.start()
for t in threads: t.join()
self.assertEqual(20, len(g.result))
class TestEngine(unittest.TestCase):
def testClassProperties(self):
with JSContext() as ctxt:
self.assert_(str(JSEngine.version).startswith("2."))
self.assertFalse(JSEngine.dead)
def testCompile(self):
with JSContext() as ctxt:
with JSEngine() as engine:
s = engine.compile("1+2")
self.assert_(isinstance(s, _PyV8.JSScript))
self.assertEquals("1+2", s.source)
self.assertEquals(3, int(s.run()))
def testPrecompile(self):
with JSContext() as ctxt:
with JSEngine() as engine:
data = engine.precompile("1+2")
self.assert_(data)
self.assertEquals(28, len(data))
s = engine.compile("1+2", precompiled=data)
self.assert_(isinstance(s, _PyV8.JSScript))
self.assertEquals("1+2", s.source)
self.assertEquals(3, int(s.run()))
def testExtension(self):
extSrc = """function hello(name) { return "hello " + name + " from javascript"; }"""
extJs = JSExtension("hello/javascript", extSrc)
self.assert_(extJs)
self.assertEqual("hello/javascript", extJs.name)
self.assertEqual(extSrc, extJs.source)
self.assertFalse(extJs.autoEnable)
self.assertTrue(extJs.registered)
TestEngine.extJs = extJs
with JSContext(extensions=['hello/javascript']) as ctxt:
self.assertEqual("hello flier from javascript", ctxt.eval("hello('flier')"))
# test the auto enable property
with JSContext() as ctxt:
self.assertRaises(JSError, ctxt.eval, "hello('flier')")
extJs.autoEnable = True
self.assertTrue(extJs.autoEnable)
with JSContext() as ctxt:
self.assertEqual("hello flier from javascript", ctxt.eval("hello('flier')"))
extJs.autoEnable = False
self.assertFalse(extJs.autoEnable)
with JSContext() as ctxt:
self.assertRaises(JSError, ctxt.eval, "hello('flier')")
def testNativeExtension(self):
extSrc = "native function hello();"
extPy = JSExtension("hello/python", extSrc, lambda func: lambda name: "hello " + name + " from python", register=False)
self.assert_(extPy)
self.assertEqual("hello/python", extPy.name)
self.assertEqual(extSrc, extPy.source)
self.assertFalse(extPy.autoEnable)
self.assertFalse(extPy.registered)
extPy.register()
self.assertTrue(extPy.registered)
TestEngine.extPy = extPy
with JSContext(extensions=['hello/python']) as ctxt:
self.assertEqual("hello flier from python", ctxt.eval("hello('flier')"))
def _testSerialize(self):
data = None
self.assertFalse(JSContext.entered)
with JSContext() as ctxt:
self.assert_(JSContext.entered)
#ctxt.eval("function hello(name) { return 'hello ' + name; }")
data = JSEngine.serialize()
self.assert_(data)
self.assert_(len(data) > 0)
self.assertFalse(JSContext.entered)
#JSEngine.deserialize()
self.assert_(JSContext.entered)
self.assertEquals('hello flier', JSContext.current.eval("hello('flier');"))
def testEval(self):
with JSContext() as ctxt:
self.assertEquals(3, int(ctxt.eval("1+2")))
def testGlobal(self):
class Global(JSClass):
version = "1.0"
with JSContext(Global()) as ctxt:
vars = ctxt.locals
# getter
self.assertEquals(Global.version, str(vars.version))
self.assertEquals(Global.version, str(ctxt.eval("version")))
self.assertEquals(None, ctxt.eval("nonexists"))
# setter
self.assertEquals(2.0, float(ctxt.eval("version = 2.0")))
self.assertEquals(2.0, float(vars.version))
def testThis(self):
class Global(JSClass):
version = 1.0
with JSContext(Global()) as ctxt:
self.assertEquals("[object Global]", str(ctxt.eval("this")))
self.assertEquals(1.0, float(ctxt.eval("this.version")))
def testObjectBuildInMethods(self):
class Global(JSClass):
version = 1.0
with JSContext(Global()) as ctxt:
self.assertEquals("[object Global]", str(ctxt.eval("this.toString()")))
self.assertEquals("[object Global]", str(ctxt.eval("this.toLocaleString()")))
self.assertEquals(Global.version, float(ctxt.eval("this.valueOf()").version))
self.assert_(bool(ctxt.eval("this.hasOwnProperty(\"version\")")))
self.assertFalse(ctxt.eval("this.hasOwnProperty(\"nonexistent\")"))
def testPythonWrapper(self):
class Global(JSClass):
s = [1, 2, 3]
d = {'a': {'b': 'c'}, 'd': ['e', 'f']}
g = Global()
with JSContext(g) as ctxt:
ctxt.eval("""
s[2] = s[1] + 2;
s[0] = s[1];
delete s[1];
""")
self.assertEquals([2, 4], g.s)
self.assertEquals('c', ctxt.eval("d.a.b"))
self.assertEquals(['e', 'f'], ctxt.eval("d.d"))
ctxt.eval("""
d.a.q = 4
delete d.d
""")
self.assertEquals(4, g.d['a']['q'])
self.assertEquals(None, ctxt.eval("d.d"))
class TestDebug(unittest.TestCase):
    """Exercises the V8 debugger event hooks."""

    def setUp(self):
        self.engine = JSEngine()
        # Per-instance event buffer.  The original declared `events = []` as a
        # class attribute, so recorded events leaked across test instances and
        # the exact-count assertion in testEventDispatch could break when more
        # tests were added.
        self.events = []

    def tearDown(self):
        del self.engine

    def processDebugEvent(self, event):
        # Best-effort recorder: a failure here must never crash the debugger
        # callback, so errors are logged instead of propagated.
        try:
            logging.debug("receive debug event: %s", repr(event))
            self.events.append(repr(event))
        except Exception:
            logging.error("fail to process debug event")
            logging.debug(traceback.extract_stack())

    def testEventDispatch(self):
        global debugger

        self.assert_(not debugger.enabled)

        debugger.onBreak = lambda evt: self.processDebugEvent(evt)
        debugger.onException = lambda evt: self.processDebugEvent(evt)
        debugger.onNewFunction = lambda evt: self.processDebugEvent(evt)
        debugger.onBeforeCompile = lambda evt: self.processDebugEvent(evt)
        debugger.onAfterCompile = lambda evt: self.processDebugEvent(evt)

        with JSContext() as ctxt:
            debugger.enabled = True

            self.assertEquals(3, int(ctxt.eval("function test() { text = \"1+2\"; return eval(text) } test()")))

            debugger.enabled = False

            self.assertRaises(JSError, JSContext.eval, ctxt, "throw 1")

        self.assert_(not debugger.enabled)
        # Compiling/evaluating the script above should emit exactly 4 events.
        self.assertEquals(4, len(self.events))
class _TestProfile(unittest.TestCase):
    # Leading underscore keeps this suite out of default unittest discovery;
    # the profiler support is optional in PyV8 builds.

    def testStart(self):
        # start()/stop() must toggle the global profiler state flag.
        self.assertFalse(profiler.started)

        profiler.start()

        self.assert_(profiler.started)

        profiler.stop()

        self.assertFalse(profiler.started)

    def testResume(self):
        # A fresh profiler is paused with only the CPU module selected.
        self.assert_(profiler.paused)

        self.assertEquals(profiler.Modules.cpu, profiler.modules)

        profiler.resume()

        profiler.resume(profiler.Modules.heap)

        # TODO enable profiler with resume
        #self.assertFalse(profiler.paused)
# AST support is only compiled into some PyV8 builds, so the suite is
# defined conditionally on the module's exported names.
if 'AST' in __all__:
    class TestAST(unittest.TestCase):

        def testPrettyPrint(self):
            pp = PrettyPrint()

            with JSContext() as ctxt:
                script = JSEngine().compile("function hello(name) { return 'hello ' + name; }")
                script.visit(pp)

            # NOTE(review): asserts the printer output is empty — presumably a
            # placeholder until the expected pretty-printed form is filled in.
            self.assertEquals("", str(pp))
if __name__ == '__main__':
    # -v enables debug-level logging for the test run.
    if "-v" in sys.argv:
        level = logging.DEBUG
    else:
        level = logging.WARN

    # -p pauses before running so a native debugger/profiler can attach
    # to the process first (Python 2: print statement + raw_input).
    if "-p" in sys.argv:
        sys.argv.remove("-p")

        print "Press any key to continue..."
        raw_input()

    logging.basicConfig(level=level, format='%(asctime)s %(levelname)s %(message)s')

    logging.info("testing PyV8 module %s with V8 v%s", __version__, JSEngine.version)

    unittest.main()
|
test_concurrency.py | import re
import time
import pytest
import subprocess
from multiprocessing import Process
from server import run_server
@pytest.fixture
def redis_server():
    """Run the redis-backed test server in a child process for one test."""
    proc = Process(target=run_server, args=["redis"])
    proc.start()
    yield
    proc.terminate()
    # Reap the child: terminate() alone leaves a zombie until the parent
    # collects it, and the port may still look busy to the next fixture.
    proc.join()
    time.sleep(2)
@pytest.fixture
def memcached_server():
    """Run the memcached-backed test server in a child process for one test."""
    proc = Process(target=run_server, args=["memcached"])
    proc.start()
    yield
    proc.terminate()
    # Reap the child process before waiting for the port to free up.
    proc.join()
    time.sleep(2)
@pytest.fixture
def memory_server():
    """Run the in-memory-backed test server in a child process for one test."""
    proc = Process(target=run_server, args=["memory"])
    proc.start()
    yield
    proc.terminate()
    # Reap the child process before waiting for the port to free up.
    proc.join()
    time.sleep(2)
@pytest.fixture(params=["memcached_server", "memory_server", "redis_server"])
def server(request):
    """Parametrized meta-fixture: runs each test once per backend fixture.

    request.getfuncargvalue() was deprecated in pytest 3.0 and removed in
    pytest 4.0; getfixturevalue() is its direct replacement.
    """
    return request.getfixturevalue(request.param)
def test_concurrency_error_rates(server):
    """Hammer the server with ApacheBench and bound the failure rates.

    Fixes: regex patterns are raw strings (``"\\s"`` in a plain literal is an
    invalid escape in Python 3), and ``result.stdout`` is decoded instead of
    passed through ``str()`` (which yields the ``b'...'`` repr).
    """
    total_requests = 1500
    result = subprocess.run(
        ["ab", "-n", str(total_requests), "-c", "500", "http://127.0.0.1:8080/"],
        stdout=subprocess.PIPE,
    )
    output = result.stdout.decode(errors="replace")

    # Pessimistic default: if ab's output can't be parsed, count everything
    # as failed so the assertion below trips.
    failed_requests = total_requests
    m = re.search(r"Failed requests:\s+([0-9]+)", output)
    if m:
        failed_requests = int(m.group(1))

    non_200 = 0
    m = re.search(r"Non-2xx responses:\s+([0-9]+)", output)
    if m:
        non_200 = int(m.group(1))

    print("Failed requests: {}%".format(failed_requests / total_requests * 100))
    print("Non 200 requests: {}%".format(non_200 / total_requests * 100))

    assert (
        failed_requests / total_requests < 0.75
    )  # aioredis is the problem here, need to improve it
    assert non_200 / total_requests < 0.75
|
chatroom_server.py | #! /usr/bin/python3
import socket
import threading
import sys
import chat_message as msg
import chat_command as cmd
# Constant Variable
HOST = '0.0.0.0'  # listen on all interfaces
PORT = int(sys.argv[1]) if len(sys.argv) > 1 else 5000  # optional CLI port override
PAR = 10    # listen() backlog size
SIZE = 1024  # recv buffer size in bytes

# NOTE(review): the listening socket is created and bound at import time as a
# module side effect — importing this module grabs the port.
SERVER = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
SERVER.bind((HOST, PORT))

# Static Variable
ClientList = {}     # client socket -> display name (all connected clients)
ClientAddress = {}  # client socket -> (host, port) peer address
def LOG(message) -> None:
    """Print *message* to stdout with a "[LOG]:" prefix."""
    print(f"[LOG]: {message}")
def acceptClient() -> None:
    """Accept connections forever: greet each client and spawn its handler thread."""
    while True:
        conn, addr = SERVER.accept()
        LOG(f"Connected From {addr[0]}:{addr[1]}")

        # Greet the new client before handing it off.
        conn.send(msg.BANNER)
        conn.send(msg.CLIENT_GREETING_MESSAGE)
        ClientAddress[conn] = addr

        handler = threading.Thread(target=handleClient, args=(conn, addr))
        handler.start()
def handleClient(client_socket, client_address) -> None:
    """Per-client worker: register the client, then serve chat commands until disconnect.

    Fixes over the original: the bare ``except:`` is narrowed to ``Exception``
    (no longer swallows KeyboardInterrupt/SystemExit), client removal uses
    ``pop(..., None)`` so a double removal (e.g. after ``!q`` already removed
    the entry) cannot raise KeyError, and the empty-recv disconnect check is
    evaluated first for clarity.
    """
    display_name = client_socket.recv(SIZE).decode('utf-8').strip()
    client_socket.send(msg.CLIENT_WELCOME_MESSAGE)
    client_socket.send(msg.CLIENT_HELP_MESSAGE)
    broadcastMessage(msg.CHAT_JOIN_MESSAGE.format(display_name).encode('utf-8'))
    ClientList[client_socket] = display_name
    name_tag = f"[{display_name}]: ".encode('utf-8')

    def announce_leave():
        # Remove the client (tolerating a prior removal) and tell the room.
        ClientList.pop(client_socket, None)
        broadcastMessage(message=msg.CHAT_LEAVE_MESSAGE.format(display_name).encode('utf-8'))

    while True:
        try:
            message = client_socket.recv(SIZE)
            LOG(message)
            # FIXME
            # Use dict switch instead of if-else
            if message == b'':
                # recv() returning b'' means the peer closed the connection.
                LOG("empty")
                client_socket.close()
                announce_leave()
                break
            elif message == b"!help\n":
                cmd.help(client_socket)
            elif message == b"!q\n":
                cmd.quit(client_socket, ClientList)
                broadcastMessage(message=msg.CHAT_LEAVE_MESSAGE.format(display_name).encode('utf-8'))
                LOG(f"{client_address[0]}:{client_address[1]} is Leaved")
                break
            elif message == b'!g\n':
                cmd.greet()
            elif message == b'!l\n':
                cmd.list(client_socket, ClientList)
            elif not message.isspace():
                broadcastMessage(name_tag, message)
        except Exception:
            # Socket error (or command failure): drop the client cleanly.
            client_socket.close()
            announce_leave()
            break
def broadcastMessage(NameTag=b"", message=b"") -> None:
    """Send NameTag + message to every connected client.

    Iterates over a snapshot of ClientList because handler threads add and
    remove entries concurrently (mutating a dict during iteration raises
    RuntimeError), and skips clients whose sockets are already dead instead
    of letting one broken peer abort the whole broadcast.
    """
    payload = NameTag + message
    for client in list(ClientList):
        try:
            client.send(payload)
        except OSError:
            # Dead socket: its own handler thread is responsible for cleanup.
            pass
def mainProcess() -> None:
    """Run the accept loop on a worker thread and block until it finishes.

    Bug fix: the original chained ``Thread(...).start().join()``, but
    ``Thread.start()`` returns None, so ``.join()`` raised AttributeError
    immediately and the server never waited on the accept thread.
    """
    accept_thread = threading.Thread(target=acceptClient)
    accept_thread.start()
    accept_thread.join()
if __name__ == '__main__':
    # Start listening, serve until the accept thread exits, then close the
    # listening socket.
    SERVER.listen(PAR)
    LOG(f"Hosting Port: {PORT}")
    LOG("Waiting for Connection...\n")
    mainProcess()
    LOG("Server Shutdown...\n")
    SERVER.close()
|
runtime_manager_dialog.py | #!/usr/bin/env python
"""
Copyright (c) 2015, Nagoya University
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Autoware nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import wx
import wx.lib.buttons
import wx.lib.agw.customtreectrl as CT
import gettext
import os
import re
import sys
import fcntl
import threading
import Queue
import time
import socket
import struct
import shlex
import signal
import subprocess
import psutil
import pty
import yaml
import datetime
import syslog
import rtmgr
import rospy
import std_msgs.msg
from std_msgs.msg import Bool
from decimal import Decimal
from autoware_config_msgs.msg import ConfigSsd
from autoware_config_msgs.msg import ConfigCarDpm
from autoware_config_msgs.msg import ConfigPedestrianDpm
from autoware_config_msgs.msg import ConfigNdt
from autoware_config_msgs.msg import ConfigNdtMapping
from autoware_config_msgs.msg import ConfigApproximateNdtMapping
from autoware_config_msgs.msg import ConfigNdtMappingOutput
from autoware_config_msgs.msg import ConfigICP
from autoware_config_msgs.msg import ConfigVoxelGridFilter
from autoware_config_msgs.msg import ConfigRingFilter
from autoware_config_msgs.msg import ConfigDistanceFilter
from autoware_config_msgs.msg import ConfigRandomFilter
from autoware_config_msgs.msg import ConfigRingGroundFilter
from autoware_config_msgs.msg import ConfigRayGroundFilter
from autoware_config_msgs.msg import ConfigWaypointLoader
from autoware_config_msgs.msg import ConfigWaypointFollower
from autoware_config_msgs.msg import ConfigTwistFilter
from autoware_config_msgs.msg import ConfigVelocitySet
from autoware_config_msgs.msg import ConfigLatticeVelocitySet
from autoware_config_msgs.msg import ConfigCarKf
from autoware_config_msgs.msg import ConfigPedestrianKf
from autoware_config_msgs.msg import ConfigLaneRule
from autoware_config_msgs.msg import ConfigLaneSelect
from autoware_config_msgs.msg import ConfigLaneStop
from autoware_config_msgs.msg import ConfigCarFusion
from autoware_config_msgs.msg import ConfigPedestrianFusion
from autoware_config_msgs.msg import ConfigPlannerSelector
from autoware_config_msgs.msg import ConfigDecisionMaker
from autoware_config_msgs.msg import ConfigCompareMapFilter
from tablet_socket_msgs.msg import mode_cmd
from tablet_socket_msgs.msg import gear_cmd
from tablet_socket_msgs.msg import Waypoint
from tablet_socket_msgs.msg import route_cmd
from geometry_msgs.msg import TwistStamped
from geometry_msgs.msg import Vector3
from autoware_msgs.msg import AccelCmd
from autoware_msgs.msg import SteerCmd
from autoware_msgs.msg import BrakeCmd
from autoware_msgs.msg import IndicatorCmd
from autoware_msgs.msg import LampCmd
from autoware_msgs.msg import TrafficLight
from autoware_msgs.msg import AdjustXY
from types import MethodType
# Linux scheduler policy ids (as used by sched_setscheduler); forwarded to the
# process manager when changing a launched node's scheduling policy.
SCHED_OTHER = 0
SCHED_FIFO = 1
SCHED_RR = 2
# Unix domain socket path used to talk to the autoware proc_manager daemon.
PROC_MANAGER_SOCK="/tmp/autoware_proc_manager"
class MyFrame(rtmgr.MyFrame):
def __init__(self, *args, **kwds):
    """Build the Runtime Manager main window.

    Loads the per-tab YAML definitions (qs / setup / map / sensing /
    computing / interface / data / simulation / status / topics / state),
    wires each widget to its launch command, connects the ROS pub/sub
    endpoints, and starts the background logging and monitoring threads.
    """
    rtmgr.MyFrame.__init__(self, *args, **kwds)
    self.all_procs = []
    self.all_cmd_dics = []
    self.load_dic = self.load_yaml('param.yaml', def_ret={})
    self.config_dic = {}
    self.Bind(wx.EVT_CLOSE, self.OnClose)
    self.params = []
    self.all_tabs = []
    self.all_th_infs = []
    self.log_que = Queue.Queue()
    self.log_que_stdout = Queue.Queue()
    self.log_que_stderr = Queue.Queue()
    self.log_que_show = Queue.Queue()

    #
    # ros
    #
    # NOTE(review): 'runime_manager' looks like a typo for 'runtime_manager',
    # but renaming would change the public ROS node name, so it is left as-is.
    rospy.init_node('runime_manager', anonymous=True)
    rospy.Subscriber('to_rtmgr', std_msgs.msg.String, self.RosCb)
    self.pub = rospy.Publisher('from_rtmgr', std_msgs.msg.String, queue_size=10)

    #
    # for Quick Start tab
    #
    tab = self.tab_qs
    self.all_tabs.append(tab)

    self.qs_cmd = {}
    self.all_cmd_dics.append(self.qs_cmd)
    self.qs_dic = self.load_yaml('qs.yaml')

    self.add_params(self.qs_dic.get('params', []))

    self.setup_buttons(self.qs_dic.get('buttons', {}), self.qs_cmd)

    # exec_time keys have the form "<topic>.<msg type>.<attr>"; subscribe to
    # each so per-stage execution times can be shown on the Quick Start labels.
    for nm in [ 'map', 'sensing', 'localization', 'detection', 'mission_planning', 'motion_planning' ]:
        for key in self.qs_dic.get('exec_time', {}).get(nm, {}).keys():
            (topic, msg, attr) = ( key.split('.') + [ None, None, None ] )[:3]
            msg = globals().get(msg)
            msg = msg if msg else std_msgs.msg.Float32
            attr = attr if attr else 'data'
            rospy.Subscriber(topic, msg, self.exec_time_callback, callback_args=(key, attr))

    #
    # for Setup tab
    #
    tab = self.tab_setup
    self.all_tabs.append(tab)

    setup_cmd = {}
    self.all_cmd_dics.append(setup_cmd)
    dic = self.load_yaml('setup.yaml')

    self.add_params(dic.get('params', []))
    self.setup_buttons(dic.get('buttons', {}), setup_cmd)

    #
    # for Map tab
    #
    tab = self.tab_map
    self.all_tabs.append(tab)

    self.map_cmd = {}
    self.all_cmd_dics.append(self.map_cmd)
    self.map_dic = self.load_yaml('map.yaml')

    self.add_params(self.map_dic.get('params', []))

    self.setup_buttons(self.map_dic.get('buttons', {}), self.map_cmd)

    self.tc_point_cloud = self.obj_to_varpanel_tc(self.button_point_cloud, 'path_pcd')
    self.tc_area_list = self.obj_to_varpanel_tc(self.button_area_lists, 'path_area_list')

    # Replace the static label with an animated progress bar for PCD loading.
    self.label_point_cloud_bar.Destroy()
    self.label_point_cloud_bar = BarLabel(tab, ' Loading... ')
    self.label_point_cloud_bar.Enable(False)

    # Warn when any selected point-cloud file exceeds 1 GB.
    def hook1G(args):
        for f in args.get('func')().split(','):
            sz = os.path.getsize(f)
            if sz > 1024*1024*1024:
                wx.MessageBox("Over 1GB\n\n{}\n({:,})".format(f, sz), caption='Warning')

    args = { 'func':self.tc_point_cloud.GetValue }
    hook_var = { 'hook':hook1G, 'args':args, 'flags':['every_time'] }
    obj = self.button_point_cloud
    gdic_v = self.obj_to_gdic(obj, {}).get('path_pcd', {})
    gdic_v['hook_var'] = hook_var

    #
    # for Sensing tab
    #
    tab = self.tab_sensing
    self.all_tabs.append(tab)

    self.drv_probe_cmd = {}
    self.sensing_cmd = {}
    self.all_cmd_dics.append(self.sensing_cmd)
    dic = self.load_yaml('sensing.yaml')

    self.add_params(dic.get('params', []))

    self.create_checkboxes(dic, self.panel_sensing, None, self.drv_probe_cmd, self.sensing_cmd, self.OnSensingDriver)

    self.setup_buttons(dic.get('buttons', {}), self.sensing_cmd)

    #self.timer = wx.Timer(self)
    #self.Bind(wx.EVT_TIMER, self.OnProbe, self.timer)
    #self.probe_interval = 10*1000
    #if self.checkbox_auto_probe.GetValue():
    #	self.OnProbe(None)
    #	self.timer.Start(self.probe_interval)

    self.dlg_rosbag_record = MyDialogRosbagRecord(self, cmd_dic=self.sensing_cmd)
    buttons_color_hdr_setup(self.dlg_rosbag_record)

    # Saved for the Sensing-cmds tree built after the Computing tab below.
    sense_cmds_dic = dic.get('cmds', {})

    #
    # for Computing tab
    #
    tab = self.tab_computing
    self.all_tabs.append(tab)

    parent = self.tree_ctrl_0.GetParent()
    for i in range(2):
        self.obj_get('tree_ctrl_' + str(i)).Destroy()

    items = self.load_yaml('computing.yaml')

    self.add_params(items.get('params', []))

    self.sys_gdic = items.get('sys_gui')
    self.sys_gdic['update_func'] = self.update_func

    self.computing_cmd = {}
    self.all_cmd_dics.append(self.computing_cmd)
    # Rebuild the two computing trees from YAML (replacing the wxGlade stubs).
    for i in range(2):
        tree_ctrl = self.create_tree(parent, items['subs'][i], None, None, self.computing_cmd)
        tree_ctrl.ExpandAll()
        tree_ctrl.SetBackgroundColour(wx.NullColour)
        setattr(self, 'tree_ctrl_' + str(i), tree_ctrl)

    self.Bind(CT.EVT_TREE_ITEM_CHECKED, self.OnTreeChecked)

    self.setup_buttons(items.get('buttons', {}), self.computing_cmd)

    #
    # for Sensing tab (cmds)
    #
    parent = self.tree_ctrl_sense.GetParent()
    self.tree_ctrl_sense.Destroy()
    tree_ctrl = self.create_tree(parent, sense_cmds_dic, None, None, self.sensing_cmd)
    tree_ctrl.ExpandAll()
    tree_ctrl.SetBackgroundColour(wx.NullColour)
    self.tree_ctrl_sense = tree_ctrl

    #
    # for Interface tab
    #
    tab = self.tab_interface
    self.all_tabs.append(tab)

    self.interface_cmd = {}
    self.all_cmd_dics.append(self.interface_cmd)

    self.interface_dic = self.load_yaml('interface.yaml')

    self.add_params(self.interface_dic.get('params', []))

    self.setup_buttons(self.interface_dic.get('buttons', {}), self.interface_cmd)
    self.setup_buttons(self.interface_dic.get('checkboxs', {}), self.interface_cmd)

    # Build one ParamPanel per 'control_check' entry, seeded with the
    # variables' default values.
    szr = wx.BoxSizer(wx.VERTICAL)
    for cc in self.interface_dic.get('control_check', []):
        pdic = {}
        prm = self.get_param(cc.get('param'))
        for var in prm['vars']:
            pdic[ var['name'] ] = var['v']
        gdic = self.gdic_get_1st(cc)
        panel = ParamPanel(self.panel_interface_cc, frame=self, pdic=pdic, gdic=gdic, prm=prm)
        szr.Add(panel, 0, wx.EXPAND)
    self.panel_interface_cc.SetSizer(szr)

    #
    # for Database tab
    #
    tab = self.tab_database
    self.all_tabs.append(tab)

    self.data_cmd = {}
    self.all_cmd_dics.append(self.data_cmd)
    dic = self.load_yaml('data.yaml')

    self.add_params(dic.get('params', []))

    parent = self.tree_ctrl_data.GetParent()
    self.tree_ctrl_data.Destroy()
    tree_ctrl = self.create_tree(parent, dic, None, None, self.data_cmd)
    tree_ctrl.ExpandAll()
    tree_ctrl.SetBackgroundColour(wx.NullColour)
    self.tree_ctrl_data = tree_ctrl

    #self.setup_config_param_pdic()

    if 'buttons' in dic:
        self.setup_buttons(dic['buttons'], self.data_cmd)

    #
    # for Simulation Tab
    #
    tab = self.tab_simulation
    self.all_tabs.append(tab)

    self.simulation_cmd = {}
    self.all_cmd_dics.append(self.simulation_cmd)
    dic = self.load_yaml('simulation.yaml')

    self.add_params(dic.get('params', []))

    self.setup_buttons(dic.get('buttons'), self.simulation_cmd)
    btn = self.button_play_rosbag_play

    # setup for rosbag info — refresh the info pane whenever the file changes.
    gdic = self.obj_to_gdic(btn, {})
    gdic_v = dic_getset(gdic, 'file', {})
    gdic_v['update_hook'] = self.rosbag_info_hook
    tc = self.obj_to_varpanel_tc(btn, 'file')
    if tc:
        self.rosbag_info_hook( tc.GetValue() )

    #vp = self.obj_to_varpanel(btn, 'sim_time')
    #self.checkbox_sim_time = vp.obj

    #try:
    #	cmd = ['rosparam', 'get', '/use_sim_time']
    #	if subprocess.check_output(cmd, stderr=open(os.devnull, 'wb')).strip() == 'true':
    #		self.checkbox_sim_time.SetValue(True)
    #except subprocess.CalledProcessError:
    #	pass

    # Replace the static label with an animated progress bar for playback.
    self.label_rosbag_play_bar.Destroy()
    self.label_rosbag_play_bar = BarLabel(tab, ' Playing... ')
    self.label_rosbag_play_bar.Enable(False)

    #
    # for Status tab
    #
    tab = self.tab_status
    self.all_tabs.append(tab)

    self.status_cmd = {}
    self.all_cmd_dics.append(self.status_cmd)
    self.status_dic = self.load_yaml('status.yaml')

    self.add_params(self.status_dic.get('params', []))

    self.setup_buttons(self.status_dic.get('buttons', {}), self.status_cmd)

    # Fixed-width font so the `top` output columns line up.
    font = wx.Font(10, wx.FONTFAMILY_MODERN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
    self.label_top_cmd.SetFont(font)

    #
    # for Topics tab
    #
    tab = self.tab_topics
    self.all_tabs.append(tab)

    #
    # for State tab
    #
    tab = self.tab_states
    self.all_tabs.append(tab)

    self.state_dic = self.load_yaml('state.yaml')
    self.mainstate_dic = self.state_dic["mainstate"]
    self.substate_dic = self.state_dic["substate"]

    #
    # for All
    #
    self.bitmap_logo.Destroy()
    bm = scaled_bitmap(wx.Bitmap(rtmgr_src_dir() + 'images/autoware_logo_1.png'), 0.2)
    self.bitmap_logo = wx.StaticBitmap(self, wx.ID_ANY, bm)

    rtmgr.MyFrame.__do_layout(self)

    # Derive tab names ('qs', 'map', ...) and group same-named buttons across
    # tabs so their toggle state stays in sync (alias_sync).
    cond = lambda s : s.startswith('tab_')
    self.tab_names = [ self.name_get_cond(tab, cond=cond, def_ret='').replace('tab_', '', 1) for tab in self.all_tabs ]

    new_btn_grps = ( lambda btn_names, tab_names=self.tab_names :
        [ [ self.obj_get('button_{}_{}'.format(bn, tn)) for tn in tab_names ] for bn in btn_names ] )

    self.alias_grps = new_btn_grps( ('rosbag', 'rviz', 'rqt') )
    self.alias_grps += new_btn_grps( ('android_tablet', 'oculus_rift', 'vehicle_gateway', 'remote_control', 'auto_pilot'),
        ('qs', 'interface') )

    for grp in self.alias_grps:
        wx.CallAfter(self.alias_sync, get_top(grp))
        s = get_tooltip_obj(grp[0])
        if s:
            for obj in grp[1:]:
                set_tooltip_str(obj, s)

    # Topics tab (need, after layout for sizer)
    self.topics_dic = self.load_yaml('topics.yaml')
    self.topics_list = []
    self.topics_echo_curr_topic = None
    self.topics_echo_proc = None
    self.topics_echo_thinf = None
    self.topics_echo_que = Queue.Queue()
    self.topics_echo_sum = 0
    thinf = th_start(self.topics_echo_show_th)
    self.all_th_infs.append(thinf)
    self.refresh_topics_list()

    # waypoint
    self.route_cmd_waypoint = [ Waypoint(0,0), Waypoint(0,0) ]
    rospy.Subscriber('route_cmd', route_cmd, self.route_cmd_callback)

    # topic /xxx_stat — boolean health flags per subsystem.
    self.stat_dic = {}
    for k in [ 'gnss', 'pmap', 'vmap', 'lf' ]:
        self.stat_dic[k] = False
        name = k + '_stat'
        rospy.Subscriber(name, std_msgs.msg.Bool, self.stat_callback, callback_args=k)

    # top command thread setup — swap in our own ~/.toprc (backing up the
    # user's) so `top` output parses predictably.
    toprc = os.path.expanduser('~/.toprc')
    backup = os.path.expanduser('~/.toprc-autoware-backup')
    self.toprc_setup(toprc, backup)

    cpu_ibls = [ InfoBarLabel(self, 'CPU'+str(i)) for i in range(get_cpu_count())]
    sz = sizer_wrap(cpu_ibls, wx.HORIZONTAL, 1, wx.EXPAND, 0)
    self.sizer_cpuinfo.Add(sz, 8, wx.ALL | wx.EXPAND, 4)

    self.lb_top5 = []
    for i in range(5):
        lb = wx.StaticText(self, wx.ID_ANY, '')
        change_font_point_by_rate(lb, 0.75)
        self.lb_top5.append(lb)
    line = wx.StaticLine(self, wx.ID_ANY)
    ibl = InfoBarLabel(self, 'Memory', bar_orient=wx.HORIZONTAL)
    szr = sizer_wrap(self.lb_top5 + [ line, ibl ], flag=wx.EXPAND | wx.FIXED_MINSIZE)
    self.sizer_cpuinfo.Add(szr, 2, wx.ALL | wx.EXPAND, 4)

    th_arg = { 'setting':self.status_dic.get('top_cmd_setting', {}),
        'cpu_ibls':cpu_ibls, 'mem_ibl':ibl,
        'toprc':toprc, 'backup':backup }
    thinf = th_start(self.top_cmd_th, th_arg)
    self.all_th_infs.append(thinf)

    # ps command thread
    #thinf = th_start(self.ps_cmd_th, { 'interval':5 })
    #self.all_th_infs.append(thinf)

    # logout thread — drains the stdout/stderr/log queues into the text ctrl.
    interval = self.status_dic.get('gui_update_interval_ms', 100) * 0.001
    tc = self.text_ctrl_stdout

    thinf = th_start(self.logout_th, { 'que':self.log_que_stdout, 'interval':interval, 'tc':tc } )
    self.all_th_infs.append(thinf)
    thinf = th_start(self.logout_th, { 'que':self.log_que_stderr, 'interval':interval, 'tc':tc } )
    self.all_th_infs.append(thinf)
    thinf = th_start(self.logout_th, { 'que':self.log_que, 'interval':interval, 'tc':tc } )
    self.all_th_infs.append(thinf)

    if interval > 0:
        thinf = th_start(self.logshow_th, { 'que':self.log_que_show , 'interval':interval , 'tc':tc })
        self.all_th_infs.append(thinf)
    else:
        self.checkbox_stdout.Enable(False)
        tc.Enable(False)

    # mkdir — make sure the standard data directories exist.
    paths = [ os.environ['HOME'] + '/.autoware/data/tf',
        os.environ['HOME'] + '/.autoware/data/map/pointcloud_map',
        os.environ['HOME'] + '/.autoware/data/map/vector_map' ]
    for path in paths:
        if not os.path.exists(path):
            subprocess.call([ 'mkdir', '-p', path ])

    # icon
    bm = scaled_bitmap(wx.Bitmap(rtmgr_src_dir() + 'images/autoware_logo_2_white.png'), 0.5)
    icon = wx.EmptyIcon()
    icon.CopyFromBitmap(bm)
    self.SetIcon(icon)

    # Deferred so the frame is fully realized before dialogs may pop up.
    wx.CallAfter( self.boot_booted_cmds )
def __do_layout(self):
    # Intentionally a no-op override: layout is performed explicitly via
    # rtmgr.MyFrame.__do_layout(self) near the end of __init__, after the
    # generated widgets have been replaced, so the generated call into this
    # method must not run it again.
    pass
def boot_booted_cmds(self):
    # Offer to re-launch the commands that were running when the app last
    # quit (recorded in param.yaml under booted_cmds/names), if the feature
    # is enabled.
    if not self.load_dic.get('booted_cmds', {}).get('enable', False):
        return
    names = self.load_dic.get('booted_cmds', {}).get('names', [])
    # Resolve each saved name back to its widget; drop names with no widget.
    lst = [ ( name, self.cfg_dic( { 'name': name } ).get('obj') ) for name in names ]
    lst = [ (name, obj) for (name, obj) in lst if obj ]
    if not lst:
        return

    choices = [ obj.GetLabel() if hasattr(obj, 'GetLabel') else name for (name, obj) in lst ]
    dlg = wx.MultiChoiceDialog(self, 'boot command ?', '', choices)
    dlg.SetSelections( range( len(names) ) )  # pre-select everything
    if dlg.ShowModal() != wx.ID_OK:
        return
    for i in dlg.GetSelections():
        (_, obj) = lst[i]
        # Simulate a user toggle so the normal launch path runs.
        post_evt_toggle_obj(self, obj, True)
def OnClose(self, event):
    # Close handler: confirm via quit_select(), then kill every launched
    # process, stop worker threads and destroy the frame.
    if self.quit_select() != 'quit':
        return

    # kill_all
    for proc in self.all_procs[:]: # copy
        (_, obj) = self.proc_to_cmd_dic_obj(proc)
        self.launch_kill(False, 'dmy', proc, obj=obj)

    shutdown_proc_manager()

    # Optional site-specific shutdown hook script.
    shutdown_sh = self.get_autoware_dir() + '/ros/shutdown'
    if os.path.exists(shutdown_sh):
        os.system(shutdown_sh)

    for thinf in self.all_th_infs:
        th_end(thinf)

    self.Destroy()
def quit_select(self):
    # Returns 'quit' or 'not quit'.  Protocol: the first close event starts a
    # 2 s timer and cancels the close.  If the timer expires it re-posts a
    # close event with quit_timer == 'timeout', which saves and quits.  If the
    # user closes again within 2 s, the timer is cancelled and the command
    # menu below is shown instead.
    def timer_func():
        if self.quit_timer:
            self.quit_timer = 'timeout'
            evt = wx.PyCommandEvent( wx.EVT_CLOSE.typeId, self.GetId() )
            wx.PostEvent(self, evt)

    if not hasattr(self, 'quit_timer') or not self.quit_timer:
        self.quit_timer = threading.Timer(2.0, timer_func)
        self.quit_timer.start()
        return 'not quit'

    if self.quit_timer == 'timeout':
        self.save_param_yaml()
        return 'quit'

    self.quit_timer.cancel()
    self.quit_timer = None

    # (label shown in the dialog, actions to perform when selected)
    lst = [
        ( 'Save and Quit', [ 'save', 'quit' ] ),
        ( 'Save to param.yaml', [ 'save' ] ),
        ( 'Quit without saving', [ 'quit' ] ),
        ( 'Reload computing.yaml', [ 'reload' ] ),
        ( self.get_booted_cmds_enable_msg()[1], [ 'toggle_booted_cmds' ] ),
    ]
    choices = [ s for (s, _) in lst ]
    dlg = wx.SingleChoiceDialog(self, 'select command', '', choices)
    if dlg.ShowModal() != wx.ID_OK:
        return 'not quit'

    i = dlg.GetSelection() # index of choices
    (_, f) = lst[i]
    if 'save' in f:
        self.save_param_yaml()
    if 'reload' in f:
        self.reload_computing_yaml()
    if 'toggle_booted_cmds' in f:
        self.toggle_booted_cmds()
    return 'quit' if 'quit' in f else 'not quit'
def save_param_yaml(self):
    """Persist per-widget parameter dicts and the running-command list to param.yaml.

    Variables listed in a param's 'no_save_vars' are stripped before saving.
    Fixes over the original: the output file is managed by a with-statement
    (it was never closed on a dump/write error), the local no longer shadows
    the ``dir`` builtin, and the no-save filtering builds a new dict instead
    of deleting keys while iterating.
    """
    save_dic = {}
    for (name, pdic) in self.load_dic.items():
        if pdic and pdic != {}:
            prm = self.cfg_dic( {'name':name, 'pdic':pdic} ).get('param', {})
            no_saves = prm.get('no_save_vars', [])
            # Copy-and-filter in one pass (original deleted from a copy's keys).
            save_dic[name] = dict( (k, v) for (k, v) in pdic.items() if k not in no_saves )

    # Record which commands are currently running so they can be offered
    # for relaunch on the next start (see boot_booted_cmds).
    names = []
    for proc in self.all_procs:
        (_, obj) = self.proc_to_cmd_dic_obj(proc)
        name = self.cfg_dic( { 'obj': obj } ).get('name')
        names.append(name)
    if 'booted_cmds' not in save_dic:
        save_dic['booted_cmds'] = {}
    save_dic.get('booted_cmds')['names'] = names

    if save_dic != {}:
        src_dir = rtmgr_src_dir()  # renamed from 'dir' to avoid shadowing the builtin
        print('saving param.yaml')
        s = yaml.dump(save_dic, default_flow_style=False)
        #print 'save\n', s # for debug
        with open(src_dir + 'param.yaml', 'w') as f:
            f.write(s)
def reload_computing_yaml(self):
    # Re-reads computing.yaml and rebuilds the two Computing-tab trees while
    # keeping already-running processes attached to their (newly created)
    # widgets.  Relies on Python 2 dict.items() returning a list (the
    # config_dic loop below pops entries while iterating).
    parent = self.tree_ctrl_0.GetParent()
    sizer = self.tree_ctrl_0.GetContainingSizer()

    items = self.load_yaml('computing.yaml')

    # backup cmd_dic proc
    cmd_dic = self.computing_cmd
    to_name = lambda obj: next( ( d.get('name') for d in self.config_dic.values() if d.get('obj') == obj ), None )
    procs = [ ( to_name(obj), proc ) for (obj, (cmd, proc)) in cmd_dic.items() if proc ]

    # remove old tree ctrl
    for i in range(2):
        self.obj_get('tree_ctrl_' + str(i)).Destroy()

    # remove old params
    names = [ prm.get('name') for prm in items.get('params', []) ]
    for prm in self.params[:]: # copy
        if prm.get('name') in names:
            self.params.remove(prm)
    self.add_params(items.get('params', []))

    # overwrite sys_gdic
    old = self.sys_gdic
    self.sys_gdic = items.get('sys_gui')
    self.sys_gdic['update_func'] = self.update_func
    for d in self.config_dic.values():
        if d.get('gdic') == old:
            d['gdic'] = self.sys_gdic

    # listing update names (recursive walk of the 'subs' tree)
    def subs_names(subs):
        f2 = lambda s: subs_names( s.get('subs') ) if 'subs' in s else [ s.get('name') ]
        f = lambda lst, s: lst + f2(s)
        return reduce(f, subs, [])
    names = subs_names( items.get('subs') )
    names += items.get('buttons', {}).keys()

    # remove old data of name in config_dic
    for (k, v) in self.config_dic.items():
        if v.get('name') in names:
            self.config_dic.pop(k, None)

    # rebuild tree ctrl
    cmd_dic.clear()
    for i in range(2):
        tree_ctrl = self.create_tree(parent, items['subs'][i], None, None, self.computing_cmd)
        tree_ctrl.ExpandAll()
        tree_ctrl.SetBackgroundColour(wx.NullColour)
        setattr(self, 'tree_ctrl_' + str(i), tree_ctrl)
        sizer.Add(tree_ctrl, 1, wx.EXPAND, 0)
    self.setup_buttons(items.get('buttons', {}), self.computing_cmd)

    # restore cmd_dic proc: re-attach surviving processes to the new widgets
    # and re-check them so the UI reflects the running state.
    to_obj = lambda name: next( ( d.get('obj') for d in self.config_dic.values() if d.get('name') == name ), None )
    for (name, proc) in procs:
        obj = to_obj(name)
        if obj and obj in cmd_dic:
            cmd_dic[ obj ] = ( cmd_dic.get(obj)[0], proc )
            set_val(obj, True)

    parent.Layout()
def toggle_booted_cmds(self):
    """Flip the booted-commands-menu flag after an OK/Cancel confirmation."""
    (enable, msg) = self.get_booted_cmds_enable_msg()
    dlg = wx.MessageDialog(self, msg, '', wx.OK | wx.CANCEL | wx.ICON_QUESTION)
    if dlg.ShowModal() != wx.ID_OK:
        return
    # setdefault replaces the explicit key-presence check + get() chain.
    self.load_dic.setdefault('booted_cmds', {})['enable'] = not enable
def get_booted_cmds_enable_msg(self):
    """Return (current enable flag, confirmation message for toggling it)."""
    enable = self.load_dic.get('booted_cmds', {}).get('enable', False)
    verb = 'Disable' if enable else 'Enable'
    return (enable, '{} booted commands menu ?'.format(verb))
def RosCb(self, data):
    # Echo handler for the 'to_rtmgr' topic: logs the payload and republishes
    # it on 'from_rtmgr', pacing with a 10 Hz rate before and after.
    print('recv topic msg : ' + data.data)

    r = rospy.Rate(10)
    # NOTE(review): return value of is_shutdown() is ignored here — it was
    # presumably meant to guard the publish below; confirm intent.
    rospy.is_shutdown()
    r.sleep()
    self.pub.publish(data.data)
    r.sleep()
def setup_buttons(self, d, run_dic):
    # Wires YAML 'buttons'/'checkboxs' entries (d: key -> config dict) to
    # widgets.  For key k it looks up button_<k> or checkbox_<k> (creating a
    # placeholder StrValObj when neither exists), registers the entry's 'run'
    # command in run_dic, and binds the parameter/GUI dicts via add_cfg_info.
    for (k,d2) in d.items():
        pfs = [ 'button_', 'checkbox_' ]
        obj = next( (self.obj_get(pf+k) for pf in pfs if self.obj_get(pf+k)), None)
        if not obj:
            # No real widget: create a stand-in so the command can still be
            # toggled programmatically.
            s = 'button_' + k
            obj = StrValObj(s, False)
            setattr(self, s, obj)
        if not d2 or type(d2) is not dict:
            continue
        if 'run' in d2:
            run_dic[obj] = (d2['run'], None)
        set_tooltip(obj, d2)
        gdic = self.gdic_get_1st(d2)
        if 'param' in d2:
            pdic = self.load_dic_pdic_setup(k, d2)
            prm = self.get_param(d2.get('param'))
            # Fill in defaults for any variable missing from the saved pdic.
            for var in prm.get('vars'):
                name = var.get('name')
                if name not in pdic and 'v' in var:
                    pdic[name] = var.get('v')
            for (name, v) in pdic.items():
                # HACK: 'restore' in the YAML is a lambda *source string*
                # that is eval'd here — config files are trusted input.
                restore = eval( gdic.get(name, {}).get('restore', 'lambda a : None') )
                restore(v)
            self.add_cfg_info(obj, obj, k, pdic, gdic, False, prm)
            pnls = [ gdic.get(var.get('name'), {}).get('panel') for var in prm.get('vars') ]
            for pnl in [ gdic.get('panel') ] + pnls:
                if pnl:
                    self.set_param_panel(obj, eval_if_str(self, pnl))
        else:
            self.add_cfg_info(obj, obj, k, None, gdic, False, None)
def OnGear(self, event):
    """Radio-toggle the gear buttons and publish the selected gear code."""
    gear_of_button = {
        self.button_statchk_d : 1,
        self.button_statchk_r : 2,
        self.button_statchk_b : 3,
        self.button_statchk_n : 4,
        self.button_statchk_p : 5,
    }
    self.radio_action(event, gear_of_button.keys())
    gear = gear_of_button.get(event.GetEventObject())
    if gear is not None:
        pub = rospy.Publisher('gear_cmd', gear_cmd, queue_size=10)
        pub.publish(gear_cmd(gear=gear))
def OnLamp(self, event):
    """Publish the current lamp toggle states as a LampCmd."""
    lamp_msg = LampCmd()
    lamp_msg.l = self.button_statchk_lamp_l.GetValue()
    lamp_msg.r = self.button_statchk_lamp_r.GetValue()
    pub = rospy.Publisher('lamp_cmd', LampCmd, queue_size=10)
    pub.publish(lamp_msg)
def OnIndi(self, event):
    """Publish the current turn-indicator toggle states as an IndicatorCmd."""
    indicator_msg = IndicatorCmd()
    indicator_msg.l = self.button_statchk_indi_l.GetValue()
    indicator_msg.r = self.button_statchk_indi_r.GetValue()
    pub = rospy.Publisher('indicator_cmd', IndicatorCmd, queue_size=10)
    pub.publish(indicator_msg)
def OnAutoPilot(self, event):
    """Mirror the toggle across its alias group and publish the auto-pilot mode."""
    obj = event.GetEventObject()
    self.alias_sync(obj)
    pub = rospy.Publisher('mode_cmd', mode_cmd, queue_size=10)
    pub.publish(mode_cmd(mode=obj.GetValue()))
def radio_action(self, event, grp):
    """Enforce radio behavior over grp: the pressed widget ends up on, all others off."""
    pressed = event.GetEventObject()
    for btn in grp:
        state = btn.GetValue()
        if btn is pressed:
            # Turn the pressed button on only if it is not already on.
            if not state:
                set_val(btn, True)
        elif state:
            # Any other button that is on gets turned off.
            set_val(btn, False)
def stat_label_off(self, obj):
    # Clears every status label tied to obj: its stat topics get a False
    # callback, exec_time entries get a 0 ms callback, and the matching
    # Quick Start stage labels are reset as well.
    qs_nms = [ 'map', 'sensing', 'localization', 'detection', 'mission_planning', 'motion_planning' ]
    exec_time = self.qs_dic.get('exec_time', {})

    gdic = self.obj_to_gdic(obj, {})
    msg = std_msgs.msg.Bool(False)
    for k in gdic.get('stat_topic', []):
        # exec_time off
        if next( (dic for dic in exec_time.values() if k in dic), None):
            self.exec_time_callback(std_msgs.msg.Float32(0), (k, 'data'))
        else:
            self.stat_callback(msg, k)

    # Quick Start tab, exec_time off
    obj_nm = self.name_get(obj)
    nm = next( (nm for nm in qs_nms if 'button_' + nm + '_qs' == obj_nm), None)
    for key in exec_time.get(nm, {}):
        self.exec_time_callback(std_msgs.msg.Float32(0), (key, 'data'))
def route_cmd_callback(self, data):
    # Cache the latest route endpoints received on 'route_cmd' for later use.
    self.route_cmd_waypoint = data.point
def stat_callback(self, msg, k):
    # Track the boolean status of subsystem k and refresh dependent labels.
    # wx.CallAfter is used because this runs on a ROS subscriber thread.
    self.stat_dic[k] = msg.data
    if k == 'pmap':
        v = self.stat_dic.get(k)
        wx.CallAfter(self.label_point_cloud.SetLabel, 'OK' if v else '')
    if k in [ 'pmap', 'vmap' ]:
        # The Quick Start map label shows OK only when both maps are loaded.
        v = self.stat_dic.get('pmap') and self.stat_dic.get('vmap')
        wx.CallAfter(self.label_map_qs.SetLabel, 'OK' if v else '')
def exec_time_callback(self, msg, (key, attr)):
    # Python 2 tuple-parameter syntax: callback_args was registered as
    # (key, attr) in __init__.  Records the stage's execution time and
    # refreshes both the Quick Start label and the Status-tab summary.
    msec = int(getattr(msg, attr, 0))
    exec_time = self.qs_dic.get('exec_time', {})
    (nm, dic) = next( ( (nm, dic) for (nm, dic) in exec_time.items() if key in dic), None)
    dic[ key ] = msec
    lb = self.obj_get('label_' + nm + '_qs')
    if lb:
        # Sum all topic times for this stage, treating None as 0.
        sum = reduce( lambda a,b:a+(b if b else 0), dic.values(), 0 )
        wx.CallAfter(lb.SetLabel, str(sum)+' ms' if sum > 0 else '')

    # update Status tab
    lb = ''
    for nm in [ 'map', 'sensing', 'localization', 'detection', 'mission_planning', 'motion_planning' ]:
        dic = exec_time.get(nm, {})
        sum = reduce( lambda a,b:a+(b if b else 0), dic.values(), 0 )
        if sum > 0:
            s = nm + ' : ' + str(sum) + ' ms'
            lb += s + '\n'
    wx.CallAfter(self.label_node_time.SetLabel, lb)
    wx.CallAfter(self.label_node_time.GetParent().FitInside)
#
# Setup tab
#
def OnSetupLocalizer(self, event):
    # Re-run the update hook for the TF setup button's parameter set.
    obj = self.button_setup_tf
    (pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
    self.update_func(pdic, gdic, prm)
#
# Computing Tab
#
def OnTreeMotion(self, event):
    # Shows a delayed tooltip when the mouse rests on a tree item's label.
    tree = event.GetEventObject()
    pt = event.GetPosition()
    event.Skip()
    (item, flags) = tree.HitTest(pt)
    if flags & CT.TREE_HITTEST_ONITEMLABEL == 0:
        return
    text = item.GetData()
    if not text:
        return
    x = item.GetX()
    y = item.GetY()
    w = item.GetWidth()
    h = item.GetHeight()
    (x, y) = tree.CalcScrolledPosition(x, y)
    iw = tree.GetItemWindow(item)
    # Exclude the embedded item widget (e.g. the [config] link) from the
    # hover rectangle.
    w -= iw.GetSize()[0] if iw else 0
    if not wx.Rect(x, y, w, h).Contains(pt):
        return
    (x, y) = tree.ClientToScreen((x, y))
    self.tip_info = (tree, text, wx.Rect(x, y, w, h))
    # Lazily create a single one-shot 200 ms timer; restarting it debounces
    # rapid mouse movement.
    if getattr(self, 'tip_timer', None) is None:
        self.tip_timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.OnTipTimer, self.tip_timer)
    self.tip_timer.Start(200, oneShot=True)
def OnTipTimer(self, event):
    """Deferred tooltip display for tree items (fired by the 200 ms hover timer)."""
    info = getattr(self, 'tip_info', None)
    if not info:
        return
    (tree, text, rect) = info
    (w, h) = self.GetSize()
    wx.TipWindow(tree, text, maxLength=w, rectBound=rect)
def OnTreeChecked(self, event):
    # A tree-item checkbox toggled: launch or kill the associated command.
    self.OnChecked_obj(event.GetItem())
def OnChecked_obj(self, obj):
    # Common entry point for checkbox-like widgets and tree items.
    self.OnLaunchKill_obj(obj)
def OnHyperlinked(self, event):
    # Event wrapper: delegate to the object-based handler.
    self.OnHyperlinked_obj(event.GetEventObject())
def OnHyperlinked_obj(self, obj):
    # Open the modal parameter-config dialog for the config entry bound to obj.
    (pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
    if pdic is None or prm is None:
        return
    # push/pop 'dialog_type' so the dialog class knows which view to render
    dic_list_push(gdic, 'dialog_type', 'config')
    klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
    dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
    show_modal(dlg)
    dic_list_pop(gdic, 'dialog_type')
def obj_to_add_args(self, obj, msg_box=True):
    """Build the extra command-line argument list for obj's command.

    Returns a list of argument strings, None when there is nothing to add
    (or no config entry), and False when the user cancelled a dialog or a
    required parameter is missing.  msg_box=False suppresses all dialogs
    (used when killing a process).
    """
    (pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
    if pdic is None or prm is None:
        return None

    # optional camera selection dialog when the node needs camera info
    if 'need_camera_info' in gdic.get('flags', []) and msg_box:
        ids = self.camera_ids()
        if ids:
            var = self.get_var(prm, 'camera_id', {})
            var['choices'] = ids

            dic_list_push(gdic, 'dialog_type', 'sel_cam')
            klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
            dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
            dlg_ret = show_modal(dlg)
            dic_list_pop(gdic, 'dialog_type')
            if dlg_ret != 0:
                return False
        else:
            pdic['camera_id'] = ''

    # optional pre-launch parameter dialog
    if 'open_dialog' in gdic.get('flags', []) and msg_box:
        dic_list_push(gdic, 'dialog_type', 'open')
        klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
        dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
        dlg_ret = show_modal(dlg)
        dic_list_pop(gdic, 'dialog_type')
        if dlg_ret != 0:
            return False

    self.update_func(pdic, gdic, prm)
    s = ''

    # collect vars that contribute command-line params
    vars = []
    for var in prm.get('vars'):
        cmd_param = var.get('cmd_param')
        if cmd_param:
            vars.append(var)

    # move 'tail' params to the end, preserving relative order
    for var in vars[:]: # copy
        cmd_param = var.get('cmd_param')
        if cmd_param.get('tail'):
            vars.remove(var)
            vars.append(var)

    # drop vars hidden or disabled in the GUI
    for var in vars[:]: # copy
        name = var.get('name')
        flags = gdic.get(name, {}).get('flags', [])
        if 'hide' in flags or 'disable' in flags:
            vars.remove(var)

    for var in vars:
        cmd_param = var.get('cmd_param')
        name = var.get('name')
        v = pdic.get(name)
        if (v is None or v == '') and 'default' in cmd_param:
            v = cmd_param.get('default')
        # required parameter missing -> abort (and tell the user if allowed)
        if dic_eval_if_str(self, cmd_param, 'must') and (v is None or v == ''):
            print 'cmd_param', name, 'is required'
            if msg_box:
                wx.MessageBox('cmd_param ' + name + ' is required')
            return False
        if dic_eval_if_str(self, cmd_param, 'only_enable') and not v:
            continue
        if dic_eval_if_str(self, cmd_param, 'only_disable') and v:
            continue
        name = cmd_param.get('var_name', name)
        unpack = cmd_param.get('unpack')
        if unpack is not None:
            v = ' '.join( v.split(unpack) )
        add = ''
        dash = cmd_param.get('dash')
        if dash is not None:
            add += dash + name
        delim = cmd_param.get('delim')
        if delim is not None:
            str_v = str(v)
            if var.get('kind') is None:
                str_v = adjust_num_str(str_v)
            if var.get('kind') == 'path':
                # expand ~, $VARS and optionally make the path relative
                str_v = path_expand_cmd(str_v)
                str_v = os.path.expandvars(os.path.expanduser(str_v))
                relpath_from = var.get('relpath_from')
                if relpath_from:
                    relpath_from = path_expand_cmd(relpath_from)
                    relpath_from = os.path.expandvars(os.path.expanduser(relpath_from))
                    str_v = os.path.relpath(str_v, relpath_from)
            add += delim + str_v
        if add != '':
            s += add + ' '
    return s.strip(' ').split(' ') if s != '' else None
def obj_to_pdic_gdic_prm(self, obj, sys=False):
    """Resolve (pdic, gdic, prm) for a GUI object from self.config_dic.

    Tries a direct key lookup first; otherwise scans entries whose 'obj'
    is this object, restricted to the 'sys' param (sys=True) or any
    non-'sys' param (sys=False).  Returns (None, None, None) when absent.
    """
    info = self.config_dic.get(obj)
    if info is None:
        sys_prm = self.get_param('sys')
        prm_chk = lambda prm : prm is sys_prm if sys else prm is not sys_prm
        info = next( ( v for v in self.config_dic.values() if v.get('obj') is obj and prm_chk(v.get('param')) ), None)
        if info is None:
            return (None, None, None)
    pdic = info.get('pdic')
    prm = info.get('param')
    gdic = info.get('gdic')
    return (pdic, gdic, prm)
def obj_to_gdic(self, obj, def_ret=None):
    # GUI-dict for obj, or def_ret when obj is falsy or has no (truthy) gdic.
    (_, gdic, _) = self.obj_to_pdic_gdic_prm(obj) if obj else (None, None, None)
    return gdic if gdic else def_ret
def cfg_obj_dic(self, arg_dic, sys=False, def_ret=(None,{})):
    """First (cfg_obj, info_dict) in self.config_dic whose info matches every
    key/value of arg_dic and whose 'param' passes the sys filter.

    sys=True matches only the 'sys' param, sys=False only non-'sys' params,
    sys=None matches any.  NOTE(review): the default def_ret shares one
    mutable {} across calls - callers should treat a miss result as
    read-only; confirm before mutating.
    """
    sys_prm = self.get_param('sys')
    prm_chk = {
        True : (lambda prm : prm is sys_prm),
        False : (lambda prm : prm is not sys_prm),
        None : (lambda prm : True) }.get(sys)
    arg_dic_chk = lambda dic: all( [ dic.get(k) == v for (k,v) in arg_dic.items() ] )
    return next( ( (cfg_obj, dic) for (cfg_obj, dic) in self.config_dic.items() \
        if arg_dic_chk(dic) and prm_chk(dic.get('param')) ), def_ret)
def cfg_dic(self, arg_dic, sys=False, def_ret=None):
    """Return the config info dict matching arg_dic (see cfg_obj_dic).

    def_ret: value returned on a miss.  Defaults to a fresh empty dict per
    call - the previous `def_ret={}` default shared a single dict across
    every call, so a caller mutating a miss result would silently leak
    state into later lookups (the classic mutable-default-argument bug).
    """
    if def_ret is None:
        def_ret = {}
    (_, dic) = self.cfg_obj_dic(arg_dic, sys=sys, def_ret=(None, def_ret))
    return dic
def cfg_prm_to_obj(self, arg_dic, sys=False):
    # GUI object of the config entry matching arg_dic, or None on a miss.
    return self.cfg_dic(arg_dic, sys=sys).get('obj')
def name_to_pdic_gdic_prm(self, name, sys=False):
    # Look up (pdic, gdic, prm) by config entry name; each may be None.
    d = self.cfg_dic( {'name':name}, sys=sys )
    return ( d.get('pdic'), d.get('gdic'), d.get('param') )
def update_func(self, pdic, gdic, prm):
    """Refresh pdic from the GUI, fire hooks, and propagate the new values.

    For each var: take its widget value via the per-var 'func' (a callable,
    or a string eval'd against this frame) unless the value is already in
    pdic and no func exists; then run 'update_hook' and, for the triggering
    var (or 'every_time' vars), the 'hook_var' hook.  Finally publish the
    param topic (if any), set rosparams, refresh depend-enables, and re-apply
    CPU/schedule settings for the owning process.
    """
    for var in prm.get('vars', []):
        name = var.get('name')
        gdic_v = gdic.get(name, {})
        func = gdic_v.get('func')
        if func is None and name in pdic:
            continue
        v = var.get('v')
        if func is not None:
            # 'func' may be a code string; eval'd with self in scope
            v = eval(func) if type(func) is str else func()
        pdic[ name ] = v

        hook = gdic_v.get('update_hook')
        if hook:
            hook(v)
        hook_var = gdic_v.get('hook_var', {})
        every_time = 'every_time' in hook_var.get('flags', [])
        if var == gdic.get('update_func_arg_var') or every_time:
            hook = hook_var.get('hook')
            if hook:
                hook(hook_var.get('args', {}))

    if 'pub' in prm:
        self.publish_param_topic(pdic, prm)
    self.rosparam_set(pdic, prm)
    self.update_depend_enable(pdic, gdic, prm)

    d = self.cfg_dic( {'pdic':pdic, 'gdic':gdic, 'param':prm}, sys=True )
    self.update_proc_cpu(d.get('obj'), d.get('pdic'), d.get('param'))
def update_proc_cpu(self, obj, pdic=None, prm=None):
    """Apply nice value, CPU affinity and scheduling policy to obj's process
    and all of its children, from the 'sys' params (cpu_chks/nice/real_time/
    policy/prio).  No-op when obj is unchecked or has no running process.
    """
    if obj is None or not obj.GetValue():
        return
    (_, _, proc) = self.obj_to_cmd_dic_cmd_proc(obj)
    if proc is None:
        return
    if pdic is None or prm is None:
        (pdic, _, prm) = self.obj_to_pdic_gdic_prm(obj, sys=True)

    cpu_chks = self.param_value_get(pdic, prm, 'cpu_chks')
    # default: all CPUs enabled
    # NOTE(review): assumes a saved cpu_chks list has >= get_cpu_count()
    # entries; a shorter one would raise IndexError below - confirm.
    cpu_chks = cpu_chks if cpu_chks else [ True for i in range(get_cpu_count()) ]
    cpus = [ i for i in range(get_cpu_count()) if cpu_chks[i] ]
    nice = self.param_value_get(pdic, prm, 'nice', 0)

    d = { 'OTHER':SCHED_OTHER, 'FIFO':SCHED_FIFO, 'RR':SCHED_RR }
    policy = SCHED_OTHER
    priority = 0
    if self.param_value_get(pdic, prm, 'real_time', False):
        policy = d.get(self.param_value_get(pdic, prm, 'policy', 'FIFO'), SCHED_FIFO)
        priority = self.param_value_get(pdic, prm, 'prio', 0)

    procs = [ proc ] + get_proc_children(proc, r=True)
    for proc in procs:
        print 'pid={}'.format(proc.pid)
        if get_proc_nice(proc) != nice:
            print 'nice {} -> {}'.format(get_proc_nice(proc), nice)
            if set_process_nice(proc, nice) is False:
                print 'Err set_process_nice()'
        if get_proc_cpu_affinity(proc) != cpus:
            print 'cpus {} -> {}'.format(get_proc_cpu_affinity(proc), cpus)
            if set_process_cpu_affinity(proc, cpus) is False:
                print 'Err set_process_cpu_affinity()'
        policy_str = next( (k for (k,v) in d.items() if v == policy), '?')
        print 'sched policy={} prio={}'.format(policy_str, priority)
        if set_scheduling_policy(proc, policy, priority) is False:
            print 'Err scheduling_policy()'
def param_value_get(self, pdic, prm, name, def_ret=None):
    """Current value of `name` from pdic, falling back to the param's
    declared default, then to def_ret."""
    fallback = self.param_default_value_get(prm, name, def_ret)
    if pdic:
        return pdic.get(name, fallback)
    return fallback
def param_default_value_get(self, prm, name, def_ret=None):
    """Declared default ('v') of the var named `name` in prm, or def_ret
    when prm is falsy or no such var exists."""
    if not prm:
        return def_ret
    for var in prm.get('vars'):
        if var.get('name') == name:
            return var.get('v')
    return def_ret
def update_depend_enable(self, pdic, gdic, prm):
    """Enable/disable each var's panel based on the value of the var it
    declares a 'depend' on, mapped through the optional 'depend_bool'
    expression (a lambda source string, eval'd; defaults to bool(v))."""
    for var in prm.get('vars', []):
        name = var.get('name')
        gdic_v = gdic.get(name, {})
        depend = gdic_v.get('depend')
        if depend is None:
            continue
        vp = gdic_v.get('var')
        if vp is None:
            continue
        v = pdic.get(depend)
        if v is None:
            continue
        depend_bool = eval( gdic_v.get('depend_bool', 'lambda v : bool(v)') )
        v = depend_bool(v)
        enables_set(vp, 'depend', v)
def publish_param_topic(self, pdic, prm):
    """Fill a ROS message of type prm['msg'] from pdic (keys are dotted
    message paths) and publish it on prm['pub']."""
    pub = prm['pub']
    klass_msg = globals()[ prm['msg'] ]
    msg = klass_msg()

    for (name, v) in pdic.items():
        # special-case: GUI convention for /twist_cmd negates angular z
        if prm.get('topic') == '/twist_cmd' and name == 'twist.angular.z':
            v = -v
        (obj, attr) = msg_path_to_obj_attr(msg, name)
        if obj and attr in obj.__slots__:
            type_str = obj._slot_types[ obj.__slots__.index(attr) ]
            setattr(obj, attr, str_to_rosval(v, type_str, v))

    if 'stamp' in prm.get('flags', []):
        (obj, attr) = msg_path_to_obj_attr(msg, 'header.stamp')
        setattr(obj, attr, rospy.get_rostime())

    pub.publish(msg)
def rosparam_set(self, pdic, prm):
    """Push pdic values with a 'rosparam' mapping to the ROS parameter
    server via the `rosparam` CLI, skipping values already up to date.
    An empty value deletes the parameter (if it exists)."""
    rosparams = None
    for var in prm.get('vars', []):
        name = var['name']
        if 'rosparam' not in var or name not in pdic:
            continue
        rosparam = var['rosparam']
        v = pdic.get(name)
        v = str(v)
        # Python bool reprs -> rosparam's lowercase booleans
        cvdic = { 'True':'true', 'False':'false' }
        if v in cvdic:
            v = cvdic.get(v)
        # fetch the existing param list once, lazily
        if rosparams is None:
            cmd = [ 'rosparam', 'list' ]
            rosparams = subprocess.check_output(cmd).strip().split('\n')
        nm = rosparam
        nm = ('/' if len(nm) > 0 and nm[0] != '/' else '') + nm
        exist = nm in rosparams
        if exist:
            cmd = [ 'rosparam', 'get', rosparam ]
            ov = subprocess.check_output(cmd).strip()
            if ov == v:
                continue
        elif v == '':
            continue
        cmd = [ 'rosparam', 'set', rosparam, v ] if v != '' else [ 'rosparam', 'delete', rosparam ]
        print(cmd)
        subprocess.call(cmd)
#
# Sensing Tab
#
def OnSensingDriver(self, event):
    # Sensing tab driver checkbox toggled -> launch/kill the driver.
    self.OnChecked_obj(event.GetEventObject())
def OnRosbagRecord(self, event):
    # Show the rosbag-record dialog; the button itself stays unchecked
    # (it only opens the dialog, it is not a launch toggle).
    self.dlg_rosbag_record.show()
    obj = event.GetEventObject()
    set_val(obj, False)
def create_checkboxes(self, dic, panel, sizer, probe_dic, run_dic, bind_handler):
    """Recursively build the Sensing-tab checkbox tree from a YAML dict.

    A dict with 'subs' becomes a (static-box) sizer of its children; a leaf
    becomes a wx.CheckBox bound to bind_handler, registered in probe_dic /
    run_dic, with an optional '[config]' link when it declares a 'param'.
    The top-level call passes sizer=None and installs the result on panel.
    """
    if 'name' not in dic:
        return
    obj = None
    bdr_flg = wx.ALL
    if 'subs' in dic:
        lst = []
        for d in dic['subs']:
            self.create_checkboxes(d, panel, lst, probe_dic, run_dic, bind_handler)
        if dic['name']:
            obj = static_box_sizer(panel, dic.get('name'))
            set_tooltip(obj.GetStaticBox(), dic)
        else:
            obj = wx.BoxSizer(wx.VERTICAL)
        for (o, flg) in lst:
            obj.Add(o, 0, wx.EXPAND | flg, 4)
    else:
        obj = wx.CheckBox(panel, wx.ID_ANY, dic['name'])
        set_tooltip(obj, dic)
        self.Bind(wx.EVT_CHECKBOX, bind_handler, obj)
        bdr_flg = wx.LEFT | wx.RIGHT
        if 'probe' in dic:
            probe_dic[obj] = (dic['probe'], None)
        if 'run' in dic:
            run_dic[obj] = (dic['run'], None)
        if 'param' in dic:
            obj = self.add_config_link(dic, panel, obj)
        else:
            gdic = self.gdic_get_1st(dic)
            self.add_cfg_info(obj, obj, dic.get('name'), None, gdic, False, None)
    if sizer is not None:
        sizer.append((obj, bdr_flg))
    else:
        panel.SetSizer(obj)
def add_config_link(self, dic, panel, obj):
    """Wrap obj with a '[config]' hyperlink that opens its param dialog;
    registers the config entry and returns the combined horizontal sizer."""
    cfg_obj = wx.HyperlinkCtrl(panel, wx.ID_ANY, '[config]', '')
    fix_link_color(cfg_obj)
    self.Bind(wx.EVT_HYPERLINK, self.OnConfig, cfg_obj)
    add_objs = (obj, wx.StaticText(panel, wx.ID_ANY, ' '), cfg_obj)
    hszr = sizer_wrap(add_objs, wx.HORIZONTAL)
    name = dic['name']
    pdic = self.load_dic_pdic_setup(name, dic)
    gdic = self.gdic_get_1st(dic)
    prm = self.get_param(dic.get('param'))
    self.add_cfg_info(cfg_obj, obj, name, pdic, gdic, True, prm)
    return hszr
def camera_ids(self):
    """List camera id prefixes derived from /image_raw topics, via a
    rostopic|sed shell pipeline; empty when synchronization is enabled.
    (shell=True is safe here: the command string is a fixed literal.)"""
    if self.button_synchronization.GetValue():
        return []
    cmd = "rostopic list | sed -n 's|/image_raw||p' | sed 's/^$/\//'"
    return subprocess.check_output(cmd, shell=True).strip().split()
def cam_id_to_obj(self, cam_id, v):
    # Get (or create) the pseudo-checkbox object for a camera id and set
    # its boolean value to v.
    cam_id_obj = self.cfg_prm_to_obj( {'name':cam_id} )
    if cam_id_obj is None:
        cam_id_obj = StrValObj(cam_id, v)
    cam_id_obj.SetValue(v)
    return cam_id_obj
def camera_id_hook(self, args):
    """Dialog hook: when the selected camera_id changes to another known id,
    detach the param panel and end the dialog with code (100 + index)."""
    new_id = args.get('pdic', {}).get('camera_id', '')
    ids = args.get('ids', [])
    if new_id not in ids:
        return
    pp = args.get('param_panel')
    if pp:
        pp.detach_func()
    dlg = args.get('dlg')
    if dlg:
        dlg.EndModal(ids.index(new_id) + 100)
def OnCalibrationPublisher(self, event):
    """Launch/kill calibration publishers for every detected camera.

    On enable: snapshot each camera's pdic, show the per-camera dialog in a
    loop (camera_id_hook ends the dialog with 100+idx when the user picks
    another camera), then launch/kill one publisher per camera id.  Cancel
    restores the snapshots and unchecks the button.
    """
    obj = event.GetEventObject()
    (_, gdic_org, prm) = self.obj_to_pdic_gdic_prm(obj)
    if obj.GetValue():
        gdic_org['ids'] = self.camera_ids()
    ids = gdic_org.get('ids', [])

    # no cameras -> behave like a plain launch/kill toggle
    if ids == []:
        self.OnLaunchKill(event)
        return
    #
    # setup
    #
    (cmd_dic, cmd, _) = self.obj_to_cmd_dic_cmd_proc(obj)

    flags = gdic_org.get('flags', [])[:] # copy
    if 'open_dialog' in flags:
        flags.remove('open_dialog')

    pdic_baks = {}
    for cam_id in ids:
        (pdic_a, gdic_a, _) = self.name_to_pdic_gdic_prm(cam_id)
        pdic = pdic_a if pdic_a else self.load_dic_pdic_setup(cam_id, {})
        pdic_baks[cam_id] = pdic.copy()
        gdic = gdic_a if gdic_a else gdic_org.copy()
        gdic['flags'] = flags

        cam_id_obj = self.cam_id_to_obj(cam_id, obj.GetValue())
        if not hasattr(cam_id_obj, 'enables_proxy'):
            cam_id_obj.enables_proxy = (obj, cam_id_obj.s)
        if not pdic_a or not gdic_a:
            self.add_cfg_info(cam_id_obj, cam_id_obj, cam_id, pdic, gdic, False, prm)
        if not cam_id_obj in cmd_dic:
            cmd_dic[ cam_id_obj ] = (cmd, None)

    var = self.get_var(prm, 'camera_id', {})
    var['choices'] = ids
    #
    # Dialog
    #
    cam_id = ids[0]
    while obj.GetValue():
        (pdic, gdic, _) = self.name_to_pdic_gdic_prm(cam_id)
        pdic['camera_id'] = cam_id
        dic_list_push(gdic, 'dialog_type', 'open2')
        klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
        dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)

        gdic_v = dic_getset(gdic, 'camera_id', {})
        args = { 'pdic':pdic, 'ids':ids, 'param_panel':gdic.get('param_panel'), 'dlg':dlg }
        gdic_v['hook_var'] = { 'hook':self.camera_id_hook, 'args':args }

        dlg_ret = show_modal(dlg)

        dic_list_pop(gdic, 'dialog_type')
        pdic['camera_id'] = cam_id # restore

        if dlg_ret == 0: # OK
            break

        idx = dlg_ret - 100
        if idx < 0 or len(ids) <= idx: # Cancel
            # roll back every camera's params and uncheck the button
            for cam_id in ids:
                (pdic, _, _) = self.name_to_pdic_gdic_prm(cam_id)
                pdic.update(pdic_baks.get(cam_id))
            set_val(obj, False)
            return

        # Menu changed
        cam_id = ids[idx]
    #
    # Launch / Kill
    #
    for cam_id in ids:
        cam_id_obj = self.cfg_prm_to_obj( {'name':cam_id} )
        (pdic, _, _) = self.obj_to_pdic_gdic_prm(cam_id_obj)
        pdic['solo_camera'] = False
        #print '@', cam_id, cam_id_obj.GetValue()
        self.OnLaunchKill_obj(cam_id_obj)
#
# Simulation Tab
#
def rosbag_info_hook(self, v):
    # Path var changed: fetch `rosbag info` on a worker thread (non-blocking).
    if not v:
        return
    th_start(self.rosbag_info_hook_th, {'v':v} )
def rosbag_info_hook_th(self, ev, v): # thread
    # Worker: run `rosbag info <v>` and display its output on the GUI thread.
    err = subprocess.STDOUT
    s = subprocess.check_output([ 'rosbag', 'info', v ], stderr=err).strip()
    wx.CallAfter(self.label_rosbag_info.SetLabel, s)
    wx.CallAfter(self.label_rosbag_info.GetParent().FitInside)
#
# Data Tab
#
#
# Stauts tab
#
def info_col(self, v, v_yellow, v_red, col_normal, col_red):
    """Colour for a load value: col_normal below v_yellow, col_red at or
    above v_red, and the per-channel midpoint blend in between."""
    if v < v_yellow:
        return col_normal
    if v >= v_red:
        return col_red
    # halfway blend of each RGB channel
    return tuple((a + b) / 2 for (a, b) in zip(col_normal, col_red))
def mem_kb_info(self):
    """Parse /proc/meminfo and return (total_kb, used_kb), where used
    excludes MemFree + Buffers + Cached.
    NOTE: the tuple-parameter lambda below is Python 2-only syntax."""
    lines = subprocess.check_output('cat /proc/meminfo', shell=True).strip().split(os.linesep)
    cvt = lambda (k, v): ( k.replace(':', ''), int(v) )
    d = dict( map( lambda s: cvt( filter( lambda s: s!='kB', s.split() ) ), lines ) )
    total = d.get('MemTotal')
    free = d.get('MemFree') + d.get('Buffers') + d.get('Cached')
    return (total, total - free)
def toprc_create(self):
    """Generate a ~/.toprc by driving an interactive `top` in a pty:
    '1' (per-cpu rows), 'c' (full command line), 'W' (write rc), 'q'."""
    (child_pid, fd) = pty.fork()
    if child_pid == 0: # child
        os.execvp('top', ['top'])
    else: #parent
        sec = 0.2
        for s in ['1', 'c', 'W', 'q']:
            time.sleep(sec)
            os.write(fd, s)
def toprc_setup(self, toprc, backup):
    # Move the user's existing toprc aside, then write our own (see
    # toprc_create); toprc_restore() undoes this.
    if os.path.exists(toprc):
        os.rename(toprc, backup)
    self.toprc_create()
def toprc_restore(self, toprc, backup):
    """Undo toprc_setup(): drop the temporary toprc (if present) and move
    the user's backup back into place (if one was made).

    The original unconditionally called os.remove(toprc), which raises
    OSError when the temporary rc was never written (e.g. `top` failed in
    toprc_create), aborting the monitor thread's cleanup; guard it.
    """
    if os.path.exists(toprc):
        os.remove(toprc)
    if os.path.exists(backup):
        os.rename(backup, toprc)
# top command thread
def top_cmd_th(self, ev, setting, cpu_ibls, mem_ibl, toprc, backup):
    """Status-tab monitor thread: every `interval` seconds run `top -b`,
    update per-CPU and memory bars, flash an alert thread when thresholds
    are crossed, and show the 5 busiest processes.  Restores the user's
    toprc on exit.  All widget updates go through wx.CallAfter."""
    interval = setting.get('interval', 3)
    alert_level = setting.get('alert_level', {})
    rate_per_cpu = alert_level.get('rate_per_cpu', 80)
    rate_per_cpu_yellow = alert_level.get('rate_per_cpu_yellow', 80)
    rate_cpu = alert_level.get('rate_cpu', 80)
    rate_mem = alert_level.get('rate_mem', 80)
    rate_mem_yellow = alert_level.get('rate_mem_yellow', 80)

    for ibl in cpu_ibls:
        ibl.lmt_bar_prg = rate_per_cpu
    mem_ibl.lmt_bar_prg = rate_mem

    alerted = False
    cpu_n = get_cpu_count()

    while not ev.wait(interval):
        # take the second top frame (the first one has stale deltas)
        s = subprocess.check_output(['sh', '-c', 'env COLUMNS=512 top -b -n 2 -d 0.1']).strip()
        i = s.rfind('\ntop -') + 1
        s = s[i:]
        wx.CallAfter(self.label_top_cmd.SetLabel, s)
        wx.CallAfter(self.label_top_cmd.GetParent().FitInside)

        # per-CPU usage lines ("%Cpu0 : ...")
        k = '%Cpu'
        fv_sum = 0
        i = 0
        for t in s.split('\n'):
            if t[:len(k)] != k:
                continue
            lst = t[1:].split()
            v = lst[1] if lst[1] != ':' else lst[2]
            if v[0] == ':':
                v = v[1:]
            fv = str_to_float(v)
            col = self.info_col(fv, rate_per_cpu_yellow, rate_per_cpu, (64,64,64), (200,0,0))

            if i < cpu_n:
                ibl = cpu_ibls[i]
                wx.CallAfter(ibl.lb_set, v+'%', col)
                wx.CallAfter(ibl.bar_set, int(fv))
                fv_sum += fv
            i += 1

        k = 'KiB Mem:'
        (total, used) = self.mem_kb_info()
        rate = 100 * used / total

        # scale units for display (stops early for small values)
        for u in [ 'KB', 'MB', 'GB', 'TB' ]:
            if total <= 10 * 1024 or used <= 10:
                break
            total /= 1024
            used /= 1024

        col = self.info_col(rate, rate_mem_yellow, rate_mem, (64,64,64), (200,0,0))

        tx = str(used) + u + '/' + str(total) + u + '(' + str(rate) + '%)'
        wx.CallAfter(mem_ibl.lb_set, tx, col)
        wx.CallAfter(mem_ibl.bar_set, rate)

        is_alert = (fv_sum >= rate_cpu * cpu_n) or rate >= rate_mem

        # --> for test
        if os.path.exists('/tmp/alert_test_on'):
            is_alert = True
        if os.path.exists('/tmp/alert_test_off'):
            is_alert = False
        # <-- for test

        # start/stop the background flash thread on threshold transitions
        if is_alert and not alerted:
            thinf = th_start(self.alert_th, {'bgcol':(200,50,50)})
            alerted = True
        if not is_alert and alerted:
            th_end(thinf)
            alerted = False

        # top5
        i = s.find('\n\n') + 2
        lst = s[i:].split('\n')
        hd = lst[0]
        top5 = lst[1:1+5]
        i = hd.rfind('COMMAND')
        cmds = [ line[i:].split(' ')[0] for line in top5 ]
        i = hd.find('%CPU')
        loads = [ line[i-1:].strip().split(' ')[0] for line in top5 ]
        for (lb, cmd, load) in zip(self.lb_top5, cmds, loads):
            col = self.info_col(str_to_float(load), rate_per_cpu_yellow, rate_per_cpu, (64,64,64), (200,0,0))
            wx.CallAfter(lb.SetForegroundColour, col)
            wx.CallAfter(lb.SetLabel, cmd + ' (' + load + ' %CPU)')

    self.toprc_restore(toprc, backup)
def alert_th(self, bgcol, ev):
    # Alert thread: request user attention, then flash all tab backgrounds
    # between bgcol and the default colour until ev is set.
    wx.CallAfter(self.RequestUserAttention)
    c = bgcol
    o = wx.NullColour
    while not ev.wait(0.5):
        for col in [ c, o, c, o, c, o ]:
            wx.CallAfter(self.set_bg_all_tabs, col)
            time.sleep(0.05)
def log_th(self, file, que, ev):
    """Log pump thread: forward lines read from `file` onto queue `que`
    until EOF (empty readline) or until `ev` is set."""
    while True:
        if ev.wait(0):
            break
        line = file.readline()
        if not line:
            # EOF: the producing process closed its end
            break
        que.put(line)
def logout_th(self, que, interval, tc, ev):
    """Log output thread, with two roles depending on `que`:

    - stdout/stderr queues: forward entries to the aggregate log_que and,
      when the matching checkbox is on, to the GUI display queue.
    - the aggregate log_que: print each entry and append it to syslog or
      to the file named by status_dic['log_path'] (if configured).
    Runs until `ev` is set.
    """
    if que == self.log_que_stdout or que == self.log_que_stderr:
        while not ev.wait(0):
            try:
                s = que.get(timeout=1)
            except Queue.Empty:
                continue
            self.log_que.put(s)

            if interval <= 0:
                continue

            ckbox = self.checkbox_stdout if que == self.log_que_stdout else self.checkbox_stderr
            if ckbox.GetValue():
                self.log_que_show.put( cut_esc(s) )
    else: # == self.log_que
        f = None
        path = self.status_dic.get('log_path')
        is_syslog = (path == 'syslog')

        if is_syslog:
            ident = sys.argv[0].split('/')[-1]
            syslog.openlog(ident, syslog.LOG_PID | syslog.LOG_CONS)
        elif path:
            path = os.path.expandvars(os.path.expanduser(path))
            f = open(path, 'a') if path else None

        while not ev.wait(0):
            try:
                s = que.get(timeout=1)
            except Queue.Empty:
                continue
            print s.strip()
            sys.stdout.flush()

            # strip terminal escapes before persisting
            s = cut_esc(s)
            if is_syslog:
                syslog.syslog(s)
            elif f:
                f.write(s)
                f.flush()
        if is_syslog:
            syslog.closelog()
        if f:
            f.close()
def logshow_th(self, que, interval, tc, ev):
    # GUI log display thread: append queued lines to text control `tc`
    # every `interval` seconds; when both stdout and stderr display are
    # off, drain the queue and clear the control instead.
    while not ev.wait(interval):
        try:
            s = que.get(timeout=1)
        except Queue.Empty:
            continue
        wx.CallAfter(append_tc_limit, tc, s)

        # que clear
        if self.checkbox_stdout.GetValue() is False and \
           self.checkbox_stderr.GetValue() is False and \
           que.qsize() > 0:
            que_clear(que)
            wx.CallAfter(tc.Clear)
#
# for Topics tab
#
def OnRefreshTopics(self, event):
    # Topics tab refresh button.
    self.refresh_topics_list()
def refresh_topics_list(self):
    """Rebuild the Topics-tab hyperlink list from `rostopic list`, clear the
    info label, stop any running echo, and reset the echo text control."""
    lst = subprocess.check_output([ 'rostopic', 'list' ]).strip().split('\n')
    panel = self.panel_topics_list
    szr = self.sizer_topics_list
    for obj in self.topics_list:
        szr.Remove(obj)
        obj.Destroy()
    self.topics_list = []
    for topic in lst:
        obj = wx.HyperlinkCtrl(panel, wx.ID_ANY, topic, '')
        self.Bind(wx.EVT_HYPERLINK, self.OnTopicLink, obj)
        szr.Add(obj, 0, wx.LEFT, 4)
        fix_link_color(obj)
        self.topics_list.append(obj)
    szr.Layout()
    panel.SetVirtualSize(szr.GetMinSize())

    # info clear
    lb = self.label_topics_info
    lb.SetLabel('')

    # echo clear
    self.topics_proc_th_end()

    # wait que clear
    # (busy-wait until the echo display thread drains its queue)
    while self.topics_echo_que.qsize() > 0:
        time.sleep(0.1)

    tc = self.text_ctrl_topics_echo
    tc.Enable(False)
    wx.CallAfter(tc.Clear)
    wx.CallAfter(tc.Enable, True)
    self.topics_echo_sum = 0
    self.topic_echo_curr_topic = None
def OnEcho(self, event):
    # Echo checkbox toggled: start echoing the currently selected topic,
    # or stop the echo process.
    if self.checkbox_topics_echo.GetValue() and self.topic_echo_curr_topic:
        self.topics_proc_th_start(self.topic_echo_curr_topic)
    else:
        self.topics_proc_th_end()
def OnTopicLink(self, event):
    # A topic hyperlink was clicked: show `rostopic info` for it and
    # (re)start the echo if echoing is enabled.
    obj = event.GetEventObject()
    topic = obj.GetLabel()
    self.topic_echo_curr_topic = topic

    # info
    info = subprocess.check_output([ 'rostopic', 'info', topic ]).strip()
    lb = self.label_topics_info
    lb.SetLabel(info)
    lb.GetParent().FitInside()

    # echo
    self.topics_proc_th_end()
    if self.checkbox_topics_echo.GetValue():
        self.topics_proc_th_start(topic)
def topics_proc_th_start(self, topic):
    # Spawn `rostopic echo <topic>` (stderr merged into stdout) and the
    # reader thread that pumps its output into topics_echo_que.
    out = subprocess.PIPE
    err = subprocess.STDOUT
    self.topics_echo_proc = psutil.Popen([ 'rostopic', 'echo', topic ], stdout=out, stderr=err)

    self.topics_echo_thinf = th_start(self.topics_echo_th)
def topics_proc_th_end(self):
    # Stop the echo reader thread (if running) and terminate the
    # `rostopic echo` process tree.
    thinf = self.topics_echo_thinf
    if thinf:
        th_end(thinf)
        self.topics_echo_thinf = None

    proc = self.topics_echo_proc
    if proc:
        terminate_children(proc)
        terminate(proc)
        #proc.wait()
        self.topics_echo_proc = None
def topics_echo_th(self, ev):
    """Echo reader thread: switch the child's stdout to non-blocking and
    forward it one character at a time into topics_echo_que while the
    display checkbox is on; drains the queue on exit."""
    if not self.topics_echo_proc:
        return
    file = self.topics_echo_proc.stdout
    # non-blocking reads so ev can be polled between characters
    fl = fcntl.fcntl(file.fileno(), fcntl.F_GETFL)
    fcntl.fcntl(file.fileno(), fcntl.F_SETFL, fl | os.O_NONBLOCK)

    while not ev.wait(0):
        try:
            s = file.read(1)
        except:
            # EAGAIN: no data available yet on the non-blocking fd
            continue
        if not s:
            break
        if self.checkbox_topics_echo.GetValue():
            self.topics_echo_que.put(s)

    que_clear(self.topics_echo_que)
def topics_echo_show_th(self, ev):
    """Echo display thread: periodically drain topics_echo_que into the
    echo text control, dropping the oldest characters so the control never
    holds more than gui_chars_limit characters."""
    que = self.topics_echo_que
    interval = self.topics_dic.get('gui_update_interval_ms', 100) * 0.001
    chars_limit = self.topics_dic.get('gui_chars_limit', 10000)
    tc = self.text_ctrl_topics_echo
    while not ev.wait(interval):
        qsz = que.qsize()
        if qsz <= 0:
            continue
        # discard overflow beyond the display limit
        if qsz > chars_limit:
            over = qsz - chars_limit
            for i in range(over):
                try:
                    que.get(timeout=1)
                except Queue.Empty:
                    break
            qsz = chars_limit
        arr = []
        for i in range(qsz):
            try:
                s = que.get(timeout=1)
            except Queue.Empty:
                s = ''
            arr.append(s)
        s = ''.join(arr)

        # track total shown and compute how much to trim from the front
        self.topics_echo_sum += len(s)
        rm_chars = 0
        if self.topics_echo_sum > chars_limit:
            rm_chars = self.topics_echo_sum - chars_limit
            self.topics_echo_sum = chars_limit

        if self.checkbox_topics_echo.GetValue():
            wx.CallAfter(append_tc_limit, tc, s, rm_chars)
#
# State Tabs
#
def getStateId(self, s_text):
    """Map a state button label to its numeric state id.

    Checks mainstate_dic first, then substate_dic; returns -99 for an
    unknown label.  Uses the `in` operator instead of dict.has_key(),
    which is deprecated in Python 2 and removed in Python 3.
    """
    if s_text in self.mainstate_dic:
        return self.mainstate_dic[s_text]
    if s_text in self.substate_dic:
        return self.substate_dic[s_text]
    return -99
def OnState(self, event):
    # State tab button clicked: publish the button's state id on state_cmd.
    pub = rospy.Publisher('state_cmd', std_msgs.msg.Int32, queue_size=10)
    msg = std_msgs.msg.Int32()
    clicked_event = event.GetEventObject()
    msg.data = self.getStateId(clicked_event.GetLabel())
    pub.publish(msg)
#
# Common Utils
#
def set_param_panel(self, obj, parent):
    # Embed a ParamPanel for obj's params into `parent`, and register the
    # panel so its enabled state toggles with the launch button.
    (pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
    panel = ParamPanel(parent, frame=self, pdic=pdic, gdic=gdic, prm=prm)
    sizer_wrap((panel,), wx.VERTICAL, 0, wx.EXPAND, 0, parent)
    k = 'ext_toggle_enables'
    gdic[ k ] = gdic.get(k, []) + [ panel ]
def obj_to_varpanel(self, obj, var_name):
    # The VarPanel widget for var_name in obj's gdic, or None.
    gdic = self.obj_to_gdic(obj, {})
    return gdic.get(var_name, {}).get('var')
def obj_to_varpanel_tc(self, obj, var_name):
    # The text control of var_name's VarPanel, or None when absent.
    vp = self.obj_to_varpanel(obj, var_name)
    return vp.tc if vp and vp.tc else None
def OnConfig(self, event):
    # '[config]' link on the Sensing tab -> open the parameter dialog.
    self.OnHyperlinked_obj(event.GetEventObject())
def add_params(self, params):
    # Register param definitions; params that declare a topic+msg get a
    # latched ROS publisher attached as prm['pub'].
    for prm in params:
        if 'topic' in prm and 'msg' in prm:
            klass_msg = globals()[ prm['msg'] ]
            prm['pub'] = rospy.Publisher(prm['topic'], klass_msg, latch=True, queue_size=10)
    self.params += params
def gdic_get_1st(self, dic):
    """Return dic's 'gui' dict (a fresh one when missing) with this frame's
    update_func installed under 'update_func'."""
    gui = dic.get('gui', {})
    gui['update_func'] = self.update_func
    return gui
def add_cfg_info(self, cfg_obj, obj, name, pdic, gdic, run_disable, prm):
    """Register a config entry for cfg_obj in self.config_dic."""
    self.config_dic[cfg_obj] = {
        'obj': obj,
        'name': name,
        'pdic': pdic,
        'gdic': gdic,
        'run_disable': run_disable,
        'param': prm,
    }
def get_param(self, prm_name):
    """First entry of self.params whose 'name' equals prm_name, else None."""
    for prm in self.params:
        if prm['name'] == prm_name:
            return prm
    return None
def get_var(self, prm, var_name, def_ret=None):
    """The var dict named var_name from prm['vars'], or def_ret."""
    for var in prm.get('vars'):
        if var.get('name') == var_name:
            return var
    return def_ret
def obj_to_cmd_dic(self, obj):
    """The command dict (from self.all_cmd_dics) holding obj as a key, or None."""
    for cmd_dic in self.all_cmd_dics:
        if obj in cmd_dic:
            return cmd_dic
    return None
def obj_to_cmd_dic_cmd_proc(self, obj):
    """Resolve obj to (cmd_dic, cmd, proc); (None, None, None) when obj is
    in no command dict."""
    cmd_dic = self.obj_to_cmd_dic(obj)
    if cmd_dic is not None:
        (cmd, proc) = cmd_dic.get(obj, (None, None))
        return (cmd_dic, cmd, proc)
    return (None, None, None)
def OnLaunchKill(self, event):
    # Generic launch/kill toggle handler.
    self.OnLaunchKill_obj(event.GetEventObject())
def OnLaunchKill_obj(self, obj):
    """Launch (obj checked) or kill (unchecked) the command bound to obj,
    syncing aliased widgets, building extra args, and updating enables and
    CPU settings afterwards.  Reverts the toggle when arg building fails."""
    self.alias_sync(obj)
    obj = self.alias_grp_top_obj(obj)
    v = obj.GetValue()
    add_args = self.obj_to_add_args(obj, msg_box=v) # no open dialog at kill
    if add_args is False:
        # dialog cancelled or required param missing: undo the toggle
        set_val(obj, not v)
        return
    (cmd_dic, _, proc_bak) = self.obj_to_cmd_dic_cmd_proc(obj)
    self.launch_kill_proc(obj, cmd_dic, add_args=add_args)
    (_, _, proc) = self.obj_to_cmd_dic_cmd_proc(obj)
    if proc != proc_bak:
        self.toggle_enable_obj(obj)
    if proc:
        self.update_proc_cpu(obj)
def OnRosbagPlay(self, event):
    """Play/Stop/Pause buttons for rosbag playback.  Play and Stop toggle
    the sim_time var and (re)launch the play command; Pause writes a space
    to the running process's stdin (rosbag play's pause key)."""
    obj = event.GetEventObject()

    play = self.button_play_rosbag_play
    stop = self.button_stop_rosbag_play
    pause = self.button_pause_rosbag_play

    (_, _, prm) = self.obj_to_pdic_gdic_prm(play)
    var = self.get_var(prm, 'sim_time', {})

    if obj == play:
        var['v'] = True
        self.OnLaunchKill_obj(play)
        button_color_change(play)
        set_val(stop, False)
        set_val(pause, False)
    elif obj == stop:
        set_val(stop, True)
        set_val(play, False)
        set_val(pause, False)
        var['v'] = False
        self.OnLaunchKill_obj(play)
        button_color_change(stop)
    elif obj == pause:
        (_, _, proc) = self.obj_to_cmd_dic_cmd_proc(play)
        if proc:
            proc.stdin.write(' ')
def OnFtrace(self, event):
    # Toggle the ftrace helper node; keeps its process handle in
    # self.ftrace_proc_ so the next toggle can kill it.
    obj = event.GetEventObject()
    cmd = 'rosrun runtime_manager ftrace.py'
    v = obj.GetValue()
    self.ftrace_proc_ = self.launch_kill(v, cmd,
        None if v else self.ftrace_proc_, obj=obj)
def stdout_file_search(self, file, k):
    """Read `file` one character at a time until a line containing `k` is
    seen; return the text on that line after `k`, or None at EOF."""
    line = ''
    while True:
        c = file.read(1)
        if not c:
            # EOF before any matching line
            return None
        if c != '\r' and c != '\n':
            line += c
            continue
        line = line.strip()
        if k in line:
            return line[line.find(k) + len(k):]
        line = ''
# thread
def point_cloud_progress_bar(self, file, ev):
    """Thread: track PCD loading progress by scanning the loader's stdout
    for 'load ' lines; each success advances the bar by one file out of the
    comma-separated path_pcd list, a 'failed ' line steps it back."""
    obj = self.button_point_cloud
    (pdic, _, _) = self.obj_to_pdic_gdic_prm(obj)
    n = len(pdic.get('path_pcd', '').split(','))
    if n == 0:
        return
    i = 0
    while not ev.wait(0):
        s = self.stdout_file_search(file, 'load ')
        if not s:
            break
        err_key = 'failed '
        if s[:len(err_key)] != err_key:
            i += 1
        else:
            i -= 1
        print s
        wx.CallAfter(self.label_point_cloud_bar.set, 100 * i / n)
    wx.CallAfter(self.label_point_cloud_bar.clear)
# thread
def rosbag_play_progress_bar(self, file, ev):
    """Thread: parse 'Duration: <pos> / <total>' lines from rosbag play's
    stdout and update the playback progress bar and position labels;
    clears them on exit."""
    while not ev.wait(0):
        s = self.stdout_file_search(file, 'Duration:')
        if not s:
            break
        lst = s.split()
        pos = str_to_float(lst[0])
        # lst[1] is '/'
        total = str_to_float(lst[2])
        if total == 0:
            continue
        prg = int(100 * pos / total + 0.5)
        pos = str(int(pos))
        total = str(int(total))

        wx.CallAfter(self.label_rosbag_play_bar.set, prg)
        wx.CallAfter(self.label_rosbag_play_pos.SetLabel, pos)
        wx.CallAfter(self.label_rosbag_play_total.SetLabel, total)

    wx.CallAfter(self.label_rosbag_play_bar.clear)
    wx.CallAfter(self.label_rosbag_play_pos.SetLabel, '')
    wx.CallAfter(self.label_rosbag_play_total.SetLabel, '')
def alias_sync(self, obj, v=None):
    """Propagate obj's enabled state and value to every other widget in its
    alias group.  NOTE(review): `key` is only bound when obj supports
    IsEnabled(); the en-is-not-None guard below keeps that safe since `en`
    stays None otherwise - confirm if widget types change."""
    en = None
    if getattr(obj, 'IsEnabled', None):
        (key, en) = enables_get_last(obj)
        if not key:
            en = obj.IsEnabled()
    grp = self.alias_grp_get(obj)
    if getattr(obj, 'GetValue', None):
        v = obj.GetValue()
    for o in grp:
        if o is obj:
            continue
        if en is not None and o.IsEnabled() != en and not self.is_toggle_button(o):
            if key:
                enable_set(o, key, en)
            else:
                o.Enable(en)
        if v is not None and getattr(o, 'SetValue', None):
            set_val(o, v)
            if getattr(o, 'SetInsertionPointEnd', None):
                o.SetInsertionPointEnd()
def alias_grp_top_obj(self, obj):
    # The representative (top) widget of obj's alias group.
    return get_top(self.alias_grp_get(obj), obj)
def alias_grp_get(self, obj):
    """The alias group (list) containing obj, or an empty list."""
    for grp in self.alias_grps:
        if obj in grp:
            return grp
    return []
def create_tree(self, parent, items, tree, item, cmd_dic):
    """Recursively build the Computing-tab CustomTreeCtrl from a YAML dict.

    The root call (tree=None) creates the control; nested calls append
    items (checkbox type when 'cmd' is present), store their command in
    cmd_dic, and attach the sys/app config-link panel.
    """
    name = items.get('name', '')
    if tree is None:
        style = wx.TR_HAS_BUTTONS | wx.TR_NO_LINES | wx.TR_HIDE_ROOT | wx.TR_DEFAULT_STYLE | wx.SUNKEN_BORDER
        tree = CT.CustomTreeCtrl(parent, wx.ID_ANY, agwStyle=style)
        # for disable wrong scrolling at checked
        tree.AcceptsFocus = MethodType(lambda self: False, tree, CT.CustomTreeCtrl)
        item = tree.AddRoot(name, data=tree)
        tree.Bind(wx.EVT_MOTION, self.OnTreeMotion)
    else:
        ct_type = 1 if 'cmd' in items else 0 # 1:checkbox type
        item = tree.AppendItem(item, name, ct_type=ct_type)
        if 'desc' in items:
            item.SetData(items.get('desc'))
        if 'cmd' in items:
            cmd_dic[item] = (items['cmd'], None)

            pdic = self.load_dic_pdic_setup(name, items)
            pnl = wx.Panel(tree, wx.ID_ANY)
            add_objs = []
            self.new_link(item, name, pdic, self.sys_gdic, pnl, 'sys', 'sys', add_objs)
            gdic = self.gdic_get_1st(items)
            if 'param' in items:
                self.new_link(item, name, pdic, gdic, pnl, 'app', items.get('param'), add_objs)
            else:
                self.add_cfg_info(item, item, name, None, gdic, False, None)
            szr = sizer_wrap(add_objs, wx.HORIZONTAL, flag=wx.ALIGN_CENTER_VERTICAL, parent=pnl)
            szr.Fit(pnl)
            tree.SetItemWindow(item, pnl)

    for sub in items.get('subs', []):
        self.create_tree(parent, sub, tree, item, cmd_dic)
    return tree
def new_link(self, item, name, pdic, gdic, pnl, link_str, prm_name, add_objs):
    """Add a '[sys]'/'[app]'-style hyperlink for a tree item (unless gdic
    flags 'no_link') and register its config entry."""
    lkc = None
    if 'no_link' not in gdic.get('flags', []):
        lkc = wx.HyperlinkCtrl(pnl, wx.ID_ANY, link_str, "")
        if hasattr(lkc, 'SetCanFocus'):
            lkc.SetCanFocus(False)
        fix_link_color(lkc)
        self.Bind(wx.EVT_HYPERLINK, self.OnHyperlinked, lkc)
        if len(add_objs) > 0:
            add_objs += [ wx.StaticText(pnl, wx.ID_ANY, ' ') ]
        add_objs += [ wx.StaticText(pnl, wx.ID_ANY, '['), lkc, wx.StaticText(pnl, wx.ID_ANY, ']') ]
    prm = self.get_param(prm_name)
    self.add_cfg_info(lkc if lkc else item, item, name, pdic, gdic, False, prm)
def load_dic_pdic_setup(self, name, dic):
    """Get (or create) the persisted param dict in self.load_dic, keyed by
    dic['share_val'] if present, else dic['name'], else the passed name."""
    key = dic.get('share_val', dic.get('name', name))
    return self.load_dic.setdefault(key, {})
def launch_kill_proc(self, obj, cmd_dic, add_args=None):
    """Launch or kill obj's command based on its checked state, update the
    stored (cmd, proc) pair, disable its config link while running (when
    run_disable is set), and clear the status label on kill."""
    if obj not in cmd_dic:
        set_val(obj, False)
        print('not implemented.')
        return
    v = obj.GetValue()

    (cmd, proc) = cmd_dic[obj]
    if not cmd:
        set_val(obj, False)

    proc = self.launch_kill(v, cmd, proc, add_args, obj=obj)

    (cfg_obj, dic) = self.cfg_obj_dic( {'obj':obj} )
    if cfg_obj and dic.get('run_disable'):
        cfg_obj.Enable(not v)

    cmd_dic[obj] = (cmd, proc)
    if not v:
        self.stat_label_off(obj)
def proc_to_cmd_dic_obj(self, proc):
    """Find (cmd_dic, obj) whose stored (cmd, proc) pair contains proc;
    (None, None) when no dict references it."""
    for cmd_dic in self.all_cmd_dics:
        for (obj, v) in cmd_dic.items():
            if proc in v:
                return (cmd_dic, obj)
    return (None, None)
def launch_kill(self, v, cmd, proc, add_args=None, sigint=None, obj=None, kill_children=None):
    """Start (v=True) or stop (v=False) a command; returns the new proc
    handle (None after a kill).

    On launch: split cmd, append add_args, wire stdout/stderr to the
    stdout_func from obj's gdic (defaulting to log_th), support a
    'do_shell_exec' prefix for shell execution, and track the process and
    reader threads.  On kill: choose SIGINT vs SIGTERM and child-killing
    from gdic flags, then terminate and reap in a background thread.
    """
    msg = None
    msg = 'already launched.' if v and proc else msg
    msg = 'already terminated.' if not v and proc is None else msg
    msg = 'cmd not implemented.' if not cmd else msg
    if msg is not None:
        print(msg)
        return proc

    if v:
        args = shlex.split(cmd)
        if add_args:
            args += add_args
        print(args) # for debug

        f = self.obj_to_gdic(obj, {}).get('stdout_func')
        f = eval_if_str(self, f)
        f = f if f else self.log_th

        out = subprocess.PIPE if f else None
        err = subprocess.STDOUT if f else None
        if f == self.log_th:
            # log_th wants stdout and stderr on separate pipes
            err = subprocess.PIPE

        shell = ( len(args) > 0 and args[0] == 'do_shell_exec' )
        if shell:
            args = ' '.join( args[1:] )
        proc = psutil.Popen(args, stdin=subprocess.PIPE, stdout=out, stderr=err, shell=shell)
        self.all_procs.append(proc)

        if f == self.log_th:
            thinf = th_start(f, {'file':proc.stdout, 'que':self.log_que_stdout})
            self.all_th_infs.append(thinf)
            thinf = th_start(f, {'file':proc.stderr, 'que':self.log_que_stderr})
            self.all_th_infs.append(thinf)
        elif f:
            thinf = th_start(f, {'file':proc.stdout})
            self.all_th_infs.append(thinf)
    else:
        flags = self.obj_to_gdic(obj, {}).get('flags', [])
        if sigint is None:
            sigint = 'SIGTERM' not in flags
        if kill_children is None:
            kill_children = 'kill_children' in flags
        if kill_children:
            terminate_children(proc, sigint)
        terminate(proc, sigint)
        # disable the widget until the process is fully reaped
        enables_set(obj, 'proc_wait', False)
        th_start( proc_wait_thread, {'proc': proc, 'obj': obj} )
        if proc in self.all_procs:
            self.all_procs.remove(proc)
        proc = None

    return proc
def roslaunch_to_nodes(self, cmd):
    """Run a roslaunch node-listing command and return the node names as a
    list; an empty list on empty output or a non-zero exit status."""
    try:
        out = subprocess.check_output(cmd).strip()
    except subprocess.CalledProcessError:
        return []
    if out == '':
        return []
    return out.split('\n')
def set_bg_all_tabs(self, col=wx.NullColour):
    # Paint every tab (plus the frame and tree controls) with col;
    # the default wx.NullColour restores the normal background.
    add_pnls = [
        self,
        self.tree_ctrl_0,
        self.tree_ctrl_1,
        self.tree_ctrl_data ]
    for tab in self.all_tabs + add_pnls:
        tab.SetBackgroundColour(col)
def get_autoware_dir(self):
    # Autoware root, six levels above the runtime manager source dir.
    dir = rtmgr_src_dir() + '../../../../../../'
    return os.path.abspath(dir)
def load_yaml(self, filename, def_ret=None):
    # Thin wrapper over the module-level load_yaml() helper (same name).
    return load_yaml(filename, def_ret)
def toggle_enable_obj(self, obj):
    """Toggle the enable state of every widget tied to *obj*.

    Covers the play/stop/pause/ref buttons and text control sharing obj's
    key, plus any gdic 'ext_toggle_enables' extras.
    """
    targets = []
    prefixes = [ 'button_play_', 'button_stop_', 'button_pause_',
        'button_ref_', 'text_ctrl_' ]
    key = self.obj_key_get(obj, prefixes)
    if key:
        targets += self.key_objs_get(prefixes, key)
    gdic = self.obj_to_gdic(obj, {})
    targets += [ eval_if_str(self, entry) for entry in gdic.get('ext_toggle_enables', []) ]
    self.toggle_enables(targets)
def toggle_enables(self, objs):
    """Flip the 'toggle' enable flag on each widget and sync its aliases."""
    for widget in objs:
        if getattr(widget, 'IsEnabled', None):
            current = enables_get(widget, 'toggle', widget.IsEnabled())
            enables_set(widget, 'toggle', not current)
            self.alias_sync(widget)
def is_toggle_button(self, obj):
    """Truthy when *obj* is a 'button_*' attribute exposing GetValue (i.e. it toggles)."""
    named_button = self.name_get(obj).split('_')[0] == 'button'
    return named_button and getattr(obj, 'GetValue', None)
def obj_name_split(self, obj, pfs):
    """Split obj's attribute name into (prefix, rest) for the first matching prefix.

    Returns (None, None) when obj has no name, None when no prefix matches.
    """
    name = self.name_get(obj)
    if name is None:
        return (None, None)
    for pf in pfs:
        if name.startswith(pf):
            return (name[:len(pf)], name[len(pf):])
    return None
def obj_key_get(self, obj, pfs):
    """Return the suffix of obj's name after the first matching prefix, else None."""
    name = self.name_get(obj)
    if name is None:
        return None
    for pf in pfs:
        if name.startswith(pf):
            return name[len(pf):]
    return None
def key_objs_get(self, pfs, key):
    """Collect the existing widgets named '<prefix><key>' for each prefix."""
    found = []
    for pf in pfs:
        widget = self.obj_get(pf + key)
        if widget:
            found.append(widget)
    return found
def name_get(self, obj):
    """Name of the attribute of self that IS *obj* (identity match), or None."""
    for nm in dir(self):
        if getattr(self, nm) is obj:
            return nm
    return None
def name_get_cond(self, obj, cond=(lambda s : True), def_ret=None):
    """Like name_get, but only attribute names satisfying *cond* are considered."""
    matches = (nm for nm in dir(self) if cond(nm) and getattr(self, nm) is obj)
    return next(matches, def_ret)
def val_get(self, name):
    """GetValue() of the widget attribute *name*, or None when absent/unsupported."""
    widget = self.obj_get(name)
    if widget is None:
        return None
    getter = getattr(widget, 'GetValue', None)
    return getter() if getter else None
def obj_get(self, name):
    """The attribute of self called *name*, or None when absent."""
    return getattr(self, name, None)
def gdic_dialog_type_chk(gdic, name):
    """Return True when variable *name* may appear in the current dialog type.

    Two gdic conventions are honored:
    - '<type>_dialog_only' lists: a name listed for some OTHER type is hidden.
    - '<type>_dialog_allow' lists: when the CURRENT type has an allow list,
      only the listed names appear.
    """
    dlg_type = dic_list_get(gdic, 'dialog_type', 'config')

    tail = '_dialog_only'
    lst = [ (k, k[:-len(tail)]) for k in gdic.keys() if k[-len(tail):] == tail ]
    # False as soon as name is "only" for a different dialog type
    only_chk = next( (False for (k,type) in lst if type != dlg_type and name in gdic.get(k, [])), True)

    tail = '_dialog_allow'
    lst = [ (k, k[:-len(tail)]) for k in gdic.keys() if k[-len(tail):] == tail ]
    # False when the current type has an allow list that omits name
    allow_chk = next( (False for (k,type) in lst if type == dlg_type and name not in gdic.get(k, [])), True)

    return only_chk and allow_chk
def gdic_dialog_name_get(gdic):
    """Dialog class name for gdic's current dialog_type.

    Resolution order: '<type>_dialog', then 'dialog', then 'MyDialogParam'.
    """
    dlg_type = gdic.get('dialog_type', ['config'])[-1]
    fallback = gdic.get('dialog', 'MyDialogParam')
    return gdic.get(dlg_type + '_dialog', fallback)
class ParamPanel(wx.Panel):
    """Panel that renders one VarPanel per parameter variable of *prm*.

    Layout is driven by per-variable gdic entries (flags, prop, border,
    user_category, ...); message-topic variables are grouped into a
    'topic : ...' static box, rosparam variables into 'rosparam : ...'.
    """
    def __init__(self, *args, **kwds):
        self.frame = kwds.pop('frame')
        self.pdic = kwds.pop('pdic')
        self.gdic = kwds.pop('gdic')
        self.prm = kwds.pop('prm')
        wx.Panel.__init__(self, *args, **kwds)

        self.gdic['param_panel'] = self

        obj = self.frame.cfg_prm_to_obj( {'pdic':self.pdic, 'gdic':self.gdic, 'param':self.prm} )
        (_, _, proc) = self.frame.obj_to_cmd_dic_cmd_proc(obj)

        hszr = None
        self.vps = []
        self.tmp_msg = None
        szr = wx.BoxSizer(wx.VERTICAL)
        topic_szrs = (None, None)

        vars = self.prm.get('vars')
        if self.gdic.get('show_order'):
            # reorder vars to match gdic['show_order']
            var_lst = lambda name, vars : [ var for var in vars if var.get('name') == name ]
            vars = reduce( lambda lst, name : lst + var_lst(name, vars), self.gdic.get('show_order'), [] )

        for var in vars:
            name = var.get('name')

            if not gdic_dialog_type_chk(self.gdic, name):
                continue

            gdic_v = self.get_gdic_v_and_chk_enable(name)
            if gdic_v is None:
                continue

            bak_stk_push(gdic_v, 'func')
            if gdic_v.get('func'):
                # already has a live getter (rendered elsewhere); skip
                continue

            v = self.pdic.get(name, var.get('v'))

            vp = VarPanel(self, var=var, v=v, update=self.update)
            vp.setup_tooltip()
            self.vps.append(vp)

            gdic_v['var'] = vp
            gdic_v['func'] = vp.get_v
            prop = gdic_v.get('prop', 0)
            border = gdic_v.get('border', 0)
            flag = wx_flag_get(gdic_v.get('flags', []))

            do_category = 'no_category' not in gdic_v.get('flags', [])
            if do_category and self.in_msg(var):
                # switch to (and lazily create) the shared topic static box
                bak = (szr, hszr)
                (szr, hszr) = topic_szrs
                if szr is None:
                    szr = static_box_sizer(self, 'topic : ' + self.prm.get('topic'))
                    bak[0].Add(szr, 0, wx.EXPAND | wx.ALL, 4)
            targ_szr = szr
            if vp.is_nl():
                hszr = None if hszr else hszr
                flag |= wx.EXPAND
            else:
                if hszr is None:
                    hszr = wx.BoxSizer(wx.HORIZONTAL)
                    szr.Add(hszr, 0, wx.EXPAND)
                flag |= wx.ALIGN_CENTER_VERTICAL
                targ_szr = hszr

            if do_category and 'rosparam' in var:
                rp_szr = static_box_sizer(self, 'rosparam : ' + var.get('rosparam'))
                targ_szr.Add(rp_szr, 0, wx.EXPAND | wx.ALL, 4)
                targ_szr = rp_szr

            user_category = gdic_v.get('user_category')
            if user_category is not None and hszr:
                user_szr = static_box_sizer(self, user_category, orient=wx.HORIZONTAL)
                (flgs, bdr) = gdic_v.get('user_category_add', [ [], 0 ])
                targ_szr.Add(user_szr, 0, wx_flag_get(flgs), bdr)
                targ_szr = hszr = user_szr

            targ_szr.Add(vp, prop, flag, border)

            if 'nl' in gdic_v.get('flags', []):
                hszr = None

            if do_category and self.in_msg(var):
                # restore the outer sizer context
                topic_szrs = (szr, hszr)
                (szr, hszr) = bak

            if 'hline' in gdic_v.get('flags', []) and hszr is None:
                szr.Add(wx.StaticLine(self, wx.ID_ANY), 0, wx.EXPAND | wx.TOP | wx.BOTTOM, 4)

            if not self.in_msg(var) and var.get('rosparam'):
                k = 'ext_toggle_enables'
                self.gdic[ k ] = self.gdic.get(k, []) + [ vp ]
                enables_set(vp, 'toggle', proc is None)

            if 'disable' in gdic_v.get('flags', []):
                vp.Enable(False)
            if 'hide' in gdic_v.get('flags', []):
                vp.Hide()

        self.SetSizer(szr)
        if 'no_init_update' not in self.prm.get('flags', []):
            self.update()

    def get_gdic_v_and_chk_enable(self, var_name):
        """Per-variable gdic sub-dict, or None when the var belongs to another panel."""
        gdic_v = dic_getset(self.gdic, var_name, {})
        if 'panel' in gdic_v and dic_eval_if_str(self.frame, gdic_v, 'panel') != self.GetParent():
            return None
        return gdic_v

    def update(self, var=None):
        """Invoke the gdic 'update_func' callback (if any), passing *var* as context."""
        update_func = self.gdic.get('update_func')
        if update_func:
            self.gdic['update_func_arg_var'] = var
            update_func(self.pdic, self.gdic, self.prm)

    def detach_func(self):
        """Undo __init__'s per-variable registrations (getter funcs, toggle lists)."""
        for var in self.prm.get('vars'):
            name = var.get('name')

            if not gdic_dialog_type_chk(self.gdic, name):
                continue

            gdic_v = self.get_gdic_v_and_chk_enable(name)
            if gdic_v is None:
                continue

            if 'func' in gdic_v:
                bak_stk_pop(gdic_v, 'func')

            vp = gdic_v.get('var')
            lst_remove_once(self.gdic.get('ext_toggle_enables', []), vp)

    def in_msg(self, var):
        """True when *var* maps onto a slot of the prm's ROS message class."""
        if 'topic' not in self.prm or 'msg' not in self.prm:
            return False
        if self.tmp_msg is None:
            klass_msg = globals().get( self.prm.get('msg') )
            if klass_msg is None:
                return False
            self.tmp_msg = klass_msg()
        (obj, attr) = msg_path_to_obj_attr(self.tmp_msg, var.get('name'))
        return obj and attr in obj.__slots__
class VarPanel(wx.Panel):
    """Widget for one parameter variable; the control built depends on var['kind'].

    Supported kinds: radio_box, menu, checkbox, checkboxes, toggle_button,
    hide, topic, path, str, and numeric (kind 'num' or None, with a slider
    when both min and max are given).
    """
    def __init__(self, *args, **kwds):
        self.var = kwds.pop('var')
        v = kwds.pop('v')
        self.update = kwds.pop('update')
        wx.Panel.__init__(self, *args, **kwds)

        self.min = self.var.get('min')
        self.max = self.var.get('max')
        self.has_slider = self.min is not None and self.max is not None
        self.lb = None

        label = self.var.get('label', '')
        self.kind = self.var.get('kind')
        if self.kind == 'radio_box':
            choices = self.var.get('choices', [])
            style = wx.RA_SPECIFY_COLS if self.var.get('choices_style') == 'h' else wx.RA_SPECIFY_ROWS
            self.obj = wx.RadioBox(self, wx.ID_ANY, label, choices=choices, majorDimension=0, style=style)
            self.choices_sel_set(v)
            self.Bind(wx.EVT_RADIOBOX, self.OnUpdate, self.obj)
            return
        if self.kind == 'menu':
            choices = self.var.get('choices', [])
            self.obj = wx.Choice(self, wx.ID_ANY, choices=choices)
            self.choices_sel_set(v)
            self.Bind(wx.EVT_CHOICE, self.OnUpdate, self.obj)
            if label:
                self.lb = wx.StaticText(self, wx.ID_ANY, label)
                flag = wx.LEFT | wx.ALIGN_CENTER_VERTICAL
                sizer_wrap((self.lb, self.obj), wx.HORIZONTAL, 0, flag, 4, self)
            return
        if self.kind == 'checkbox':
            self.obj = wx.CheckBox(self, wx.ID_ANY, label)
            self.obj.SetValue(v)
            self.Bind(wx.EVT_CHECKBOX, self.OnUpdate, self.obj)
            return
        if self.kind == 'checkboxes':
            item_n = dic_eval_if_str(self, self.var, 'item_n', 1)
            self.obj = Checkboxes(self, item_n, label)
            self.obj.set(v)
            for box in self.obj.boxes:
                self.obj.Bind(wx.EVT_CHECKBOX, self.OnUpdate, box)
            return
        if self.kind == 'toggle_button':
            self.obj = wx.ToggleButton(self, wx.ID_ANY, label)
            set_val(self.obj, v)
            self.Bind(wx.EVT_TOGGLEBUTTON, self.OnUpdate, self.obj)
            button_color_hdr_setup(self.obj)
            return
        if self.kind == 'hide':
            self.Hide()
            return
        if self.kind == 'topic':
            topic_type = self.var.get('topic_type')
            topics = self._get_topics_by_type(topic_type)
            self.obj = wx.ComboBox(self, id=wx.ID_ANY, value=v, choices=topics, style=wx.CB_DROPDOWN, size=(130,-1))
            self.lb = wx.StaticText(self, wx.ID_ANY, label)
            flag = wx.LEFT | wx.ALIGN_CENTER_VERTICAL
            sizer_wrap((self.lb, self.obj), wx.HORIZONTAL, 0, flag, 4, self)
            return

        # remaining kinds (path / str / numeric) all share a text control
        szr = wx.BoxSizer(wx.HORIZONTAL)

        self.lb = wx.StaticText(self, wx.ID_ANY, label)
        flag = wx.LEFT | wx.ALIGN_CENTER_VERTICAL
        szr.Add(self.lb, 0, flag, 4)

        if self.kind == 'path':
            v = str(v)
            v = path_expand_cmd(v)
            v = os.path.expandvars(os.path.expanduser(v))

        style = wx.TE_PROCESS_ENTER + wx_flag_get( self.var.get('str_flags', []) )

        self.tc = wx.TextCtrl(self, wx.ID_ANY, str(v), style=style, size=(130,-1))
        self.Bind(wx.EVT_TEXT_ENTER, self.OnUpdate, self.tc)

        if self.kind in ('num', None):
            if self.has_slider:
                self.w = self.max - self.min
                vlst = [ v, self.min, self.max, self.var['v'] ]
                # float mode when any of value/min/max/default is not an int
                self.is_float = len( [ v_ for v_ in vlst if type(v_) is not int ] ) > 0
                self.int_max = 1000 if self.is_float else self.max
                self.int_min = 0 if self.is_float else self.min

                self.slider = wx.Slider(self, wx.ID_ANY, self.get_int_v(), self.int_min, self.int_max)
                self.Bind(wx.EVT_COMMAND_SCROLL, self.OnScroll, self.slider)
                self.slider.SetMinSize((82, 27))
                szr.Add(self.slider, 1, wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, 4)
            else:
                self.is_float = type(self.var['v']) is not int

        self.tc.SetMinSize((40,27))
        flag = wx.ALIGN_CENTER_VERTICAL
        prop = 1 if self.kind == 'path' or self.kind == 'str' else 0
        szr.Add(self.tc, prop, flag, 4)

        if self.kind == 'path':
            self.ref = wx.Button(self, wx.ID_ANY, 'Ref')
            self.Bind(wx.EVT_BUTTON, self.OnRef, self.ref)
            button_color_hdr_setup(self.ref)
            self.ref.SetMinSize((40,29))
            szr.Add(self.ref, 0, flag, 4)

        if self.has_slider or self.kind == 'num':
            vszr = wx.BoxSizer(wx.VERTICAL)
            vszr.Add( self.create_bmbtn("images/inc.png", self.OnIncBtn) )
            vszr.Add( self.create_bmbtn("images/dec.png", self.OnDecBtn) )
            szr.Add(vszr, 0, wx.ALIGN_CENTER_VERTICAL)

        self.SetSizer(szr)

    def _get_topics_by_type(self, message_type):
        """Sorted names of currently published topics whose type matches."""
        #get list of current available topics:
        ros_topics = rospy.get_published_topics()
        matched_topics = list(filter(lambda x: x[1]==message_type,ros_topics))
        topic_names = [x[0] for x in matched_topics]
        topic_names.sort()
        return topic_names

    def setup_tooltip(self):
        """Attach per-item and whole-control tooltips from the var dict."""
        if get_tooltips(self.var):
            set_tooltips(self.obj, self.var)
        if get_tooltip(self.var):
            obj = self.lb if self.lb else (self if self.kind == 'radio_box' else self.obj)
            set_tooltip(obj, self.var)

    def create_bmbtn(self, filename, hdr):
        """Borderless bitmap button wired to handler *hdr*."""
        dir = rtmgr_src_dir()
        bm = wx.Bitmap(dir + filename, wx.BITMAP_TYPE_ANY)
        style = wx.BORDER_NONE | wx.BU_EXACTFIT
        obj = wx.lib.buttons.GenBitmapButton(self, wx.ID_ANY, bm, style=style)
        self.Bind(wx.EVT_BUTTON, hdr, obj)
        return obj

    def get_v(self):
        """Current value in the variable's native type."""
        if self.kind in [ 'radio_box', 'menu' ]:
            return self.choices_sel_get()
        if self.kind in [ 'checkbox', 'toggle_button', 'topic' ]:
            return self.obj.GetValue()
        if self.kind == 'checkboxes':
            return self.obj.get()
        if self.kind == 'hide':
            return self.var.get('v')
        if self.kind in [ 'path', 'str' ]:
            return str(self.tc.GetValue())

        if not self.has_slider and self.tc.GetValue() == '':
            return ''
        return self.get_tc_v()

    def get_tc_v(self):
        """Numeric value of the text control, clamped to [min, max] when sliding."""
        s = self.tc.GetValue()
        v = str_to_float(s) if self.is_float else int(s)
        if self.has_slider:
            v = self.min if v < self.min else v
            v = self.max if v > self.max else v
        self.tc.SetValue(adjust_num_str(str(v)))
        return v

    def get_int_v(self):
        """Slider position for the current value (0..1000 scaled in float mode)."""
        v = self.get_tc_v()
        if self.is_float:
            v = int( self.int_max * (v - self.min) / self.w if self.w != 0 else 0 )
        return v

    def OnScroll(self, event):
        """Slider moved: mirror the value into the text control and notify."""
        iv = self.slider.GetValue()
        s = str(iv)
        if self.is_float:
            v = self.min + float(self.w) * iv / self.int_max
            s = str(Decimal(v).quantize(Decimal(str(self.get_step()))))
        self.tc.SetValue(s)
        self.update(self.var)

    def OnIncBtn(self, event):
        step = self.get_step()
        self.add_v(step)

    def OnDecBtn(self, event):
        step = self.get_step()
        self.add_v(-step)

    def get_step(self):
        """Increment step: var['step'], else 0.01 for floats / 1 for ints."""
        step = self.var.get('step')
        return step if step else 0.01 if self.is_float else 1

    def add_v(self, step):
        """Add *step* to the value, syncing the slider and notifying on change."""
        ov = self.get_v()
        self.tc.SetValue(str(ov + step))
        v = self.get_v()
        if v != ov:
            if self.has_slider:
                self.slider.SetValue(self.get_int_v())
            self.update(self.var)

    def OnUpdate(self, event):
        if self.has_slider:
            self.slider.SetValue(self.get_int_v())
        self.update(self.var)

    def OnRef(self, event):
        if file_dialog(self, self.tc, self.var) == wx.ID_OK:
            self.update(self.var)

    def choices_sel_get(self):
        """Selection as string or index, per var['choices_type']."""
        return self.obj.GetStringSelection() if self.var.get('choices_type') == 'str' else self.obj.GetSelection()

    def choices_sel_set(self, v):
        if self.var.get('choices_type') == 'str':
            self.obj.SetStringSelection(v)
        else:
            self.obj.SetSelection(v)

    def is_nl(self):
        """True when this control should occupy its own row."""
        return self.has_slider or self.kind in [ 'path' ]
class MyDialogParam(rtmgr.MyDialogParam):
    """Generic parameter dialog hosting a ParamPanel; Cancel restores a snapshot."""
    def __init__(self, *args, **kwds):
        pdic = kwds.pop('pdic')
        self.pdic_bak = pdic.copy()  # snapshot for Cancel/Close restore
        gdic = kwds.pop('gdic')
        prm = kwds.pop('prm')
        rtmgr.MyDialogParam.__init__(self, *args, **kwds)
        set_size_gdic(self, gdic)
        self.Bind(wx.EVT_CLOSE, self.OnClose)

        ok_lb_key = 'open_dialog_ok_label'
        if dic_list_get(gdic, 'dialog_type', 'config') == 'open' and ok_lb_key in gdic:
            self.button_1.SetLabel( gdic.get(ok_lb_key) )

        parent = self.panel_v
        frame = self.GetParent()
        self.panel = ParamPanel(parent, frame=frame, pdic=pdic, gdic=gdic, prm=prm)
        szr = sizer_wrap((self.panel,), wx.VERTICAL, 1, wx.EXPAND, 0, parent)

        self.SetTitle(prm.get('name', ''))
        (w,h) = self.GetSize()
        (w2,_) = szr.GetMinSize()
        w2 += 20
        # widen the dialog when the panel needs more room
        if w2 > w:
            self.SetSize((w2,h))

    def OnOk(self, event):
        """Commit edited values and close with result 0."""
        self.panel.update()
        self.panel.detach_func()
        self.EndModal(0)

    def OnCancel(self, event):
        """Restore the pre-dialog parameter values and close with result -1."""
        self.panel.pdic.update(self.pdic_bak) # restore
        self.panel.detach_func()
        self.panel.update()
        self.EndModal(-1)

    def OnClose(self, event):
        self.OnCancel(event)
class MyDialogDpm(rtmgr.MyDialogDpm):
    """Parameter dialog for DPM detection with car/pedestrian shortcut links."""
    def __init__(self, *args, **kwds):
        pdic = kwds.pop('pdic')
        self.pdic_bak = pdic.copy()  # snapshot for Cancel/Close restore
        gdic = kwds.pop('gdic')
        prm = kwds.pop('prm')
        rtmgr.MyDialogDpm.__init__(self, *args, **kwds)
        set_size_gdic(self, gdic)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        parent = self.panel_v
        frame = self.GetParent()
        self.frame = frame
        self.panel = ParamPanel(parent, frame=frame, pdic=pdic, gdic=gdic, prm=prm)
        szr = sizer_wrap((self.panel,), wx.VERTICAL, 1, wx.EXPAND, 0, parent)

        self.SetTitle(prm.get('name', ''))
        (w,h) = self.GetSize()
        (w2,_) = szr.GetMinSize()
        w2 += 20
        # widen the dialog when the panel needs more room
        if w2 > w:
            self.SetSize((w2,h))

        fix_link_color(self.hyperlink_car)
        fix_link_color(self.hyperlink_pedestrian)

    def OnOk(self, event):
        """Commit edited values and close with result 0."""
        self.panel.update()
        self.panel.detach_func()
        self.EndModal(0)

    def OnLink(self, event):
        """Forward car/pedestrian hyperlink clicks to the main frame's buttons."""
        obj = event.GetEventObject()
        dic = { self.hyperlink_car : self.frame.button_car_dpm,
            self.hyperlink_pedestrian : self.frame.button_pedestrian_dpm }
        obj = dic.get(obj)
        if obj:
            self.frame.OnHyperlinked_obj(obj)

    def OnCancel(self, event):
        """Restore the pre-dialog parameter values and close with result -1."""
        self.panel.pdic.update(self.pdic_bak) # restore
        self.panel.detach_func()
        self.panel.update()
        self.EndModal(-1)

    def OnClose(self, event):
        self.OnCancel(event)
class MyDialogCarPedestrian(rtmgr.MyDialogCarPedestrian):
    """Small chooser dialog that redirects to a car or pedestrian frame button."""
    def __init__(self, *args, **kwds):
        pdic = kwds.pop('pdic')  # popped to clean kwds for wx; not used here
        self.gdic = kwds.pop('gdic')
        prm = kwds.pop('prm')
        rtmgr.MyDialogCarPedestrian.__init__(self, *args, **kwds)
        set_size_gdic(self)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        frame = self.GetParent()
        self.frame = frame
        self.SetTitle(prm.get('name', ''))

        fix_link_color(self.hyperlink_car)
        fix_link_color(self.hyperlink_pedestrian)

    def OnLink(self, event):
        """Trigger the frame button mapped (via gdic) to the clicked link, then close."""
        obj = event.GetEventObject()
        car_ped = { self.hyperlink_car : 'car', self.hyperlink_pedestrian : 'pedestrian' }.get(obj, 'car')
        obj_key = self.gdic.get('car_pedestrian_obj_key', {}).get(car_ped)
        obj = getattr(self.frame, 'button_' + obj_key, None) if obj_key else None
        if obj:
            self.frame.OnHyperlinked_obj(obj)
        self.EndModal(0)

    def OnClose(self, event):
        self.EndModal(-1)
class MyDialogLaneStop(rtmgr.MyDialogLaneStop):
    """Dialog driving the /config/lane_stop topic (traffic light override)."""
    def __init__(self, *args, **kwds):
        self.pdic = kwds.pop('pdic')
        self.gdic = kwds.pop('gdic')
        self.prm = kwds.pop('prm')
        rtmgr.MyDialogLaneStop.__init__(self, *args, **kwds)
        set_size_gdic(self)
        self.frame = self.GetParent()

        # initialize the checkbox from pdic, falling back to the var default
        name = 'lane_stop'
        var = next( ( var for var in self.prm.get('vars', []) if var.get('name') == name ), {} )
        v = self.pdic.get( name, var.get('v', False) )
        set_val(self.checkbox_lane_stop, v)

    def update(self):
        """Invoke the gdic 'update_func' callback, if any."""
        update_func = self.gdic.get('update_func')
        if update_func:
            update_func(self.pdic, self.gdic, self.prm)

    def OnTrafficRedLight(self, event):
        self.pdic['traffic_light'] = 0
        self.update()

    def OnTrafficGreenLight(self, event):
        self.pdic['traffic_light'] = 1
        self.update()

    def OnTrafficLightRecognition(self, event):
        # Publish the manual/auto detection switch directly on /config/lane_stop.
        pub = rospy.Publisher('/config/lane_stop', ConfigLaneStop, latch=True, queue_size=10)
        msg = ConfigLaneStop()
        v = event.GetEventObject().GetValue()
        self.pdic['lane_stop'] = v
        msg.manual_detection = not v
        pub.publish(msg)

    def OnOk(self, event):
        self.EndModal(0)

    def OnCancel(self, event):
        self.EndModal(-1)
class MyDialogNdtMapping(rtmgr.MyDialogNdtMapping):
    """NDT mapping dialog: parameter panel plus PCD-output publishing."""
    def __init__(self, *args, **kwds):
        self.pdic = kwds.pop('pdic')
        self.pdic_bak = self.pdic.copy()
        self.gdic = kwds.pop('gdic')
        self.prm = kwds.pop('prm')
        rtmgr.MyDialogNdtMapping.__init__(self, *args, **kwds)
        set_size_gdic(self)
        parent = self.panel_v
        frame = self.GetParent()
        self.panel = ParamPanel(parent, frame=frame, pdic=self.pdic, gdic=self.gdic, prm=self.prm)
        sizer_wrap((self.panel,), wx.VERTICAL, 1, wx.EXPAND, 0, parent)

        self.update_filename()
        self.klass_msg = ConfigNdtMappingOutput
        self.pub = rospy.Publisher('/config/ndt_mapping_output', self.klass_msg, queue_size=10)

    def update_filename(self):
        """Default the output file to autoware-YYMMDD.pcd in the current dir part."""
        tc = self.text_ctrl_path
        path = tc.GetValue()
        (dn, fn) = os.path.split(path)
        now = datetime.datetime.now()
        fn = 'autoware-%02d%02d%02d.pcd' % (
            now.year % 100, now.month, now.day)
        path = os.path.join(dn, fn)
        set_path(tc, path)

    def OnRef(self, event):
        tc = self.text_ctrl_path
        file_dialog(self, tc, { 'path_type' : 'save' } )

    def OnRadio(self, event):
        """Enable the resolution field only while filtering is selected."""
        v = self.radio_btn_filter_resolution.GetValue()
        tc = self.text_ctrl_filter_resolution
        tc.Enable(v)

    def OnPcdOutput(self, event):
        """Publish a ConfigNdtMappingOutput for the chosen file/resolution."""
        tc = self.text_ctrl_filter_resolution
        v = tc.GetValue() if self.radio_btn_filter_resolution.GetValue() else '0.0'
        msg = self.klass_msg()
        msg.filename = self.text_ctrl_path.GetValue()
        msg.filter_res = str_to_float(v)
        self.pub.publish(msg)

    def OnOk(self, event):
        self.panel.detach_func()
        self.EndModal(0)
class MyDialogWaypointLoader(rtmgr.MyDialogWaypointLoader):
    """Waypoint loader dialog: parameter panel plus a CSV-output trigger."""
    def __init__(self, *args, **kwds):
        self.pdic = kwds.pop('pdic')
        self.pdic_bak = self.pdic.copy()
        self.gdic = kwds.pop('gdic')
        self.prm = kwds.pop('prm')
        rtmgr.MyDialogWaypointLoader.__init__(self, *args, **kwds)
        set_size_gdic(self)
        parent = self.panel_v
        frame = self.GetParent()
        self.panel = ParamPanel(parent, frame=frame, pdic=self.pdic, gdic=self.gdic, prm=self.prm)
        sizer_wrap((self.panel,), wx.VERTICAL, 1, wx.EXPAND, 0, parent)
        self.klass_msg = Bool
        self.pub = rospy.Publisher('/config/waypoint_loader_output', self.klass_msg, queue_size=10)

    def OnCsvOutput(self, event):
        """Publish a True Bool to request CSV output."""
        msg = self.klass_msg()
        msg.data = True
        self.pub.publish(msg)

    def OnOk(self, event):
        self.panel.detach_func()
        self.EndModal(0)
class InfoBarLabel(wx.BoxSizer):
    """Text label + BarLabel progress bar combo for the info/status area.

    The bar switches to a red gradient once progress reaches lmt_bar_prg (%).
    """
    def __init__(self, parent, btm_txt=None, lmt_bar_prg=90, bar_orient=wx.VERTICAL):
        wx.BoxSizer.__init__(self, orient=wx.VERTICAL)
        self.lb = wx.StaticText(parent, wx.ID_ANY, '')
        self.bar = BarLabel(parent, hv=bar_orient, show_lb=False)
        bt = wx.StaticText(parent, wx.ID_ANY, btm_txt) if btm_txt else None

        self.Add(self.lb, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
        if bar_orient == wx.VERTICAL:
            sz = self.bar.GetSize()
            sz.SetWidth(20)
            self.bar.SetMinSize(sz)
            self.Add(self.bar, 1, wx.ALIGN_CENTER_HORIZONTAL, 0)
            if bt:
                self.Add(bt, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
        else:
            szr = wx.BoxSizer(wx.HORIZONTAL)
            if bt:
                szr.Add(bt, 0, 0, 0)
            szr.Add(self.bar, 1, 0, 0)
            self.Add(szr, 1, wx.EXPAND, 0)
        self.lmt_bar_prg = lmt_bar_prg

    def lb_set(self, txt, col):
        """Set the text label and its foreground colour."""
        self.lb.SetForegroundColour(col)
        self.lb.SetLabel(txt);
        self.Layout()

    def bar_set(self, prg):
        """Set bar progress (percent); red gradient when at/over the limit."""
        (col1, col2) = (wx.Colour(0,0,250), wx.Colour(0,0,128))
        if prg >= self.lmt_bar_prg:
            (col1, col2) = (wx.Colour(250,0,0), wx.Colour(128,0,0))
        self.bar.set_col(col1, col2)
        self.bar.set(prg)
class Checkboxes(wx.Panel):
    """A grid of item_n checkboxes laid out 8 per row, labelled lb0..lbN-1."""
    def __init__(self, parent, item_n, lb):
        wx.Panel.__init__(self, parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize)
        self.boxes = [ wx.CheckBox(self, wx.ID_ANY, lb + str(i)) for i in range(item_n) ]
        vsz = wx.BoxSizer(wx.VERTICAL)
        # NOTE: '/' is Python 2 integer division (number of 8-box rows)
        for j in range((item_n + 7) / 8):
            hsz = wx.BoxSizer(wx.HORIZONTAL)
            for i in range(8):
                idx = j * 8 + i
                if idx < len(self.boxes):
                    hsz.Add(self.boxes[idx], 0, wx.LEFT, 8)
            vsz.Add(hsz)
        self.SetSizer(vsz)
        vsz.Fit(self)

    def set(self, vs):
        """Set box states from list *vs*; a falsy *vs* checks every box."""
        vs = vs if vs else [ True for box in self.boxes ]
        for (box, v) in zip(self.boxes, vs):
            box.SetValue(v)

    def get(self):
        """List of all box states, in order."""
        return [ box.GetValue() for box in self.boxes ]
class BarLabel(wx.Panel):
    """Gradient progress bar (horizontal or vertical) with an optional % label."""
    def __init__(self, parent, txt='', pos=wx.DefaultPosition, size=wx.DefaultSize, style=0, hv=wx.HORIZONTAL, show_lb=True):
        wx.Panel.__init__(self, parent, wx.ID_ANY, pos, size)
        self.lb = wx.StaticText(self, wx.ID_ANY, '', style=style)
        self.txt = txt
        self.hv = hv
        self.dir = wx.SOUTH if hv == wx.HORIZONTAL else wx.EAST
        self.show_lb = show_lb
        self.prg = -1  # -1 means "nothing to show"

        self.dflt_col1 = wx.Colour(250,250,250)
        self.dflt_col2 = wx.Colour(128,128,128)
        self.col1 = self.dflt_col1
        self.col2 = self.dflt_col2

        self.Bind(wx.EVT_PAINT, self.OnPaint)

    def set(self, prg):
        """Update progress (percent) and repaint."""
        self.prg = prg
        if self.show_lb:
            self.lb.SetLabel(self.txt + str(prg) + '%' if prg >= 0 else '')
        self.Refresh()

    def set_col(self, col1, col2):
        """Set gradient colours; wx.NullColour restores the defaults."""
        self.col1 = col1 if col1 != wx.NullColour else self.dflt_col1
        self.col2 = col2 if col2 != wx.NullColour else self.dflt_col2

    def clear(self):
        self.set(-1)

    def OnPaint(self, event):
        """Draw the filled gradient and the grey unfilled remainder."""
        dc = wx.PaintDC(self)
        (w,h) = self.GetSize()
        if self.IsEnabled():
            # NOTE: Python 2 integer division keeps p integral here
            p = (w if self.hv == wx.HORIZONTAL else h) * self.prg / 100
            rect = wx.Rect(0, 0, p, h) if self.hv == wx.HORIZONTAL else wx.Rect(0, h-p, w, p)
            dc.GradientFillLinear(rect, self.col1, self.col2, self.dir)
            rect = wx.Rect(p, 0, w-p, h) if self.hv == wx.HORIZONTAL else wx.Rect(0, 0, w, h-p)
            dc.GradientFillLinear(rect, wx.Colour(200,200,200), wx.Colour(220,220,220), self.dir)
        else:
            rect = wx.Rect(0, 0, w, h)
            dc.GradientFillLinear(rect, wx.Colour(250,250,250), wx.Colour(250,250,250), self.dir)
class ColorLabel(wx.Panel):
    """Panel painting a mini markup list: strings draw text, 2-tuples move the
    pen to (x, y), 3-tuples set the text colour, a newline starts a new line."""
    def __init__(self, parent, lst=[], pos=wx.DefaultPosition, size=wx.DefaultSize, style=0):
        # NOTE(review): the lst=[] default is shared across instances, but it
        # is only read here (never mutated), so it is harmless in practice.
        wx.Panel.__init__(self, parent, wx.ID_ANY, pos, size)
        self.lst = lst
        self.Bind(wx.EVT_PAINT, self.OnPaint)

    def set(self, lst):
        """Replace the markup list and repaint."""
        self.lst = lst
        self.Refresh()

    def OnPaint(self, event):
        dc = wx.PaintDC(self)
        dc.Clear()
        #change_font_point_by_rate(dc, 0.75)
        (x,y) = (0,0)
        (_, h, _, _) = dc.GetFullTextExtent(' ')
        for v in self.lst:
            if type(v) is tuple and len(v) == 2:
                (x,y) = v
            elif type(v) is tuple and len(v) == 3:
                dc.SetTextForeground(v)
            elif v == '\n':
                (x,y) = (0,y+h)
            elif type(v) is str:
                dc.DrawText(v, x, y)
                (w, _, _, _) = dc.GetFullTextExtent(v)
                x += w
class StrValObj:
    """Minimal name/value holder exposing the wx-style GetValue/SetValue API."""

    def __init__(self, s, v):
        self.s = s
        self.v = v

    def GetValue(self):
        """Return the stored value."""
        return self.v

    def SetValue(self, v):
        """Replace the stored value."""
        self.v = v
class MyApp(wx.App):
    """wx application bootstrap: builds, colour-styles and shows the main frame."""
    def OnInit(self):
        wx.InitAllImageHandlers()
        frame_1 = MyFrame(None, wx.ID_ANY, "")
        self.SetTopWindow(frame_1)
        buttons_color_hdr_setup(frame_1)
        frame_1.Show()
        return 1
class MyDialogRosbagRecord(rtmgr.MyDialogRosbagRecord):
    """Dialog that starts/stops 'rosbag record' for user-selected topics."""
    def __init__(self, *args, **kwds):
        self.cmd_dic = kwds.pop('cmd_dic')
        rtmgr.MyDialogRosbagRecord.__init__(self, *args, **kwds)
        self.cbs = []
        self.refresh()
        self.parent = self.GetParent()
        self.cmd_dic[ self.button_start ] = ('rosbag record', None)
        self.toggles = [ self.button_start, self.button_stop ]

    def OnRef(self, event):
        """Pick the output bag path via a save-file dialog."""
        tc = self.text_ctrl
        file_dialog(self, tc, { 'path_type' : 'save' } )

    def OnStart(self, event):
        """Validate options and launch rosbag record."""
        key_obj = self.button_start
        path = self.text_ctrl.GetValue()
        if path == '':
            print('path=""')
            return
        # First checkbox is the 'all' sentinel => record everything (-a).
        if self.cbs[0].GetValue():
            topic_opt = [ '-a' ]
        else:
            topic_opt = [ cb.GetLabel() for cb in self.cbs if cb.GetValue() ]
        if topic_opt == []:
            print('topic=[]')
            return
        args = topic_opt + [ '-O', path ]
        split_arg = [ '--split' ] if self.checkbox_split.GetValue() else []
        size_arg = self.size_arg_get()
        if split_arg and not size_arg:
            wx.MessageBox('size is required, with split')
            return
        args += split_arg + size_arg
        (cmd, proc) = self.cmd_dic[ key_obj ]
        proc = self.parent.launch_kill(True, cmd, proc, add_args=args, obj=key_obj, kill_children=True)
        self.cmd_dic[ key_obj ] = (cmd, proc)
        self.parent.toggle_enables(self.toggles)

    def OnStop(self, event):
        """SIGINT rosbag record (and its children), then hide the dialog."""
        key_obj = self.button_start
        (cmd, proc) = self.cmd_dic[ key_obj ]
        proc = self.parent.launch_kill(False, cmd, proc, sigint=True, obj=key_obj, kill_children=True)
        self.cmd_dic[ key_obj ] = (cmd, proc)
        self.parent.toggle_enables(self.toggles)
        self.Hide()

    def OnRefresh(self, event):
        self.refresh()

    def refresh(self):
        """Rebuild the topic checkbox list from 'rostopic list'."""
        lst = [ 'all' ] + subprocess.check_output([ 'rostopic', 'list' ]).strip().split('\n')
        panel = self.panel_1
        szr = self.sizer_topic
        for obj in self.cbs:
            szr.Remove(obj)
            obj.Destroy()
        self.cbs = []
        for topic in lst:
            obj = wx.CheckBox(panel, wx.ID_ANY, topic)
            # fixed: the sentinel entry is lowercase 'all'; the old comparison
            # against 'All' never matched, so the sentinel got the indented border
            bdr = 4 if topic == 'all' else 4 * 4
            szr.Add(obj, 0, wx.LEFT, bdr)
            self.cbs.append(obj)
        szr.Layout()
        panel.SetVirtualSize(szr.GetMinSize())

    def show(self):
        self.Show()
        self.update_filename()

    def update_filename(self):
        """Default the bag filename to autoware-YYYYMMDDhhmmss in the same dir."""
        tc = self.text_ctrl
        path = tc.GetValue()
        (dn, fn) = os.path.split(path)
        now = datetime.datetime.now()
        fn = 'autoware-%04d%02d%02d%02d%02d%02d' % (
            now.year, now.month, now.day, now.hour, now.minute, now.second)
        path = os.path.join(dn, fn)
        set_path(tc, path)

    def size_arg_get(self):
        """['--size=<bytes>'] parsed from the MB text field, or [] when empty/invalid."""
        tc = self.text_ctrl_size
        s = tc.GetValue()
        mb = 0
        try:
            mb = str_to_float(s)
        except ValueError:
            mb = 0
        if mb <= 0:
            tc.SetValue('')
        return [ '--size=' + str(int(mb * 1024 * 1024)) ] if mb > 0 else []
def set_size_gdic(dlg, gdic={}):
    """Resize *dlg* to gdic's dialog_width/dialog_height overrides, if any.

    Falls back to dlg.gdic when no gdic argument is given. The shared {}
    default is only read, never mutated, so it is safe here.
    """
    (w, h) = dlg.GetSize()
    if not gdic:
        gdic = getattr(dlg, 'gdic', {})
    nw = gdic.get('dialog_width', w)
    nh = gdic.get('dialog_height', h)
    if (w, h) != (nw, nh):
        dlg.SetSize((nw, nh))
def file_dialog(parent, tc, path_inf_dic={}):
    """Open a file/dir picker seeded from text control *tc*; write the pick back.

    path_inf_dic['path_type'] selects the dialog: 'dir', 'save', 'multi', or
    default open. For 'dir' with a 'filenames' list (or a .yaml file naming
    one), the chosen dir is expanded to dir/name for each name.
    Returns the dialog status (wx.ID_OK on success).
    """
    path = tc.GetValue()
    path = get_top(path.split(','), path)
    (dn, fn) = os.path.split(path)
    path_type = path_inf_dic.get('path_type')
    if path_type == 'dir':
        fns = path_inf_dic.get('filenames')
        if type(fns) is str and fns[-5:] == '.yaml':
            fns = load_yaml(fns)
            if type(fns) is not list:
                fns = None
            # cache the loaded list so the yaml is read only once
            path_inf_dic['filenames'] = fns
        dlg = wx.DirDialog(parent, defaultPath=path)
    else:
        st_dic = { 'save' : wx.FD_SAVE, 'multi' : wx.FD_MULTIPLE }
        dlg = wx.FileDialog(parent, defaultDir=dn, defaultFile=fn,
            style=st_dic.get(path_type, wx.FD_DEFAULT_STYLE))
    ret = show_modal(dlg)
    if ret == wx.ID_OK:
        path = ','.join(dlg.GetPaths()) if path_type == 'multi' else dlg.GetPath()
        if path_type == 'dir' and fns:
            path = ','.join([ path + '/' + fn for fn in fns ])
        set_path(tc, path)
    dlg.Destroy()
    return ret
def post_evt_toggle_obj(win, obj, v):
    """Set widget *obj* to *v* and post the matching toggle/click event to *win*."""
    evt_id = {
        CT.GenericTreeItem : CT.wxEVT_TREE_ITEM_CHECKED,
        wx.CheckBox : wx.EVT_CHECKBOX.typeId,
        wx.ToggleButton : wx.EVT_TOGGLEBUTTON.typeId,
        wx.Button : wx.EVT_BUTTON.typeId,
    }.get( type(obj) )
    if evt_id == CT.wxEVT_TREE_ITEM_CHECKED:
        evt = CT.TreeEvent( evt_id, win.GetId() )
        evt.SetItem(obj)
    else:
        evt = wx.PyCommandEvent( evt_id, obj.GetId() )
        evt.SetEventObject(obj)
    set_val(obj, v)
    wx.PostEvent(win, evt)
def button_color_change(btn, v=None):
    """Recolour *btn* to reflect its pressed/enabled state.

    v defaults to the toggle state for ToggleButtons; unknown states get
    the platform default colours.
    """
    if v is None and type(btn) is wx.ToggleButton:
        v = btn.GetValue()
    palette = { (True,True):('#F9F9F8','#8B8BB9'), (True,False):('#F9F9F8','#E0E0F0') }
    (fg, bg) = palette.get((v, btn.IsEnabled()), (wx.NullColour, wx.NullColour))
    btn.SetForegroundColour(fg)
    btn.SetBackgroundColour(bg)
def OnButtonColorHdr(event):
    """Shared handler recolouring buttons on toggle / mouse down / mouse up."""
    btn = event.GetEventObject()
    dic = { wx.EVT_TOGGLEBUTTON.typeId : None,
        wx.EVT_LEFT_DOWN.typeId : True,
        wx.EVT_LEFT_UP.typeId : False }
    v = dic.get(event.GetEventType(), '?')
    if v != '?':
        button_color_change(btn, v)
    event.Skip()
# Lazily-probed "default" background colour of a plain wx.Button.
btn_null_bgcol = None

def is_btn_null_bgcol(btn):
    """True when *btn* still has the platform default background colour.

    The first call probes the default by temporarily resetting *btn* to
    wx.NullColour, caching the result in the btn_null_bgcol global.
    """
    global btn_null_bgcol
    bak = btn.GetBackgroundColour()
    if btn_null_bgcol is None:
        btn.SetBackgroundColour(wx.NullColour)
        btn_null_bgcol = btn.GetBackgroundColour()
        if bak != btn_null_bgcol:
            btn.SetBackgroundColour(bak)
    return bak == btn_null_bgcol
def button_color_hdr_setup(btn):
    """Bind OnButtonColorHdr appropriately: toggle events for ToggleButtons,
    press/release for plain Buttons still using the default colour."""
    hdr = OnButtonColorHdr
    if type(btn) is wx.ToggleButton:
        btn.Bind(wx.EVT_TOGGLEBUTTON, hdr)
    elif type(btn) is wx.Button and is_btn_null_bgcol(btn):
        btn.Bind(wx.EVT_LEFT_DOWN, hdr)
        btn.Bind(wx.EVT_LEFT_UP, hdr)
def buttons_color_hdr_setup(frm_obj):
    """Install colour-change handlers on every 'button_*' attribute of *frm_obj*."""
    prefix = 'button_'
    for attr_name in dir(frm_obj):
        if attr_name.startswith(prefix):
            button_color_hdr_setup(getattr(frm_obj, attr_name))
def show_modal(dlg):
    """Apply button colour handlers to *dlg*, then run it modally."""
    buttons_color_hdr_setup(dlg)
    return dlg.ShowModal()
def load_yaml(filename, def_ret=None):
    """Load a YAML file from the runtime manager source directory.

    Returns def_ret when the file does not exist.
    """
    path = rtmgr_src_dir() + filename
    if not os.path.isfile(path):
        return def_ret
    print('loading ' + filename)
    # with-statement guarantees the handle is closed even if parsing raises
    # (the original leaked the handle on a parse error).
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary objects; fine for trusted local config, but consider
    # yaml.safe_load for anything user-supplied.
    with open(path, 'r') as f:
        d = yaml.load(f)
    return d
def terminate_children(proc, sigint=False):
    """Recursively terminate all child processes of *proc*, depth-first."""
    for child in get_proc_children(proc):
        terminate_children(child, sigint)
        terminate(child, sigint)
def terminate(proc, sigint=False):
    """Stop *proc*: send SIGINT when *sigint* is truthy, else a plain terminate()."""
    if not sigint:
        proc.terminate()
    else:
        proc.send_signal(signal.SIGINT)
def proc_wait_thread(ev, proc, obj):
    """Thread body: wait for *proc* to exit, then re-enable *obj* on the GUI thread."""
    proc.wait()
    wx.CallAfter(enables_set, obj, 'proc_wait', True)
    th_end((None, ev))
def th_start(target, kwargs=None):
    """Start *target* as a daemon thread, injecting a stop Event as kwargs['ev'].

    Returns the (thread, event) pair expected by th_end().
    Fixed: the old mutable-default kwargs={} was also mutated in place,
    writing 'ev' into the caller's dict; we now copy before injecting.
    """
    kwargs = dict(kwargs) if kwargs else {}
    ev = threading.Event()
    kwargs['ev'] = ev
    th = threading.Thread(target=target, kwargs=kwargs)
    th.daemon = True
    th.start()
    return (th, ev)
def th_end(th_ev):
    """Stop a (thread, event) pair from th_start(): set the event and join.

    With th=None (a thread ending itself), joining is deferred: the current
    thread is captured and a Timer retries the full stop one second later.
    Fixed: the old tuple-parameter signature `def th_end((th, ev))` is
    Python 2-only syntax (removed by PEP 3113); callers already pass a
    single tuple, so unpacking inside is fully compatible.
    """
    (th, ev) = th_ev
    if not th:
        th = threading.current_thread()
        threading.Timer( 1.0, th_end, ((th, ev),) ).start()
        return
    ev.set()
    th.join()
def que_clear(que):
    """Atomically discard every pending item in Queue *que*."""
    que.mutex.acquire()
    try:
        que.queue.clear()
    finally:
        que.mutex.release()
def append_tc_limit(tc, s, rm_chars=0):
    """Append *s* to text control *tc*, first dropping *rm_chars* leading chars."""
    if rm_chars > 0:
        tc.Remove(0, rm_chars)
    tc.AppendText(s)
def cut_esc(s):
    """Strip ANSI colour escape sequences (ESC ... up to 'm') from *s*.

    Stops at the first ESC without a closing 'm', leaving the rest intact.
    """
    out = s
    while True:
        start = out.find('\x1b')
        if start < 0:
            return out
        end = out.find('m', start)
        if end < 0:
            return out
        out = out[:start] + out[end + 1:]
def change_font_point_by_rate(obj, rate=1.0):
    """Scale obj's font point size by *rate* (truncated to an int)."""
    font = obj.GetFont()
    scaled = int(font.GetPointSize() * rate)
    font.SetPointSize(scaled)
    obj.SetFont(font)
def fix_link_color(obj):
    """Make the visited-link colour equal the unvisited one (tree or HyperlinkCtrl)."""
    t = type(obj)
    if t is CT.GenericTreeItem or t is CT.CustomTreeCtrl:
        obj.SetHyperTextVisitedColour(obj.GetHyperTextNewColour())
    elif t is wx.HyperlinkCtrl:
        obj.SetVisitedColour(obj.GetNormalColour())
def get_tooltip(dic):
    """Whole-control tooltip text stored under 'desc', or None."""
    return dic.get('desc')
def get_tooltips(dic):
    """Per-item tooltip texts stored under 'descs' (default empty list)."""
    return dic.get('descs', [])
def set_tooltip(obj, dic):
    """Apply dic's 'desc' tooltip to *obj* (no-op when absent/unsupported)."""
    set_tooltip_str(obj, dic.get('desc'))
def set_tooltip_str(obj, s):
    """Set obj's tooltip to *s* when s is truthy and obj supports it."""
    setter = getattr(obj, 'SetToolTipString', None)
    if s and setter:
        setter(s)
def set_tooltips(obj, dic):
    """Apply per-item tooltips from dic['descs'] when obj supports SetItemToolTip."""
    descs = dic.get('descs', [])
    if descs and getattr(obj, 'SetItemToolTip', None):
        for (i, text) in enumerate(descs):
            obj.SetItemToolTip(i, text)
def get_tooltip_obj(obj):
    """Current tooltip text of *obj*, or None when unset/unsupported."""
    getter = getattr(obj, 'GetToolTip', None)
    if not getter:
        return None
    tip = getter()
    return tip.GetTip() if tip else None
def scaled_bitmap(bm, scale):
    """Return *bm* scaled by *scale* using high-quality resampling.

    Uses the classic-wx ImageFromBitmap/BitmapFromImage helpers.
    """
    (w, h) = bm.GetSize()
    img = wx.ImageFromBitmap(bm)
    img = img.Scale(w * scale, h * scale, wx.IMAGE_QUALITY_HIGH)
    return wx.BitmapFromImage(img)
def sizer_wrap(add_objs, orient=wx.VERTICAL, prop=0, flag=0, border=0, parent=None):
    """Wrap *add_objs* in a new BoxSizer; attach it to *parent* when given."""
    sizer = wx.BoxSizer(orient)
    for item in add_objs:
        sizer.Add(item, prop, flag, border)
    if parent:
        parent.SetSizer(sizer)
    return sizer
def static_box_sizer(parent, s, orient=wx.VERTICAL):
    """A lowered StaticBox labelled *s*, wrapped in a StaticBoxSizer."""
    box = wx.StaticBox(parent, wx.ID_ANY, s)
    box.Lower()
    return wx.StaticBoxSizer(box, orient)
def wx_flag_get(flags):
    """Sum the wx sizer/style flags named in *flags*; unknown names are ignored.

    NOTE: relies on Python 2's builtin reduce.
    """
    dic = { 'top' : wx.TOP, 'bottom' : wx.BOTTOM, 'left' : wx.LEFT, 'right' : wx.RIGHT,
        'all' : wx.ALL, 'expand' : wx.EXPAND, 'fixed_minsize' : wx.FIXED_MINSIZE,
        'center_v' : wx.ALIGN_CENTER_VERTICAL, 'center_h' : wx.ALIGN_CENTER_HORIZONTAL,
        'passwd' : wx.TE_PASSWORD }
    lst = [ dic.get(f) for f in flags if f in dic ]
    return reduce(lambda a,b : a+b, [0] + lst)
def msg_path_to_obj_attr(msg, path):
    """Walk dotted *path* from *msg*; return (parent_object, last_attr_name).

    Missing intermediate attributes yield None as the parent.
    """
    parts = path.split('.')
    cur = msg
    for name in parts[:-1]:
        cur = getattr(cur, name, None)
    return (cur, parts[-1])
def str_to_rosval(s, type_str, def_ret=None):
    """Convert string *s* to the Python value for ROS primitive *type_str*.

    Unknown type names return def_ret. Float strings may use ',' as the
    decimal separator. Fixed: `long` does not exist on Python 3 (NameError
    as soon as the table was built); fall back to int there.
    """
    try:
        _long = long
    except NameError:
        _long = int
    cvt_dic = {
        'int8':int , 'int16':int , 'int32':int ,
        'uint8':int , 'uint16':int , 'uint32':int ,
        'int64':_long , 'uint64':_long,
        'float32':float, 'float64':float,
    }
    t = cvt_dic.get(type_str)
    s = s.replace(',','.') if t is float and type(s) is str else s
    return t(s) if t else def_ret
def str_to_float(s):
	"""Parse *s* as a float, accepting ',' as the decimal separator."""
	normalized = s.replace(',', '.')
	return float(normalized)
def set_path(tc, v):
	"""Put path string *v* into text control *tc*, caret moved to the end."""
	tc.SetValue(v)
	tc.SetInsertionPointEnd()
def set_val(obj, v):
	"""Set value *v* on *obj* via SetValue (or Check for menu items),
	refresh it, and recolour ToggleButtons to match their new state."""
	setter = getattr(obj, 'SetValue', None) or getattr(obj, 'Check', None)
	if setter:
		setter(v)
	obj_refresh(obj)
	if type(obj) is wx.ToggleButton:
		button_color_change(obj)
def enables_set(obj, k, en):
	# Record a per-key enable vote on the widget and enable it only when
	# every recorded vote is True.  The odd attribute name 'enabLes'
	# presumably avoids clashing with a wx attribute -- TODO confirm.
	if hasattr(obj, 'enables_proxy'):
		# Some widgets delegate their enable state to (other_obj, key).
		(obj, k) = obj.enables_proxy
	d = attr_getset(obj, 'enabLes', {})
	d[k] = en
	# NOTE(review): 'last_key' lives in the same dict, so its (string)
	# value participates in all(d.values()) below; an empty-string key
	# would permanently disable the widget -- verify intended.
	d['last_key'] = k
	if hasattr(obj, 'Enable'):
		obj.Enable( all( d.values() ) )
	obj_refresh(obj)
	if isinstance(obj, wx.HyperlinkCtrl):
		# Enable() alone does not grey out a hyperlink's text, so the
		# colour is swapped manually and the original colour cached.
		if not hasattr(obj, 'coLor'):
			obj.coLor = { True:obj.GetNormalColour(), False:'#808080' }
		c = obj.coLor.get(obj.IsEnabled())
		obj.SetNormalColour(c)
		obj.SetVisitedColour(c)
def enables_get(obj, k, def_ret=None):
	"""Look up the recorded enable vote for key *k* on *obj*."""
	states = attr_getset(obj, 'enabLes', {})
	return states.get(k, def_ret)
def enables_get_last(obj):
	"""Return (key, state) for the most recently set enable key."""
	key = enables_get(obj, 'last_key')
	return (key, enables_get(obj, key))
def obj_refresh(obj):
	"""Refresh *obj*; tree items refresh their owning CustomTreeCtrl
	(found by climbing to the root item, whose data holds the tree)."""
	if type(obj) is CT.GenericTreeItem:
		while obj.GetParent():
			obj = obj.GetParent()
		obj.GetData().Refresh()
# dic_list util (push, pop, get)
def dic_list_push(dic, key, v):
	"""Append *v* to the list stored at *key*, creating the list if needed."""
	stack = dic_getset(dic, key, [])
	stack.append(v)
def dic_list_pop(dic, key):
	"""Pop and return the last value of the list stored at *key*.

	Returns None when the key is absent or its list is empty.  The
	original discarded the popped value (despite the push/pop/get util
	contract), popped from a throw-away default list when the key was
	missing, and raised IndexError on a present-but-empty list.
	"""
	lst = dic.get(key)
	if lst:
		return lst.pop()
	return None
def dic_list_get(dic, key, def_ret=None):
	"""Return the top (last) element of the list at *key*, else *def_ret*."""
	stack = dic.get(key, [def_ret])
	return stack[-1]
def bak_stk_push(dic, key):
	"""Push the current value of *key* onto its backup stack.

	The backup stack lives in the same dict under '<key>_bak_str'.
	Nothing happens when *key* is absent.
	"""
	if key in dic:
		bak_key = key + '_bak_str'
		stack = dic_getset(dic, bak_key, [])
		stack.append(dic.get(key))
def bak_stk_pop(dic, key):
	"""Restore *key* from its '<key>_bak_str' backup stack.

	When no backup exists the key is deleted (KeyError if also absent,
	matching the original behavior).
	"""
	backups = dic.get(key + '_bak_str', [])
	if backups:
		dic[key] = backups.pop()
	else:
		del dic[key]
def bak_stk_set(dic, key, v):
	"""Back up the current value of *key* (if any), then set it to *v*.

	Fixes a typo: the original called the undefined name bak_str_push(),
	raising NameError on every invocation.
	"""
	bak_stk_push(dic, key)
	dic[key] = v
def attr_getset(obj, name, def_ret):
	"""Return obj.<name>, first initializing it to *def_ret* when missing."""
	try:
		return getattr(obj, name)
	except AttributeError:
		setattr(obj, name, def_ret)
		return def_ret
def dic_getset(dic, key, def_ret):
	"""Return dic[key], first storing *def_ret* there when the key is missing
	(i.e. dict.setdefault)."""
	return dic.setdefault(key, def_ret)
def lst_append_once(lst, v):
	"""Append *v* unless already present; return whether it was present."""
	if v in lst:
		return True
	lst.append(v)
	return False
def lst_remove_once(lst, v):
	"""Remove the first occurrence of *v* if present; return whether it was."""
	if v not in lst:
		return False
	lst.remove(v)
	return True
def get_top(lst, def_ret=None):
	"""Return the first element of *lst*, or *def_ret* when empty."""
	return lst[0] if lst else def_ret
def adjust_num_str(s):
	"""Strip trailing zeros (and a then-bare trailing '.') from a decimal
	number string; integers without a '.' pass through unchanged."""
	if '.' in s:
		s = s.rstrip('0')
		if s.endswith('.'):
			s = s[:-1]
	return s
def rtmgr_src_dir():
	"""Absolute directory containing this source file, with trailing '/'."""
	here = os.path.dirname(os.path.abspath(__file__))
	return here + "/"
def path_expand_cmd(path):
	"""Expand a leading '$(cmd args)' path component by substituting the
	command's (stripped) standard output; other paths pass through."""
	parts = path.split('/')
	head = parts[0]
	if head.startswith('$(') and head.endswith(')'):
		cmd = head[2:-1].split(' ')
		parts[0] = subprocess.check_output(cmd).strip()
		path = '/'.join(parts)
	return path
def eval_if_str(self, v):
	"""Evaluate *v* as a Python expression when it is a string; otherwise
	return it unchanged.

	NOTE(review): eval() is safe only for trusted (config-sourced) input.
	"""
	if type(v) is str:
		return eval(v)
	return v
def dic_eval_if_str(self, dic, key, def_ret=None):
	"""Fetch dic[key] (default *def_ret*) and eval it when it is a string."""
	v = dic.get(key, def_ret)
	return eval_if_str(self, v)
def prn_dict(dic):
	"""Debug helper: print every key/value pair of *dic*, one per line."""
	for (k,v) in dic.items():
		print (k, ':', v)
def send_to_proc_manager(order):
	"""Send *order* (a dict) to the process-manager daemon over its Unix
	socket, YAML-encoded, and return True when it reports success.

	Returns False when the socket cannot be connected.  (The original
	returned -1, which is truthy and so was misread as success by the
	boolean-returning callers.)  The socket is now also closed when
	send/recv raises.
	"""
	sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
	try:
		sock.connect(PROC_MANAGER_SOCK)
	except socket.error:
		print('Failed connect to {}'.format(PROC_MANAGER_SOCK))
		sock.close()
		return False
	try:
		sock.send(yaml.dump(order))
		ret = sock.recv(1024)
	finally:
		sock.close()
	return int(ret) == 0
def set_process_nice(proc, value):
	"""Ask the process manager to renice *proc* to *value*."""
	return send_to_proc_manager({
		"name": "nice",
		"pid": proc.pid,
		"nice": value,
	})
def set_process_cpu_affinity(proc, cpus):
	"""Ask the process manager to pin *proc* to the given *cpus* list."""
	return send_to_proc_manager({
		"name": "cpu_affinity",
		"pid": proc.pid,
		"cpus": cpus,
	})
def shutdown_proc_manager():
	"""Ask the process-manager daemon to shut itself down."""
	return send_to_proc_manager({"name": "shutdown"})
def set_scheduling_policy(proc, policy, priority):
	"""Ask the process manager to apply a scheduler *policy*/*priority* to *proc*."""
	return send_to_proc_manager({
		"name": "scheduling_policy",
		"pid": proc.pid,
		"policy": policy,
		"priority": priority,
	})
# psutil 3.x to 1.x backward compatibility
def get_cpu_count():
	"""CPU count, tolerating both psutil 1.x (NUM_CPUS) and 3.x (cpu_count)."""
	if hasattr(psutil, 'NUM_CPUS'):
		return psutil.NUM_CPUS
	return psutil.cpu_count()
def get_proc_children(proc, r=False):
	"""Child processes of *proc*, across old (get_children) and new
	(children) psutil APIs."""
	fn = getattr(proc, 'get_children', None)
	if fn is None:
		fn = proc.children
	return fn(recursive=r)
def get_proc_nice(proc):
	"""Nice value of *proc*, across old (get_nice) and new (nice) psutil APIs."""
	getter = getattr(proc, 'get_nice', None)
	if getter is None:
		getter = proc.nice
	return getter()
def get_proc_cpu_affinity(proc):
	"""CPU affinity of *proc*, across old (get_cpu_affinity) and new
	(cpu_affinity) psutil APIs."""
	getter = getattr(proc, 'get_cpu_affinity', None)
	if getter is None:
		getter = proc.cpu_affinity
	return getter()
if __name__ == "__main__":
	# Install gettext's _() into builtins for the UI strings, then start
	# the wx application main loop (blocks until the GUI exits).
	gettext.install("app")
	app = MyApp(0)
	app.MainLoop()
	# EOF
|
app.py | """
Slack chat-bot Lambda handler.
Modified from: https://github.com/Beartime234/sam-python-slackapp-template
"""
# Module Imports
import os
import logging
import json
import time
import hmac
import hashlib
import json
import urllib.parse
import base64
import threading
# import multiprocessing
# import requests
from slack import WebClient as Slack_WebClient
from http.client import UNSUPPORTED_MEDIA_TYPE, BAD_REQUEST
from http.client import OK as OK_200
# Local imports
import helpers
from version import __version__
# Get Environment Variables
# This is declared globally because as this is useful for tests etc.
SECRETS_NAME = os.environ["SECRETS_NAME"]
STAGE = os.environ["STAGE"]
CUTOFF = os.environ.get('SLACK_LAMBDA_MASTER_CUTOFF')
THREADED_LAMBDA_HEADER = 'X-Spawn-Lambda-Thread'
UNIT_TEST_HEADER_FLAGS = 'X-Unit-Test-Flags'
F_SKIP_THREAD_SPAWN = 'skip-thread-spawn'
F_B64_STUB = 'b64stub'
F_B64_RESP = 'b64response'
# Set up logging here info so we should get the
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
# Ignore non important logs from botocore and boto3 cause they talk to much
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
# Grab secrets for the application.
SECRETS = json.loads(helpers.get_secrets(SECRETS_NAME))
SLACK_WEBCLIENT = Slack_WebClient(SECRETS["BOT_TOKEN"])
def obscure_dict(some_dict):
    """Return a copy of *some_dict* with every value masked for safe logging:
    first two chars, total length, last two chars (e.g. 'se..6..et')."""
    return {
        key: '{}..{}..{}'.format(val[0:2], len(val), val[-2:])
        for key, val in some_dict.items()
    }
def encode_b64_dict(response_dict):
    """Serialize *response_dict* to JSON and wrap it base64-encoded as
    {'req_body_base64': '<b64 string>'} (used by unit-test stub responses)."""
    encoded = base64.b64encode(json.dumps(response_dict).encode('utf-8'))
    return {'req_body_base64': encoded.decode('utf-8')}
# if "bot_id" in slack_body_dict:
# logging.warning("Ignore bot event")
# else:
# # Get the text of the message the user sent to the bot,
# # and reverse it.
# ret = process_event(slack_event_dict)
# slack_event_dict = {}
# if slack_event_dict:
# # Get the ID of the channel where the message was posted.
# channel_id = slack_event_dict["channel"]
# response = slack_client.chat_postMessage(
# channel=channel_id,
# text=ret
# )
# LOGGER.debug('Response: {}'.format(response))
def process_not_implemented(**kwargs):
    """Fallback processor: report 'not-implemented' with an empty slack response."""
    return {'function_return': 'not-implemented', 'slack_response': {}}
def process_event(**kwargs):
    """Process a Slack event callback.

    Bot-generated chatter is ignored.  For 'message' events carrying the
    unit-test header flag, the message text is reversed and posted back to
    the originating channel.  Returns a dict with 'function_return' and
    'slack_response' keys.
    """
    event = kwargs['body']['event']
    if 'bot_id' in event:
        LOGGER.debug('Ignoring event ({}) caused by bot chatter.'.format(event["type"]))
        return { 'function_return': 'ignored bot chatter',
                 'slack_response': {} }
    ret_dict = process_not_implemented()
    LOGGER.debug('Will process event: {}'.format(event["type"]))
    if event["type"] == "message" and UNIT_TEST_HEADER_FLAGS in kwargs['headers']:
        # Unit-test flag detected: reverse the text and echo it back.
        text = event.get("text")
        if text:
            reversed_text = text[::-1]
            response = SLACK_WEBCLIENT.chat_postMessage(
                channel=event["channel"], text=reversed_text )
            ret_dict = { 'function_return': reversed_text,
                         'slack_response': response }
    return ret_dict
def process_shortcut(**kwargs):
    """Process a Slack shortcut (message / global) -- not yet implemented."""
    return process_not_implemented()
def process_slash_cmd(**kwargs):
    """Process a Slack slash command -- not yet implemented."""
    return process_not_implemented()
def lambda_handler(api_event, api_context):
    """Handle an incoming HTTP request from a Slack chat-bot.

    Flow: config sanity checks -> master cutoff -> Slack URL-verification
    challenge -> body parsing keyed on Content-Type -> (prod only) request
    signature verification -> dispatch to the matching process_* handler
    -> 200 OK.

    Fixes over the previous revision:
      * ``STAGE is "prod"`` was an identity test (never true for an
        env-sourced string), so signature verification never ran; use ``==``.
      * the verification call referenced the undefined name
        ``slack_body_raw`` (NameError); pass the raw request body instead.
      * ``x in ('application/json')`` was a substring test against a plain
        string (missing tuple comma); use equality.
      * ``CUTOFF == True`` can never match an environment string.
      * ``json.loads(body or {})`` raised TypeError on a missing body.
    """
    if type(SECRETS) is not dict:
        raise TypeError("Secrets response must be a dictionary.")
    # Environment variables are strings; accept the common truthy spellings.
    if CUTOFF in ('1', 'true', 'True'):
        LOGGER.warning("Master cutoff switch is on. Exiting lambda.")
        return helpers.form_response(OK_200, {'CUTOFF ERROR': 'Master cutoff switch is engaged. Exiting.'})
    LOGGER.info(f" -- Startup Information Version: {__version__}")
    LOGGER.debug(f"Secret Information: {obscure_dict(SECRETS)}")
    apievent_ContentType = (api_event.get('headers') or {}).get('Content-Type') or 'null'
    request_headers = api_event["headers"]
    # Keep the raw (string) body around: signature verification must run
    # over the exact payload Slack sent, not a re-serialization.
    raw_body = api_event.get('body')
    # First and foremost, process challenge event, if sent:
    # This is to appease the slack challenge event that is sent
    # when subscribing to the slack event API. You can read more
    # here https://api.slack.com/events/url_verification
    if apievent_ContentType == 'application/json':
        apievent_body_ = json.loads(raw_body or '{}')
        if is_challenge(apievent_body_):
            challenge_response_body = {
                "challenge": apievent_body_["challenge"]
            }
            LOGGER.info('Responding to challenge event.')
            return helpers.form_response(OK_200, challenge_response_body)
    # *** DO NOT DELETE BELOW, REVISIT IN FUTURE ***
    # Responding immediately is best practice recommended by Slack
    # If not challenge, then immediately return OK
    # when customer header THREADED_LAMBDA_HEADER is not present
    # If SKIP_THREAD_SPAWN is not sent in custom header UNIT_TEST_HEADER_FLAGS,
    # then spawn a new thread with the payload
    # Otherwise, if THREADED_LAMBDA_HEADER is not present, then process payload
    # To test behavior after the spawn, include THREADED_LAMBDA_HEADER
    # To test immediate response, do not include THREADED_LAMBDA_HEADER
    # if THREADED_LAMBDA_HEADER not in request_headers:
    #     # Skip creating new thread if UNIT_TEST_HEADER_FLAGS indicates to not do so
    #     if F_SKIP_THREAD_SPAWN not in (request_headers.get(UNIT_TEST_HEADER_FLAGS) or ''):
    #         # Spawn new thread with special thread header
    #         api_event['headers'][THREADED_LAMBDA_HEADER] = 'respawned-to-self-for-async-response'
    #         LOGGER.info('Launching separate thread for lambda to process request!')
    #         # p = multiprocessing.Process(target=lambda_handler, args=(api_event, api_context, ))
    #         # p.start()
    #         # p.join()
    #         t = threading.Thread(target=lambda_handler, args=(api_event, api_context), daemon=False )
    #         t.start()
    #         # I couldn't get this to work like I wanted it to.
    #         # I wanted to spawn an autonomous thread that would finish running after this thread returns (dies)
    #         # But I wasn't able to figure out if this was even possible to do.
    #         # Since it currently executes "fast enough", I'm just going to wait for the processing to finish
    #         t.join()
    #         # https://stackoverflow.com/questions/53386968/multithreading-in-aws-lambda-using-python3
    #         LOGGER.info('Returning 200 OK to slack')
    #         return helpers.form_response(OK_200, {})
    #     # to test a long-running process doesn't die before the 'parent' thread:
    #     # for i in range(0, 29000000):
    #     #     pass
    # *** DO NOT DELETE ABOVE, REVISIT IN FUTURE ***
    LOGGER.info(f'Detected Content-Type: {apievent_ContentType}')
    # At this stage, this could be multiple things (see notes below), so log entire dictionary as json
    LOGGER.debug('api_event: {}'.format(json.dumps(api_event)))
    # Set default processing function
    process_function = process_not_implemented
    # Load dict with payload; pick the processing function matching the body.
    # Note: slack call discrimination is deliberately coarse for now and may
    # need to be made more specific later.
    if apievent_ContentType == 'application/x-www-form-urlencoded':
        apievent_body_ = urllib.parse.parse_qs(raw_body)
        apievent_body_['slack_event_type'] = 'slash-command'
        process_function = process_event
        if 'payload' in apievent_body_:
            # Shortcuts arrive as a list of JSON strings under 'payload'.
            new_apievent_body_ = { 'payload' : [] }
            LOGGER.debug('apievent_body_: {}'.format(apievent_body_))
            for each_el in apievent_body_.get('payload') or {}:
                new_apievent_body_['payload'].append( json.loads(each_el) )
            apievent_body_ = new_apievent_body_
            apievent_body_['slack_event_type'] = 'shortcut'
            process_function = process_shortcut
            LOGGER.debug('payload based apievent_body: {}'.format(apievent_body_))
    elif apievent_ContentType == 'application/json':
        try:
            apievent_body_ = json.loads(raw_body)
            apievent_body_['slack_event_type'] = 'json-string'
        except TypeError:
            # Body was not a string (already a dict, or None).
            apievent_body_ = raw_body or {}
        if 'slack_event_type' not in apievent_body_:
            apievent_body_['slack_event_type'] = 'json'
    else:
        LOGGER.error(f'Content-Type unexpected: {apievent_ContentType}')
        return helpers.form_response(UNSUPPORTED_MEDIA_TYPE, {"Error": f"Unexpected Content-Type ({apievent_ContentType})"})
    LOGGER.debug('body({}): {}'.format(apievent_ContentType, json.dumps(apievent_body_)))
    slack_event_dict = apievent_body_.get("event") or {}
    LOGGER.debug('event dict: {}'.format(json.dumps(slack_event_dict)))
    if F_B64_STUB in (request_headers.get(UNIT_TEST_HEADER_FLAGS) or ''):
        # Unit-test stub: echo the parsed body back, base64-encoded, so
        # tests can verify the expected data structure.
        stub_return = encode_b64_dict(apievent_body_)
        return helpers.form_response(OK_200, stub_return)
    # If the stage is production make sure that we are receiving events from
    # slack; otherwise we don't care.
    if STAGE == "prod":
        LOGGER.debug(f"We are in production. So we are going to verify the request.")
        if not verify_request(request_headers["X-Slack-Signature"], request_headers["X-Slack-Request-Timestamp"],
                              raw_body, SECRETS["SIGNING_SECRET"]):
            return helpers.form_response(BAD_REQUEST, {"Error": "Bad Request Signature"})
    # There are different types of payloads: slash-commands,
    # message-shortcuts, challenge, events.  See unit tests for details.
    # Any truthy CUTOFF value suppresses outbound slack calls.
    ret = process_function( body=apievent_body_, skip_slack=bool(CUTOFF) )
    # Everything went fine; return a good response.
    if CUTOFF == '.5':
        LOGGER.warning("Master cutoff switch is half-engaged. Exiting except for unit tests.")
        return helpers.form_response(OK_200, {'CUTOFF ERROR': 'Master cutoff switch is half-engaged. Exiting except for unit tests.'})
    else:
        return helpers.form_response(OK_200, {})
def is_challenge(slack_event_body: dict) -> bool:
    """Return True when *slack_event_body* is a Slack URL-verification
    challenge (https://api.slack.com/events/url_verification).

    Args:
        slack_event_body (dict): The slack event JSON.
    Returns:
        True for a challenge event, False otherwise.
    """
    if "challenge" not in slack_event_body:
        return False
    LOGGER.info("Challenge Data: {}".format(slack_event_body['challenge']))
    return True
def verify_request(slack_signature: str, slack_timestamp: str, slack_event_body: str, app_signing_secret) -> bool:
    """Validate Slack's v0 HMAC-SHA256 request signature.

    Rejects requests older than five minutes (replay protection), then
    recomputes the signature over 'v0:<timestamp>:<body>' with the app
    signing secret and compares in constant time.
    See https://api.slack.com/docs/verifying-requests-from-slack

    Args:
        slack_signature (str): X-Slack-Signature request header.
        slack_timestamp (str): X-Slack-Request-Timestamp request header.
        slack_event_body (str): Raw request body as a string.
        app_signing_secret (str): The app's local signing secret.
    Returns:
        True when the request is valid, False otherwise.
    """
    if abs(time.time() - float(slack_timestamp)) > 60 * 5:
        # The request is older then 5 minutes
        LOGGER.warning(f"Request verification failed. Timestamp was over 5 mins old for the request")
        return False
    basestring = f"v0:{slack_timestamp}:{slack_event_body}".encode('utf-8')
    secret_bytes = bytes(app_signing_secret, 'utf-8')
    computed = 'v0=' + hmac.new(secret_bytes, basestring, hashlib.sha256).hexdigest()
    if not hmac.compare_digest(computed, slack_signature):
        LOGGER.warning(f"Verification failed. my_signature: {computed} slack_signature: {slack_signature}")
        return False
    return True
|
test_plc_route.py | import unittest
import threading
import socket
import struct
from contextlib import closing
from pyads import add_route_to_plc
from pyads.utils import platform_is_linux
class PLCRouteTestCase(unittest.TestCase):
    """Tests for pyads.add_route_to_plc against an in-process fake PLC.

    plc_route_receiver() emulates the PLC side of the UDP add-route
    protocol: it listens on port 48899, decodes the request field by
    field (length-prefixed, null-terminated strings), asserts every
    field, and replies to port 55189 with a password-correct or
    password-incorrect response.
    """
    # Fixed identities shared by the fake PLC and the client-side call.
    SENDER_AMS = "1.2.3.4.1.1"
    PLC_IP = "127.0.0.1"
    USERNAME = "user"
    PASSWORD = "password"
    ROUTE_NAME = "Route"
    ADDING_AMS_ID = "5.6.7.8.1.1"
    HOSTNAME = "Host"
    PLC_AMS_ID = "11.22.33.44.1.1"
    def setUp(self):
        """No per-test setup required."""
        pass
    def tearDown(self):
        """No per-test teardown required."""
        pass
    def plc_route_receiver(self):
        """Emulate the PLC: receive one add-route request, check it, reply."""
        with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as sock:
            # Listen on 48899 for communication
            sock.bind(("", 48899))
            # Keep looping until we get an add address packet
            addr = [0]
            while addr[0] != self.PLC_IP:
                data, addr = sock.recvfrom(1024)
            # Decipher data and 'add route'
            data = data[12:]  # Remove our data header
            sending_ams_bytes = data[:6]  # Sending AMS address
            sending_ams = ".".join(map(str, struct.unpack(">6B", sending_ams_bytes)))
            data = data[6:]
            comm_port = struct.unpack("<H", data[:2])[
                0
            ]  # Internal communication port (PORT_SYSTEMSERVICE)
            data = data[2:]
            command_code = struct.unpack("<H", data[:2])[
                0
            ]  # Command code to write to PLC
            data = data[2:]
            data = data[4:]  # Remove protocol bytes
            len_sending_host = struct.unpack("<H", data[:2])[0]  # Length of host name
            data = data[2:]
            hostname = data[:len_sending_host].decode(
                "utf-8"
            )  # Null terminated hostname
            data = data[len_sending_host:]
            data = data[2:]  # Remove protocol bytes
            len_ams_id = struct.unpack("<H", data[:2])[0]  # Length of adding AMS ID
            data = data[2:]
            adding_ams_id_bytes = data[:len_ams_id]  # AMS ID being added to PLC
            adding_ams_id = ".".join(
                map(str, struct.unpack(">6B", adding_ams_id_bytes))
            )
            data = data[len_ams_id:]
            data = data[2:]  # Remove protocol bytes
            len_username = struct.unpack("<H", data[:2])[0]  # Length of PLC username
            data = data[2:]
            username = data[:len_username].decode("utf-8")  # Null terminated username
            data = data[len_username:]
            data = data[2:]  # Remove protocol bytes
            len_password = struct.unpack("<H", data[:2])[0]  # Length of PLC password
            data = data[2:]
            password = data[:len_password].decode("utf-8")  # Null terminated password
            data = data[len_password:]
            data = data[2:]  # Remove protocol bytes
            len_route_name = struct.unpack("<H", data[:2])[0]  # Length of route name
            data = data[2:]
            route_name = data[:len_route_name].decode(
                "utf-8"
            )  # Null terminated route name
            data = data[len_route_name:]
            self.assertEqual(len(data), 0)  # We should have popped everything from data
            self.assertEqual(sending_ams, self.SENDER_AMS)
            self.assertEqual(comm_port, 10000)
            self.assertEqual(command_code, 5)
            self.assertEqual(
                len_sending_host, len(self.HOSTNAME) + 1
            )  # +1 for the null terminator
            self.assertEqual(hostname, self.HOSTNAME + "\0")
            self.assertEqual(adding_ams_id, self.ADDING_AMS_ID)
            self.assertEqual(
                len_username, len(self.USERNAME) + 1
            )  # +1 for the null terminator
            self.assertEqual(username, self.USERNAME + "\0")
            # Don't check the password since that's part the correct/incorrect response test
            # We can also assume that if the data after the password is correct, then the password was sent/read correctly
            # self.assertEqual(len_password, len(self.PASSWORD) + 1) # +1 for the null terminator
            # self.assertEqual(password, self.PASSWORD + '\0')
            self.assertEqual(
                len_route_name, len(self.ROUTE_NAME) + 1
            )  # +1 for the null terminator
            self.assertEqual(route_name, self.ROUTE_NAME + "\0")
            if password == self.PASSWORD + "\0":
                password_correct = True
            else:
                password_correct = False
            # Build response
            response = struct.pack(
                ">12s", b"\x03\x66\x14\x71\x00\x00\x00\x00\x06\x00\x00\x80"
            )  # Same header as being sent to the PLC, but with 80 at the end
            response += struct.pack(
                ">6B", *map(int, self.PLC_AMS_ID.split("."))
            )  # PLC AMS id
            response += struct.pack(
                "<H", 10000
            )  # Internal communication port (PORT_SYSTEMSERVICE)
            response += struct.pack(">2s", b"\x01\x00")  # Command code read
            response += struct.pack(
                ">4s", b"\x00\x00\x01\x04"
            )  # Block of unknown protocol
            if password_correct:
                response += struct.pack(">3s", b"\x04\x00\x00")  # Password Correct
            else:
                response += struct.pack(">3s", b"\x00\x04\x07")  # Password Incorrect
            response += struct.pack(">2s", b"\x00\x00")  # Block of unknown protocol
            # Send our response to 55189
            sock.sendto(response, (self.PLC_IP, 55189))
    def test_correct_route(self):
        """Adding a route with the right password succeeds."""
        if platform_is_linux():
            # Start receiving listener
            route_thread = threading.Thread(target=self.plc_route_receiver)
            # NOTE(review): Thread.setDaemon is deprecated; daemon=True kwarg preferred.
            route_thread.setDaemon(True)
            route_thread.start()
            # Try to set up a route with ourselves using all the optionals
            try:
                result = add_route_to_plc(
                    self.SENDER_AMS,
                    self.HOSTNAME,
                    self.PLC_IP,
                    self.USERNAME,
                    self.PASSWORD,
                    route_name=self.ROUTE_NAME,
                    added_net_id=self.ADDING_AMS_ID,
                )
            # NOTE(review): bare except hides any unrelated failure from
            # add_route_to_plc as a plain test failure -- consider narrowing.
            except:
                result = None
            self.assertTrue(result)
    def test_incorrect_route(self):
        """Adding a route with a wrong password fails."""
        if platform_is_linux():
            # Start receiving listener
            route_thread = threading.Thread(target=self.plc_route_receiver)
            # NOTE(review): Thread.setDaemon is deprecated; daemon=True kwarg preferred.
            route_thread.setDaemon(True)
            route_thread.start()
            # Try to set up a route with ourselves using all the optionals AND an incorrect password
            try:
                result = add_route_to_plc(
                    self.SENDER_AMS,
                    self.HOSTNAME,
                    self.PLC_IP,
                    self.USERNAME,
                    "Incorrect Password",
                    route_name=self.ROUTE_NAME,
                    added_net_id=self.ADDING_AMS_ID,
                )
            except:
                result = None
            self.assertFalse(result)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
KarmaBoi.py | #!/usr/bin/env python
'''
Main application - sets and receives environment variables, initial logger
configuration.
In order to work with Cloud Foundry healthcheck, threadding is imported to
create a simple flask instance. The slack client is persistent, and all
exceptions will be logged but the slack client will restart.
Released under MIT license, copyright 2018 Tyler Ramer
'''
import dbopts
import os
import traceback
import sys
import time
import slack_parse
import logging
import errno
import textwrap as tw
import threading
from slackclient import SlackClient
from cache import TimedCache
import argparse
from flask import Flask
from cfenv import AppEnv
'''
These values are set in ~/.KarmaBoi and exported to environment by sourcing
init.sh if on a local host. If on PCF, be sure to set the environment variables
'SLACK_BOT_NAME' and 'SLACK_BOT_TOKEN'
'''
BOT_NAME = os.environ.get('SLACK_BOT_NAME')
SLACK_BOT_TOKEN = os.environ.get('SLACK_BOT_TOKEN')
READ_WEBSOCKET_DELAY = .1 # delay in seconds between reading from firehose
env = AppEnv()
parser = argparse.ArgumentParser()
parser.add_argument(
'-v',
'--verbose',
help='add debug messages to log output',
action='store_true')
args = parser.parse_args()
# set up log setting
if args.verbose:
logLevel = logging.DEBUG
else:
logLevel = logging.INFO
'''
Setting environment specific logger settings - log to file if not using CF
'''
if env.name is None:
BOT_HOME = os.environ.get('BOT_HOME')
envHandler = logging.FileHandler("{}/{}.log".format(BOT_HOME, 'KarmaBoi'))
else:
envHandler = logging.StreamHandler()
logging.basicConfig(
level=logLevel,
handlers=[envHandler],
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s')
logger = logging.getLogger(__name__)
'''
Using flask as a simple server - provides a quick status check, and, more
importantly, allows it to work on Cloud Foundry by providing socket listening
'''
app = Flask(__name__)
port = int(os.getenv("PORT", 8080))
def bot_id(BOT_NAME, sc):
    """Look up the bot's user ID by name via the Slack users.list API.

    Args:
        BOT_NAME: the bot's slack user name.
        sc: a connected SlackClient.
    Returns:
        The user ID string, or None when the API call fails or no user
        matches.  (The original returned the undefined name ``NULL`` on
        failure, raising NameError instead of reporting the problem.)
    """
    user_obj = sc.api_call('users.list')
    if user_obj.get('ok'):
        # retrieve all users so we can find our bot
        for user in user_obj.get('members'):
            if 'name' in user and user.get('name') == BOT_NAME:
                BOT_ID = user.get('id')
                logger.debug('Returned Bot ID {}'.format(BOT_ID))
                return BOT_ID
        return None
    logger.critical(
        "API call failed - please ensure your token and bot name are valid"
    )
    return None
@app.route('/')
def status():
    # Healthcheck endpoint; Cloud Foundry polls this to keep the app alive.
    return 'I\'m alive!'
def webMain():
    """Run the Flask status server (blocking) on all interfaces.

    The listen message is logged *before* app.run(): the call blocks for
    the server's lifetime, so the original's post-run log line never
    executed while the server was up.
    """
    logger.info('Listening on port {}'.format(port))
    app.run(host='0.0.0.0', port=port)
def botMain():
    """Run the slack bot: verify DB connectivity, then connect to the RTM
    firehose and process events, retrying after failures.

    Fixes over the previous revision:
      * the retry loop was ``while True`` -- attempts were counted but
        MAX_ATTEMPTS was never enforced; loop now stops at the limit.
      * ``exit(1)`` -> ``sys.exit(1)`` (the site-builtin exit() may be absent).
      * 'Exceptoin' log-message typo corrected.
    """
    try:
        logger.info('Checking DB connection...')
        db = dbopts.db_connect()
        db.close()
        if env.name is not None:
            try:
                logger.debug('connection appears successful, checking tables')
                dbopts.check_tables()
            except Exception as e:
                try:
                    logger.warning(
                        'tables may not exist, attempting to create them now')
                    logger.warning('Exception: {}'.format(e))
                    dbopts.create_karma_table()
                    dbopts.create_also_table()
                except Exception as e:
                    logger.exception('Could not create tables: ')
    except Exception as e:
        logger.critical('DB connection not successful, exiting')
        logger.exception('Failed on:')
        sys.exit(1)
    logger.info('DB connection successful')
    # Connect to the channel and process events.  We retry connection /
    # recovery up to MAX_ATTEMPTS times; broken pipes and generic errors
    # both sleep briefly and reconnect.  A failed rtm_connect() is fatal.
    attempt = 0
    MAX_ATTEMPTS = 500
    while attempt < MAX_ATTEMPTS:
        sc = SlackClient(SLACK_BOT_TOKEN)
        kcache = TimedCache()
        if sc.rtm_connect():
            logger.info('KarmaBoi connected')
            BOT_ID = bot_id(BOT_NAME, sc)
            try:
                logger.info('now doing ~important~ things')
                while True:
                    slack_parse.triage(sc, BOT_ID, kcache)
                    time.sleep(READ_WEBSOCKET_DELAY)
            except BrokenPipeError as e:
                logger.error('connection failed with Broken Pipe')
                logger.error(e)
                logger.error('retrying connection in a few seconds...')
                time.sleep(5)
            except Exception as e:
                logger.error('Connection failed with some other error')
                logger.exception(e)
                logger.error('trying to restore bot in a few seconds')
                time.sleep(5)
        else:
            logger.critical(
                'general bot error: now ending this short life')
            logger.exception("Error found:")
            break
        attempt += 1
        logger.warning('Attempt number {} of {}'.format(attempt, MAX_ATTEMPTS))
    logger.critical('too many failed attempts - shutting down')
if __name__ == "__main__":
    # Run the slack bot and the flask healthcheck server concurrently.
    s = threading.Thread(name='slack_bot', target=botMain)
    f = threading.Thread(name='flask_server', target=webMain)
    s.start()
    f.start()
|
run.py | import multiprocessing as mp
import os, sys, re, time, copy
import configparser
import textwrap
import mentionbot.mentionbot as botentry
ini_file_name = "config.ini"
# These two regexes must both be used to verify a folder name.
re_dirname_fullmatch = re.compile("[a-z0-9_-]+") # This must be full-matched.
re_dirname_once = re.compile("[a-z0-9]") # There must be at least one match.
reconnect_on_error = None
bot_user_token = None
# TODO: Allow user customization of the cache directory's location.
config_defaults = {
"DEFAULT": {
"bot_user_token": "PLACEHOLDER",
"bot_owner_id": "PLACEHOLDER",
},
"error_handling": {
"kill_bot_on_message_exception": "FALSE",
"reconnect_on_error": "TRUE",
"message_bot_owner_on_error": "TRUE",
},
"api_keys": {
"plotly_api_key": "PLACEHOLDER",
"plotly_username": "PLACEHOLDER",
"wolfram_alpha": "PLACEHOLDER",
},
"filenames": {
"cache_folder": "cache",
},
"misc": {
"default_command_prefix": "/",
"message_bot_owner_on_init": "TRUE",
"default_status": "bot is running",
"initialization_status": "bot is initializing",
},
}
# TODO: Consider merging this with the bot utils.py version.
accepted_true_strings = {"true", "yes", "y", "on", "1", "set", "ye" }
accepted_false_strings = {"false", "no", "n", "off", "0", "clear", "clr"}
# Load config.ini, filling in any missing defaults (and writing them back).
# Returns the parsed settings as a plain dict-of-dicts on a successful parse.
# Otherwise, returns None if no login key was found (a template file with
# placeholder values is created for the user to edit).
def ini_load():
    ret = None
    config = configparser.ConfigParser()
    if os.path.isfile(ini_file_name):
        # Parse the config file.
        config.read(ini_file_name)
        # Check if the login data is present.
        missing_login = False
        try:
            config["DEFAULT"]["bot_user_token"] # Attempt an access
        except KeyError as e:
            missing_login = True
        # Fill in default values (section by section, key by key).
        write_back = False
        for (k, v) in config_defaults.items():
            if k in config:
                section = config[k]
                for (k2, v2) in config_defaults[k].items():
                    if not k2 in section:
                        section[k2] = v2
                        write_back = True
            else:
                config[k] = v
                write_back = True
        # Write back to the file if necessary.
        if write_back:
            with open(ini_file_name, "w") as f:
                config.write(f)
        if not missing_login:
            # Makes a deep copy, except with dictionaries.
            ret = {k: {k2: v2 for (k2, v2) in v.items()} for (k, v) in config.items()}
    else:
        # Set up a new file.
        config.read_dict(config_defaults)
        with open(ini_file_name, "w") as f:
            config.write(f)
    return ret
# Converts strings to bools, numbers, dates, etc., as necessary.
# Additionally, does validation by raising exceptions.
# Returns nothing, but raises an exception if an error is found.
def ini_parse(config_dict):
    """Normalize and validate *config_dict* in place.

    Boolean-ish option strings are converted to real bools; the cache
    folder name is checked against the folder-name regexes.  Raises
    ValueError for invalid values.
    """
    # (section, key) pairs whose values must become real booleans.
    must_be_bool = [
        ("error_handling", "kill_bot_on_message_exception"),
        ("error_handling", "reconnect_on_error"),
        ("error_handling", "message_bot_owner_on_error"),
        ("misc", "message_bot_owner_on_init"),
    ]
    def convert_to_bool(key1, key2):
        # Replace config_dict[key1][key2] with a bool, accepting the
        # spellings in accepted_true_strings / accepted_false_strings.
        assert isinstance(key1, str) and isinstance(key2, str)
        val = config_dict[key1][key2]
        assert isinstance(val, str)
        val = val.lower()
        if val in accepted_true_strings:
            val = True
        elif val in accepted_false_strings:
            val = False
        else:
            buf = "{} {} in {} must be 'TRUE' or 'FALSE'."
            raise ValueError(buf.format(key1, key2, ini_file_name))
        config_dict[key1][key2] = val
        return
    for (key1, key2) in must_be_bool:
        convert_to_bool(key1, key2)
        assert isinstance(config_dict[key1][key2], bool)
    # Check cache folder name.
    fname = config_dict["filenames"]["cache_folder"]
    if not re_dirname_once.search(fname):
        raise ValueError("Cache folder name must have at least one lowercase or digit.")
    if not re_dirname_fullmatch.fullmatch(fname):
        raise ValueError("Cache folder name must only be made of up lowercase, digits, underscores, or dashes.")
    return
def run():
    """Entry point: load config, then run the bot in a child process,
    restarting it on abnormal exits when reconnect_on_error is set.
    """
    print("Reading config.ini settings...\n")
    config_dict = ini_load()
    if config_dict is None:
        # First launch: ini_load() just wrote a fresh config.ini template.
        # (Fixed duplicated word: "in in config.ini" -> "in config.ini".)
        buf = textwrap.dedent("""
            This appears to be your first time setting up this bot.
            Please edit the following items in config.ini before relaunching:
                bot_user_token
                bot_owner_id
            Optionally, also fill in the other placeholders to enable further \
functionality.
            """).strip()
        print(buf)
        return
    ini_parse(config_dict)
    reconnect_on_error = config_dict["error_handling"]["reconnect_on_error"]
    while True:
        # Run the bot in a separate process so a crash cannot take down
        # this supervisor loop; give it its own copy of the config.
        config_dict_copy = copy.deepcopy(config_dict)
        proc = mp.Process(target=botentry.run, daemon=True, args=(config_dict_copy,))
        proc.start()
        proc.join()
        ret = proc.exitcode
        print("Bot terminated. Return value: " + str(ret))
        if ret == 0:
            print("Bot has completed execution.")
            return
        if not reconnect_on_error:
            print("reconnect_on_error is disabled.")
            print("Bot has completed execution.")
            return
        print("Abnormal exit. Reconnecting in 10 seconds.")
        time.sleep(10)
        print("Attempting to reconnect...")
# Script entry point: start the supervisor loop.
if __name__ == '__main__':
    run()
|
check_system.py | import threading
from tkinter import *
from tkinter import messagebox
from tkinter.ttk import *
from tkinter import ttk
import time
import json
from PIL import ImageTk, Image
import requests
# Hand-washing steps offered in the combobox, in display order.
lista_opciones = [
    "1_morjarse_manos",
    "2_aplique_jabon",
    "3_palma_con_palma",
    "4_detras_manos",
    "5_entre_dedos",
    "6_detras_dedos",
    "7_pulgares",
    "8_unas",
    "9_munecas",
    "10_enjuaga_seca",
]
def get(url):
    """Perform an HTTP GET against `url` and return (status_code, body_text)."""
    response = requests.get(url, params={}, timeout=10)
    return response.status_code, response.text
def proceso():
    """Background poller: every 2 seconds query the microservices and
    update the GUI state (movement, nearby card, names, MQTT status).

    Each service is polled independently and best-effort: a missing
    service only logs a message and the loop keeps running.
    """
    while True:
        time.sleep(2)
        if not ip_guardada.get():
            print('Info', 'Configure IP')
            continue
        # Movement detector (port 5006).
        try:
            url = 'http://' + txt.get() + ':5006/move'
            rta = get(url)
            # Fixed: the original `if rta[1].find("move"):` was False when
            # "move" sat at index 0 and True (-1) when absent.
            if "move" in rta[1]:
                if json.loads(rta[1])['move'] == 1:
                    hay_movimiento.set(True)
                    lbl_move.configure(image=img_move)
                    lbl_move.image = img_move
                else:
                    hay_movimiento.set(False)
                    lbl_move.configure(image=img_no_move)
                    lbl_move.image = img_no_move
                acc_move.set("Hay movimiento ({})".format(json.loads(rta[1])['acc']))
        except Exception:
            print('Info', 'No se encontró el microservicio 5006')
        # Card reader (port 5004): uuid of the nearest cardholder.
        try:
            url = 'http://' + txt.get() + ':5004/mirror'
            rta = get(url)
            # Same find()-truthiness fix as above.
            if "near" in rta[1]:
                uuid_cardholder.set([k for k in json.loads(rta[1])['near'].keys()][0])
        except Exception:
            print('Info', 'No se encontró el microservicio 5004')
        # Resolve the uuid to a person name (port 5004).
        try:
            url = 'http://' + txt.get() + ':5004/names'
            rta = get(url)
            if len(rta[1]) > 10:
                OBJ = json.loads(rta[1])
                for k, v in OBJ.items():
                    if uuid_cardholder.get() == k:
                        name_person.set(v)
        except Exception:
            print('Info', 'No se encontró el microservicio 5004')
        # MQTT broker status (port 5003).
        try:
            url = 'http://' + txt.get() + ':5003/topics'
            rta = get(url)
            OBJ = json.loads(rta[1])
            keys = [k for k in OBJ.keys()]
            if len(keys) > 1:
                hay_conexion_mqtt.set(True)
                lbl_mqtt.configure(image=mqtt_ON)
                # Fixed copy-paste: keep the image reference on lbl_mqtt
                # (was assigned to lbl_move.image).
                lbl_mqtt.image = mqtt_ON
                acc_mqtt.set("Hay conexion mqtt ({})".format("SI"))
            else:
                hay_conexion_mqtt.set(False)
                lbl_mqtt.configure(image=mqtt_OFF)
                lbl_mqtt.image = mqtt_OFF
                acc_mqtt.set("Hay conexion mqtt ({})".format("NO"))
        except Exception:
            print('Info', 'No se encontró el microservicio 5003')
# Start the poller before the widgets below exist; proceso() sleeps 2 s
# before its first read, which normally gives the GUI time to build.
# NOTE(review): this is a startup race — confirm, or start the thread
# after the widgets are created.
t = threading.Thread(target=proceso)
t.start()
# Main application window.
window = Tk()
window.title("Manitor app")
window.geometry('450x400')
window.resizable(False, False)
def clicked_btn_save_ip():
    # "Guardar ip" button: mark the entered IP as saved (the poller
    # thread reads ip_guardada) and reset the displayed state.
    print("command", txt.get())
    hay_movimiento.set(False)
    name_person.set("")
    ip_guardada.set(True)
    messagebox.showinfo('Info', 'IP guardada correctamente')
def clicked_send_audiovisual():
    """Send the selected hand-washing step to the display (port 5005)
    and audio (port 5002) microservices.  Best-effort: a failure on
    either service only logs a message.
    """
    rta = combo.get()
    # The step id is the numeric prefix before the first underscore,
    # e.g. "3_palma_con_palma" -> 3.  (Renamed from `id`, which
    # shadowed the builtin.)
    step_id = int(rta[:rta.find("_")])
    print("command", step_id)
    try:
        url = 'http://' + txt.get() + ':5005/mostrar?id=' + str(step_id) + "&name=" + name_person.get()
        rta = get(url)
    except Exception:
        print('Info', 'No se encontró el microservicio 5005')
    try:
        url = 'http://' + txt.get() + ':5002/reproduce?id=' + str(step_id)
        rta = get(url)
    except Exception:
        print('Info', 'No se encontró el microservicio 5002')
def cambio_opcion(event):
    # Combobox selection callback; just logs the new choice.
    print("New Element Selected", combo.get())
# Two-tab layout: tab1 = connection settings, tab2 = live status.
tab_control = ttk.Notebook(window)
tab1 = ttk.Frame(tab_control)
tab2 = ttk.Frame(tab_control)
tab_control.add(tab1, text='First')
tab_control.add(tab2, text='Second')
# primera pestaña: IP entry + save button.
txt = Entry(tab1)
txt.place(width=200,height=50)
txt.grid(column=1, row=1)
txt.focus()
ip_guardada = BooleanVar()
# NOTE(review): grid() returns None, so `btn` is None here; the widget
# still works, but the reference is lost.
btn = Button(tab1, text="Guardar ip", command=clicked_btn_save_ip).grid(column=2, row=1)
# segunda pestaña: status images, checkboxes and audiovisual trigger.
# NOTE(review): Image.ANTIALIAS is deprecated (removed in Pillow 10,
# replaced by Image.LANCZOS) — confirm the pinned Pillow version.
img_move = ImageTk.PhotoImage(Image.open("mano_move.png").resize((200, 200), Image.ANTIALIAS))
img_no_move = ImageTk.PhotoImage(Image.open("mano_NO_move.png").resize((200, 200), Image.ANTIALIAS))
lbl_move = Label(tab2, image=img_no_move)
lbl_move.grid(column=1, row=3)
mqtt_ON = ImageTk.PhotoImage(Image.open("mqtt_ON.png").resize((200, 200), Image.ANTIALIAS))
mqtt_OFF = ImageTk.PhotoImage(Image.open("mqtt_off.png").resize((200, 200), Image.ANTIALIAS))
lbl_mqtt = Label(tab2, image=mqtt_OFF)
lbl_mqtt.grid(column=2, row=3)
# Movement indicator checkbox, driven by the poller thread.
hay_movimiento = BooleanVar()
acc_move = StringVar()
acc_move.set("Hay movimiento")
chk = Checkbutton(tab2, textvar=acc_move, var=hay_movimiento)
chk.grid(column=1, row=4)
# MQTT connectivity indicator checkbox.
hay_conexion_mqtt = BooleanVar()
acc_mqtt = StringVar()
acc_mqtt.set("Hay conexion mqtt")
chk_2 = Checkbutton(tab2, textvar=acc_mqtt, var=hay_conexion_mqtt)
chk_2.grid(column=2, row=4)
# Step selector + trigger button.
combo = Combobox(tab2)
combo['values'] = lista_opciones
combo.current(0)
combo.grid(column=1, row=5)
combo.bind("<<ComboboxSelected>>", cambio_opcion)
btn2 = Button(tab2, text="Activar audiovisual", command=clicked_send_audiovisual).grid(column=2, row=5)
# Cardholder uuid and resolved person name, filled by the poller.
uuid_cardholder = StringVar()
lbl = Label(tab2, textvariable=uuid_cardholder)
lbl.grid(column=1, row=6)
name_person = StringVar()
lbl2 = Label(tab2, textvariable=name_person)
lbl2.grid(column=2, row=6)
tab_control.pack(expand=1, fill='both')
window.mainloop()
|
client.py | import grpc
import logging
import queue
import threading
import uuid
from clients.reference.app.battleships_pb2 import Attack, Request, Response, Status
from clients.reference.app.battleships_pb2_grpc import BattleshipsStub
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class Battleship:
    """gRPC Battleships client.

    Public methods queue outgoing Requests; a generator feeds that
    queue into a bidirectional gRPC stream, while a daemon thread reads
    server Responses and dispatches them to registered event handlers.
    """
    # The gRPC turn types mapped onto handler method names
    RESPONSES = {
        Response.State.BEGIN: 'begin',
        Response.State.START_TURN: 'start_turn',
        Response.State.STOP_TURN: 'end_turn',
        Response.State.WIN: 'win',
        Response.State.LOSE: 'lose',
    }
    # The gRPC report states mapped onto handler method names
    STATES = {
        Status.State.MISS: 'miss',
        Status.State.HIT: 'hit',
    }
    # Event names accepted by on()/add_event_listener().
    __supported_events = [
        'begin', 'start_turn', 'end_turn', 'attack',
        'hit', 'miss', 'win', 'lose'
    ]
    def __init__(self, grpc_host='localhost', grpc_port='50051'):
        # Registered event handlers, keyed by event name.
        self.__handlers = {}
        self.__host = grpc_host
        self.__port = grpc_port
        self.__player_id = ''
        # Outgoing message queue consumed by __stream().
        self.__queue = queue.Queue()
        self.__channel = None
        self.__response_thread = None
    def __del__(self):
        # Best-effort cleanup of the gRPC channel.
        if self.__channel is not None:
            self.__channel.close()
    def on(self, event=None):
        """A decorator that is used to register an event handler for a
        given event. This does the same as :meth:`add_event_listener`
        but is intended for decorator usage:
            @client.on(event='attack')
            def on_attack(vector):
                pass
        :param event: The event that the handler should listen for. If
                      this parameter is None, the event is inferred from
                      the handler's name. For instance, to add a handler
                      for `attack` messages, you can simply write:
                          @client.on()
                          def attack(vector):
                              pass
        Handlers that are supported are `begin`, `start_turn`,
        `end_turn`, `attack`, `hit`, `miss`, `win`, `lose`.
        """
        def decorator(f):
            self.add_event_listener(event, f)
            return f
        return decorator
    def add_event_listener(self, event=None, handler=None):
        """Method that is used to register an event handler for a
        given event. See :meth:`on` for a detailed explanation.
        :param event: Event to register handler for
        :param handler: Handler for event
        :raises ValueError: if the event name is not supported
        """
        if event is None:
            event = handler.__name__
        if event not in self.__supported_events:
            raise ValueError(f'Unable to register event {event}!')
        logger.info(f'Registering {handler.__name__} for event "{event}"')
        self.__handlers[event] = handler
    def join(self):
        """This method sets up the client for sending and receiving gRPC
        messages to the server. It then sends a join message to the game
        server to indicate we are ready to play a new game.
        """
        self.__player_id = str(uuid.uuid4())
        logger.info(f'New player: {self.__player_id}')
        self.__channel = grpc.insecure_channel(f'{self.__host}:{self.__port}')
        stub = BattleshipsStub(self.__channel)
        responses = stub.Game(self.__stream())
        # Read server responses on a daemon thread so join() returns
        # immediately.
        self.__response_thread = threading.Thread(
            target=lambda: self.__receive_responses(responses))
        self.__response_thread.daemon = True
        self.__response_thread.start()
        # Everything's set up, so we can now join a game
        self.__send(Request(join=Request.Player(id=self.__player_id)))
    def __send(self, msg):
        """Convenience method that places a message in the queue for
        transmission to the game server.
        """
        self.__queue.put(msg)
    def attack(self, vector):
        """This method sends an Attack message with the associated vector
        to the game server. This method does not do any validation on the
        provided vector, other than that it must be an integer. It is up
        to the caller to determine what the vector should mean.
        :param vector: Vector to send to game server
        :raise ValueError: if vector is None or not an integer
        """
        if vector is None or type(vector) is not int:
            raise ValueError('Parameter vector must be a integer!')
        self.__send(Request(move=Attack(vector=vector)))
    def hit(self):
        """This method indicates to the game server that the received
        attack was a HIT. Oh no!
        """
        self.__send(Request(report=Status(state=Status.State.HIT)))
    def miss(self):
        """This method indicates to the game server that the received
        attack was a MISS. Phew!
        """
        self.__send(Request(report=Status(state=Status.State.MISS)))
    def defeat(self):
        """This method indicates to the game server that the received
        attack was a HIT, which sunk the last of the remaining ships.
        In other words: Game Over. Too bad.
        """
        self.__send(Request(report=Status(state=Status.State.DEFEAT)))
    def __stream(self):
        """Return a generator of outgoing gRPC messages.
        Blocks on the queue; a None entry terminates the stream.
        :return: a gRPC message generator
        """
        while True:
            s = self.__queue.get()
            if s is not None:
                logger.info(f'{self.__player_id} - Sending {s}')
                yield s
            else:
                return
    def __receive_responses(self, in_stream):
        """Receive response from the gRPC in-channel.
        Runs on the daemon thread until the server closes the stream.
        :param in_stream: input channel to handle
        """
        while True:
            try:
                response = next(in_stream)
                logger.info(f'{self.__player_id} - Received {response}')
                self.__handle_response(response)
            except StopIteration:
                return
    def __handle_response(self, msg):
        """This method handles the actual response coming from the game
        server.  Dispatches on the oneof field set in the message.
        :param msg: Message received from the game server
        """
        which = msg.WhichOneof('event')
        if which == 'turn':
            if msg.turn in self.RESPONSES:
                self.__exc_callback(self.RESPONSES[msg.turn])
            else:
                logger.error('Response contains unknown state!')
        elif which == 'move':
            self.__exc_callback('attack', msg.move.vector)
        elif which == 'report':
            if msg.report.state in self.STATES:
                self.__exc_callback(self.STATES[msg.report.state])
            else:
                logger.error('Report contains unknown state!')
        else:
            logger.error('Got unknown response type!')
    def __exc_callback(self, *args):
        """Convenience method that calls the appropriate callback
        function if it has been registered.
        """
        cmd = args[0]
        if cmd in self.__handlers:
            if len(args) == 1:
                self.__handlers[cmd]()
            else:
                # NOTE(review): the handler receives the remaining args as a
                # single tuple (args[1:]), not unpacked — confirm handlers
                # expect e.g. (vector,) rather than a bare vector.
                self.__handlers[cmd](args[1:])
|
tests.py | # -*- coding: utf-8 -*-
import multiprocessing
import os
import re
import threading
import time
import unittest
from lua_sandbox.executor import LuaException
from lua_sandbox.executor import LuaInvariantException
from lua_sandbox.executor import LuaOutOfMemoryException
from lua_sandbox.executor import LuaSyntaxError
from lua_sandbox.executor import SandboxedExecutor
from lua_sandbox.executor import check_stack
from lua_sandbox.executor import _executor
from lua_sandbox.executor import Capsule
class SimpleSandboxedExecutor(object):
    """Thin convenience wrapper around SandboxedExecutor: load a
    program, inject an environment into the sandbox, run it, and
    return the results as plain Python values."""
    def __init__(self, name=None, **kw):
        self.lua = SandboxedExecutor(name=name, **kw)
    def execute(self, program, env=None):
        """Run `program` in the sandbox with `env` names injected.

        :param program: Lua source to load and execute.
        :param env: optional mapping of names to expose as sandbox
            globals.  (Fixed: was a mutable default argument ``env={}``.)
        :return: tuple of the program's return values as Python objects.
        """
        loaded = self.lua.sandboxed_load(program)
        for k, v in (env or {}).items():
            self.lua.sandbox[k] = v
        return tuple(x.to_python() for x in loaded())
def skip_if_luajit(fn):
    """Decorator: mark `fn` as skipped when running under LuaJIT
    (which reports the Lua 5.1 version number)."""
    on_luajit = _executor.LUA_VERSION_NUM == 501
    return unittest.skip("unsupported on luajit")(fn) if on_luajit else fn
def only_on_luajit(fn):
    """Decorator: mark `fn` as skipped unless running under LuaJIT
    (which reports the Lua 5.1 version number)."""
    on_luajit = _executor.LUA_VERSION_NUM == 501
    return fn if on_luajit else unittest.skip("only supported on luajit")(fn)
class TestLuaExecution(unittest.TestCase):
    """Core behavioral tests for the sandboxed Lua executor: value
    round-tripping, capsules, callbacks, and error propagation.

    Modernized: deprecated ``assertEquals`` aliases replaced with
    ``assertEqual`` and ``assertTrue(False)`` sentinels with
    ``self.fail()``; behavior of the tests is unchanged.
    """
    def setUp(self, *a, **kw):
        self.ex = SimpleSandboxedExecutor(name=self.id(),
                                          max_memory=None)
    def test_basics1(self):
        program = """
            return 1
        """
        self.assertEqual(self.ex.execute(program, {}),
                         (1.0,))
    def test_basics2(self):
        program = """
            return a, b, a+b
        """
        self.assertEqual(self.ex.execute(program, {'a': 1, 'b': 2}),
                         (1.0, 2.0, 3.0))
    def test_basics3(self):
        program = """
            foo = {}
            while #foo < 5 do
                foo[#foo+1] = #foo+1
            end
            return foo
        """
        self.assertEqual(self.ex.execute(program, {}),
                         ({1.0: 1.0,
                           2.0: 2.0,
                           3.0: 3.0,
                           4.0: 4.0,
                           5.0: 5.0},))
    def test_check_stack(self):
        # we rely on the @check_stack decorator a lot to detect stack leaks, so
        # make sure it at least works
        def _fn(executor):
            executor.create_table()._bring_to_top(False)
        with self.assertRaises(LuaException):
            check_stack(0, 0)(_fn)(self.ex.lua)
    def test_parse_error(self):
        program = "()code"
        with self.assertRaises(LuaSyntaxError):
            self.ex.lua.load(program)
    def test_serialize_deserialize(self):
        program = """
            return foo
        """
        input_data = {
            1: 2,
            2: 3,
            4: "abc",
            5: ('a', 'b', 'c'),
            5.5: ['a', 'b', 'c'],
            6: 6.5,
            7: None,
            8: '',
            9: True,
            10: False,
            11: {'a': {1: 2}}
        }
        expected_output = {
            1.0: 2.0,
            2.0: 3.0,
            4.0: "abc",
            5.0: {1.0: 'a', 2.0: 'b', 3.0: 'c'},
            5.5: {1.0: 'a', 2.0: 'b', 3.0: 'c'},
            6.0: 6.5,
            # 7 disappears
            8.0: '',
            9.0: True,
            10.0: False,
            11.0: {'a': {1.0: 2.0}}
        }
        self.assertEqual(self.ex.execute(program,
                                         {'foo': input_data}),
                         (expected_output,))
    def test_serialize_unicode(self):
        program = """
            return data
        """
        english = 'hello'
        chinese = u'你好'
        input_data = {english: chinese}
        self.assertEqual(self.ex.execute(program,
                                         {'data': input_data}),
                         ({english: chinese.encode('utf8')},))
    def test_no_weird_python_types(self):
        program = """
            return foo
        """
        with self.assertRaises(TypeError):
            self.ex.execute(program, {'foo': object()})
        with self.assertRaises(ValueError):
            # recursive structure
            d = {}
            d['foo'] = d
            self.ex.execute(program, {'foo': d})
    def test_capsule_return(self):
        program = """
            return capsule
        """
        obj = object()
        capsule = Capsule(obj)
        ret = self.ex.execute(program, {'capsule': capsule})
        self.assertIs(obj, ret[0])
    def test_capsule_caches(self):
        program = """
            first_time = capsule.property
            update_value()
            second_time = capsule.property
            return first_time, second_time
        """
        d = {'property': 'foo'}
        capsule = Capsule(d, cache=True)
        def update_value():
            d['property'] = 'bar'
        ret = self.ex.execute(program, {'capsule': capsule,
                                        'update_value': update_value})
        self.assertEqual(ret, ('foo', 'foo'))
    def test_capsule_no_caches(self):
        program = """
            first_time = capsule.property
            update_value()
            second_time = capsule.property
            return first_time, second_time
        """
        d = {'property': 'foo'}
        capsule = Capsule(d, cache=False)
        def update_value():
            d['property'] = 'bar'
        ret = self.ex.execute(program, {'capsule': capsule,
                                        'update_value': update_value})
        self.assertEqual(ret, ('foo', 'bar'))
    def test_capsule_return_pass_arg(self):
        success = []
        orig = object()
        capsule = Capsule(orig)
        def _fn(cap):
            success.append(cap)
            return Capsule(cap)
        program = """
            return foo(capsule)
        """
        ret = self.ex.execute(program, {'foo': _fn, 'capsule': capsule})
        self.assertEqual(success, [orig])
        self.assertEqual(ret, (orig,))
    def test_capsule_index(self):
        data = {'foo': 5, 'bar': {'baz': 10}, 'str1': 'str2'}
        program = """
            return data.foo, data.bar.baz, data.str1, data.notthere
        """
        ret = self.ex.execute(program, {'data': Capsule(data)})
        self.assertEqual(ret, (5.0, 10.0, 'str2', None))
    def test_capsule_none(self):
        program = "return data"
        ret = self.ex.execute(program, {'data': Capsule(None)})
        self.assertEqual(ret, (None,))
    def test_capsule_lazy(self):
        loaded = self.ex.lua.sandboxed_load("""
            return data.more_data
        """)
        self.ex.lua.sandbox['data'] = Capsule(dict(
            data=1,
            more_data="string",
            # if the capsule isn't lazy, this will keep it from being
            # passed in
            something_non_serialiseable=object(),
        ))
        result = [x.to_python() for x in loaded()]
        self.assertEqual(result, ["string",])
    def test_function_noargs(self):
        program = """
            return foo()
        """
        ret = self.ex.execute(program, {'foo': lambda: 5})
        self.assertEqual((5.0,), ret)
    def test_function_passing(self):
        closed = []
        def closure(argument1, argument2):
            closed.append(argument1)
            closed.append(argument2)
            return argument1 ** argument2, argument2 ** argument1
        program = """
            a = 1+3
            return foo(2, 3)
        """
        ret = self.ex.execute(program, {'foo': closure})
        self.assertEqual(({1.0: 8.0, 2.0: 9.0},), ret)
        self.assertEqual([2.0, 3.0], closed)
    def test_function_args(self):
        program = """
            local function multiplier(a, b)
                return a*b
            end
            return multiplier
        """
        loaded = self.ex.lua.load(program)
        func = loaded()[0]
        multiplied = func(3, 7)
        self.assertEqual([21.0], [x.to_python() for x in multiplied])
    def test_method_passing(self):
        class MyObject(object):
            def double(self, x):
                return x*2
        program = """
            return doubler(4)
        """
        ret = self.ex.execute(program, {'doubler': MyObject().double})
        self.assertEqual((8.0,), ret)
    def test_regular_exception(self):
        program = """
            error("I'm an error!")
        """
        try:
            self.ex.execute(program)
        except LuaException as e:
            self.assertEqual(str(e), 'LuaStateException(\'[string "Lua"]:2: I\\\'m an error!\')')
        else:
            self.fail()
    def test_number_exception(self):
        program = """
            error(3.14159)
        """
        try:
            self.ex.execute(program)
        except LuaException as e:
            self.assertEqual(str(e), 'LuaStateException(\'[string "Lua"]:2: 3.14159\')')
            # lua doesn't thread the original number back, it coerces to a
            # string
            self.assertEqual(e.lua_value.to_python(), '[string "Lua"]:2: 3.14159')
        else:
            self.fail()
    def test_table_exception(self):
        program = """
            error({['whole message']= 'this is my message'})
        """
        try:
            self.ex.execute(program)
        except LuaException as e:
            self.assertEqual(e.lua_value.to_python(), {
                'whole message': 'this is my message',
            })
        else:
            self.fail()
    def test_pyfunction_exception(self):
        program = """
            return foo("hello")
        """
        class MyException(Exception): pass
        def bad_closure(x):
            raise MyException("nuh uh")
        try:
            self.ex.execute(program, {'foo': bad_closure})
        except LuaException as e:
            self.assertIsInstance(e.lua_value.to_python(), MyException)
            self.assertIsInstance(e.__cause__, MyException)
        else:
            self.fail()
    def test_pyobject_exception(self):
        obj = object()
        program = """
            error(foo)
        """
        try:
            self.ex.execute(program, {'foo': Capsule(obj)})
        except LuaException as e:
            self.assertIs(e.lua_value.to_python(), obj)
        else:
            self.fail()
    def test_assertions(self):
        program = """
            assert(false)
        """
        with self.assertRaises(LuaException):
            self.ex.execute(program)
        program = """
            error("nuh uh")
        """
        with self.assertRaises(LuaException):
            self.ex.execute(program)
    def test_setitem(self):
        program = "return x"
        # round trip
        self.ex.lua['x'] = 5
        self.assertEqual(self.ex.lua['x'].to_python(), 5.0)
        # nils
        self.assertEqual(self.ex.lua['bar'].type_name(), 'nil')
        self.assertEqual(self.ex.lua['bar'].to_python(), None)
        # loaded code can get to it
        loaded = self.ex.lua.load(program)
        returns = [x.to_python() for x in loaded()]
        self.assertEqual(returns, [5.0])
    def test_createtable(self):
        t = self.ex.lua.create_table()
        t['foo'] = 5
        self.assertEqual(t['foo'].to_python(), 5.0)
        self.assertEqual(t['bar'].type_name(), 'nil')
        self.assertEqual(t['bar'].to_python(), None)
        self.assertTrue(t['bar'].is_nil())
        with self.assertRaises(TypeError):
            x = t['foo']['bar']
        with self.assertRaises(TypeError):
            t['foo']['bar'] = 5
    def test_buffer_interface(self):
        # we can get a buffer into a Lua string and, and re.search works on
        # that buffer
        s = b""" return "this is my string" """
        loaded = self.ex.lua.load(s)
        lua_string, = loaded()
        with lua_string.as_buffer() as buff:
            self.assertEqual(re.search('my (string.*)', buff).groups(),
                             ('string',))
            self.assertEqual(len(buff), len('this is my string'))
        # this is the regular callback way with lots of copies, here mostly for
        # demonstration
        def slow_search(pat, x):
            result = re.search(pat, x)
            if result:
                return result.groups()
        ret = self.ex.execute(""" return re.search("(string)", str)[1] """,
                              {
                                  're': {'search': slow_search},
                                  'str': 'this is my string'
                              })
        self.assertEqual(ret, ('string',))
        # and this is a faster callback which can avoid the copy on the `x`
        # argument
        def fast_search(pat, x):
            pat = pat.to_python()
            with x.as_buffer() as buff:
                result = re.search(pat, buff)
                if result:
                    # this does still copy if there is a match
                    return result.groups()
        ret = self.ex.execute(""" return re.search("(string)", str)[1] """,
                              {
                                  're': {
                                      'search': Capsule(fast_search,
                                                        raw_lua_args=True),
                                  },
                                  'str': 'this is my string'
                              })
        self.assertEqual(ret, ('string',))
class TestSafeguards(TestLuaExecution):
    """Re-runs the whole TestLuaExecution suite under a 5 MiB memory
    cap, and adds tests for the sandbox safety limits themselves."""
    def setUp(self, *a, **kw):
        self.ex = SimpleSandboxedExecutor(name=self.id(),
                                          max_memory=5*1024*1024)
    def test_memory(self):
        def _tester(program):
            start_time = time.time()
            with self.assertRaises(LuaOutOfMemoryException):
                self.ex.execute(program)
            # the limit must trigger quickly, not after thrashing
            self.assertLess(time.time()-start_time, 1.1)
        _tester("""
            foo = {}
            while #foo < 500000000 do
                foo[#foo+1] = 1
            end
            return 1
        """)
    def test_memory_used(self):
        self.ex.lua['some_var'] = '*'*(1024*1024)
        self.assertGreater(self.ex.lua.memory_used, 1024*1024)
    def test_timeout(self):
        def _tester(program):
            start_time = time.time()
            with self.assertRaises(LuaException):
                with self.ex.lua.limit_runtime(0.5, disable_jit=True):
                    self.ex.execute(program)
            # the 0.5s limit must cut off the infinite loop promptly
            self.assertLess(time.time()-start_time, 0.7)
        _tester("""
            foo = {}
            while true do
            end
            return 1
        """)
        with self.ex.lua.limit_runtime(1.0, disable_jit=True):
            # make sure the limiter doesn't just always trigger
            self.ex.execute("return 5")
    def test_no_print(self):
        # make sure we didn't pass any libraries to the client program
        program = """
            print(foo)
            return 0
        """
        with self.assertRaises(LuaException):
            self.ex.execute(program, {'foo':0})
    @unittest.skip("allowing this for now")
    def test_no_patterns(self):
        # there are some lua pattern operations you can do that are super slow,
        # so we block them entirely in the SimpleSandboxingExecutor
        program = """
            return string.find(("a"):rep(1e4), ".-.-.-.-b$")
        """
        started = time.time()
        with self.assertRaises(LuaException):
            self.ex.execute(program)
        self.assertLess(time.time() - started, 1.0)
    def test_have_good_libs(self):
        # make sure we did pass the libraries that are okay
        program = """
            math.abs(-1)
            table.sort({})
            return foo
        """
        self.ex.execute(program, {'foo': 0})
    @skip_if_luajit
    def test_luajit_not_present(self):
        lua = self.ex.lua
        self.assertEqual(lua.jit_mode(), None)
    @only_on_luajit
    def test_luajit_present(self):
        lua = self.ex.lua
        self.assertNotEqual(lua.jit_mode(), None)
    @only_on_luajit
    def test_luajit_mode(self):
        # exercise the JIT-mode toggles; assertions are just "no crash"
        lua = self.ex.lua
        lua.jit_mode().compiler_mode(True)
        lua.jit_mode().compiler_mode(False)
        lua.jit_mode().flush_compiler()
class TestReusingExecutor(TestLuaExecution):
    """Re-runs the TestLuaExecution suite against one executor created
    at construction time, to exercise reuse of a single Lua state."""
    def __init__(self, *a, **kw):
        unittest.TestCase.__init__(self, *a, **kw)
        # One executor for the lifetime of this test case instance.
        self.ex = SimpleSandboxedExecutor()
    def setUp(self):
        # Deliberately no per-test reset: reuse is the point.
        pass
# Entry point.  With LEAKTEST set in the environment, hammer the suite
# from one thread per CPU forever while printing pympler memory diffs;
# otherwise run the suite once normally.
if __name__ == '__main__':
    if os.environ.get('LEAKTEST', False):
        from pympler import tracker
        tr = tracker.SummaryTracker()
        def _fn():
            # Run the whole suite repeatedly in this thread.
            for _ in range(10):
                unittest.main(verbosity=0, exit=False)
        while True:
            threads = []
            for _ in range(multiprocessing.cpu_count()):
                t = threading.Thread(target=_fn)
                t.daemon = True
                t.start()
                threads.append(t)
            for t in threads:
                t.join()
            # Drop references before measuring so finished threads can
            # be collected and don't show up in the diff.
            del t
            del threads
            tr.print_diff()
    else:
        unittest.main()
|
Wallet.py | #!/usr/bin/env python3
##########################################
# Duino-Coin Tkinter GUI Wallet (v2.52)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
import sys
from base64 import b64decode, b64encode
from configparser import ConfigParser
from datetime import datetime
from json import loads
from json import loads as jsonloads
from locale import getdefaultlocale
from os import _exit, execl, mkdir
from os import name as osname
from os import path, system
from pathlib import Path
from socket import socket
from sqlite3 import connect as sqlconn
import subprocess
from threading import Thread, Timer
from time import sleep, time
from tkinter import (BOTH, END, LEFT, RIGHT, Button, Checkbutton, E, Entry,
Frame, IntVar, Label, Listbox, N, PhotoImage, S,
Scrollbar, StringVar, Tk, Toplevel, W, messagebox, ttk)
from tkinter.font import Font
from urllib.request import urlopen, urlretrieve
from webbrowser import open_new_tab
from requests import get
# Version number
VERSION = 2.52
# Colors
BACKGROUND_COLOR = "#121212"
FONT_COLOR = "#fffdee"
FOREGROUND_COLOR = "#ff9f43"
FOREGROUND_COLOR_SECONDARY = "#fdcb6e"
# Minimum transaction amount to be saved
MIN_TRANSACTION_VALUE = 0.00000000001
# Minimum transaction amount to show a notification
MIN_TRANSACTION_VALUE_NOTIFY = 0.5
# Resources folder location
resources = "Wallet_" + str(VERSION) + "_resources/"
# PBKDF2 work factor used when encrypting the stored passphrase.
ENCRYPTION_ITERATIONS = 100_000
config = ConfigParser()
# Mutable runtime state shared by the GUI callbacks and timers.
wrong_passphrase = False
global_balance = 0
oldbalance = 0
balance = 0
unpaid_balance = 0
profitCheck = 0
curr_bal = 0
# Duino-Coin master server websocket endpoint.
WS_URI = "wss://server.duinocoin.com:15808"
def install(package):
    # Install a missing dependency with pip, then restart this process
    # (execl replaces the current image) so the fresh import is usable.
    subprocess.check_call([sys.executable, "-m", "pip", "install", package])
    execl(sys.executable, sys.executable, *sys.argv)
def get_duco_price():
    """Fetch the current DUCO price into the global duco_fiat_value,
    falling back to 0.003 on any failure, and re-schedule itself to
    run again in 30 seconds.

    Fixed: the HTTP request itself is now inside the try block (a
    network error used to propagate and break the Timer chain) and
    carries a timeout so a stalled request can't hang the chain.
    """
    global duco_fiat_value
    try:
        jsonapi = get(
            "https://raw.githubusercontent.com/"
            + "revoxhere/"
            + "duco-statistics/master/"
            + "api.json",
            data=None,
            timeout=15)
        if jsonapi.status_code == 200:
            content = jsonapi.content.decode()
            contentjson = loads(content)
            duco_fiat_value = round(float(contentjson["Duco price"]), 4)
        else:
            duco_fiat_value = 0.003
    except Exception:
        duco_fiat_value = 0.003
    Timer(30, get_duco_price).start()
def title(title):
    """Set the terminal window title: the `title` command on Windows,
    otherwise the xterm OSC escape sequence."""
    if osname != "nt":
        print("\33]0;" + title + "\a", end="")
        sys.stdout.flush()
    else:
        system("title " + title)
def _derive_key(
        password: bytes,
        salt: bytes,
        iterations: int = ENCRYPTION_ITERATIONS) -> bytes:
    """Derive a urlsafe-base64 Fernet key from `password` and `salt`
    with PBKDF2-HMAC-SHA256.

    Fixed: the `iterations` argument is now actually used; previously
    the module-level ENCRYPTION_ITERATIONS was always applied, so
    tokens recorded with a different iteration count could not be
    decrypted correctly.
    """
    kdf = PBKDF2HMAC(
        algorithm=hashes.SHA256(),
        length=32,
        salt=salt,
        iterations=iterations,
        backend=backend)
    return b64e(kdf.derive(password))
def password_encrypt(
        message: bytes,
        password: str,
        iterations: int = ENCRYPTION_ITERATIONS) -> bytes:
    """Encrypt `message` with a key derived from `password`.

    Output (base64-encoded): 16-byte random salt || 4-byte big-endian
    iteration count || raw Fernet ciphertext — the layout
    password_decrypt expects.

    Fixed: the `iterations` argument is now used for both key
    derivation and the recorded count instead of being silently
    ignored in favor of the module-level default.
    """
    salt = secrets.token_bytes(16)
    key = _derive_key(
        password.encode(),
        salt,
        iterations)
    return b64e(
        b"%b%b%b" % (
            salt,
            iterations.to_bytes(4, "big"),
            b64d(Fernet(key).encrypt(message))))
def password_decrypt(
        token: bytes,
        password: str) -> bytes:
    """Decrypt a token produced by password_encrypt.

    Splits the decoded token into its 16-byte salt, 4-byte iteration
    count and ciphertext, re-derives the key, and returns plaintext.
    """
    decoded = b64d(token)
    salt = decoded[:16]
    iterations = int.from_bytes(decoded[16:20], "big")
    ciphertext = b64e(decoded[20:])
    key = _derive_key(
        password.encode(),
        salt,
        iterations)
    return Fernet(key).decrypt(ciphertext)
def get_string(string_name):
    """Look up a UI string for the active language, falling back to
    English, then to a visible placeholder."""
    for language in (lang, "english"):
        if string_name in lang_file[language]:
            return lang_file[language][string_name]
    return "String not found: " + string_name
# Hyperlink callbacks: each opens the relevant page in the default
# browser.  `handler` is the (unused) Tk event object.
def openTos(handler):
    open_new_tab("https://github.com/revoxhere/duino-coin#terms-of-usage")
def openGitHub(handler):
    open_new_tab("https://github.com/revoxhere/duino-coin")
def openWebsite(handler):
    open_new_tab("https://duinocoin.com")
def openExchange(handler):
    open_new_tab("https://revoxhere.github.io/duco-exchange/")
def openDiscord(handler):
    open_new_tab("https://discord.com/invite/kvBkccy")
def openTransaction(hashToOpen):
    # Opens the block explorer pre-filtered to the given tx hash.
    open_new_tab("https://explorer.duinocoin.com/?search="+str(hashToOpen))
class LoginFrame(Frame):
def __init__(self, master):
super().__init__(master)
master.title("Login")
master.resizable(False, False)
TEXT_FONT_BOLD = Font(size=12, weight="bold")
TEXT_FONT = Font(size=12, weight="normal")
self.duco = ImageTk.PhotoImage(Image.open(resources + "duco.png"))
self.duco.image = self.duco
self.ducoLabel = Label(
self, background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
image=self.duco)
self.ducoLabel2 = Label(
self,
background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
text=get_string("welcome_message"),
font=TEXT_FONT_BOLD)
self.spacer = Label(self)
self.label_username = Label(
self,
text=get_string("username"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
padx=5)
self.label_password = Label(
self,
text=get_string("passwd"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
padx=5)
self.entry_username = Entry(
self,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
self.entry_password = Entry(
self,
show="*",
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
self.ducoLabel.grid(
row=0,
sticky="nswe",
pady=(5, 0),
padx=(5))
self.ducoLabel2.grid(
row=1,
sticky="nswe",
padx=(5))
self.label_username.grid(
row=4,
sticky=W,
pady=(5, 0))
self.entry_username.grid(
row=5,
sticky=N,
padx=(5))
self.label_password.grid(
row=6,
sticky=W)
self.entry_password.grid(
row=7,
sticky=N)
self.logbtn = Button(
self,
text=get_string("login"),
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
command=self._login_btn_clicked,
font=TEXT_FONT_BOLD)
self.logbtn.grid(
columnspan=2,
sticky="nswe",
padx=(5),
pady=(5, 1))
self.regbtn = Button(
self,
text=get_string("register"),
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
command=self._register_btn_clicked,
font=TEXT_FONT_BOLD)
self.regbtn.grid(
columnspan=2,
sticky="nswe",
padx=(5),
pady=(0, 5))
self.configure(background=BACKGROUND_COLOR)
self.master.bind(
"<Return>",
self._login_btn_clicked_bind)
self.pack()
def _login_btn_clicked_bind(self, event):
self._login_btn_clicked()
def _login_btn_clicked(self):
global username, password
username = self.entry_username.get()
password = self.entry_password.get()
if username and password:
soc = websocket.create_connection(WS_URI)
soc.recv()
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password),
encoding="utf8"))
response = soc.recv().rstrip("\n")
response = response.split(",")
if response[0] == "OK":
passwordEnc = b64encode(bytes(password, encoding="utf8"))
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute(
"""INSERT INTO
UserData(username, password, useWrapper)
VALUES(?, ?, ?)""",
(username, passwordEnc, "False"))
con.commit()
root.destroy()
else:
messagebox.showerror(
title=get_string("login_error"),
message=response[1])
else:
messagebox.showerror(
title=get_string("login_error"),
message=get_string("fill_the_blanks_warning"))
def _registerprotocol(self):
emailS = email.get()
usernameS = username.get()
passwordS = password.get()
confpasswordS = confpassword.get()
if emailS and usernameS and passwordS and confpasswordS:
if passwordS == confpasswordS:
soc = websocket.create_connection(WS_URI)
soc.recv()
soc.send(
bytes(
"REGI,"
+ str(usernameS)
+ ","
+ str(passwordS)
+ ","
+ str(emailS),
encoding="utf8"))
response = soc.recv().rstrip("\n")
response = response.split(",")
if response[0] == "OK":
messagebox.showinfo(
title=get_string("registration_success"),
message=get_string("registration_success_msg"))
register.destroy()
execl(sys.executable, sys.executable, *sys.argv)
else:
messagebox.showerror(
title=get_string("register_error"),
message=response[1])
else:
messagebox.showerror(
title=get_string("register_error"),
message=get_string("error_passwd_dont_match"))
else:
messagebox.showerror(
title=get_string("register_error"),
message=get_string("fill_the_blanks_warning"))
    def _register_btn_clicked(self):
        """Tear down the login window and build the registration window.

        Creates the module-level Entry widgets (username, password,
        confpassword, email) read later by ``_registerprotocol``, plus
        a clickable terms-of-service notice.
        """
        global username, password, confpassword, email, register
        root.destroy()
        register = Tk()
        register.title(get_string("register"))
        register.resizable(False, False)
        TEXT_FONT_BOLD = Font(
            register,
            size=12,
            weight="bold")
        TEXT_FONT = Font(
            register,
            size=12,
            weight="normal")
        tos_warning = get_string("register_tos_warning")
        import textwrap
        # Re-wrap the translated notice to a narrow column so it fits
        # the window regardless of the original line lengths.
        tos_warning = textwrap.dedent(tos_warning)
        tos_warning = "\n".join(l for line in tos_warning.splitlines()
                                for l in textwrap.wrap(line, width=20))
        duco = ImageTk.PhotoImage(Image.open(resources + "duco.png"))
        duco.image = duco  # keep a reference so Tk doesn't GC the image
        ducoLabel = Label(
            register,
            background=FOREGROUND_COLOR,
            foreground=FONT_COLOR,
            image=duco)
        ducoLabel.grid(
            row=0,
            padx=5,
            pady=(5, 0),
            sticky="nswe")
        ducoLabel2 = Label(
            register,
            background=FOREGROUND_COLOR,
            foreground=FONT_COLOR,
            text=get_string("register_on_network"),
            font=TEXT_FONT_BOLD)
        ducoLabel2.grid(row=1,
                        padx=5,
                        sticky="nswe")
        # Hover handlers: tint the ToS notice blue to hint it's a link.
        def colorLabelBlue(handler):
            ducoLabel3.configure(foreground="#6c5ce7")
        def colorLabelNormal(handler):
            ducoLabel3.configure(foreground=FONT_COLOR)
        ducoLabel3 = Label(
            register,
            background=FOREGROUND_COLOR,
            foreground=FONT_COLOR,
            text=tos_warning,
            font=TEXT_FONT)
        ducoLabel3.grid(
            row=2,
            padx=5,
            sticky="nswe")
        ducoLabel3.bind("<Button-1>", openTos)
        ducoLabel3.bind("<Enter>", colorLabelBlue)
        ducoLabel3.bind("<Leave>", colorLabelNormal)
        Label(
            register,
            text=get_string("username").upper(),
            background=BACKGROUND_COLOR,
            foreground=FONT_COLOR,
            font=TEXT_FONT_BOLD,
        ).grid(
            row=3,
            sticky=W,
            padx=5,
            pady=(5, 0))
        username = Entry(
            register,
            font=TEXT_FONT,
            background=BACKGROUND_COLOR,
            foreground=FOREGROUND_COLOR_SECONDARY)
        username.grid(
            row=4,
            padx=5)
        Label(
            register,
            text=get_string("passwd").upper(),
            background=BACKGROUND_COLOR,
            foreground=FONT_COLOR,
            font=TEXT_FONT_BOLD,
        ).grid(
            row=5,
            sticky=W,
            padx=5)
        password = Entry(
            register,
            show="*",
            font=TEXT_FONT,
            background=BACKGROUND_COLOR,
            foreground=FOREGROUND_COLOR_SECONDARY)
        password.grid(
            row=6,
            padx=5)
        Label(
            register,
            text=get_string("confirm_passwd").upper(),
            background=BACKGROUND_COLOR,
            foreground=FONT_COLOR,
            font=TEXT_FONT_BOLD,
        ).grid(
            row=7,
            sticky=W,
            padx=5)
        confpassword = Entry(
            register,
            show="*",
            font=TEXT_FONT,
            background=BACKGROUND_COLOR,
            foreground=FOREGROUND_COLOR_SECONDARY)
        confpassword.grid(
            row=8,
            padx=5)
        Label(
            register,
            text=get_string("email").upper(),
            background=BACKGROUND_COLOR,
            foreground=FONT_COLOR,
            font=TEXT_FONT_BOLD,
        ).grid(
            row=9,
            sticky=W,
            padx=5)
        email = Entry(
            register,
            font=TEXT_FONT,
            background=BACKGROUND_COLOR,
            foreground=FOREGROUND_COLOR_SECONDARY)
        email.grid(
            row=10,
            padx=5)
        self.logbtn = Button(
            register,
            text=get_string("register"),
            activebackground=BACKGROUND_COLOR,
            foreground=FOREGROUND_COLOR,
            background=BACKGROUND_COLOR,
            command=self._registerprotocol,
            font=TEXT_FONT_BOLD)
        self.logbtn.grid(
            columnspan=2,
            sticky="nswe",
            padx=(5, 5),
            pady=(5, 5))
        register.configure(background=BACKGROUND_COLOR)
def loading_window():
    """Build and display the splash screen shown while the wallet
    starts up.

    Sets the module-level ``loading`` (the splash Tk root, destroyed
    later by the Wallet class) and ``status`` (a Label other code
    updates with progress messages).
    """
    global loading, status
    loading = Tk()
    loading.resizable(False, False)
    loading.configure(background=BACKGROUND_COLOR)
    loading.title(get_string("loading"))
    try:
        loading.iconphoto(True,
                          PhotoImage(file=resources + "duco_color.png"))
    except Exception:
        pass  # the icon is cosmetic; ignore platforms/files that fail
    TEXT_FONT = Font(loading,
                     size=10,
                     weight="bold")
    TEXT_FONT_BOLD = Font(loading,
                          size=14,
                          weight="bold")
    original = Image.open(resources + "duco_color.png")
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same
    # resampling filter under its current name (Pillow >= 2.7).
    resized = original.resize((128, 128), Image.LANCZOS)
    github = ImageTk.PhotoImage(resized)
    github.image = github  # keep a reference so Tk doesn't GC the image
    githubLabel = Label(loading,
                        image=github,
                        background=BACKGROUND_COLOR,
                        foreground=FONT_COLOR)
    githubLabel.grid(row=0,
                     column=0,
                     sticky=N + S + E + W,
                     pady=(5, 0),
                     padx=(5))
    Label(
        loading,
        text=get_string("duino_coin_wallet"),
        font=TEXT_FONT_BOLD,
        foreground=FOREGROUND_COLOR,
        background=BACKGROUND_COLOR,
    ).grid(
        row=1,
        column=0,
        sticky=S + W,
        pady=(5, 0),
        padx=5)
    loading.update()
    status = Label(
        loading,
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR,
        text=get_string("loading_database"),
        font=TEXT_FONT)
    status.grid(
        row=2,
        column=0,
        sticky=S + W,
        pady=(0, 5),
        padx=5)
    loading.update()
def transactions_window(handler):
    """Open a Toplevel listing the recent transactions cached in the
    global ``gtxl`` dict; clicking an entry opens it in the explorer.

    ``handler`` is the Tk event object (unused).
    """
    transactionsWindow = Toplevel()
    transactionsWindow.resizable(False, False)
    transactionsWindow.title(get_string("wallet_transactions"))
    transactionsWindow.transient([root])
    transactionsWindow.configure(background=BACKGROUND_COLOR)
    TEXT_FONT_BOLD_LARGE = Font(
        transactionsWindow,
        size=14,
        weight="bold")
    TEXT_FONT = Font(
        transactionsWindow,
        size=12,
        weight="normal")
    Label(
        transactionsWindow,
        text=get_string("transaction_list"),
        font=TEXT_FONT_BOLD_LARGE,
        background=BACKGROUND_COLOR,
        foreground=FOREGROUND_COLOR,
    ).grid(row=0,
           column=0,
           columnspan=2,
           sticky=S + W,
           pady=(5, 0),
           padx=5)
    Label(
        transactionsWindow,
        text=get_string("transaction_list_notice"),
        font=TEXT_FONT,
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR,
    ).grid(row=1,
           column=0,
           columnspan=2,
           sticky=S + W,
           pady=(5, 0),
           padx=5)
    listbox = Listbox(
        transactionsWindow,
        width="35",
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR)
    listbox.grid(
        row=2,
        column=0,
        sticky=S + W + N + E,
        padx=(5, 0),
        pady=(0, 5))
    scrollbar = Scrollbar(transactionsWindow,
                          background=BACKGROUND_COLOR)
    scrollbar.grid(
        row=2,
        column=1,
        sticky=N + S,
        padx=(0, 5),
        pady=(0, 5))
    # gtxl maps stringified indices to transaction dicts
    for i in gtxl:
        listbox.insert(END, gtxl[i]["Sender"] + " to " + gtxl[i]
                       ["Recipient"] + ": " + str(gtxl[i]["Amount"]) + " DUCO")
    def get_selection(event):
        # Open the clicked transaction by its hash; clicks landing
        # outside any row leave curselection() empty -> IndexError.
        try:
            selection = listbox.curselection()[0]
            openTransaction(gtxl[str(selection)]["Hash"])
        except IndexError:
            pass
    listbox.bind("<Button-1>", get_selection)
    listbox.config(yscrollcommand=scrollbar.set, font=TEXT_FONT)
    scrollbar.config(command=listbox.yview)
def currency_converter_calc():
    """Recompute the conversion result label from the converter form.

    Reads the selected currencies and the amount Entry, multiplies by
    the current DUCO fiat value and writes the rounded result into
    ``conversionresulttext``.
    """
    fromcurrency = fromCurrencyInput.get(fromCurrencyInput.curselection())
    tocurrency = toCurrencyInput.get(toCurrencyInput.curselection())
    entered = float(amountInput.get())
    # TODO
    converted = round(duco_fiat_value * entered, 6)
    conversionresulttext.set(get_string("result") + ": " + str(converted))
    calculatorWindow.update()
def currency_converter_window(handler):
    """Open the DUCO/fiat converter Toplevel.

    Exposes its widgets through module-level globals so
    ``currency_converter_calc`` (the Calculate button command) can
    read the selections and write the result.
    ``handler`` is the Tk event object (unused).
    """
    global conversionresulttext
    global fromCurrencyInput
    global toCurrencyInput
    global amountInput
    global calculatorWindow
    calculatorWindow = Toplevel()
    calculatorWindow.resizable(False, False)
    calculatorWindow.title(get_string("wallet_calculator"))
    calculatorWindow.transient([root])
    calculatorWindow.configure(background=BACKGROUND_COLOR)
    TEXT_FONT_BOLD = Font(
        calculatorWindow,
        size=12,
        weight="bold")
    TEXT_FONT_BOLD_LARGE = Font(
        calculatorWindow,
        size=14,
        weight="bold")
    TEXT_FONT = Font(
        calculatorWindow,
        size=12,
        weight="normal")
    Label(
        calculatorWindow,
        text=get_string("currency_converter"),
        font=TEXT_FONT_BOLD_LARGE,
        foreground=FOREGROUND_COLOR,
        background=BACKGROUND_COLOR,
    ).grid(row=0,
           columnspan=2,
           column=0,
           sticky=S + W,
           pady=5,
           padx=5)
    Label(
        calculatorWindow,
        text=get_string("from"),
        font=TEXT_FONT_BOLD,
        foreground=FOREGROUND_COLOR,
        background=BACKGROUND_COLOR,
    ).grid(row=1,
           column=0,
           sticky=S + W,
           padx=5)
    fromCurrencyInput = Listbox(
        calculatorWindow,
        exportselection=False,
        background=BACKGROUND_COLOR,
        selectbackground=FOREGROUND_COLOR,
        border="0",
        font=TEXT_FONT,
        foreground=FONT_COLOR,
        width="20",
        height="13",
    )
    fromCurrencyInput.grid(row=2,
                           column=0,
                           sticky=S + W,
                           padx=(5, 0))
    fromCurrencyInput.insert(0, "DUCO")
    vsb = Scrollbar(
        calculatorWindow,
        orient="vertical",
        command=fromCurrencyInput.yview,
        background=BACKGROUND_COLOR,
    )
    vsb.grid(row=2,
             column=1,
             sticky="ns",
             padx=(0, 5))
    fromCurrencyInput.configure(yscrollcommand=vsb.set)
    # Pre-select the first (only) source currency.
    fromCurrencyInput.select_set(0)
    fromCurrencyInput.event_generate("<<ListboxSelect>>")
    Label(
        calculatorWindow,
        text=get_string("to"),
        font=TEXT_FONT_BOLD,
        foreground=FOREGROUND_COLOR,
        background=BACKGROUND_COLOR,
    ).grid(row=1,
           column=3,
           columnspan=2,
           sticky=S + W,
           padx=5)
    toCurrencyInput = Listbox(
        calculatorWindow,
        exportselection=False,
        background=BACKGROUND_COLOR,
        selectbackground=FOREGROUND_COLOR,
        border="0",
        foreground=FONT_COLOR,
        font=TEXT_FONT,
        width="20",
        height="13")
    toCurrencyInput.grid(
        row=2,
        column=3,
        sticky=S + W,
        padx=(5, 0))
    toCurrencyInput.insert(0, "USD")
    vsb2 = Scrollbar(
        calculatorWindow,
        orient="vertical",
        command=toCurrencyInput.yview,
        background=BACKGROUND_COLOR,)
    vsb2.grid(
        row=2,
        column=4,
        sticky="ns",
        padx=(0, 5))
    toCurrencyInput.configure(yscrollcommand=vsb2.set)
    toCurrencyInput.select_set(0)
    toCurrencyInput.event_generate("<<ListboxSelect>>")
    Label(
        calculatorWindow,
        text=get_string("input_amount"),
        font=TEXT_FONT_BOLD,
        foreground=FOREGROUND_COLOR,
        background=BACKGROUND_COLOR,
    ).grid(row=3,
           columnspan=2,
           column=0,
           sticky=S + W,
           padx=5)
    def clear_ccamount_placeholder(self):
        # Clear the pre-filled balance when the field gains focus
        # ("self" is actually the Tk event object).
        amountInput.delete("0", "100")
    amountInput = Entry(
        calculatorWindow,
        foreground=FOREGROUND_COLOR_SECONDARY,
        border="0",
        font=TEXT_FONT,
        background=BACKGROUND_COLOR,)
    amountInput.grid(
        row=4,
        column=0,
        sticky=N + S + W + E,
        padx=5,
        columnspan=2,
        pady=(0, 5))
    # Use the current balance as the placeholder amount.
    amountInput.insert("0", str(global_balance))
    amountInput.bind("<FocusIn>", clear_ccamount_placeholder)
    Button(
        calculatorWindow,
        text=get_string("calculate"),
        font=TEXT_FONT_BOLD,
        foreground=FOREGROUND_COLOR,
        activebackground=BACKGROUND_COLOR,
        background=BACKGROUND_COLOR,
        command=currency_converter_calc,
    ).grid(row=3,
           columnspan=2,
           column=2,
           sticky=N + S + W + E,
           pady=(5, 0),
           padx=5)
    conversionresulttext = StringVar(calculatorWindow)
    conversionresulttext.set(get_string("result") + ": 0.0")
    conversionresultLabel = Label(
        calculatorWindow,
        textvariable=conversionresulttext,
        font=TEXT_FONT_BOLD,
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR,)
    conversionresultLabel.grid(
        row=4,
        columnspan=2,
        column=2,
        pady=(0, 5))
    calculatorWindow.mainloop()
def statistics_window(handler):
    """Open a Toplevel showing network statistics: the user's active
    miners (miners.json), the rich list and pool-wide numbers
    (api.json). ``handler`` is the Tk event object (unused).
    """
    statsApi = get(
        "https://server.duinocoin.com"
        + "/api.json",
        data=None)
    if statsApi.status_code == 200:  # Check for response
        statsApi = statsApi.json()
    miner_api = get(
        "https://server.duinocoin.com"
        + "/miners.json",
        data=None)
    if miner_api.status_code == 200:  # Check for response
        miner_api = miner_api.json()
    # NOTE(review): on a non-200 status the raw Response object is
    # kept and the dict accesses below would fail - presumably the
    # server is assumed reachable; verify error handling upstream.
    statsWindow = Toplevel()
    statsWindow.resizable(False, False)
    statsWindow.title(get_string("statistics_title"))
    statsWindow.transient([root])
    statsWindow.configure(background=BACKGROUND_COLOR)
    TEXT_FONT_BOLD_LARGE = Font(
        statsWindow,
        size=14,
        weight="bold")
    TEXT_FONT = Font(
        statsWindow,
        size=12,
        weight="normal")
    Active_workers_listbox = Listbox(
        statsWindow,
        exportselection=False,
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR,
        border="0",
        font=TEXT_FONT,
        width="65",
        height="8",)
    Active_workers_listbox.grid(
        row=1,
        columnspan=2,
        sticky=N + E + S + W,
        pady=(0, 5),
        padx=5)
    # Collect this user's mining threads and sum their hashrate.
    i = 0
    totalHashrate = 0
    for threadid in miner_api:
        if username in miner_api[threadid]["User"]:
            rigId = miner_api[threadid]["Identifier"]
            if rigId == "None":
                rigId = ""
            else:
                rigId += ": "
            software = miner_api[threadid]["Software"]
            hashrate = str(round(miner_api[threadid]["Hashrate"], 2))
            totalHashrate += float(hashrate)
            difficulty = str(miner_api[threadid]["Diff"])
            shares = (
                str(miner_api[threadid]["Accepted"])
                + "/"
                + str(
                    miner_api[threadid]["Accepted"]
                    + miner_api[threadid]["Rejected"]))
            Active_workers_listbox.insert(
                i,
                "#"
                + str(i + 1)
                + ": "
                + rigId
                + software
                + " "
                + str(round(float(hashrate) / 1000, 2))
                + " kH/s @ diff "
                + difficulty
                + ", "
                + shares)
            i += 1
    if i == 0:
        Active_workers_listbox.insert(
            i, get_string("statistics_miner_warning"))
    # Human-readable total with H/s, kH/s, MH/s or GH/s units.
    totalHashrateString = str(int(totalHashrate)) + " H/s"
    if totalHashrate > 1000000000:
        totalHashrateString = str(
            round(totalHashrate / 1000000000, 2)) + " GH/s"
    elif totalHashrate > 1000000:
        totalHashrateString = str(round(totalHashrate / 1000000, 2)) + " MH/s"
    elif totalHashrate > 1000:
        totalHashrateString = str(round(totalHashrate / 1000, 2)) + " kH/s"
    Active_workers_listbox.configure(height=i)
    # NOTE(review): index 32 is presumably out of range on purpose so
    # that no row appears selected - confirm.
    Active_workers_listbox.select_set(32)
    Active_workers_listbox.event_generate("<<ListboxSelect>>")
    Label(
        statsWindow,
        text=get_string("your_miners") + " - " + totalHashrateString,
        font=TEXT_FONT_BOLD_LARGE,
        foreground=FOREGROUND_COLOR,
        background=BACKGROUND_COLOR,
    ).grid(
        row=0,
        column=0,
        columnspan=2,
        sticky=S + W,
        pady=5,
        padx=5)
    Label(
        statsWindow,
        text=get_string("richlist"),
        font=TEXT_FONT_BOLD_LARGE,
        foreground=FOREGROUND_COLOR,
        background=BACKGROUND_COLOR,
    ).grid(
        row=2,
        column=0,
        sticky=S + W,
        pady=5,
        padx=5)
    Top_10_listbox = Listbox(
        statsWindow,
        exportselection=False,
        border="0",
        font=TEXT_FONT,
        width="30",
        height="10",
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR)
    Top_10_listbox.grid(
        row=3,
        column=0,
        rowspan=10,
        sticky=N + E + S + W,
        pady=(0, 5),
        padx=5)
    num = 0
    for i in statsApi["Top 10 richest miners"]:
        Top_10_listbox.insert(num, i)
        num += 1
    Top_10_listbox.select_set(32)
    Top_10_listbox.event_generate("<<ListboxSelect>>")
    Label(
        statsWindow,
        text=get_string("network_info"),
        font=TEXT_FONT_BOLD_LARGE,
        foreground=FOREGROUND_COLOR,
        background=BACKGROUND_COLOR,
    ).grid(row=2,
           column=1,
           sticky=S + W,
           padx=5,
           pady=5)
    Label(
        statsWindow,
        text=get_string("difficulty")
        + ": "
        + str(statsApi["Current difficulty"]),
        font=TEXT_FONT,
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR,
    ).grid(row=3,
           column=1,
           sticky=S + W,
           padx=5)
    Label(
        statsWindow,
        text=get_string("mined_blocks")
        + ": "
        + str(statsApi["Mined blocks"]),
        font=TEXT_FONT,
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR,
    ).grid(row=4,
           column=1,
           sticky=S + W,
           padx=5)
    Label(
        statsWindow,
        text=get_string("network_hashrate")
        + ": "
        + str(statsApi["Pool hashrate"]),
        font=TEXT_FONT,
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR,
    ).grid(row=5,
           column=1,
           sticky=S + W,
           padx=5)
    Label(
        statsWindow,
        text=get_string("active_miners")
        + ": "
        + str(len(statsApi["Miners"])),
        font=TEXT_FONT,
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR,
    ).grid(row=6,
           column=1,
           sticky=S + W,
           padx=5)
    Label(
        statsWindow,
        text="1 DUCO "
        + get_string("estimated_price")
        + ": $"
        + str(statsApi["Duco price"]),
        font=TEXT_FONT,
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR,
    ).grid(row=7,
           column=1,
           sticky=S + W,
           padx=5)
    Label(
        statsWindow,
        text=get_string("registered_users")
        + ": "
        + str(statsApi["Registered users"]),
        font=TEXT_FONT,
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR,
    ).grid(row=8,
           column=1,
           sticky=S + W,
           padx=5)
    Label(
        statsWindow,
        text=get_string("mined_duco")
        + ": "
        + str(statsApi["All-time mined DUCO"])
        + " ᕲ",
        font=TEXT_FONT,
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR,
    ).grid(row=9,
           column=1,
           sticky=S + W,
           padx=5)
    statsWindow.mainloop()
def wrapper_window(handler):
    """Open the wrap dialog: convert DUCO into wDUCO on Tron.

    Requires the public key file created by the wrapper configuration
    in settings. ``handler`` is the Tk event object (unused).
    """
    def Wrap():
        # Send a WRAP request for the entered amount over a fresh,
        # authenticated websocket, then close the dialog.
        amount = amountWrap.get()
        print("Got amount:", amount)
        print("pub key:", pub_key)
        soc = websocket.create_connection(WS_URI)
        soc.recv()  # consume the server's greeting banner
        try:
            float(amount)
        except Exception:
            pass  # non-numeric input: don't send anything
        else:
            soc.send(bytes(
                "LOGI,"
                + str(username)
                + ","
                + str(password),
                encoding="utf8"))
            _ = soc.recv()
            soc.send(
                bytes(
                    "WRAP,"
                    + str(amount)
                    + ","
                    + str(pub_key)
                    + str(",placeholder"),
                    encoding="utf8"))
        soc.close()
        sleep(2)
        wrapperWindow.quit()
    try:
        # Opening the key file doubles as the "is the wrapper
        # configured?" check.
        pubkeyfile = open(str(resources + "DUCOPubKey.pub"), "r")
    except Exception:
        messagebox.showerror(
            title=get_string("wrapper_error_title"),
            message=get_string("wrapper_error"))
    else:
        if TRONPY_ENABLED:
            pub_key = pubkeyfile.read()
            pubkeyfile.close()
            wrapperWindow = Toplevel()
            wrapperWindow.resizable(False, False)
            wrapperWindow.title(get_string("wrapper_title"))
            wrapperWindow.transient([root])
            askWrapAmount = Label(
                wrapperWindow,
                text=get_string("wrapper_amount_to_wrap") + ":")
            askWrapAmount.grid(row=0,
                               column=0,
                               sticky=N + W)
            amountWrap = Entry(wrapperWindow,
                               border="0",
                               font=Font(size=15))
            amountWrap.grid(row=1,
                            column=0,
                            sticky=N + W)
            wrapButton = Button(wrapperWindow,
                                text="Wrap",
                                command=Wrap)
            wrapButton.grid(row=2,
                            column=0,
                            sticky=N + W)
        else:
            # NOTE(review): pubkeyfile is never closed on this branch -
            # a small handle leak; worth fixing.
            messagebox.showerror(
                title=get_string("wrapper_error_title"),
                message=get_string("wrapper_error_tronpy"))
def unwrapper_window(handler):
    """Open the unwrap dialog: convert wDUCO (Tron token) back into
    DUCO. Requires the wrapper key files created via settings.
    ``handler`` is the Tk event object (unused).
    """
    def UnWrap():
        # Decrypt the stored private key with the entered passphrase,
        # initiate an on-chain withdrawal for any amount not already
        # pending, then ask the DUCO server to credit the unwrap.
        pubkeyfile = open(str(resources + "DUCOPubKey.pub"), "r")
        pub_key = pubkeyfile.read()
        pubkeyfile.close()
        passphrase = passphraseEntry.get()
        privkeyfile = open(str(resources + "DUCOPrivKey.encrypt"), "r")
        privKeyEnc = privkeyfile.read()
        privkeyfile.close()
        try:
            priv_key = password_decrypt(privKeyEnc, passphrase).decode()
            use_wrapper = True
        except InvalidToken:
            # Wrong passphrase: skip the on-chain part entirely.
            print(get_string("invalid_passphrase"))
            use_wrapper = False
        amount = amountUnWrap.get()
        print("Got amount:", amount)
        soc = websocket.create_connection(WS_URI)
        soc.recv()  # consume the server's greeting banner
        try:
            float(amount)
        except Exception:
            pass  # non-numeric input: don't send anything
        else:
            soc.send(bytes(
                "LOGI,"
                + str(username)
                + ","
                + str(password), encoding="utf8"))
            _ = soc.recv()
            if use_wrapper:
                pendingvalues = wduco.functions.pendingWithdrawals(
                    pub_key, username)
                # transaction wasn't initiated, but variable should be declared
                txn_success = False
                try:
                    amount = float(amount)
                except ValueError:
                    print("Value should be numeric - aborting")
                else:
                    # Token amounts are held with 6 decimals on-chain;
                    # only initiate a withdrawal for what isn't pending.
                    if int(float(amount) * 10 ** 6) >= pendingvalues:
                        toInit = int(float(amount) * 10 ** 6) - pendingvalues
                    else:
                        toInit = amount * 10 ** 6
                    if toInit > 0:
                        txn = (
                            wduco.functions.initiateWithdraw(username, toInit)
                            .with_owner(pub_key)
                            .fee_limit(5_000_000)
                            .build()
                            .sign(PrivateKey(bytes.fromhex(priv_key))))
                        txn = txn.broadcast()
                        txnfeedback = txn.result()
                        if txnfeedback:
                            txn_success = True
                        else:
                            txn_success = False
                    if txn_success or amount <= pendingvalues:
                        soc.send(
                            bytes(
                                "UNWRAP,"
                                + str(amount)
                                + ","
                                + str(pub_key)
                                + str(",placeholder"),
                                encoding="utf8"))
        soc.close()
        sleep(2)
        unWrapperWindow.quit()
    try:
        # Opening/reading the key file doubles as the "is the wrapper
        # configured?" check.
        pubkeyfile = open(str(resources + "DUCOPubKey.pub"), "r")
        pubkeyfile.read()
        pubkeyfile.close()
    except Exception:
        messagebox.showerror(
            title=get_string("wrapper_error_title"),
            message=get_string("wrapper_error"))
    else:
        if TRONPY_ENABLED:
            unWrapperWindow = Toplevel()
            unWrapperWindow.resizable(False, False)
            unWrapperWindow.title(get_string("unwrapper_title"))
            unWrapperWindow.transient([root])
            unWrapperWindow.configure()
            askAmount = Label(
                unWrapperWindow,
                text=get_string("unwrap_amount"))
            askAmount.grid(row=1,
                           column=0,
                           sticky=N + W)
            amountUnWrap = Entry(
                unWrapperWindow,
                border="0",
                font=Font(size=15))
            amountUnWrap.grid(row=2,
                              column=0,
                              sticky=N + W)
            askPassphrase = Label(
                unWrapperWindow,
                text=get_string("ask_passphrase"))
            askPassphrase.grid(row=4,
                               column=0,
                               sticky=N + W)
            passphraseEntry = Entry(
                unWrapperWindow,
                border="0",
                font=Font(size=15))
            passphraseEntry.grid(
                row=5,
                column=0,
                sticky=N + W)
            wrapButton = Button(
                unWrapperWindow,
                text=get_string("unwrap_duco"),
                command=UnWrap)
            wrapButton.grid(
                row=7,
                column=0,
                sticky=N + W)
        else:
            messagebox.showerror(
                title=get_string("wrapper_error"),
                message=get_string("wrapper_error_tronpy"))
def settings_window(handler):
    """Open the settings Toplevel: logout, password change, wrapper
    configuration, clearing the local transaction log, plus version
    info and web/social links. ``handler`` is the Tk event (unused).
    """
    def _wrapperconf():
        # Ask for a Tron private key and a passphrase; store the key
        # encrypted with the passphrase plus the derived public key.
        if TRONPY_ENABLED:
            privkey_input = StringVar()
            passphrase_input = StringVar()
            wrapconfWindow = Toplevel()
            wrapconfWindow.resizable(False, False)
            wrapconfWindow.title(get_string("wrapper_title"))
            wrapconfWindow.transient([root])
            wrapconfWindow.configure()
            def setwrapper():
                # Derive the public address from the private key; on
                # success persist both key files next to wallet.db.
                if privkey_input and passphrase_input:
                    priv_key = privkey_entry.get()
                    print("Got priv key:", priv_key)
                    passphrase = passphrase_entry.get()
                    print("Got passphrase:", passphrase)
                    try:
                        pub_key = PrivateKey(
                            bytes.fromhex(priv_key)
                        ).public_key.to_base58check_address()
                    except Exception:
                        pass  # invalid key: silently keep the dialog open
                    else:
                        print("Saving data")
                        privkeyfile = open(
                            str(resources + "DUCOPrivKey.encrypt"), "w")
                        privkeyfile.write(
                            str(password_encrypt(
                                priv_key.encode(), passphrase
                            ).decode()))
                        privkeyfile.close()
                        pubkeyfile = open(
                            str(resources + "DUCOPubKey.pub"), "w")
                        pubkeyfile.write(pub_key)
                        pubkeyfile.close()
                        Label(wrapconfWindow, text=get_string(
                            "wrapper_success")).pack()
                        wrapconfWindow.quit()
            title = Label(
                wrapconfWindow,
                text=get_string("wrapper_config_title"),
                font=Font(size=20))
            title.grid(row=0,
                       column=0,
                       sticky=N + W,
                       padx=5)
            askprivkey = Label(
                wrapconfWindow,
                text=get_string("ask_private_key"))
            askprivkey.grid(row=1,
                            column=0,
                            sticky=N + W)
            privkey_entry = Entry(
                wrapconfWindow,
                font=TEXT_FONT,
                textvariable=privkey_input)
            privkey_entry.grid(row=2,
                               column=0,
                               sticky=N + W)
            askpassphrase = Label(wrapconfWindow,
                                  text=get_string("passphrase"))
            askpassphrase.grid(row=3,
                               column=0,
                               sticky=N + W)
            passphrase_entry = Entry(
                wrapconfWindow,
                font=TEXT_FONT,
                textvariable=passphrase_input)
            passphrase_entry.grid(row=4,
                                  column=0,
                                  sticky=N + W)
            wrapConfigButton = Button(
                wrapconfWindow,
                text=get_string("configure_wrapper_lowercase"),
                command=setwrapper)
            wrapConfigButton.grid(row=5,
                                  column=0,
                                  sticky=N + W)
            wrapconfWindow.mainloop()
        else:
            messagebox.showerror(
                title=get_string("wrapper_error"),
                message=get_string("wrapper_error_tronpy"))
    def _logout():
        # Wipe the saved credentials and restart the wallet process.
        try:
            with sqlconn(resources + "wallet.db") as con:
                cur = con.cursor()
                cur.execute("DELETE FROM UserData")
                con.commit()
        except Exception as e:
            print(e)
        try:
            execl(sys.executable, sys.executable, *sys.argv)
        except Exception as e:
            print(e)
    def _cleartrs():
        # Clear the locally cached transaction log.
        with sqlconn(resources + "wallet.db") as con:
            cur = con.cursor()
            cur.execute("DELETE FROM transactions")
            con.commit()
    def _chgpass():
        def _changepassprotocol():
            # Validate the form, send CHGP to the server and, on
            # success, wipe the saved credentials and restart.
            oldpasswordS = oldpassword.get()
            newpasswordS = newpassword.get()
            confpasswordS = confpassword.get()
            if oldpasswordS != newpasswordS:
                if oldpasswordS and newpasswordS and confpasswordS:
                    if newpasswordS == confpasswordS:
                        soc = websocket.create_connection(WS_URI)
                        soc.recv()  # consume the server's greeting
                        soc.send(
                            bytes(
                                "LOGI,"
                                + str(username)
                                + ","
                                + str(password), encoding="utf8"))
                        soc.recv()
                        soc.send(
                            bytes(
                                "CHGP,"
                                + str(oldpasswordS)
                                + ","
                                + str(newpasswordS),
                                encoding="utf8"))
                        response = soc.recv().rstrip("\n").split(",")
                        soc.close()
                        if not "OK" in response[0]:
                            messagebox.showerror(
                                title=get_string("change_passwd_error"),
                                message=response[1])
                        else:
                            messagebox.showinfo(
                                title=get_string("change_passwd_ok"),
                                message=response[1])
                            try:
                                try:
                                    with sqlconn(
                                        resources + "wallet.db"
                                    ) as con:
                                        cur = con.cursor()
                                        cur.execute("DELETE FROM UserData")
                                        con.commit()
                                except Exception as e:
                                    print(e)
                            except FileNotFoundError:
                                pass
                            execl(sys.executable, sys.executable, *sys.argv)
                    else:
                        messagebox.showerror(
                            title=get_string("change_passwd_error"),
                            message=get_string("error_passwd_dont_match"))
                else:
                    messagebox.showerror(
                        title=get_string("change_passwd_error"),
                        message=get_string("fill_the_blanks_warning"))
            else:
                messagebox.showerror(
                    title=get_string("change_passwd_error"),
                    message=get_string("same_passwd_error"))
        settingsWindow.destroy()
        changepassWindow = Toplevel()
        changepassWindow.title(get_string("change_passwd_lowercase"))
        changepassWindow.resizable(False, False)
        changepassWindow.transient([root])
        changepassWindow.configure(background=BACKGROUND_COLOR)
        TEXT_FONT_BOLD = Font(changepassWindow, size=12, weight="bold")
        TEXT_FONT = Font(changepassWindow, size=12, weight="normal")
        Label(
            changepassWindow,
            text=get_string("old_passwd"),
            font=TEXT_FONT_BOLD,
            background=BACKGROUND_COLOR,
            foreground=FONT_COLOR,
        ).grid(row=0,
               sticky=W,
               padx=5)
        oldpassword = Entry(
            changepassWindow,
            show="*",
            font=TEXT_FONT,
            foreground=FOREGROUND_COLOR_SECONDARY,
            background=BACKGROUND_COLOR)
        oldpassword.grid(row=1,
                         sticky="nswe",
                         padx=5)
        Label(
            changepassWindow,
            text=get_string("new_passwd"),
            font=TEXT_FONT_BOLD,
            background=BACKGROUND_COLOR,
            foreground=FONT_COLOR,
        ).grid(row=2,
               sticky=W,
               padx=5)
        newpassword = Entry(
            changepassWindow,
            show="*",
            font=TEXT_FONT,
            foreground=FOREGROUND_COLOR_SECONDARY,
            background=BACKGROUND_COLOR)
        newpassword.grid(row=3,
                         sticky="nswe",
                         padx=5)
        Label(
            changepassWindow,
            text=get_string("confirm_new_passwd"),
            font=TEXT_FONT_BOLD,
            background=BACKGROUND_COLOR,
            foreground=FONT_COLOR,
        ).grid(row=4,
               sticky=W,
               padx=5)
        confpassword = Entry(
            changepassWindow,
            show="*",
            font=TEXT_FONT,
            foreground=FOREGROUND_COLOR_SECONDARY,
            background=BACKGROUND_COLOR)
        confpassword.grid(row=5,
                          sticky="nswe",
                          padx=5)
        chgpbtn = Button(
            changepassWindow,
            text=get_string("change_passwd"),
            command=_changepassprotocol,
            foreground=FOREGROUND_COLOR,
            font=TEXT_FONT_BOLD,
            background=BACKGROUND_COLOR,
            activebackground=BACKGROUND_COLOR)
        chgpbtn.grid(columnspan=2,
                     sticky="nswe",
                     pady=5,
                     padx=5)
    settingsWindow = Toplevel()
    settingsWindow.resizable(False, False)
    settingsWindow.title(get_string("settings_title"))
    settingsWindow.transient([root])
    settingsWindow.configure(background=BACKGROUND_COLOR)
    TEXT_FONT = Font(
        settingsWindow,
        size=12,
        weight="normal")
    TEXT_FONT_BOLD_LARGE = Font(
        settingsWindow,
        size=12,
        weight="bold")
    Label(
        settingsWindow,
        text=get_string("uppercase_settings"),
        font=TEXT_FONT_BOLD_LARGE,
        foreground=FOREGROUND_COLOR,
        background=BACKGROUND_COLOR,
    ).grid(row=0,
           column=0,
           columnspan=4,
           sticky=S + W,
           pady=(5, 5),
           padx=(5, 0))
    logoutbtn = Button(
        settingsWindow,
        text=get_string("logout"),
        command=_logout,
        font=TEXT_FONT,
        background=BACKGROUND_COLOR,
        activebackground=BACKGROUND_COLOR,
        foreground=FONT_COLOR)
    logoutbtn.grid(row=1,
                   column=0,
                   columnspan=4,
                   sticky="nswe",
                   padx=5)
    chgpassbtn = Button(
        settingsWindow,
        text=get_string("change_passwd"),
        command=_chgpass,
        font=TEXT_FONT,
        background=BACKGROUND_COLOR,
        activebackground=BACKGROUND_COLOR,
        foreground=FONT_COLOR)
    chgpassbtn.grid(row=2,
                    column=0,
                    columnspan=4,
                    sticky="nswe",
                    padx=5)
    wrapperconfbtn = Button(
        settingsWindow,
        text=get_string("configure_wrapper"),
        command=_wrapperconf,
        font=TEXT_FONT,
        background=BACKGROUND_COLOR,
        activebackground=BACKGROUND_COLOR,
        foreground=FONT_COLOR)
    wrapperconfbtn.grid(row=3,
                        column=0,
                        columnspan=4,
                        sticky="nswe",
                        padx=5)
    cleartransbtn = Button(
        settingsWindow,
        text=get_string("clear_transactions"),
        command=_cleartrs,
        font=TEXT_FONT,
        background=BACKGROUND_COLOR,
        activebackground=BACKGROUND_COLOR,
        foreground=FONT_COLOR)
    cleartransbtn.grid(row=4,
                       column=0,
                       columnspan=4,
                       sticky="nswe",
                       padx=5)
    separator = ttk.Separator(settingsWindow, orient="horizontal")
    separator.grid(
        row=5,
        column=0,
        columnspan=4,
        sticky=N + S + E + W,
        padx=(5, 5),
        pady=5)
    Label(
        settingsWindow,
        text=get_string("logged_in_as")
        + ": "
        + str(username),
        font=TEXT_FONT,
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR,
    ).grid(
        row=6,
        column=0,
        columnspan=4,
        padx=5,
        sticky=S + W)
    Label(
        settingsWindow,
        text=get_string("wallet_version")
        + ": "
        + str(VERSION),
        font=TEXT_FONT,
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR,
    ).grid(
        row=7,
        column=0,
        columnspan=4,
        padx=5,
        sticky=S + W)
    Label(
        settingsWindow,
        text=get_string("translation_author_message")
        + " "
        + get_string("translation_author"),
        font=TEXT_FONT,
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR,
    ).grid(
        row=8,
        column=0,
        columnspan=4,
        padx=5,
        sticky=S + W)
    Label(
        settingsWindow,
        text=get_string("config_dev_warning"),
        font=TEXT_FONT,
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR,
    ).grid(
        row=9,
        column=0,
        columnspan=4,
        padx=5,
        sticky=S + W)
    separator = ttk.Separator(settingsWindow, orient="horizontal")
    separator.grid(
        row=10,
        column=0,
        columnspan=4,
        sticky=N + S + E + W,
        padx=(5, 5),
        pady=5)
    # Clickable link icons. Image.ANTIALIAS was removed in Pillow 10;
    # LANCZOS is the same filter under its current name (Pillow >= 2.7).
    original = Image.open(resources + "duco.png")
    resized = original.resize((48, 48), Image.LANCZOS)
    website = ImageTk.PhotoImage(resized)
    website.image = website  # keep a reference so Tk doesn't GC it
    websiteLabel = Label(
        settingsWindow,
        image=website,
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR)
    websiteLabel.grid(
        row=11,
        column=0,
        sticky=N + S + E + W,
        padx=(5, 0),
        pady=(0, 5))
    websiteLabel.bind("<Button-1>", openWebsite)
    original = Image.open(resources + "github.png")
    resized = original.resize((48, 48), Image.LANCZOS)
    github = ImageTk.PhotoImage(resized)
    github.image = github
    githubLabel = Label(
        settingsWindow,
        image=github,
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR)
    githubLabel.grid(
        row=11,
        column=1,
        sticky=N + S + E + W,
        pady=(0, 5))
    githubLabel.bind("<Button-1>", openGitHub)
    original = Image.open(resources + "exchange.png")
    resized = original.resize((48, 48), Image.LANCZOS)
    exchange = ImageTk.PhotoImage(resized)
    exchange.image = exchange
    exchangeLabel = Label(
        settingsWindow,
        image=exchange,
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR)
    exchangeLabel.grid(
        row=11,
        column=2,
        sticky=N + S + E + W,
        pady=(0, 5))
    exchangeLabel.bind("<Button-1>", openExchange)
    original = Image.open(resources + "discord.png")
    resized = original.resize((48, 48), Image.LANCZOS)
    discord = ImageTk.PhotoImage(resized)
    discord.image = discord
    discordLabel = Label(
        settingsWindow,
        image=discord,
        background=BACKGROUND_COLOR,
        foreground=FONT_COLOR)
    discordLabel.grid(
        row=11,
        column=3,
        sticky=N + S + E + W,
        padx=(0, 5),
        pady=(0, 5))
    discordLabel.bind("<Button-1>", openDiscord)
def get_balance():
    """Poll the server for the balance and recent transaction list.

    Re-schedules itself every 3 s via a threading Timer. Updates the
    ``oldbalance``/``balance``/``global_balance`` globals and the
    ``gtxl`` transaction dict. Balance changes are accumulated in
    ``unpaid_balance`` until they cross MIN_TRANSACTION_VALUE, then
    optionally notified and written to the local Transactions table.
    """
    global oldbalance
    global balance
    global unpaid_balance
    global global_balance
    global gtxl
    try:
        soc = websocket.create_connection(WS_URI)
        soc.recv()  # consume the server's greeting banner
        soc.send(bytes(
            "LOGI,"
            + str(username)
            + ","
            + str(password), encoding="utf8"))
        _ = soc.recv()
        soc.send(bytes(
            "BALA",
            encoding="utf8"))
        oldbalance = balance
        balance = float(soc.recv().rstrip("\n"))
        global_balance = round(float(balance), 8)
        try:
            gtxl = {}
            soc.send(bytes(
                "GTXL," + str(username) + ",7",
                encoding="utf8"))
            # The server sends a Python-style dict; swap single quotes
            # for double quotes so it parses as JSON.
            gtxl = str(soc.recv().rstrip(
                "\n").replace("\'", "\""))
            gtxl = jsonloads(gtxl)
        except Exception as e:
            print("Error getting transaction list: " + str(e))
        if oldbalance != balance:
            difference = float(balance) - float(oldbalance)
            dif_with_unpaid = (
                float(balance) - float(oldbalance)) + unpaid_balance
            if float(balance) != float(difference):
                if (dif_with_unpaid >= MIN_TRANSACTION_VALUE
                        or dif_with_unpaid < 0
                        ):
                    now = datetime.now()
                    difference = round(dif_with_unpaid, 8)
                    # NOTE(review): "and" binds tighter than "or", so a
                    # large positive difference notifies even when
                    # notificationsEnabled is False - verify intent.
                    if (
                        difference >= MIN_TRANSACTION_VALUE_NOTIFY
                        or difference < 0
                        and notificationsEnabled
                    ):
                        notification = Notify()
                        notification.title = get_string("duino_coin_wallet")
                        notification.message = (
                            get_string("notification_new_transaction")
                            + "\n"
                            + now.strftime("%d.%m.%Y %H:%M:%S\n")
                            + str(round(difference, 6))
                            + " DUCO")
                        notification.icon = resources + "duco_color.png"
                        notification.send(block=False)
                    with sqlconn(resources + "wallet.db") as con:
                        cur = con.cursor()
                        cur.execute(
                            """INSERT INTO Transactions(Date, amount)
                            VALUES(?, ?)""", (
                                now.strftime("%d.%m.%Y %H:%M:%S"),
                                round(difference, 8)))
                        con.commit()
                    unpaid_balance = 0
                else:
                    # Too small to record yet: carry it forward.
                    unpaid_balance += float(balance) - float(oldbalance)
    except Exception as e:
        print("Retrying in 3s. (" + str(e) + ")")
    Timer(3, get_balance).start()
def get_wbalance():
    """Return the wrapped-DUCO (wDUCO) balance from the Tron contract.

    Reads the stored public key and queries the wduco contract's
    balanceOf; the raw value is scaled down by 10**6 (token decimals).
    Returns 0.0 when wrapping is disabled or anything fails (missing
    key file, network/contract error, ...).
    """
    if TRONPY_ENABLED:
        try:
            # Context manager guarantees the key file is closed even if
            # the contract call below raises (the original leaked the
            # handle in that case).
            with open(str(resources + "DUCOPubKey.pub"), "r") as pubkeyfile:
                pub_key = pubkeyfile.read()
            wBalance = float(wduco.functions.balanceOf(pub_key)) / (10 ** 6)
            return wBalance
        except Exception:
            return 0.0
    else:
        return 0.0
def update_balance_labels():
    """Refresh the main-window StringVars once per second.

    Updates the DUCO/wDUCO/fiat balances, the recent-transaction text
    (read from the local DB) and the session/minute/hour/day profit
    estimates that profit_calculator fills into ``profit_array``.
    Re-schedules itself via a threading Timer; exits the process if
    the UI is gone (any exception).
    """
    global profit_array, profitCheck
    try:
        balancetext.set(str(round(global_balance, 7)) + " ᕲ")
        wbalancetext.set(str(get_wbalance()) + " wᕲ")
        balanceusdtext.set(
            "$" + str(round(global_balance * duco_fiat_value, 4)))
        with sqlconn(resources + "wallet.db") as con:
            cur = con.cursor()
            cur.execute("SELECT rowid,* FROM Transactions ORDER BY rowid DESC")
            Transactions = cur.fetchall()
            transactionstext_format = ""
            # Show at most the six newest transactions.
            for i, row in enumerate(Transactions, start=1):
                transactionstext_format += str(row[1]) + \
                    " " + str(row[2]) + " DUCO\n"
                if i == 6:
                    transactionstext_format = transactionstext_format.rstrip("\n")
                    break
            transactionstext.set(transactionstext_format)
        if profit_array[2] != 0:
            sessionprofittext.set(
                get_string("session") + ": "
                + str(profit_array[0]) + " ᕲ")
            minuteprofittext.set(
                "≈" + str(profit_array[1]) + " ᕲ/"
                + get_string("minute"))
            hourlyprofittext.set(
                "≈" + str(profit_array[2]) + " ᕲ/"
                + get_string("hour"))
            dailyprofittext.set(
                "≈"
                + str(profit_array[3])
                + " ᕲ/"
                + get_string("day")
                + " ($"
                + str(round(profit_array[3] * duco_fiat_value, 4))
                + ")")
        else:
            # After ~10 polls with no hourly estimate, tell the user
            # profit stats aren't available yet.
            if profitCheck > 10:
                sessionprofittext.set(get_string("sessionprofit_unavailable1"))
                minuteprofittext.set(get_string("sessionprofit_unavailable2"))
                hourlyprofittext.set("")
                dailyprofittext.set("")
            profitCheck += 1
    except Exception:
        _exit(0)
    Timer(1, update_balance_labels).start()
def profit_calculator(start_bal):
    """Estimate mining profit from successive balance snapshots.

    Compares the current ``global_balance`` against the value captured
    10 s earlier and, for non-negative deltas, publishes
    [session, per-minute, per-hour, per-day] rounded estimates into
    the global ``profit_array``. Re-schedules itself every 10 s; exits
    the process on any error (UI gone).
    """
    try:  # Thanks Bilaboz for the code!
        global curr_bal, profit_array
        previous = curr_bal
        curr_bal = global_balance
        delta_10s = curr_bal - previous
        if delta_10s >= 0:
            per_minute = delta_10s * 6
            per_hour = per_minute * 60
            profit_array = [
                round(curr_bal - start_bal, 8),
                round(per_minute, 6),
                round(per_hour, 4),
                round(per_hour * 24, 2)]
    except Exception:
        _exit(0)
    Timer(10, profit_calculator, [start_bal]).start()
def send_funds_protocol(handler):
    """Confirm with the user, then transfer DUCO over the wallet websocket.

    `handler` is the Tk <Button-1> event object (unused). Reads the
    recipient and amount from the form Entry widgets, logs in with the
    stored credentials and issues a SEND command.
    """
    recipientStr = recipient.get()
    amountStr = amount.get()
    MsgBox = messagebox.askquestion(
        get_string("warning"),
        get_string("send_funds_warning")
        + " "
        + str(amountStr)
        + " DUCO "
        + get_string("send_funds_to")
        + " "
        + str(recipientStr)
        + "?",
        icon="warning",)
    if MsgBox == "yes":
        soc = websocket.create_connection(WS_URI)
        # Consume the server's greeting line before sending commands.
        soc.recv()
        soc.send(bytes(
            "LOGI,"
            + str(username)
            + ","
            + str(password),
            encoding="utf8"))
        response = soc.recv()
        # "-" is a placeholder field in the SEND command
        # (presumably the memo) — TODO confirm against server protocol.
        soc.send(
            bytes(
                "SEND,"
                + "-"
                + ","
                + str(recipientStr)
                + ","
                + str(amountStr),
                encoding="utf8"))
        # Server replies "STATUS,message,txid" on success.
        response = soc.recv().rstrip("\n").split(",")
        soc.close()
        if "OK" in str(response[0]):
            MsgBox = messagebox.showinfo(response[0],
                                         response[1]
                                         + "\nTXID:"
                                         + response[2])
        else:
            MsgBox = messagebox.showwarning(response[0], response[1])
    root.update()
def init_rich_presence():
    """Create the global Discord Rich Presence client and connect to it.

    Failure (e.g. Discord not running) is silently ignored; the update
    loop guards against a missing/unconnected client itself.
    """
    global RPC
    try:
        RPC = Presence(806985845320056884)  # Discord application client ID
        RPC.connect()
    except Exception:  # Discord not launched
        pass
def update_rich_presence():
    """Background loop: push balance stats to Discord every 15 seconds.

    Intended to run on a daemon-style Thread; never returns. Errors
    (Discord not running, RPC never connected) are swallowed each cycle.
    """
    startTime = int(time())
    while True:
        try:
            balance = round(global_balance, 4)
            RPC.update(
                details=str(balance)
                + " ᕲ ($"
                + str(round(duco_fiat_value * balance, 2))
                + ")",
                start=startTime,
                large_image="duco",
                large_text="Duino-Coin, "
                + "a coin that can be mined with almost everything, "
                + "including AVR boards",
                buttons=[
                    {"label": "Learn more",
                     "url": "https://duinocoin.com"},
                    {"label": "Discord Server",
                     "url": "https://discord.gg/k48Ht5y"}])
        except Exception:  # Discord not launched
            pass
        sleep(15)
class Wallet:
    """Main wallet window: balances, transfer form, profit stats, shortcuts."""

    def __init__(self, master):
        """Build the whole wallet UI in `master` and start the updaters.

        Publishes the form Entry widgets and all StringVars as module
        globals so the timer callbacks can refresh them, then blocks in
        the Tk mainloop.
        """
        global recipient
        global amount
        global balancetext
        global wbalancetext
        global sessionprofittext
        global minuteprofittext
        global hourlyprofittext
        global dailyprofittext
        global balanceusdtext
        global transactionstext
        global curr_bal
        global profit_array
        # Close the splash/loading window if it is still open.
        try:
            loading.destroy()
        except Exception:
            pass
        textFont4 = Font(
            size=14,
            weight="bold")
        TEXT_FONT_BOLD_LARGE = Font(
            size=12,
            weight="bold")
        TEXT_FONT_BOLD = Font(
            size=18,
            weight="bold")
        TEXT_FONT = Font(
            size=12,
            weight="normal")
        self.master = master
        master.resizable(False, False)
        master.configure(background=BACKGROUND_COLOR)
        master.title(get_string("duino_coin_wallet"))
        # --- Header: "DUINO-COIN WALLET: <username>" ---
        Label(
            master,
            text=get_string("uppercase_duino_coin_wallet")
            + ": "
            + str(username),
            font=TEXT_FONT_BOLD_LARGE,
            foreground=FOREGROUND_COLOR,
            background=BACKGROUND_COLOR,
        ).grid(
            row=0,
            column=0,
            sticky=S + W,
            columnspan=4,
            pady=(5, 0),
            padx=(5, 0))
        # --- DUCO / wDUCO / fiat balance labels ---
        balancetext = StringVar()
        wbalancetext = StringVar()
        balancetext.set(get_string("please_wait"))
        if TRONPY_ENABLED:
            wbalancetext.set(get_string("please_wait"))
        else:
            wbalancetext.set("0.00")
        balanceLabel = Label(
            master,
            textvariable=balancetext,
            font=TEXT_FONT_BOLD,
            foreground=FOREGROUND_COLOR_SECONDARY,
            background=BACKGROUND_COLOR)
        balanceLabel.grid(row=1,
                          column=0,
                          columnspan=3,
                          sticky=S + W,
                          padx=(5, 0))
        wbalanceLabel = Label(
            master,
            textvariable=wbalancetext,
            font=textFont4,
            foreground=FOREGROUND_COLOR_SECONDARY,
            background=BACKGROUND_COLOR)
        wbalanceLabel.grid(row=2,
                           column=0,
                           columnspan=3,
                           sticky=S + W,
                           padx=(5, 0))
        balanceusdtext = StringVar()
        balanceusdtext.set(get_string("please_wait"))
        Label(
            master,
            textvariable=balanceusdtext,
            font=TEXT_FONT,
            background=BACKGROUND_COLOR,
            foreground=FONT_COLOR,
        ).grid(row=1,
               column=3,
               sticky=S + E,
               pady=(0, 1.5),
               padx=(0, 5))
        separator = ttk.Separator(master, orient="horizontal")
        separator.grid(
            row=4,
            column=0,
            sticky=N + S + E + W,
            columnspan=4,
            padx=(5, 5),
            pady=(0, 5))

        # Placeholder clearers; despite the name, the parameter is the Tk
        # <FocusIn> event object, not an instance.
        def clear_recipient_placeholder(self):
            recipient.delete("0", "100")

        def clear_amount_placeholder(self):
            amount.delete("0", "100")

        # --- Send-funds form: recipient + amount entries ---
        Label(
            master,
            text=get_string("recipient"),
            font=TEXT_FONT,
            background=BACKGROUND_COLOR,
            foreground=FONT_COLOR,
        ).grid(row=5,
               column=0,
               sticky=W + S,
               padx=(5, 0))
        recipient = Entry(
            master,
            border="0",
            font=TEXT_FONT,
            foreground=FOREGROUND_COLOR_SECONDARY,
            background=BACKGROUND_COLOR)
        recipient.grid(row=5,
                       column=1,
                       sticky=N + W + S + E,
                       columnspan=3,
                       padx=(0, 5))
        recipient.insert("0", "revox")
        recipient.bind("<FocusIn>", clear_recipient_placeholder)
        Label(
            master,
            text=get_string("amount"),
            font=TEXT_FONT,
            background=BACKGROUND_COLOR,
            foreground=FONT_COLOR,
        ).grid(row=6,
               column=0,
               sticky=W + S,
               padx=(5, 0))
        amount = Entry(
            master,
            border="0",
            font=TEXT_FONT,
            foreground=FOREGROUND_COLOR_SECONDARY,
            background=BACKGROUND_COLOR)
        amount.grid(row=6,
                    column=1,
                    sticky=N + W + S + E,
                    columnspan=3,
                    padx=(0, 5))
        amount.insert("0", str(VERSION))
        amount.bind("<FocusIn>", clear_amount_placeholder)
        # --- Send / wrap / unwrap buttons ---
        sendLabel = Button(
            master,
            text=get_string("send_funds"),
            font=TEXT_FONT_BOLD_LARGE,
            foreground=FOREGROUND_COLOR,
            background=BACKGROUND_COLOR,
            activebackground=BACKGROUND_COLOR)
        sendLabel.grid(
            row=8,
            column=0,
            sticky=N + S + E + W,
            columnspan=4,
            padx=(5),
            pady=(1, 2))
        sendLabel.bind("<Button-1>", send_funds_protocol)
        wrapLabel = Button(
            master,
            text=get_string("wrap_duco"),
            font=TEXT_FONT_BOLD_LARGE,
            foreground=FOREGROUND_COLOR,
            background=BACKGROUND_COLOR,
            activebackground=BACKGROUND_COLOR)
        wrapLabel.grid(
            row=9,
            column=0,
            sticky=N + S + E + W,
            columnspan=2,
            padx=(5, 1),
            pady=(1, 5))
        wrapLabel.bind("<Button-1>", wrapper_window)
        # NOTE(review): `wrapLabel` is rebound here for the unwrap button;
        # the wrap button above is still referenced by Tk, so this is
        # harmless but confusing.
        wrapLabel = Button(
            master,
            text=get_string("unwrap_duco"),
            font=TEXT_FONT_BOLD_LARGE,
            foreground=FOREGROUND_COLOR,
            background=BACKGROUND_COLOR,
            activebackground=BACKGROUND_COLOR)
        wrapLabel.grid(
            row=9,
            column=2,
            sticky=N + S + E + W,
            columnspan=2,
            padx=(1, 5),
            pady=(1, 5))
        wrapLabel.bind("<Button-1>", unwrapper_window)
        separator = ttk.Separator(master, orient="horizontal")
        separator.grid(
            row=10,
            column=0,
            sticky=N + S + E + W,
            columnspan=4,
            padx=(5, 5))
        # --- Estimated profit section (filled by update_balance_labels) ---
        Label(
            master,
            text=get_string("estimated_profit"),
            font=TEXT_FONT_BOLD_LARGE,
            foreground=FOREGROUND_COLOR,
            background=BACKGROUND_COLOR,
        ).grid(row=11,
               column=0,
               sticky=S + W,
               columnspan=4,
               pady=(5, 0),
               padx=(5, 0))
        sessionprofittext = StringVar()
        sessionprofittext.set(get_string("please_wait_calculating"))
        sessionProfitLabel = Label(
            master,
            textvariable=sessionprofittext,
            font=TEXT_FONT,
            background=BACKGROUND_COLOR,
            foreground=FONT_COLOR)
        sessionProfitLabel.grid(
            row=12,
            column=0,
            sticky=W,
            columnspan=4,
            padx=5)
        minuteprofittext = StringVar()
        minuteProfitLabel = Label(
            master,
            textvariable=minuteprofittext,
            font=TEXT_FONT,
            background=BACKGROUND_COLOR,
            foreground=FONT_COLOR)
        minuteProfitLabel.grid(
            row=13,
            column=0,
            sticky=W,
            columnspan=4,
            padx=5)
        hourlyprofittext = StringVar()
        hourlyProfitLabel = Label(
            master,
            textvariable=hourlyprofittext,
            font=TEXT_FONT,
            background=BACKGROUND_COLOR,
            foreground=FONT_COLOR)
        hourlyProfitLabel.grid(
            row=14,
            column=0,
            sticky=W,
            columnspan=4,
            padx=5)
        dailyprofittext = StringVar()
        dailyprofittext.set("")
        dailyProfitLabel = Label(
            master,
            textvariable=dailyprofittext,
            font=TEXT_FONT,
            background=BACKGROUND_COLOR,
            foreground=FONT_COLOR)
        dailyProfitLabel.grid(
            row=15,
            column=0,
            sticky=W,
            columnspan=4,
            padx=5)
        separator = ttk.Separator(master, orient="horizontal")
        separator.grid(
            row=16,
            column=0,
            sticky=N + S + E + W,
            columnspan=4,
            padx=5)
        # --- Local transaction log (filled by update_balance_labels) ---
        Label(
            master,
            text=get_string("local_transactions"),
            font=TEXT_FONT_BOLD_LARGE,
            foreground=FOREGROUND_COLOR,
            background=BACKGROUND_COLOR,
        ).grid(row=17,
               column=0,
               sticky=S + W,
               columnspan=4,
               pady=(5, 0),
               padx=(5, 0))
        transactionstext = StringVar()
        transactionstext.set("")
        transactionstextLabel = Label(
            master,
            textvariable=transactionstext,
            font=TEXT_FONT,
            justify=LEFT,
            background=BACKGROUND_COLOR,
            foreground=FONT_COLOR)
        transactionstextLabel.grid(
            row=18,
            column=0,
            sticky=W,
            columnspan=4,
            padx=5,
            pady=(0, 5))
        separator = ttk.Separator(master,
                                  orient="horizontal")
        separator.grid(
            row=19,
            column=0,
            sticky=N + S + E + W,
            columnspan=4,
            padx=5,
            pady=(0, 10))
        # --- Bottom icon bar: transactions / calculator / stats / settings.
        # Each PhotoImage keeps a reference to itself (`.image`) so Tk does
        # not garbage-collect it.
        # NOTE(review): Image.ANTIALIAS was removed in Pillow >= 10
        # (use Image.LANCZOS) — confirm the pinned Pillow version.
        original = Image.open(resources + "transactions.png")
        resized = original.resize((58, 58), Image.ANTIALIAS)
        transactions = ImageTk.PhotoImage(resized)
        transactions.image = transactions
        transactionsLabel = Label(
            master,
            image=transactions,
            background=BACKGROUND_COLOR,
            foreground=FONT_COLOR)
        transactionsLabel.grid(
            row=20,
            column=0,
            sticky=N + S + W + E,
            pady=(0, 5))
        transactionsLabel.bind("<Button>", transactions_window)
        original = Image.open(resources + "calculator.png")
        resized = original.resize((58, 58), Image.ANTIALIAS)
        calculator = ImageTk.PhotoImage(resized)
        calculator.image = calculator
        calculatorLabel = Label(
            master,
            image=calculator,
            background=BACKGROUND_COLOR,
            foreground=FONT_COLOR)
        calculatorLabel.grid(
            row=20,
            column=1,
            sticky=N + S + W + E,
            padx=(0, 5),
            pady=(0, 5))
        calculatorLabel.bind("<Button>", currency_converter_window)
        original = Image.open(resources + "stats.png")
        resized = original.resize((58, 58), Image.ANTIALIAS)
        stats = ImageTk.PhotoImage(resized)
        stats.image = stats
        statsLabel = Label(
            master,
            image=stats,
            background=BACKGROUND_COLOR,
            foreground=FONT_COLOR)
        statsLabel.grid(
            row=20,
            column=2,
            sticky=N + S + W + E,
            padx=(0, 5),
            pady=(0, 5))
        statsLabel.bind("<Button>", statistics_window)
        original = Image.open(resources + "settings.png")
        resized = original.resize((58, 58), Image.ANTIALIAS)
        settings = ImageTk.PhotoImage(resized)
        settings.image = settings
        settingsLabel = Label(
            master,
            image=settings,
            background=BACKGROUND_COLOR,
            foreground=FONT_COLOR)
        settingsLabel.grid(
            row=20,
            column=3,
            sticky=N + S + W + E,
            padx=(0, 10),
            pady=(0, 5))
        settingsLabel.bind("<Button>", settings_window)
        root.iconphoto(True, PhotoImage(file=resources + "duco_color.png"))
        # Kick off the periodic updaters, then hand control to Tk.
        start_balance = global_balance
        curr_bal = start_balance
        profit_calculator(start_balance)
        update_balance_labels()
        root.mainloop()
# ---------------------------------------------------------------------------
# Optional / third-party dependency bootstrap.
# Each import is attempted at startup; installable packages are auto-installed
# via install(), optional ones toggle feature flags, and hard requirements
# exit after a 15 s grace period.
# ---------------------------------------------------------------------------
try:
    from pypresence import Presence
except ModuleNotFoundError:
    print("Pypresence is not installed."
          + "Wallet will try to install it. "
          + "If it fails, please manually install \"pypresence\".")
    install("pypresence")
try:
    from PIL import Image, ImageTk
except ModuleNotFoundError:
    print("Pillow is not installed. "
          + "Wallet will try to install it. "
          + "If it fails, please manually install \"Pillow\".")
    install("Pillow")
try:
    from notifypy import Notify
except ModuleNotFoundError:
    # Notifications are optional — run without them.
    print("Notify-py is not installed. "
          + "Continuing without notification system.")
    notificationsEnabled = False
else:
    notificationsEnabled = True
try:
    from cryptography.fernet import Fernet, InvalidToken
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
    backend = default_backend()
except ModuleNotFoundError:
    # Hard requirement: credentials are encrypted with cryptography.
    print("Cryptography is not installed. "
          + "Please manually install \"cryptography\"."
          + "\nExiting in 15s.")
    sleep(15)
    _exit(1)
try:
    import secrets
except ModuleNotFoundError:
    print("Secrets is not installed. "
          + "Please manually install \"secrets\"."
          + "\nExiting in 15s.")
    sleep(15)
    _exit(1)
try:
    from base64 import urlsafe_b64decode as b64d
    from base64 import urlsafe_b64encode as b64e
except ModuleNotFoundError:
    print("Base64 is not installed. "
          + "Please manually install \"base64\""
          + "\nExiting in 15s.")
    sleep(15)
    _exit(1)
try:
    import websocket
except ModuleNotFoundError:
    print("websocket-client is not installed. "
          + "Wallet will try to install it. "
          + "If it fails, please manually install \"websocket-client\".")
    install("websocket-client")
try:
    import tronpy
    from tronpy.keys import PrivateKey
    TRONPY_ENABLED = True
except ModuleNotFoundError:
    TRONPY_ENABLED = False
    print("Tronpy is not installed. "
          + "Please manually install \"tronpy\" "
          + "if you intend on using wDUCO wrapper.")
else:
    try:
        tron = tronpy.Tron()
        wduco = tron.get_contract("TWYaXdxA12JywrUdou3PFD1fvx2PWjqK9U")
    # FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    except Exception:
        TRONPY_ENABLED = False
        print("Tron-side error, disabling wrapper for this session")
# Make sure the data directory and the local wallet database exist.
if not path.exists(resources):
    mkdir(resources)
# NOTE(review): this is the only call site that opens resources + "/wallet.db"
# (with a slash); all others use resources + "wallet.db" — both resolve to
# the same file only if `resources` ends in a separator; confirm.
with sqlconn(resources + "/wallet.db") as con:
    cur = con.cursor()
    cur.execute(
        """CREATE TABLE IF NOT EXISTS
        Transactions(Date TEXT, amount REAL)""")
    cur.execute(
        """CREATE TABLE IF NOT EXISTS
        UserData(username TEXT, password TEXT, useWrapper TEXT)""")
    con.commit()
# Download any GUI assets that are missing from the data directory.
if not Path(resources + "duco.png").is_file():
    urlretrieve("https://i.imgur.com/9JzxR0B.png", resources + "duco.png")
if not Path(resources + "duco_color.png").is_file():
    urlretrieve(
        "https://github.com/"
        + "revoxhere/"
        + "duino-coin/blob/master/"
        + "Resources/duco.png?raw=true",
        resources + "duco_color.png")
if not Path(resources + "calculator.png").is_file():
    urlretrieve("https://i.imgur.com/iqE28Ej.png",
                resources + "calculator.png")
if not Path(resources + "exchange.png").is_file():
    urlretrieve("https://i.imgur.com/0qMtoZ7.png",
                resources + "exchange.png")
if not Path(resources + "discord.png").is_file():
    urlretrieve("https://i.imgur.com/LoctALa.png",
                resources + "discord.png")
if not Path(resources + "github.png").is_file():
    urlretrieve("https://i.imgur.com/PHEfWbl.png",
                resources + "github.png")
if not Path(resources + "settings.png").is_file():
    urlretrieve("https://i.imgur.com/NNEI4WL.png",
                resources + "settings.png")
if not Path(resources + "transactions.png").is_file():
    urlretrieve("https://i.imgur.com/nbVPlKk.png",
                resources + "transactions.png")
if not Path(resources + "stats.png").is_file():
    urlretrieve("https://i.imgur.com/KRfHZUM.png",
                resources + "stats.png")
# Translation strings file (shared with the other Duino-Coin tools).
if not Path(resources + "langs.json").is_file():
    urlretrieve(
        "https://raw.githubusercontent.com/"
        + "revoxhere/"
        + "duino-coin/master/Resources/"
        + "Wallet_langs.json",
        resources + "langs.json")
# Load language strings depending on system locale
with open(resources + "langs.json", "r", encoding="utf-8") as lang_file:
    # The handle name is deliberately rebound to the parsed JSON dict;
    # translation lookups elsewhere read from this `lang_file` mapping.
    lang_file = jsonloads(lang_file.read())
# Pick the translation matching the system locale, defaulting to English.
# Table of ISO language-code prefixes -> translation names in langs.json.
_LANG_PREFIXES = {
    "es": "spanish",
    "pl": "polish",
    "fr": "french",
    "bg": "bulgarian",
    "nl": "dutch",
    "ru": "russian",
    "de": "german",
    "tr": "turkish",
    "it": "italian",
    "zh": "chinese_simplified",
    "th": "thai",
}
try:
    # FIX: getdefaultlocale() returns (None, None) when the locale cannot
    # be determined; the old code then raised AttributeError on
    # None.startswith(), which the `except IndexError` did not catch and
    # the wallet crashed at startup. Guard against None and catch broadly.
    locale = getdefaultlocale()[0] or ""
    for _prefix, _lang_name in _LANG_PREFIXES.items():
        if locale.startswith(_prefix):
            lang = _lang_name
            break
    else:
        lang = "english"
except (IndexError, AttributeError, ValueError):
    lang = "english"
if __name__ == "__main__":
    # Entry point: log in (first run) or load saved credentials, then
    # start the background updaters and open the main wallet window.
    with sqlconn(resources + "wallet.db") as con:
        cur = con.cursor()
        cur.execute("SELECT COUNT(username) FROM UserData")
        userdata_count = cur.fetchall()[0][0]
        if userdata_count < 1:
            # No saved credentials yet: show the login window first.
            root = Tk()
            lf = LoginFrame(root)
            root.mainloop()
            cur = con.cursor()
            cur.execute("SELECT COUNT(username) FROM UserData")
            userdata_count = cur.fetchall()[0][0]
        if userdata_count >= 1:
            loading_window()
            cur = con.cursor()
            cur.execute("SELECT * FROM UserData")
            userdata_query = cur.fetchone()
            username = userdata_query[0]
            # assumes the stored password column holds base64-encoded
            # bytes — TODO confirm against LoginFrame's insert
            passwordEnc = (userdata_query[1]).decode("utf-8")
            password = b64decode(passwordEnc).decode("utf8")
            status.config(text=get_string("preparing_wallet_window"))
            loading.update()
            try:
                # Start duco price updater
                get_duco_price()
                get_balance()
                init_rich_presence()
                Thread(target=update_rich_presence).start()
                try:
                    # Destroy loading dialog and start the main wallet window
                    loading.destroy()
                except Exception:
                    pass
                root = Tk()
                my_gui = Wallet(root)
            except Exception as e:
                print(e)
                _exit(0)
|
test_certificates.py | from __future__ import unicode_literals # at top of module
import datetime
import json
import ssl
import threading
from http.server import HTTPServer, SimpleHTTPRequestHandler
from tempfile import NamedTemporaryFile
import arrow
import pytest
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from marshmallow import ValidationError
from freezegun import freeze_time
from unittest.mock import patch
from sqlalchemy.testing import fail
from lemur.certificates.service import create_csr, identify_and_persist_expiring_deployed_certificates
from lemur.certificates.views import * # noqa
from lemur.common import utils
from lemur.domains.models import Domain
from lemur.tests.test_messaging import create_cert_that_expires_in_days
from lemur.tests.vectors import (
VALID_ADMIN_API_TOKEN,
VALID_ADMIN_HEADER_TOKEN,
VALID_USER_HEADER_TOKEN,
CSR_STR,
INTERMEDIATE_CERT_STR,
SAN_CERT_STR,
SAN_CERT_CSR,
SAN_CERT_KEY,
ROOTCA_KEY,
ROOTCA_CERT_STR,
)
def test_get_or_increase_name(session, certificate):
    """Colliding names get the serial (and, if needed, a counter) appended."""
    from lemur.certificates.models import get_or_increase_name
    from lemur.tests.factories import CertificateFactory

    serial = "AFF2DB4F8D2D4D8E80FA382AE27C2333"
    # assumes the `certificate` fixture's serial renders as this hex
    # string — TODO confirm against the fixture definition
    assert get_or_increase_name(
        certificate.name, certificate.serial
    ) == "{0}-{1}".format(certificate.name, serial)
    certificate.name = "test-cert-11111111"
    assert (
        get_or_increase_name(certificate.name, certificate.serial)
        == "test-cert-11111111-" + serial
    )
    certificate.name = "test-cert-11111111-1"
    assert (
        get_or_increase_name("test-cert-11111111-1", certificate.serial)
        == "test-cert-11111111-1-" + serial
    )
    CertificateFactory(name="certificate1")
    CertificateFactory(name="certificate1-" + serial)
    session.commit()
    # With both the plain and the serial-suffixed names taken, a numeric
    # counter is appended as well.
    assert get_or_increase_name(
        "certificate1", int(serial, 16)
    ) == "certificate1-{}-1".format(serial)
def test_get_all_certs(session, certificate):
    """The service should report more than one stored certificate."""
    from lemur.certificates.service import get_all_certs

    stored = get_all_certs()
    assert len(stored) > 1
def test_get_by_name(session, certificate):
    """Looking a certificate up by its exact name should succeed."""
    from lemur.certificates.service import get_by_name

    assert get_by_name(certificate.name)
def test_get_by_serial(session, certificate):
    """Looking a certificate up by its serial number should succeed."""
    from lemur.certificates.service import get_by_serial

    assert get_by_serial(certificate.serial)
def test_get_all_certs_attached_to_endpoint_without_autorotate(session):
    """Revoking a cert removes it from the endpoint/no-autorotate list."""
    from lemur.certificates.service import get_all_certs_attached_to_endpoint_without_autorotate, \
        cleanup_after_revoke
    from lemur.tests.factories import EndpointFactory

    # add a certificate with endpoint
    EndpointFactory()
    list_before = get_all_certs_attached_to_endpoint_without_autorotate()
    len_list_before = len(list_before)
    assert len_list_before > 0
    # revoked the first certificate
    first_cert_with_endpoint = list_before[0]
    cleanup_after_revoke(first_cert_with_endpoint)
    # The revoked certificate must no longer appear in the listing.
    list_after = get_all_certs_attached_to_endpoint_without_autorotate()
    assert len(list_after) + 1 == len_list_before
def test_delete_cert(session):
    """delete() should remove a certificate so a later get() finds nothing."""
    from lemur.certificates.service import delete, get
    from lemur.tests.factories import CertificateFactory

    doomed = CertificateFactory(name="DELETEME")
    session.commit()
    # The certificate must exist before we try to remove it...
    assert get(doomed.id)
    delete(doomed.id)
    # ...and must be gone afterwards.
    assert not get(doomed.id)
def test_cleanup_after_revoke(session, issuer_plugin, crypto_authority):
    """cleanup_after_revoke marks the cert revoked and strips flags, keeping the row."""
    from lemur.certificates.service import cleanup_after_revoke, get
    from lemur.tests.factories import CertificateFactory

    revoke_this = CertificateFactory(name="REVOKEME")
    session.commit()
    to_be_revoked = get(revoke_this.id)
    assert to_be_revoked
    to_be_revoked.notify = True
    to_be_revoked.rotation = True
    # Assuming the cert is revoked by corresponding issuer, update the records in lemur
    cleanup_after_revoke(to_be_revoked)
    revoked_cert = get(to_be_revoked.id)
    # then not exist after delete
    assert revoked_cert
    assert revoked_cert.status == "revoked"
    assert not revoked_cert.notify
    assert not revoked_cert.rotation
    assert not revoked_cert.destinations
def test_get_by_attributes(session, certificate):
    """get_by_attributes filters by one or many fields and can match many rows."""
    from lemur.certificates.service import get_by_attributes

    # Should get one cert
    certificate1 = get_by_attributes(
        {
            "name": "SAN-san.example.org-LemurTrustUnittestsClass1CA2018-20171231-20471231"
        }
    )
    # Should get one cert using multiple attrs
    certificate2 = get_by_attributes(
        {"name": "test-cert-11111111-1", "cn": "san.example.org"}
    )
    # Should get multiple certs
    multiple = get_by_attributes(
        {
            "cn": "LemurTrust Unittests Class 1 CA 2018",
            "issuer": "LemurTrustUnittestsRootCA2018",
        }
    )
    assert len(certificate1) == 1
    assert len(certificate2) == 1
    assert len(multiple) > 1
def test_find_duplicates(session):
    """Duplicate detection should match both with and without a chain."""
    from lemur.certificates.service import find_duplicates

    cert = {"body": SAN_CERT_STR, "chain": INTERMEDIATE_CERT_STR}
    with_chain = find_duplicates(cert)
    cert["chain"] = ""
    without_chain = find_duplicates(cert)
    assert len(with_chain) > 0
    assert len(without_chain) > 0
def test_get_certificate_primitives(certificate):
    """get_certificate_primitives returns the full re-issue parameter dict."""
    from lemur.certificates.service import get_certificate_primitives

    # NOTE(review): `names` is unused in this test — kept as-is.
    names = [x509.DNSName(x.name) for x in certificate.domains]
    # Freeze time so validity-derived primitives are deterministic.
    with freeze_time(datetime.date(year=2016, month=10, day=30)):
        primitives = get_certificate_primitives(certificate)
        assert len(primitives) == 26
        assert primitives["key_type"] == "RSA2048"
def test_certificate_output_schema(session, certificate, issuer_plugin):
    """Serialization parses the PEM exactly once and hides subject details."""
    from lemur.certificates.schemas import CertificateOutputSchema

    # Clear the cached attribute first
    if "parsed_cert" in certificate.__dict__:
        del certificate.__dict__["parsed_cert"]
    # Make sure serialization parses the cert only once (uses cached 'parsed_cert' attribute)
    with patch(
        "lemur.common.utils.parse_certificate", side_effect=utils.parse_certificate
    ) as wrapper:
        data, errors = CertificateOutputSchema().dump(certificate)
        assert data["issuer"] == "LemurTrustUnittestsClass1CA2018"
        assert data["distinguishedName"] == "L=Earth,ST=N/A,C=EE,OU=Karate Lessons,O=Daniel San & co,CN=san.example.org"
        # Authority does not have 'cab_compliant', thus subject details should not be returned
        assert "organization" not in data
    assert wrapper.call_count == 1
def test_certificate_output_schema_subject_details(session, certificate, issuer_plugin):
    """The cab_compliant authority option toggles subject-detail output."""
    from lemur.certificates.schemas import CertificateOutputSchema
    from lemur.authorities.service import update_options

    # Mark authority as non-cab-compliant
    update_options(certificate.authority.id, '[{"name": "cab_compliant","value":false}]')
    data, errors = CertificateOutputSchema().dump(certificate)
    assert not errors
    assert data["issuer"] == "LemurTrustUnittestsClass1CA2018"
    assert data["distinguishedName"] == "L=Earth,ST=N/A,C=EE,OU=Karate Lessons,O=Daniel San & co,CN=san.example.org"
    # Original subject details should be returned because of cab_compliant option update above
    assert data["country"] == "EE"
    assert data["state"] == "N/A"
    assert data["location"] == "Earth"
    assert data["organization"] == "Daniel San & co"
    assert data["organizationalUnit"] == "Karate Lessons"
    # Mark authority as cab-compliant
    update_options(certificate.authority.id, '[{"name": "cab_compliant","value":true}]')
    data, errors = CertificateOutputSchema().dump(certificate)
    assert not errors
    # For compliant authorities the individual subject fields are omitted.
    assert "country" not in data
    assert "state" not in data
    assert "location" not in data
    assert "organization" not in data
    assert "organizationalUnit" not in data
def test_certificate_edit_schema(session):
    """Loading an owner-only edit yields three notifications and an owner role."""
    from lemur.certificates.schemas import CertificateEditInputSchema

    input_data = {"owner": "bob@example.com"}
    data, errors = CertificateEditInputSchema().load(input_data)
    assert not errors
    assert len(data["notifications"]) == 3
    # The first role carries the owner's address as its name.
    assert data["roles"][0].name == input_data["owner"]
def test_authority_key_identifier_schema():
    """AuthorityKeyIdentifierSchema camelCase <-> snake_case round trip."""
    from lemur.schemas import AuthorityKeyIdentifierSchema

    input_data = {"useKeyIdentifier": True, "useAuthorityCert": True}
    data, errors = AuthorityKeyIdentifierSchema().load(input_data)
    # NOTE(review): sorted() on a dict compares key lists only, and on the
    # dumps() string below it compares sorted characters — both are weak
    # equality checks; consider direct == comparisons.
    assert sorted(data) == sorted(
        {"use_key_identifier": True, "use_authority_cert": True}
    )
    assert not errors
    data, errors = AuthorityKeyIdentifierSchema().dumps(data)
    assert sorted(data) == sorted(json.dumps(input_data))
    assert not errors
def test_certificate_info_access_schema():
    """The AIA schema should round-trip between camelCase and snake_case."""
    from lemur.schemas import CertificateInfoAccessSchema

    payload = {"includeAIA": True}
    loaded, errors = CertificateInfoAccessSchema().load(payload)
    assert not errors
    assert loaded == {"include_aia": True}
    dumped, errors = CertificateInfoAccessSchema().dump(loaded)
    assert not errors
    assert dumped == payload
def test_subject_key_identifier_schema():
    """The SKI schema should round-trip between camelCase and snake_case."""
    from lemur.schemas import SubjectKeyIdentifierSchema

    payload = {"includeSKI": True}
    loaded, errors = SubjectKeyIdentifierSchema().load(payload)
    assert not errors
    assert loaded == {"include_ski": True}
    dumped, errors = SubjectKeyIdentifierSchema().dump(loaded)
    assert not errors
    assert dumped == payload
def test_extension_schema(client):
    """A combined extensions payload loads and dumps without errors."""
    from lemur.certificates.schemas import ExtensionSchema

    input_data = {
        "keyUsage": {"useKeyEncipherment": True, "useDigitalSignature": True},
        "extendedKeyUsage": {"useServerAuthentication": True},
        "subjectKeyIdentifier": {"includeSKI": True},
    }
    data, errors = ExtensionSchema().load(input_data)
    assert not errors
    data, errors = ExtensionSchema().dump(data)
    assert not errors
def test_certificate_input_schema(client, authority):
    """A full input payload loads cleanly and receives schema defaults."""
    from lemur.certificates.schemas import CertificateInputSchema

    input_data = {
        "commonName": "test.example.com",
        "owner": "jim@example.com",
        "authority": {"id": authority.id},
        "description": "testtestest",
        "validityStart": arrow.get(2018, 11, 9).isoformat(),
        "validityEnd": arrow.get(2019, 11, 9).isoformat(),
        "dnsProvider": None,
        "location": "A Place"
    }
    data, errors = CertificateInputSchema().load(input_data)
    assert not errors
    assert data["authority"].id == authority.id
    assert data["location"] == "A Place"
    # make sure the defaults got set
    assert data["common_name"] == "test.example.com"
    assert data["country"] == "US"
    assert data["key_type"] == "ECCPRIME256V1"
    assert len(data.keys()) == 19
def test_certificate_input_schema_empty_location(client, authority):
    """An empty (but present) location string is accepted as-is."""
    from lemur.certificates.schemas import CertificateInputSchema

    input_data = {
        "commonName": "test.example.com",
        "owner": "jim@example.com",
        "authority": {"id": authority.id},
        "description": "testtestest",
        "validityStart": arrow.get(2018, 11, 9).isoformat(),
        "validityEnd": arrow.get(2019, 11, 9).isoformat(),
        "dnsProvider": None,
        "location": ""
    }
    data, errors = CertificateInputSchema().load(input_data)
    assert not errors
    assert len(data.keys()) == 19
    assert data["location"] == ""
    # make sure the defaults got set
    assert data["common_name"] == "test.example.com"
    assert data["country"] == "US"
    assert data["key_type"] == "ECCPRIME256V1"
def test_certificate_input_with_extensions(client, authority):
    """Extensions and an explicit key type survive schema loading."""
    from lemur.certificates.schemas import CertificateInputSchema

    input_data = {
        "commonName": "test.example.com",
        "owner": "jim@example.com",
        "authority": {"id": authority.id},
        "description": "testtestest",
        "extensions": {
            "keyUsage": {"digital_signature": True},
            "extendedKeyUsage": {
                "useClientAuthentication": True,
                "useServerAuthentication": True,
            },
            "subjectKeyIdentifier": {"includeSKI": True},
            "subAltNames": {
                "names": [{"nameType": "DNSName", "value": "test.example.com"}]
            },
        },
        "dnsProvider": None,
        "keyType": "RSA2048"
    }
    data, errors = CertificateInputSchema().load(input_data)
    assert not errors
    # The explicit keyType overrides the ECC default.
    assert data["key_type"] == "RSA2048"
def test_certificate_input_schema_parse_csr(authority):
    """SANs and key type are extracted from a supplied CSR during load."""
    from lemur.certificates.schemas import CertificateInputSchema

    test_san_dns = "foobar.com"
    extensions = {
        "sub_alt_names": {
            "names": x509.SubjectAlternativeName([x509.DNSName(test_san_dns)])
        }
    }
    # Build a real CSR carrying the SAN above.
    csr, private_key = create_csr(
        owner="joe@example.com",
        common_name="ACommonName",
        organization="test",
        organizational_unit="Meters",
        country="NL",
        state="Noord-Holland",
        location="Amsterdam",
        key_type="RSA2048",
        extensions=extensions,
    )
    input_data = {
        "commonName": "test.example.com",
        "owner": "jim@example.com",
        "authority": {"id": authority.id},
        "description": "testtestest",
        "csr": csr,
        "dnsProvider": None,
    }
    data, errors = CertificateInputSchema().load(input_data)
    assert not errors
    # Every SAN parsed back out of the CSR must match what we put in.
    for san in data["extensions"]["sub_alt_names"]["names"]:
        assert san.value == test_san_dns
    assert data["key_type"] == "RSA2048"
def test_certificate_out_of_range_date(client, authority):
    """Validity windows outside the allowed range are rejected at load time."""
    from lemur.certificates.schemas import CertificateInputSchema

    input_data = {
        "commonName": "test.example.com",
        "owner": "jim@example.com",
        "authority": {"id": authority.id},
        "description": "testtestest",
        "validityYears": 100,
        "dnsProvider": None,
    }
    # 100 years is out of range.
    data, errors = CertificateInputSchema().load(input_data)
    assert errors
    # Adding only a start date still fails validation.
    input_data["validityStart"] = "2017-04-30T00:12:34.513631"
    data, errors = CertificateInputSchema().load(input_data)
    assert errors
    # Even a complete start/end pair fails while validityYears=100 remains.
    input_data["validityEnd"] = "2018-04-30T00:12:34.513631"
    data, errors = CertificateInputSchema().load(input_data)
    assert errors
def test_certificate_valid_years(client, authority):
    """A one-year validity period is accepted."""
    from lemur.certificates.schemas import CertificateInputSchema

    input_data = {
        "commonName": "test.example.com",
        "owner": "jim@example.com",
        "authority": {"id": authority.id},
        "description": "testtestest",
        "validityYears": 1,
        "dnsProvider": None,
    }
    data, errors = CertificateInputSchema().load(input_data)
    assert not errors
def test_certificate_valid_dates(client, authority):
    """An explicit start/end pair (even one second apart) is accepted."""
    from lemur.certificates.schemas import CertificateInputSchema

    input_data = {
        "commonName": "test.example.com",
        "owner": "jim@example.com",
        "authority": {"id": authority.id},
        "description": "testtestest",
        "validityStart": "2020-01-01T00:00:00",
        "validityEnd": "2020-01-01T00:00:01",
        "dnsProvider": None,
    }
    data, errors = CertificateInputSchema().load(input_data)
    assert not errors
def test_certificate_cn_admin(client, authority, logged_in_admin):
    """Admin is exempt from CN/SAN domain restrictions."""
    from lemur.certificates.schemas import CertificateInputSchema

    # This wildcard CN would fail allowlist checks for a regular user.
    input_data = {
        "commonName": "*.admin-overrides-allowlist.com",
        "owner": "jim@example.com",
        "authority": {"id": authority.id},
        "description": "testtestest",
        "validityStart": "2020-01-01T00:00:00",
        "validityEnd": "2020-01-01T00:00:01",
        "dnsProvider": None,
    }
    data, errors = CertificateInputSchema().load(input_data)
    assert not errors
def test_certificate_allowed_names(client, authority, session, logged_in_user):
    """Test for allowed CN and SAN values."""
    from lemur.certificates.schemas import CertificateInputSchema

    # A CN containing spaces is not subjected to domain pattern checks.
    input_data = {
        "commonName": "Names with spaces are not checked",
        "owner": "jim@example.com",
        "authority": {"id": authority.id},
        "description": "testtestest",
        "validityStart": "2020-01-01T00:00:00",
        "validityEnd": "2020-01-01T00:00:01",
        "extensions": {
            "subAltNames": {
                "names": [
                    {"nameType": "DNSName", "value": "allowed.example.com"},
                    {"nameType": "IPAddress", "value": "127.0.0.1"},
                ]
            }
        },
        "dnsProvider": None,
    }
    data, errors = CertificateInputSchema().load(input_data)
    assert not errors
# NOTE(review): "incative" is a typo for "inactive"; the name is kept
# unchanged for test-ID stability.
def test_certificate_incative_authority(client, authority, session, logged_in_user):
    """Cannot issue certificates with an inactive authority."""
    from lemur.certificates.schemas import CertificateInputSchema

    authority.active = False
    session.add(authority)
    input_data = {
        "commonName": "foo.example.com",
        "owner": "jim@example.com",
        "authority": {"id": authority.id},
        "description": "testtestest",
        "validityStart": "2020-01-01T00:00:00",
        "validityEnd": "2020-01-01T00:00:01",
        "dnsProvider": None,
    }
    data, errors = CertificateInputSchema().load(input_data)
    assert errors["authority"][0] == "The authority is inactive."
def test_certificate_disallowed_names(client, authority, session, logged_in_user):
    """The CN and SAN are disallowed by LEMUR_ALLOWED_DOMAINS."""
    from lemur.certificates.schemas import CertificateInputSchema

    input_data = {
        "commonName": "*.example.com",
        "owner": "jim@example.com",
        "authority": {"id": authority.id},
        "description": "testtestest",
        "validityStart": "2020-01-01T00:00:00",
        "validityEnd": "2020-01-01T00:00:01",
        "extensions": {
            "subAltNames": {
                "names": [
                    {"nameType": "DNSName", "value": "allowed.example.com"},
                    {"nameType": "DNSName", "value": "evilhacker.org"},
                ]
            }
        },
        "dnsProvider": None,
    }
    data, errors = CertificateInputSchema().load(input_data)
    # Both the wildcard CN and the off-pattern SAN produce errors.
    assert errors["common_name"][0].startswith(
        "Domain *.example.com does not match allowed domain patterns"
    )
    assert errors["extensions"]["sub_alt_names"]["names"][0].startswith(
        "Domain evilhacker.org does not match allowed domain patterns"
    )
def test_certificate_sensitive_name(client, authority, session, logged_in_user):
    """The CN is disallowed by 'sensitive' flag on Domain model."""
    from lemur.certificates.schemas import CertificateInputSchema

    input_data = {
        "commonName": "sensitive.example.com",
        "owner": "jim@example.com",
        "authority": {"id": authority.id},
        "description": "testtestest",
        "validityStart": "2020-01-01T00:00:00",
        "validityEnd": "2020-01-01T00:00:01",
        "dnsProvider": None,
    }
    # Flag the domain as sensitive before validation runs.
    session.add(Domain(name="sensitive.example.com", sensitive=True))
    data, errors = CertificateInputSchema().load(input_data)
    assert errors["common_name"][0].startswith(
        "Domain sensitive.example.com has been marked as sensitive"
    )
def test_certificate_upload_schema_ok(client):
    """A fully-populated upload payload validates cleanly."""
    from lemur.certificates.schemas import CertificateUploadInputSchema

    payload = {
        "name": "Jane",
        "owner": "pwner@example.com",
        "body": SAN_CERT_STR,
        "privateKey": SAN_CERT_KEY,
        "chain": INTERMEDIATE_CERT_STR,
        "csr": SAN_CERT_CSR,
        "external_id": "1234",
    }
    _, errors = CertificateUploadInputSchema().load(payload)
    assert not errors


def test_certificate_upload_schema_minimal(client):
    """Only owner and body are required to upload a certificate."""
    from lemur.certificates.schemas import CertificateUploadInputSchema

    _, errors = CertificateUploadInputSchema().load(
        {"owner": "pwner@example.com", "body": SAN_CERT_STR}
    )
    assert not errors


def test_certificate_upload_schema_long_chain(client):
    """A multi-certificate chain (intermediate + root) validates cleanly."""
    from lemur.certificates.schemas import CertificateUploadInputSchema

    payload = {
        "owner": "pwner@example.com",
        "body": SAN_CERT_STR,
        "chain": INTERMEDIATE_CERT_STR + "\n" + ROOTCA_CERT_STR,
    }
    _, errors = CertificateUploadInputSchema().load(payload)
    assert not errors
def test_certificate_upload_schema_invalid_body(client):
    """A body that is not a PEM certificate is rejected with a body error."""
    from lemur.certificates.schemas import CertificateUploadInputSchema

    _, errors = CertificateUploadInputSchema().load(
        {
            "owner": "pwner@example.com",
            "body": "Hereby I certify that this is a valid body",
        }
    )
    assert errors == {"body": ["Public certificate presented is not valid."]}


def test_certificate_upload_schema_invalid_pkey(client):
    """A private key that is not PEM is rejected with a private_key error."""
    from lemur.certificates.schemas import CertificateUploadInputSchema

    _, errors = CertificateUploadInputSchema().load(
        {
            "owner": "pwner@example.com",
            "body": SAN_CERT_STR,
            "privateKey": "Look at me Im a private key!!111",
        }
    )
    assert errors == {"private_key": ["Private key presented is not valid."]}


def test_certificate_upload_schema_invalid_chain(client):
    """A chain that is not PEM is rejected with a chain error."""
    from lemur.certificates.schemas import CertificateUploadInputSchema

    _, errors = CertificateUploadInputSchema().load(
        {"body": SAN_CERT_STR, "chain": "CHAINSAW", "owner": "pwner@example.com"}
    )
    assert errors == {"chain": ["Invalid certificate in certificate chain."]}
def test_certificate_upload_schema_wrong_pkey(client):
    """A key that does not match the certificate is a schema-level error."""
    from lemur.certificates.schemas import CertificateUploadInputSchema

    payload = {
        "body": SAN_CERT_STR,
        "privateKey": ROOTCA_KEY,
        "chain": INTERMEDIATE_CERT_STR,
        "owner": "pwner@example.com",
    }
    _, errors = CertificateUploadInputSchema().load(payload)
    assert errors == {"_schema": ["Private key does not match certificate."]}


def test_certificate_upload_schema_wrong_chain(client):
    """A chain whose first link does not sign the leaf is a schema-level error."""
    from lemur.certificates.schemas import CertificateUploadInputSchema

    payload = {
        "owner": "pwner@example.com",
        "body": SAN_CERT_STR,
        "chain": ROOTCA_CERT_STR,
    }
    _, errors = CertificateUploadInputSchema().load(payload)
    assert errors == {
        "_schema": [
            "Incorrect chain certificate(s) provided: 'san.example.org' is not signed by "
            "'LemurTrust Unittests Root CA 2018'"
        ]
    }


def test_certificate_upload_schema_wrong_chain_2nd(client):
    """A broken link further down the chain is also a schema-level error."""
    from lemur.certificates.schemas import CertificateUploadInputSchema

    payload = {
        "owner": "pwner@example.com",
        "body": SAN_CERT_STR,
        "chain": INTERMEDIATE_CERT_STR + "\n" + SAN_CERT_STR,
    }
    _, errors = CertificateUploadInputSchema().load(payload)
    assert errors == {
        "_schema": [
            "Incorrect chain certificate(s) provided: 'LemurTrust Unittests Class 1 CA 2018' is "
            "not signed by 'san.example.org'"
        ]
    }
def test_certificate_revoke_schema():
    """CertificateRevokeSchema accepts known CRL reasons and rejects unknown ones."""
    from lemur.certificates.schemas import CertificateRevokeSchema

    # Renamed from `input` — that name shadows the `input` builtin.
    payload = {
        "comments": "testing certificate revoke schema",
        "crl_reason": "cessationOfOperation"
    }
    data, errors = CertificateRevokeSchema().load(payload)
    assert not errors

    # An unknown revocation reason must be rejected by the choice validator.
    payload["crl_reason"] = "fakeCrlReason"
    data, errors = CertificateRevokeSchema().load(payload)
    assert errors == {
        "crl_reason": ['Not a valid choice.']
    }
def test_create_basic_csr(client):
    """Build a CSR from a full subject config and check each subject attribute
    round-trips through PEM parsing."""
    csr_config = dict(
        common_name="example.com",
        organization="Example, Inc.",
        organizational_unit="Operations",
        country="US",
        state="CA",
        location="A place",
        owner="joe@example.com",
        key_type="RSA2048",
        extensions=dict(
            # NOTE(review): this nests names -> sub_alt_names, while the other
            # tests in this file use sub_alt_names -> names; create_csr appears
            # to tolerate it — confirm whether this shape is intentional.
            names=dict(
                sub_alt_names=x509.SubjectAlternativeName(
                    [
                        x509.DNSName("test.example.com"),
                        x509.DNSName("test2.example.com"),
                    ]
                )
            )
        ),
    )
    csr, pem = create_csr(**csr_config)
    csr = x509.load_pem_x509_csr(csr.encode("utf-8"), default_backend())
    # Every subject attribute value must come from the config above.
    for name in csr.subject:
        assert name.value in csr_config.values()
def test_csr_empty_san(client):
    """Test that an empty "names" list does not produce a CSR with empty SubjectAltNames extension.
    The Lemur UI always submits this extension even when no alt names are defined.
    """
    csr_pem, _ = create_csr(
        common_name="daniel-san.example.com",
        owner="daniel-san@example.com",
        key_type="RSA2048",
        extensions={"sub_alt_names": {"names": x509.SubjectAlternativeName([])}},
    )
    parsed_csr = x509.load_pem_x509_csr(csr_pem.encode("utf-8"), default_backend())
    # The empty SAN extension must have been dropped entirely.
    with pytest.raises(x509.ExtensionNotFound):
        parsed_csr.extensions.get_extension_for_class(x509.SubjectAlternativeName)
def test_csr_disallowed_cn(client, logged_in_user):
    """Domain name CN is disallowed via LEMUR_ALLOWED_DOMAINS."""
    from lemur.common import validators

    request, _ = create_csr(
        common_name="evilhacker.org", owner="joe@example.com", key_type="RSA2048"
    )
    with pytest.raises(ValidationError) as err:
        validators.csr(request)
    assert str(err.value).startswith(
        "Domain evilhacker.org does not match allowed domain patterns"
    )


def test_csr_disallowed_san(client, logged_in_user):
    """SAN name is disallowed by LEMUR_ALLOWED_DOMAINS."""
    from lemur.common import validators

    # The CN contains spaces so it is not treated as a domain; only the SAN
    # entry should trip the validator.
    request, _ = create_csr(
        common_name="CN with spaces isn't a domain and is thus allowed",
        owner="joe@example.com",
        key_type="RSA2048",
        extensions={
            "sub_alt_names": {
                "names": x509.SubjectAlternativeName([x509.DNSName("evilhacker.org")])
            }
        },
    )
    with pytest.raises(ValidationError) as err:
        validators.csr(request)
    assert str(err.value).startswith(
        "Domain evilhacker.org does not match allowed domain patterns"
    )
def test_get_name_from_arn(client):
    """The certificate name is extracted from an IAM server-certificate ARN."""
    from lemur.certificates.service import get_name_from_arn

    sample_arn = "arn:aws:iam::11111111:server-certificate/mycertificate"
    assert get_name_from_arn(sample_arn) == "mycertificate"


def test_get_account_number(client):
    """The AWS account number is extracted from an IAM server-certificate ARN."""
    from lemur.certificates.service import get_account_number

    sample_arn = "arn:aws:iam::11111111:server-certificate/mycertificate"
    assert get_account_number(sample_arn) == "11111111"
def test_mint_certificate(issuer_plugin, authority):
    """Minting through the issuer plugin returns the plugin's certificate body."""
    from lemur.certificates.service import mint

    cert_body, _, _, _, _ = mint(authority=authority, csr=CSR_STR)
    assert cert_body == SAN_CERT_STR


def test_create_certificate(issuer_plugin, authority, user):
    """Created certs carry parsed validity dates, issuer, and a generated or custom name."""
    from lemur.certificates.service import create

    cert = create(
        authority=authority, csr=CSR_STR, owner="joe@example.com", creator=user["user"]
    )
    assert str(cert.not_after) == "2047-12-31T22:00:00+00:00"
    assert str(cert.not_before) == "2017-12-31T22:00:00+00:00"
    assert cert.issuer == "LemurTrustUnittestsClass1CA2018"
    expected_name = (
        "SAN-san.example.org-LemurTrustUnittestsClass1CA2018"
        "-20171231-20471231-AFF2DB4F8D2D4D8E80FA382AE27C2333"
    )
    assert cert.name == expected_name

    # An explicit name overrides the generated one.
    named_cert = create(
        authority=authority,
        csr=CSR_STR,
        owner="joe@example.com",
        name="ACustomName1",
        creator=user["user"],
    )
    assert named_cert.name == "ACustomName1"
def test_reissue_certificate(
    issuer_plugin, crypto_authority, certificate, logged_in_user
):
    """Reissuing replaces subject details with defaults unless the authority opts out.

    Without a cab_compliant option on the authority the reissued cert gets the
    default organization; once cab_compliant is explicitly false, the original
    subject details are kept.
    """
    from lemur.certificates.service import reissue_certificate
    from lemur.authorities.service import update_options
    from lemur.tests.conf import LEMUR_DEFAULT_ORGANIZATION

    # test-authority would return a mismatching private key, so use 'cryptography-issuer' plugin instead.
    certificate.authority = crypto_authority
    new_cert = reissue_certificate(certificate)
    assert new_cert
    assert new_cert.key_type == "RSA2048"
    assert new_cert.organization != certificate.organization
    # Check for default value since authority does not have cab_compliant option set
    assert new_cert.organization == LEMUR_DEFAULT_ORGANIZATION
    assert new_cert.description.startswith(f"Reissued by Lemur for cert ID {certificate.id}")

    # update cab_compliant option to false for crypto_authority to maintain subject details
    update_options(crypto_authority.id, '[{"name": "cab_compliant","value":false}]')
    new_cert = reissue_certificate(certificate)
    assert new_cert.organization == certificate.organization
def test_create_csr():
    """create_csr returns a CSR/key pair, with and without SAN extensions."""
    subject_kwargs = dict(
        owner="joe@example.com",
        common_name="ACommonName",
        organization="test",
        organizational_unit="Meters",
        country="US",
        state="CA",
        location="Here",
        key_type="RSA2048",
    )

    csr, private_key = create_csr(**subject_kwargs)
    assert csr
    assert private_key

    san_extensions = {
        "sub_alt_names": {
            "names": x509.SubjectAlternativeName([x509.DNSName("AnotherCommonName")])
        }
    }
    csr, private_key = create_csr(extensions=san_extensions, **subject_kwargs)
    assert csr
    assert private_key
def test_import(user):
    """Imported certs carry parsed metadata and a generated or custom name."""
    from lemur.certificates.service import import_certificate

    cert = import_certificate(
        body=SAN_CERT_STR,
        chain=INTERMEDIATE_CERT_STR,
        private_key=SAN_CERT_KEY,
        creator=user["user"],
    )
    assert str(cert.not_after) == "2047-12-31T22:00:00+00:00"
    assert str(cert.not_before) == "2017-12-31T22:00:00+00:00"
    assert cert.issuer == "LemurTrustUnittestsClass1CA2018"
    assert cert.name.startswith(
        "SAN-san.example.org-LemurTrustUnittestsClass1CA2018-20171231-20471231"
    )

    # An explicit name overrides the generated one.
    named_cert = import_certificate(
        body=SAN_CERT_STR,
        chain=INTERMEDIATE_CERT_STR,
        private_key=SAN_CERT_KEY,
        owner="joe@example.com",
        name="ACustomName2",
        creator=user["user"],
    )
    assert named_cert.name == "ACustomName2"
@pytest.mark.skip
def test_upload(user):
    """Upload a cert and verify parsed metadata and naming.

    NOTE(review): skipped — the expected values below (issuer 'Example',
    2015/2040 dates, 'long.lived.com-...') do not match the SAN_CERT_STR
    fixture used as input; they look stale. Confirm before unskipping.
    """
    from lemur.certificates.service import upload
    cert = upload(
        body=SAN_CERT_STR,
        chain=INTERMEDIATE_CERT_STR,
        private_key=SAN_CERT_KEY,
        owner="joe@example.com",
        creator=user["user"],
    )
    assert str(cert.not_after) == "2040-01-01T20:30:52+00:00"
    assert str(cert.not_before) == "2015-06-26T20:30:52+00:00"
    assert cert.issuer == "Example"
    assert cert.name == "long.lived.com-Example-20150626-20400101-3"
    # A second upload with an explicit name should honor that name.
    cert = upload(
        body=SAN_CERT_STR,
        chain=INTERMEDIATE_CERT_STR,
        private_key=SAN_CERT_KEY,
        owner="joe@example.com",
        name="ACustomName",
        creator=user["user"],
    )
    assert "ACustomName" in cert.name


# verify upload with a private key as a str
def test_upload_private_key_str(user):
    """Upload succeeds when the private key is provided as a str."""
    from lemur.certificates.service import upload
    cert = upload(
        body=SAN_CERT_STR,
        chain=INTERMEDIATE_CERT_STR,
        private_key=SAN_CERT_KEY,
        owner="joe@example.com",
        name="ACustomName",
        creator=user["user"],
    )
    assert cert
@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 200),
        (VALID_ADMIN_HEADER_TOKEN, 200),
        (VALID_ADMIN_API_TOKEN, 200),
        ("", 401),
    ],
)
def test_certificate_get_private_key(client, token, status):
    """GET on a certificate succeeds for any authenticated user and 401s otherwise."""
    response = client.get(
        api.url_for(Certificates, certificate_id=1), headers=token
    )
    assert response.status_code == status


@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 200),
        (VALID_ADMIN_HEADER_TOKEN, 200),
        (VALID_ADMIN_API_TOKEN, 200),
        ("", 401),
    ],
)
def test_certificate_get(client, token, status):
    """GET on a certificate succeeds for any authenticated user and 401s otherwise."""
    response = client.get(
        api.url_for(Certificates, certificate_id=1), headers=token
    )
    assert response.status_code == status
def test_certificate_get_body(client):
    """The serialized certificate exposes serial, DN and original subject details."""
    response_body = client.get(
        api.url_for(Certificates, certificate_id=1), headers=VALID_USER_HEADER_TOKEN
    ).json

    assert response_body["serial"] == "211983098819107449768450703123665283596"
    assert response_body["serialHex"] == "9F7A75B39DAE4C3F9524C68B06DA6A0C"
    expected_dn = (
        "L=Earth,"
        "ST=N/A,"
        "C=EE,"
        "OU=Unittesting Operations Center,"
        "O=LemurTrust Enterprises Ltd,"
        "CN=LemurTrust Unittests Class 1 CA 2018"
    )
    assert response_body["distinguishedName"] == expected_dn

    # No authority details are provided in this test, no information about being cab_compliant is available.
    # Thus original subject details should be returned.
    expected_subject = {
        "country": "EE",
        "state": "N/A",
        "location": "Earth",
        "organization": "LemurTrust Enterprises Ltd",
        "organizationalUnit": "Unittesting Operations Center",
    }
    for field, value in expected_subject.items():
        assert response_body[field] == value
@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 403),
        (VALID_ADMIN_HEADER_TOKEN, 200),
        (VALID_ADMIN_API_TOKEN, 200),
        ("", 401),
    ],
)
def test_certificate_post_update_notify(client, certificate, token, status):
    """Only admin tokens may flip the notify flag via POST; users get 403."""
    # negate the current notify flag and pass it to update POST call to flip the notify
    toggled_notify = not certificate.notify
    response = client.post(
        api.url_for(Certificates, certificate_id=certificate.id),
        data=json.dumps({"notify": toggled_notify}),
        headers=token
    )
    assert response.status_code == status
    # On success the response must echo the flipped flag.
    if status == 200:
        assert response.json.get("notify") == toggled_notify
@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 400),
        (VALID_ADMIN_HEADER_TOKEN, 400),
        (VALID_ADMIN_API_TOKEN, 400),
        ("", 401),
    ],
)
def test_certificate_put(client, token, status):
    """PUT with an empty payload is a 400 for authenticated callers, 401 otherwise."""
    response = client.put(
        api.url_for(Certificates, certificate_id=1), data={}, headers=token
    )
    assert response.status_code == status


def test_certificate_put_with_data(client, certificate, issuer_plugin):
    """An admin PUT updates owner/description/notify and rewires the owner role."""
    payload = {"owner": "bob@example.com", "description": "test", "notify": True}
    resp = client.put(
        api.url_for(Certificates, certificate_id=certificate.id),
        data=json.dumps(payload),
        headers=VALID_ADMIN_HEADER_TOKEN,
    )
    assert resp.status_code == 200
    assert len(certificate.notifications) == 3
    assert certificate.roles[0].name == "bob@example.com"
    assert certificate.notify
@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 403),
        (VALID_ADMIN_HEADER_TOKEN, 204),
        (VALID_ADMIN_API_TOKEN, 412),
        ("", 401),
    ],
)
def test_certificate_delete(client, token, status):
    """Deleting a valid certificate is admin-only (API tokens hit a precondition)."""
    response = client.delete(
        api.url_for(Certificates, certificate_id=1), headers=token
    )
    assert response.status_code == status


@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 403),
        (VALID_ADMIN_HEADER_TOKEN, 204),
        (VALID_ADMIN_API_TOKEN, 204),
        ("", 401),
    ],
)
def test_invalid_certificate_delete(client, invalid_certificate, token, status):
    """Deleting an invalid certificate succeeds for both admin token kinds."""
    response = client.delete(
        api.url_for(Certificates, certificate_id=invalid_certificate.id),
        headers=token,
    )
    assert response.status_code == status


@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 405),
        (VALID_ADMIN_HEADER_TOKEN, 405),
        (VALID_ADMIN_API_TOKEN, 405),
        ("", 405),
    ],
)
def test_certificate_patch(client, token, status):
    """PATCH is not an allowed method on the certificate endpoint."""
    response = client.patch(
        api.url_for(Certificates, certificate_id=1), data={}, headers=token
    )
    assert response.status_code == status
@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 200),
        (VALID_ADMIN_HEADER_TOKEN, 200),
        (VALID_ADMIN_API_TOKEN, 200),
        ("", 401),
    ],
)
def test_certificates_get(client, token, status):
    """Listing certificates requires authentication."""
    response = client.get(api.url_for(CertificatesList), headers=token)
    assert response.status_code == status


@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 400),
        (VALID_ADMIN_HEADER_TOKEN, 400),
        (VALID_ADMIN_API_TOKEN, 400),
        ("", 401),
    ],
)
def test_certificates_post(client, token, status):
    """POST with an empty payload is a 400 for authenticated callers."""
    response = client.post(api.url_for(CertificatesList), data={}, headers=token)
    assert response.status_code == status


@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 405),
        (VALID_ADMIN_HEADER_TOKEN, 405),
        (VALID_ADMIN_API_TOKEN, 405),
        ("", 405),
    ],
)
def test_certificates_put(client, token, status):
    """PUT is not an allowed method on the collection endpoint."""
    response = client.put(api.url_for(CertificatesList), data={}, headers=token)
    assert response.status_code == status


@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 405),
        (VALID_ADMIN_HEADER_TOKEN, 405),
        (VALID_ADMIN_API_TOKEN, 405),
        ("", 405),
    ],
)
def test_certificates_delete(client, token, status):
    """DELETE is not an allowed method on the collection endpoint."""
    response = client.delete(api.url_for(CertificatesList), headers=token)
    assert response.status_code == status


@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 405),
        (VALID_ADMIN_HEADER_TOKEN, 405),
        (VALID_ADMIN_API_TOKEN, 405),
        ("", 405),
    ],
)
def test_certificates_patch(client, token, status):
    """PATCH is not an allowed method on the collection endpoint."""
    response = client.patch(api.url_for(CertificatesList), data={}, headers=token)
    assert response.status_code == status
@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 405),
        (VALID_ADMIN_HEADER_TOKEN, 405),
        (VALID_ADMIN_API_TOKEN, 405),
        ("", 405),
    ],
)
def test_certificate_credentials_post(client, token, status):
    """POST is not an allowed method on the private-key endpoint."""
    response = client.post(
        api.url_for(CertificatePrivateKey, certificate_id=1), data={}, headers=token
    )
    assert response.status_code == status


@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 405),
        (VALID_ADMIN_HEADER_TOKEN, 405),
        (VALID_ADMIN_API_TOKEN, 405),
        ("", 405),
    ],
)
def test_certificate_credentials_put(client, token, status):
    """PUT is not an allowed method on the private-key endpoint."""
    response = client.put(
        api.url_for(CertificatePrivateKey, certificate_id=1), data={}, headers=token
    )
    assert response.status_code == status


@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 405),
        (VALID_ADMIN_HEADER_TOKEN, 405),
        (VALID_ADMIN_API_TOKEN, 405),
        ("", 405),
    ],
)
def test_certificate_credentials_delete(client, token, status):
    """DELETE is not an allowed method on the private-key endpoint."""
    response = client.delete(
        api.url_for(CertificatePrivateKey, certificate_id=1), headers=token
    )
    assert response.status_code == status


@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 405),
        (VALID_ADMIN_HEADER_TOKEN, 405),
        (VALID_ADMIN_API_TOKEN, 405),
        ("", 405),
    ],
)
def test_certificate_credentials_patch(client, token, status):
    """PATCH is not an allowed method on the private-key endpoint."""
    response = client.patch(
        api.url_for(CertificatePrivateKey, certificate_id=1), data={}, headers=token
    )
    assert response.status_code == status
@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 405),
        (VALID_ADMIN_HEADER_TOKEN, 405),
        (VALID_ADMIN_API_TOKEN, 405),
        ("", 405),
    ],
)
def test_certificates_upload_get(client, token, status):
    """GET is not an allowed method on the upload endpoint."""
    response = client.get(api.url_for(CertificatesUpload), headers=token)
    assert response.status_code == status


@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 400),
        (VALID_ADMIN_HEADER_TOKEN, 400),
        (VALID_ADMIN_API_TOKEN, 400),
        ("", 401),
    ],
)
def test_certificates_upload_post(client, token, status):
    """POST with an empty payload is a 400 for authenticated callers."""
    response = client.post(api.url_for(CertificatesUpload), data={}, headers=token)
    assert response.status_code == status


@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 405),
        (VALID_ADMIN_HEADER_TOKEN, 405),
        (VALID_ADMIN_API_TOKEN, 405),
        ("", 405),
    ],
)
def test_certificates_upload_put(client, token, status):
    """PUT is not an allowed method on the upload endpoint."""
    response = client.put(api.url_for(CertificatesUpload), data={}, headers=token)
    assert response.status_code == status


@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 405),
        (VALID_ADMIN_HEADER_TOKEN, 405),
        (VALID_ADMIN_API_TOKEN, 405),
        ("", 405),
    ],
)
def test_certificates_upload_delete(client, token, status):
    """DELETE is not an allowed method on the upload endpoint."""
    response = client.delete(api.url_for(CertificatesUpload), headers=token)
    assert response.status_code == status


@pytest.mark.parametrize(
    "token,status",
    [
        (VALID_USER_HEADER_TOKEN, 405),
        (VALID_ADMIN_HEADER_TOKEN, 405),
        (VALID_ADMIN_API_TOKEN, 405),
        ("", 405),
    ],
)
def test_certificates_upload_patch(client, token, status):
    """PATCH is not an allowed method on the upload endpoint."""
    response = client.patch(
        api.url_for(CertificatesUpload), data={}, headers=token
    )
    assert response.status_code == status
def test_sensitive_sort(client):
    """Sorting by a sensitive column is rejected with an explanatory message."""
    resp = client.get(
        api.url_for(CertificatesList) + "?sortBy=private_key&sortDir=asc",
        headers=VALID_ADMIN_HEADER_TOKEN,
    )
    assert "'private_key' is not sortable or filterable" in resp.json["message"]


def test_boolean_filter(client):
    """Filtering on a boolean column accepts 'true' and tolerates junk values."""
    # Also don't crash with invalid input (we currently treat that as false)
    for filter_value in ("true", "whatisthis"):
        resp = client.get(
            api.url_for(CertificatesList) + "?filter=notify;" + filter_value,
            headers=VALID_ADMIN_HEADER_TOKEN,
        )
        assert resp.status_code == 200
def test_issued_cert_count_for_authority(authority):
    """The issued-cert count tracks certificates created against an authority."""
    from lemur.tests.factories import CertificateFactory
    from lemur.certificates.service import get_issued_cert_count_for_authority

    assert get_issued_cert_count_for_authority(authority) == 0

    # create a few certs issued by the authority
    for suffix in (1, 2, 3):
        CertificateFactory(
            authority=authority,
            name=f"test_issued_cert_count_for_authority{suffix}",
        )
    assert get_issued_cert_count_for_authority(authority) == 3
def test_identify_and_persist_expiring_deployed_certificates():
    from lemur.domains.models import Domain
    """
    This test spins up three local servers, each serving the same default test cert with a non-matching CN/SANs.
    The logic to check if a cert is still deployed ignores certificate validity; all it needs to know is whether
    the certificate currently deployed at the cert's associated domain has the same serial number as the one in
    Lemur's DB. The expiration check is done using the date in Lemur's DB, and is not parsed from the actual deployed
    certificate - so we can get away with using a totally unrelated cert, as long as the serial number matches.
    In this test, the serial number is always the same, since it's parsed from the hardcoded test cert.
    """
    # one non-expiring cert, two expiring certs, one cert that doesn't match a running server, and one cert using an excluded domain
    cert_1 = create_cert_that_expires_in_days(180, domains=[Domain(name='localhost')], owner='testowner1@example.com')
    cert_2 = create_cert_that_expires_in_days(10, domains=[Domain(name='localhost')], owner='testowner2@example.com')
    cert_3 = create_cert_that_expires_in_days(10, domains=[Domain(name='localhost')], owner='testowner3@example.com')
    cert_4 = create_cert_that_expires_in_days(10, domains=[Domain(name='not-localhost')], owner='testowner4@example.com')
    cert_5 = create_cert_that_expires_in_days(10, domains=[Domain(name='abc.excluded.com')], owner='testowner5@example.com')
    # test certs are all hardcoded with the same body/chain so we don't need to use the created cert here
    cert_file_data = SAN_CERT_STR + INTERMEDIATE_CERT_STR + ROOTCA_CERT_STR + SAN_CERT_KEY
    f = NamedTemporaryFile(suffix='.pem', delete=True)
    try:
        # NOTE(review): f.write is not followed by f.flush() before the servers
        # load f.name — this appears to rely on the buffer being written out in
        # time; confirm this isn't a source of flakiness.
        f.write(cert_file_data.encode('utf-8'))
        server_1 = run_server(65521, f.name)
        server_2 = run_server(65522, f.name)
        server_3 = run_server(65523, f.name)
        if not (server_1.is_alive() and server_2.is_alive() and server_3.is_alive()):
            fail('Servers not alive, test cannot proceed')
        # Before the scan, no association carries port information.
        for c in [cert_1, cert_2, cert_3, cert_4]:
            assert len(c.certificate_associations) == 1
            for ca in c.certificate_associations:
                assert ca.ports is None
        # Scan with 'excluded.com' on the exclusion list and persistence enabled.
        identify_and_persist_expiring_deployed_certificates(['excluded.com'], True)
        for c in [cert_1, cert_5]:
            assert len(c.certificate_associations) == 1
            for ca in c.certificate_associations:
                assert ca.ports is None  # cert_1 is not expiring, cert_5 is excluded, so neither should be updated
        for c in [cert_4]:
            assert len(c.certificate_associations) == 1
            for ca in c.certificate_associations:
                assert ca.ports == []  # cert_4 is valid but doesn't match so the request runs but the cert isn't found
        for c in [cert_2, cert_3]:
            assert len(c.certificate_associations) == 1
            for ca in c.certificate_associations:
                assert ca.ports == [65521, 65522, 65523]
    finally:
        f.close()  # close file (which also deletes it)
def run_server(port, cert_file_name):
    """Utility method to create a mock server that serves a specific certificate.

    Starts a daemon thread running an HTTPS server on localhost:`port` using the
    PEM bundle at `cert_file_name` (cert + chain + key), and returns the thread.
    """
    def start_server():
        server = HTTPServer(('localhost', port), SimpleHTTPRequestHandler)
        # ssl.wrap_socket() was deprecated in 3.7 and removed in Python 3.12;
        # build an explicit server-side context pinned to TLS 1.2, matching the
        # original ssl_version=PROTOCOL_TLSv1_2 behavior.
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        context.minimum_version = ssl.TLSVersion.TLSv1_2
        context.maximum_version = ssl.TLSVersion.TLSv1_2
        context.load_cert_chain(certfile=cert_file_name)
        server.socket = context.wrap_socket(server.socket, server_side=True)
        server.serve_forever()
        print(f"Started https server on port {port} using cert file {cert_file_name}")

    daemon = threading.Thread(name=f'server_{cert_file_name}', target=start_server)
    # Thread.setDaemon() is deprecated; assign the attribute directly.
    # Daemon threads are killed once the main thread is dead.
    daemon.daemon = True
    daemon.start()
    return daemon
|
customDNSv1.py | """
BSD 3-Clause License
Copyright (c) 2019, Antti Koskimäki, Aalto University, Finland
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import argparse
import multiprocessing
import random
import socket
import socketserver
import struct
import sys
import threading
import time
from multiprocessing import Queue as mQueue
from multiprocessing import Process
from socket import IPPROTO_TCP, TCP_NODELAY
import dns.edns
import dns.exception
import dns.message
import dns.name
import dns.opcode
import dns.rcode
import dns.rdataclass
import dns.rdatatype
from dnshelperfunctions1 import getDNSInfoUDP, getDNSInfoUDPNoCname
from dnshelperfunctions1 import getDNSInfoTCP, getDNSReplyInfo
"""
This is a custom DNS relay program that uses the dnshelperfunctions file in
addition. The program runs 4 separate processes, where 2 are dedicated to
act as TCP and UDP servers, 1 is for data/query processing and 1 is for
relaying request to the Realm Gateway. Addresses, ports, security options
and limited DNS options can be given as input on the command line when the
program is started. Use the "-h" argument and check the README for
more details.
"""
# TODO: There may be a need for flushing/clearing the dict containing CNAME
# message state information periodically so that it doesn't end up taking too
# much space in memory. In principle this could be just done in a thread in
# the CP process on set time intervals by locking the dict and then clearing
# it. This would cause some ongoing DNS queries to fail, but the respective
# clients could do retries to remedy this.
# Some default buffer values for network connections
SERVUDPBFR = 4096  # recv buffer size for the UDP server
SERVTCPBFR = 4096  # recv buffer size for the TCP server
FWDUDPBFR = 4096   # recv buffer size for the UDP forwarder (presumably toward the RGW — confirm)
FWDTCPBFR = 4096   # recv buffer size for the TCP forwarder (presumably toward the RGW — confirm)
# ECS subnet mask accuracy - RGW supports properly only sizes 24/16/8
ECSMASK = 24
# Data structures and object locks to facilitate threading
rgwlist = []    # NOTE(review): presumably Realm Gateway addresses, filled at startup — confirm
cnames = {}     # in-flight CNAME query state (P3T1 pops entries by the value in dnsmsg_t[2])
events = {}     # per-TCP-handler-thread Events, keyed by thread ident (see TCPClientHandler)
P2datamap = {}  # replies for blocked TCP handler threads, keyed by thread ident
lock_cnames = threading.Lock()     # guards `cnames`
lock_events = threading.Lock()     # guards `events`
lock_P2datamap = threading.Lock()  # guards `P2datamap`
# Values that are finally set by argument input on the program start
randomizeRGW = True
forwardECS = True
CNAMEstep = True
TCPstep = True
# Process 1 (UDP server) definition, threads, etc. below:
def process1_UDPsrv(q1, q2, servaddr):
    """Process 1: runs the UDP server thread and the UDP reply-sender thread."""
    # tempp
    print("P1 - Starting process 1 - UDP sender & server\n")
    server_thread = P1T1_UDPServer(q1, servaddr)
    server_thread.start()
    # Give the server thread a moment to create its socket before handing
    # that socket to the sender thread.
    time.sleep(1)
    sender_thread = P1T2_UDPSender(q2, server_thread.return_server_socket())
    sender_thread.start()
    sender_thread.join()
    server_thread.join()
    print("P1 - Exiting process 1...\n")
class P1T1_UDPServer(threading.Thread):
    """Thread that owns the UDP server and exposes its socket to the sender thread."""

    def __init__(self, q1, addr):
        super().__init__()
        self.q1 = q1
        self.addr = addr
        # Placeholder until run() creates the real server socket.
        self.serversocket = 0
        # tempp
        print("P1T1 - UDP server thread starting\n")

    def run(self):
        server = MyUDPServer(self.q1, self.addr, UDPClientHandler)
        self.serversocket = server.socket
        # tempp
        print("P1T1 - Running UDP server loop forever\n")
        server.serve_forever(5)

    def return_server_socket(self):
        return self.serversocket
class P1T2_UDPSender(threading.Thread):
    """Thread that drains q2 and sends each queued reply out the UDP server socket."""

    def __init__(self, q2, serversocket):
        super().__init__()
        self.q2 = q2
        self.serversocket = serversocket
        # tempp
        print("P1T2 - UDP Sender Thread starting\n")

    def run(self):
        # tempp
        print("P1T2 - UDP Sender listening loop starting\n")
        while True:
            # Each queue item is (payload, client_address).
            payload, client_addr = self.q2.get()[0:2]
            self.serversocket.sendto(payload, client_addr)
            # tempp
            # print("P1T2 - UDP Sender sent reply to client\n")
class UDPClientHandler(socketserver.BaseRequestHandler):
    """Forwards each received UDP datagram to the CP process via q1."""

    def handle(self):
        # tempp
        # print("P1 - UDP Server got data\n")
        # Queue item shape: (payload, client_address, thread_id=-1, 0);
        # -1 marks UDP-origin messages (no waiting handler thread).
        datagram = self.request[0]
        self.server.q1.put((datagram, self.client_address, -1, 0))
        # tempp
        # print("P1T1 - UDP server fwd sg to q1 - handling done\n")


class MyUDPServer(socketserver.UDPServer):
    """UDPServer subclass that carries the q1 queue for its handlers."""

    def __init__(self, q1, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.q1 = q1
        # tempp
        print("P1T1 - UDP Server starting\n")
# Process 2 (TCP server) definition, threads, etc. below:
def process2_TCPsrv(q3, q4, servaddr):
    """Process 2: optional TCP server plus the reply-manager loop.

    The manager loop takes replies from q4, stores them in the shared
    P2datamap keyed by the handler thread's ident, and wakes that handler
    thread via its Event (see TCPClientHandler).
    """
    # tempp
    print("P2 - Starting process 2\n")
    # NOTE(review): the rest of the module tests `TCPstep is True`; here
    # `TCPstep == 1` relies on True == 1 — confirm this is intentional.
    if TCPstep == 1:
        tcpserverthread = P2T1_TCPServer(q3, servaddr)
        tcpserverthread.start()
    # tempp
    print("P2 - Starting manager loop")
    while True:
        # q4 items carry (reply, status, thread_ident, ...) — indices 0-2 used here.
        data = q4.get()
        lock_P2datamap.acquire()
        P2datamap[data[2]] = (data[0], data[1])
        lock_P2datamap.release()
        lock_events.acquire()
        events[data[2]].set()
        lock_events.release()
        # tempp
        # print("P2 - Manager received data, set event and put data to dict\n")
    # NOTE(review): unreachable — the manager loop above never breaks.
    if TCPstep == 1:
        tcpserverthread.join()
    print("P2 - Exiting process 2\n")
class P2T1_TCPServer(threading.Thread):
    """Thread that runs the threaded TCP DNS server."""

    def __init__(self, q3, addr):
        super().__init__()
        self.q3 = q3
        self.addr = addr
        # tempp
        print("P2T1 - TCP server thread starting\n")

    def run(self):
        server = MyThreadedTCPServer(self.q3, self.addr, TCPClientHandler)
        # tempp
        print("P2T1 - Running TCP forever loop\n")
        # Disable Nagle so small DNS replies are not delayed.
        server.socket.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
        server.serve_forever(5)
class TCPClientHandler(socketserver.BaseRequestHandler):
    """Handles one TCP DNS client connection.

    The raw query (minus its 2-byte DNS-over-TCP length prefix) is queued for
    the CP process on q3 along with this handler thread's ident; the handler
    then blocks on a per-thread Event until the manager loop in process 2
    publishes the reply in P2datamap.
    """

    def handle(self):
        # tempp
        # print("P2 - TCP Server received data\n")
        data = self.request.recv(SERVTCPBFR)
        event = threading.Event()
        threadID = threading.current_thread().ident
        events[threadID] = event
        # data[2:] strips the 2-byte length prefix of DNS-over-TCP messages.
        self.server.q3.put((data[2:],
                            self.client_address,
                            threadID,
                            0))
        # Block until the manager publishes our reply, then claim and clean up.
        # (The original wrapped this in a pointless `while True: ... break`.)
        event.wait()
        with lock_P2datamap:
            data = P2datamap.pop(threadID)
        with lock_events:
            del events[threadID]
        if data[1] != 0:
            # Re-add the 2-byte length prefix required by DNS-over-TCP.
            self.request.sendall(struct.pack('!H', len(data[0])) + data[0])
            # tempp
            # print("P2 - TCP server send data back to client\n")
        else:
            # tempp
            # Fixed typo in the log message ("endend" -> "ended").
            print("P2 - TCP server ended connection, faulty query\n")
class MyTCPServer(socketserver.TCPServer):
    """TCPServer subclass that carries the q3 queue for its handlers."""

    def __init__(self, q3, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.q3 = q3
        # tempp
        print("P2 - TCP Server starting\n")


class MyThreadedTCPServer(socketserver.ThreadingMixIn, MyTCPServer):
    """Thread-per-connection variant of MyTCPServer."""
    pass
# Process 3 (Central Processing) definition, threads, etc. below:
def process3_CP(q1, q2, q3, q4, q5, q6, scn, mask, ecs):
    """Process 3: central processing — spawns the UDP, TCP and answer handler threads."""
    # tempp
    print("P3 - Starting process 3\n")
    workers = [
        P3T1_UDPH(q1, q2, q5, scn, mask, ecs),
        P3T2_TCPH(q3, q4, q5),
        P3T3_Answer(q2, q4, q6),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
class P3T1_UDPH(threading.Thread):
    """CP thread that parses queued UDP queries and routes them.

    Queue item from P1: (payload, client_address, -1, 0). Parsed results
    (dnsmsg_t) carry a status code in [1]:
      0 = malformed, 1 = not a query, 2 = reply with truncation (force TCP),
      3 = CNAME follow-up (needs saved state), otherwise = forward to RGW via q5.
    """

    def __init__(self, q1, q2, q5, scn, mask, ecs):
        super().__init__()
        self.q1 = q1      # incoming queries from the UDP server process
        self.q2 = q2      # direct replies back to the UDP sender thread
        self.q5 = q5      # queries to forward toward the RGW
        self.scn = scn
        self.mask = mask  # ECS subnet mask size
        self.ecs = ecs    # whether to forward ECS information
        # tempp
        print("P3T1 - CP UDPH thread starting\n")

    def run(self):
        # tempp
        print("P3T1 - CP UDPH thread listening loop starting\n")
        while True:
            data = self.q1.get()
            # Collapse the original four-way branch: the helper choice depends
            # only on CNAMEstep, and the TCP flag argument is `TCPstep is True`
            # in every branch.
            use_tcp = TCPstep is True
            if CNAMEstep is True:
                dnsmsg_t = getDNSInfoUDP(data[0],
                                         self.scn,
                                         use_tcp,
                                         data[1][0],
                                         self.mask,
                                         self.ecs)
            else:
                dnsmsg_t = getDNSInfoUDPNoCname(data[0],
                                                use_tcp,
                                                data[1][0],
                                                self.mask,
                                                self.ecs)
            status = dnsmsg_t[1]
            if status == 0:
                # tempp
                print("P3T1 - Malformed DNS message, discarding\n")
            elif status == 1:
                # tempp
                print("P3T1 - DNS message is not a proper query, discarding\n")
            elif status == 2:
                # TODO: choose subnet from dict here and add it to the 4th in
                # tuple
                self.q2.put((dnsmsg_t[0], data[1]))
                # tempp
                # print("P3T1 - CP UDPH thread received and sent data:\n")
                # print("Valid UDP DNS query - replying with trunc.\n")
            elif status == 3:
                # TODO: choose rgw addr based on subnet, now it's just "0"
                # Fix: hold lock_cnames only around the dict access and never
                # release a lock that was not acquired — the original
                # released in the KeyError handler, which could double-release
                # if a KeyError escaped after the explicit release().
                try:
                    with lock_cnames:
                        tempaddr = cnames.pop(dnsmsg_t[2])
                except KeyError:
                    print("P3T1 - DNS message CNAME not in dict\n")
                else:
                    self.q5.put((dnsmsg_t[0],
                                 data[1],
                                 -1,
                                 dnsmsg_t[3],
                                 dnsmsg_t[4],
                                 tempaddr))
            else:
                self.q5.put((dnsmsg_t[0],
                             data[1],
                             -1,
                             dnsmsg_t[3],
                             dnsmsg_t[4],
                             0))
class P3T2_TCPH(threading.Thread):
    """CP thread that parses TCP DNS queries and routes them onward."""

    def __init__(self, q3, q4, q5):
        threading.Thread.__init__(self)
        self.q3 = q3  # queries from the TCP server process
        self.q4 = q4  # error signals back to the TCP server process
        self.q5 = q5  # valid queries on to the RGW forwarder process
        # tempp
        print("P3T2 - CP TCPH thread starting\n")

    def run(self):
        # TODO: change TCP/CNAMEsteps to local variables given as input
        # tempp
        print("P3T2 - CP TCPH thread listening loop starting\n")
        while True:
            item = self.q3.get()
            # The flag mirrors whether the CNAME security step is enabled.
            dnsmsg_t = getDNSInfoTCP(item[0], CNAMEstep is True)
            status = dnsmsg_t[1]
            if status == 0:
                self.q4.put((0, 0, item[2], -1))
                # tempp
                print("P3T2 - Malformed DNS message, discarding\n")
            elif status == 1:
                self.q4.put((0, 0, item[2], -1))
                # tempp
                print("P3T2 - DNS message is not a proper query, discarding\n")
            else:
                self.q5.put((dnsmsg_t[0],
                             item[1],
                             item[2],
                             dnsmsg_t[2],
                             dnsmsg_t[3],
                             0))
class P3T3_Answer(threading.Thread):
    # CP thread: consumes RGW replies from q6 and routes them back to the
    # client-facing UDP (q2) or TCP (q4) server process.
    def __init__(self, q2, q4, q6):
        threading.Thread.__init__(self)
        self.q2 = q2  # replies towards the UDP server process
        self.q4 = q4  # replies towards the TCP server process
        self.q6 = q6  # incoming replies from the RGW forwarder
        # tempp
        print("P3T3 - CP Answer thread starting\n")

    def run(self):
        # TODO: change TCP/CNAMEsteps to local variables given as input
        # tempp
        print("P3T3 - CP Answer thread listening loop starting\n")
        while True:
            data = self.q6.get()
            # Parse the reply with the flag combination matching the
            # enabled security steps.
            if ((TCPstep is True) and (CNAMEstep is True)):
                dnsmsg_t = getDNSReplyInfo(data[0], data[4], True, True)
            elif ((TCPstep is True) and (CNAMEstep is False)):
                dnsmsg_t = getDNSReplyInfo(data[0], data[4], True, False)
            elif ((TCPstep is False) and (CNAMEstep is True)):
                dnsmsg_t = getDNSReplyInfo(data[0], data[4], False, True)
            else:
                dnsmsg_t = getDNSReplyInfo(data[0], data[4], False, False)
            # dnsmsg_t[1] status codes, inferred from the prints below:
            # 0 = malformed, 1 = not a proper reply, 2/4 = CNAME replies
            # (UDP/TCP), 3 = plain UDP reply, other = plain TCP reply.
            if dnsmsg_t[1] == 0:
                # tempp
                print("P3T3 - Malformed DNS answer from RGW, discarding\n")
                pass
            elif dnsmsg_t[1] == 1:
                # tempp
                print("P3T3 - DNS answer from RGW not proper, discarding\n")
                pass
            elif dnsmsg_t[1] == 2:
                # Remember which RGW answered this CNAME so the follow-up
                # query can be pinned to the same gateway.
                lock_cnames.acquire()
                cnames[dnsmsg_t[2]] = data[5]
                lock_cnames.release()
                self.q2.put((data[0], data[1]))
                # tempp
                # print("P3T3 - CP Answer thread received and sent data:\n")
                # print("Valid UDP Cname reply - forwarding to client UDP.\n")
            elif dnsmsg_t[1] == 4:
                lock_cnames.acquire()
                cnames[dnsmsg_t[2]] = data[5]
                lock_cnames.release()
                self.q4.put((data[0], data[1], data[2]))
                # tempp
                # print("P3T3 - CP Answer thread received and sent data:\n")
                # print("Valid UDP Cname reply - forwarding to client TCP.\n")
            elif dnsmsg_t[1] == 3:
                self.q2.put((data[0], data[1]))
                # tempp
                # print("P3T3 - CP Answer thread received and sent data:\n")
                # print("Valid UDP reply - forwarding to client UDP.\n")
            else:
                self.q4.put((data[0], data[1], data[2]))
                # tempp
                # print("P3T3 - CP Answer thread received and sent data:\n")
                # print("Valid UDP reply - forwarding to client TCP.\n")
# Process 4 (Data Forwarder, UDP) definition, threads, etc. below:
def process4_fwdUDP(q5, q6, dnstimeout, dnstries, rgwaddrlist, randomize):
    """UDP forwarder process: spawn one sender thread per outgoing query.

    Args:
        q5: queue of outgoing queries from central processing.
        q6: queue for replies back to central processing.
        dnstimeout: per-attempt DNS timeout towards the RGW, in seconds.
        dnstries: maximum number of send attempts per query.
        rgwaddrlist: list of (address, port) RGW tuples.
        randomize: when true, pick a random RGW for queries that do not
            already carry a CNAME-designated destination.
    """
    # tempp
    print("P4 - Starting process 4\n")
    print("P4 - Starting listening loop\n")
    while True:
        data = q5.get()
        # tempp
        # print("P4 - Creating sender thread\n")
        if randomize:
            # data[5] == 0 means no RGW was pinned by an earlier CNAME step.
            if data[5] == 0:
                # random.choice replaces the hand-rolled
                # int(len(...) * random.random()) index computation.
                rgwaddr = random.choice(rgwaddrlist)
            else:
                rgwaddr = data[5]
        else:
            rgwaddr = rgwaddrlist[0]
        # Single start site (was duplicated in both branches).
        P4TX_UDPSender(q6, data, dnstimeout, dnstries, rgwaddr).start()
class P4TX_UDPSender(threading.Thread):
    """One-shot thread relaying a single DNS query to an RGW over UDP.

    Sends the query, waits up to ``dnstimeout`` seconds per attempt and
    retries up to ``dnstries`` attempts in total; a received reply is
    queued back to central processing on ``q6``.
    """

    def __init__(self, q6, data, dnstimeout, dnstries, rgwaddr):
        threading.Thread.__init__(self)
        self.q6 = q6
        self.data = data
        self.dnstimeout = dnstimeout
        self.dnstries = dnstries
        self.rgwaddr = rgwaddr
        # tempp
        # print("P4TX - UDP Sender Thread starting\n")

    def run(self):
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        isTO = False
        tempv = 1
        try:
            sock.settimeout(self.dnstimeout)
            # TODO: temprorary, change this when testing with real rgw
            sock.sendto(self.data[0], self.rgwaddr)
            while True:
                try:
                    reply, addr = sock.recvfrom(4096)
                    break
                except socket.timeout:
                    if tempv >= self.dnstries:
                        isTO = True
                        break
                    # Retry on a fresh socket, preserving the original design.
                    tempv += 1
                    sock.close()
                    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                    sock.settimeout(self.dnstimeout)
                    sock.sendto(self.data[0], self.rgwaddr)
        finally:
            # Always release the socket; the original leaked it whenever
            # recvfrom raised anything other than socket.timeout.
            sock.close()
        if isTO:
            print("Timeout for UDP DNS Query to RGW"
                  " address: " + self.rgwaddr[0] + "\n")
        else:
            self.q6.put((reply,
                         self.data[1],
                         self.data[2],
                         self.data[3],
                         self.data[4],
                         addr))
            # tempp
            # print("P4TX - Send and received data, forwarding to CP\n")
# TODO:
# (Optional) Process 4 (Data Forwarder, TCP) definition, threads, etc. below:
def process4_fwdTCP(q5, q6):
    # Placeholder: TCP forwarding towards the RGW is not implemented yet
    # (see the TODO above).
    pass
# Functions below for help parsing valid command line arguments.
class saddrAction(argparse.Action):
    """Argparse action."""

    def __call__(self, parser, namespace, values, option_string=None):
        """Check for IP address validity."""
        valid = True
        try:
            socket.inet_aton(values)
        except socket.error:
            valid = False
        if not valid:
            parser.error("IP address should be in valid a.b.c.d IPv4 format.")
        setattr(namespace, self.dest, values)
class sportAction(argparse.Action):
    """Argparse action validating a TCP/UDP port number (1-65535)."""

    def __call__(self, parser, namespace, values, option_string=None):
        """Check for port validity."""
        if not 1 <= values <= 65535:
            # Fixed typo in the user-facing message ("shoud" -> "should").
            parser.error("Port numbers should be between 1 and 65535.")
        setattr(namespace, self.dest, values)
class ecsAction(argparse.Action):
    """Argparse action."""

    def __call__(self, parser, namespace, values, option_string=None):
        """Check for yes or no value validity."""
        if values not in ("yes", "no"):
            parser.error("ECS option should be yes or no.")
        setattr(namespace, self.dest, values)
class tcpAction(argparse.Action):
    """Argparse action."""

    def __call__(self, parser, namespace, values, option_string=None):
        """Check for yes or no value validity."""
        if values not in ("yes", "no"):
            parser.error("TCP option should be yes or no.")
        setattr(namespace, self.dest, values)
class cnameAction(argparse.Action):
    """Argparse action."""

    def __call__(self, parser, namespace, values, option_string=None):
        """Check for yes or no value validity."""
        if values not in ("yes", "no"):
            parser.error("CNAME option should be yes or no.")
        setattr(namespace, self.dest, values)
class randrgwAction(argparse.Action):
    """Argparse action."""

    def __call__(self, parser, namespace, values, option_string=None):
        """Check for yes or no value validity."""
        if values not in ("yes", "no"):
            parser.error("Randomize RGW option should be yes or no.")
        setattr(namespace, self.dest, values)
class dnstoAction(argparse.Action):
    """Argparse action."""

    def __call__(self, parser, namespace, values, option_string=None):
        """Check for int validity."""
        if not 1 <= values <= 120:
            parser.error("Timeout should be between 1 and 120 seconds.")
        setattr(namespace, self.dest, values)
class dnstryAction(argparse.Action):
    """Argparse action."""

    def __call__(self, parser, namespace, values, option_string=None):
        """Check for int validity."""
        if not 0 <= values <= 30:
            parser.error("Additional attempts should be between 0 and 30.")
        setattr(namespace, self.dest, values)
class rgwsAction(argparse.Action):
    """Argparse action validating an alternating RGW address/port list.

    ``values`` must alternate IPv4 address, port, IPv4 address, port, ...
    (fixes two user-facing "shoud" -> "should" typos).
    """

    def __call__(self, parser, namespace, values, option_string=None):
        """Check for addr/port validity."""
        if not values:
            parser.error("Destination RGW list empty.")
        for i, x in enumerate(values):
            if i % 2 == 0:
                # Even positions carry the IPv4 address.
                try:
                    socket.inet_aton(x)
                except socket.error:
                    parser.error("IP address should be in valid a.b.c.d" +
                                 " IPv4 format.")
            else:
                # Odd positions carry the matching port.
                try:
                    tempx = int(x)
                except ValueError:
                    parser.error("Port number should be a positive integer")
                if not 1 <= tempx <= 65535:
                    parser.error("Port numbers should be between" +
                                 " 1 and 65535.")
        if len(values) % 2 != 0:
            parser.error("Address missing a respective port value")
        setattr(namespace, self.dest, values)
# Main function below
def main():
    """Run the main program.

    Parses command-line options, prints the effective configuration, then
    wires up the four worker processes (UDP server, TCP server, central
    processing, RGW forwarder) with their inter-process queues and blocks
    until they exit or the user presses CTRL-C.
    """
    parser = argparse.ArgumentParser(description="Custom DNS relay server")
    parser.add_argument("-saddr",
                        "--dns_relay_server_address",
                        action=saddrAction,
                        help="Valid IPv4 address for the DNS relay server",
                        default="127.0.0.1")
    parser.add_argument("-sport",
                        "--dns_relay_server_port",
                        help="Valid port for the DNS relay server",
                        action=sportAction,
                        type=int,
                        default=53)
    parser.add_argument("-ecs",
                        "--forward_ecs",
                        action=ecsAction,
                        help="Forward ECS with DNS - yes/no",
                        default="yes")
    parser.add_argument("-tcp",
                        "--use_tcp_security_step",
                        action=tcpAction,
                        help="Use TCP DNS security step - yes/no",
                        default="yes")
    parser.add_argument("-cname",
                        "--use_cname_security_step",
                        action=cnameAction,
                        help="Use CNAME DNS security step - yes/no",
                        default="yes")
    parser.add_argument("-randrgw",
                        "--randomize_destination_rgw",
                        action=randrgwAction,
                        help="Randomize destination RGW - yes/no",
                        default="yes")
    parser.add_argument("-cnamestr",
                        "--rgw_cname_string_component",
                        help="Leftmost part of dest. RGW dns-cname-soa config",
                        default="cname")
    parser.add_argument("-dnsto",
                        "--dns_timeout",
                        help="DNS request timeout towards RGW in seconds",
                        action=dnstoAction,
                        type=int,
                        default=3)
    parser.add_argument("-dnstry",
                        "--dns_request_attempts",
                        help="Max. DNS request attempts towards RGW",
                        action=dnstryAction,
                        type=int,
                        default=3)
    parser.add_argument('-rgws',
                        '--rgws_list',
                        nargs='+',
                        action=rgwsAction,
                        help='List of RGW address (str) and port (int) pairs')
    args = parser.parse_args()
    print("Starting the custom DNS relay server...\n")
    print("Server IP address and port: {}, {}\n".
          format(args.dns_relay_server_address,
                 str(args.dns_relay_server_port)))
    servaddr = (args.dns_relay_server_address, args.dns_relay_server_port)
    if(args.forward_ecs == "yes"):
        print("Client subnet forwarding with DNS ECS is ON.\n")
        forwardECS = True
    else:
        print("Client subnet forwarding with DNS ECS is OFF.\n")
        forwardECS = False
    # NOTE(review): TCPstep/CNAMEstep are assigned as locals of main() here,
    # but the process-3 thread classes read names TCPstep/CNAMEstep at
    # module scope -- confirm these flags actually reach the workers.
    if(args.use_tcp_security_step == "yes"):
        print("DNS TCP security step is ON.\n")
        TCPstep = True
    else:
        print("DNS TCP security step is OFF.\n")
        TCPstep = False
    if(args.use_cname_security_step == "yes"):
        print("DNS CNAME security step is ON.\n")
        CNAMEstep = True
    else:
        print("DNS CNAME security step is OFF.\n")
        CNAMEstep = False
    if(args.randomize_destination_rgw == "yes"):
        print("Destination RGW randomization is ON.\n")
        randomizeRGW = True
    else:
        print("Destination RGW randomization is OFF.\n")
        randomizeRGW = False
    print("CNAME string component in use: ")
    servicecname = args.rgw_cname_string_component
    print(servicecname)
    print("\n")
    dnstimeout = args.dns_timeout
    dnstries = args.dns_request_attempts
    print("DNS request timeout in seconds: ")
    print(str(dnstimeout))
    print("\n")
    print("Maximum additional DNS request attempts: ")
    print(str(dnstries))
    print("\n")
    # Populating the destination RGW list
    if args.rgws_list:
        tempaddr = 0
        tempport = 0
        ipvalue = True
        # rgws_list alternates: address, port, address, port, ...
        for x in args.rgws_list:
            if ipvalue is True:
                ipvalue = False
                tempaddr = x
            else:
                ipvalue = True
                tempport = x
                rgwlist.append((tempaddr, int(tempport)))
        print("Following destination RGWs were given:\n")
        for x in rgwlist:
            print(x)
        print("\n")
    else:
        print("No destination RGWs given, using the default: \n")
        print("addr 127.0.0.1 port 54\n ")
        rgwlist.append(("127.0.0.1", 54))
    print("Server serves forever; exit by pressing CTRL-C")
    # Creating queues for communication between processes
    # p1 -> p3 (From UDP _server_ to Data handler)
    q1 = mQueue()
    # p3 -> p1 (From Data handler to clientside UDP _sender_)
    q2 = mQueue()
    # p2 -> p3 (From TCP server to data handler)
    q3 = mQueue()
    # p3 -> p2 (From data handler to TCP server)
    q4 = mQueue()
    # p3 -> p4 (From data handler to rgwside UDP/TCP sender)
    q5 = mQueue()
    # p4 -> p3 (From rgwside UDP/TCP sender to data handler)
    q6 = mQueue()
    p1 = Process(target=process1_UDPsrv, args=(q1, q2, servaddr))
    p2 = Process(target=process2_TCPsrv, args=(q3, q4, servaddr))
    p3 = Process(target=process3_CP, args=(q1,
                                           q2,
                                           q3,
                                           q4,
                                           q5,
                                           q6,
                                           servicecname,
                                           ECSMASK,
                                           forwardECS))
    p4 = Process(target=process4_fwdUDP, args=(q5,
                                               q6,
                                               dnstimeout,
                                               dnstries,
                                               rgwlist,
                                               randomizeRGW))
    p1.start()
    p2.start()
    p3.start()
    p4.start()
    try:
        # Block until all worker processes exit.
        p1.join()
        p2.join()
        p3.join()
        p4.join()
    except KeyboardInterrupt:
        p1.terminate()
        p2.terminate()
        p3.terminate()
        p4.terminate()
        # TODO: Remember to flush IP tables
        print("--Exiting Custom DNS server program (Ctrl-C)--\n")
        sys.exit()
    print("Exiting Custom DNS server program...\n")
# Script entry point: run the relay only when executed directly.
if __name__ == "__main__":
    main()
|
test_ai.py | __author__ = "Jerry Overton"
__copyright__ = "Copyright (C) 2022 appliedAIstudio LLC"
__version__ = "0.0.1"
import os
import time
import unittest
# needed to start up the remote ai server
import rpyc
from ai_framework.ai_server import start_ai_server
# needed to create test actions
from ai_framework.ai_actions import AIaction
# needed to run the ai server in a separate, non-blocking thread
from threading import Thread
# used to format the goal file
import json
# todo: test against invalid goal lists at startup
# create a test action template that will be used by all tests
class TestAction(AIaction):
    """No-op test action template used by all tests.

    NOTE: the ``os.environ`` lookups are evaluated once at class-definition
    time (original behavior, preserved).
    """

    def __init__(self, server=os.environ['ai_server'], port=os.environ['ai_server_port'], preconditions=None, effects=None):
        # The original used mutable default arguments ({}), which would be
        # shared across every instantiation; use None sentinels and build
        # a fresh dict per call instead.
        super().__init__(preconditions={} if preconditions is None else preconditions,
                         initial_effects={} if effects is None else effects,
                         server=server, port=port)

    def behavior(self):
        # Intentionally does nothing; subclasses override this.
        pass
# use the test action template to create an action that fails
class FailedTestAction(TestAction):
    # Class-level counters; the `self._num_failures += 1` below creates an
    # instance attribute on first increment, so each instance counts its
    # failures independently.
    _num_failures = 0
    _max_num_failures = 2

    def behavior(self):
        # this action should fail only a certain number of times
        # NOTE(review): with `<=` the action raises _max_num_failures + 1
        # times (3) before it stops failing -- confirm that is intended.
        if self._num_failures <= self._max_num_failures:
            self._num_failures += 1
            raise Exception('This is a failed action')
# use the test action template to create an action that unregisters itself
class UnregisterTestAction(TestAction):
    # First-run flag; the assignment in behavior() shadows this class
    # attribute with an instance attribute.
    _first_this_action_was_called = True

    # todo: what do i do when the actual effects are changed and need to be changed back
    # todo: in the base class, set actual effects equal to intended effects
    def behavior(self):
        # on the first run of this action, report a failure so that the action will be called again
        if self._first_this_action_was_called:
            self.actual_effects["run_unregister_action"] = False
            self._first_this_action_was_called = False
        # on the second run of this action, unregister the action
        else:
            self._unregister()
# use the test action template to create an action for a goal state unknown to the ai's world
class UnheardOfTestAction(TestAction):
    # No-op action registered for a goal state the ai's world was never
    # told about (see the "pursue_the_un_heard_of" goal).
    def behavior(self):
        pass
class TestAI(unittest.TestCase):
    """Load and behavior tests for the remote ai server."""

    @staticmethod
    def _build_ai_server_goals_list_json_file(num_goals):
        """Write the goals-list json config with num_goals stress-test goals."""
        with open('../../general_digital_twin/complex_load_testing/config/complex_load_testing_ai_goals_list.json', 'w') as f:
            # create a batch of goals designed to stress test the ai server
            goals_list = []
            for n in range(num_goals):
                # write and collect the goals into a dictionary
                goal = {
                    'goal_state_name': f"goal_{n}_achieved",
                    'goal_state_value': True,
                    'criticality': 0.1,
                    'time_sensitivity': 0.1
                }
                goals_list.append(goal)
            # create a goal to test failed actions
            failed_action_goal = {
                'goal_state_name': "run_failed_action",
                'goal_state_value': True,
                'criticality': 0.1,
                'time_sensitivity': 1
            }
            #goals_list.append(failed_action_goal)
            # create a goal to test an action that unregisters itself
            unregister_action_goal = {
                'goal_state_name': "run_unregister_action",
                'goal_state_value': True,
                'criticality': 0.4,
                'time_sensitivity': 0.4
            }
            #goals_list.append(unregister_action_goal)
            # create a goal to test an unachievable goal
            unachievable_goal = {
                'goal_state_name': "pursue_the_unachievable",
                'goal_state_value': True,
                'criticality': 0.5,
                'time_sensitivity': 0.5
            }
            #goals_list.append(unachievable_goal)
            # create a goal to test an un-heard-of goal
            un_heard_of_goal = {
                'goal_state_name': "pursue_the_un_heard_of",
                'goal_state_value': True,
                'criticality': 0.3,
                'time_sensitivity': 0.3
            }
            #goals_list.append(un_heard_of_goal)
            # convert the goals dictionary into pretty-printed json and write to file
            goals_json = json.dumps(goals_list, indent=4)
            f.write(goals_json)

    @staticmethod
    def _build_initial_world_state_json_file(num_goals):
        """Write the initial-world-state json config matching the goals list."""
        with open(
                '../../general_digital_twin/complex_load_testing/config/complex_load_testing_initial_world_state.json', 'w') as f:
            initial_world_state = {}
            # create states designed to prompt the ai to begin stress testing
            for n in range(num_goals):
                initial_world_state[f"goal_{n}_achieved"] = False
            # create a state to test failed actions
            initial_world_state['run_failed_action'] = False
            # create a state to test an action that unregisters itself
            initial_world_state['run_unregister_action'] = False
            # create a goal to test an unachievable goal
            initial_world_state['pursue_the_unachievable'] = False
            # convert the states dictionary into pretty-printed json and write to file
            initial_world_state_json = json.dumps(initial_world_state, indent=4)
            f.write(initial_world_state_json)

    @classmethod
    def setUpClass(cls) -> None:
        """Build config files, start the ai server, register test actions."""
        # build the required initialization and configuration files
        num_goals = 1
        cls._build_ai_server_goals_list_json_file(num_goals)
        cls._build_initial_world_state_json_file(num_goals)
        # start the ai server in a separate, non-blocking daemon thread
        server_thread = Thread(target=start_ai_server, kwargs={"ai_server_config_file": "complex_load_testing_ai_server_config.ini"})
        # Thread.setDaemon() is deprecated; assign the daemon attribute.
        server_thread.daemon = True
        server_thread.start()
        port = os.environ['ai_server_port']
        ai_server = os.environ['ai_server']
        # register load-testing actions in daemon threads
        for n in range(num_goals):
            preconditions = {f"goal_{n}_achieved": False}
            effects = {f"goal_{n}_achieved": True}
            test_action_thread = Thread(target=TestAction, kwargs={"preconditions": preconditions, "effects": effects})
            test_action_thread.daemon = True
            test_action_thread.start()
        # register failing action
        preconditions = {"run_failed_action": False}
        effects = {"run_failed_action": True}
        failed_test_action_thread = Thread(target=FailedTestAction,
                                           kwargs={"preconditions": preconditions, "effects": effects})
        failed_test_action_thread.daemon = True
        failed_test_action_thread.start()
        # register self-removing actions
        preconditions = {"run_unregister_action": False}
        effects = {"run_unregister_action": True}
        unregister_test_action_thread = Thread(target=UnregisterTestAction,
                                               kwargs={"preconditions": preconditions, "effects": effects})
        unregister_test_action_thread.daemon = True
        unregister_test_action_thread.start()
        # build the un-heard of action
        preconditions = {"pursue_the_un_heard_of": False}
        effects = {"pursue_the_un_heard_of": True}
        unheard_of_test_action_thread = Thread(target=UnheardOfTestAction,
                                               kwargs={"preconditions": preconditions, "effects": effects})
        unheard_of_test_action_thread.daemon = True
        unheard_of_test_action_thread.start()
        # connect to the ai server and get a copy of the ai's diary
        cls._ai_server_connection = rpyc.connect(ai_server, port)
        cls._ai_diary = cls._ai_server_connection.root.ai_diary()
        # give the ai server time to achieve given objectives
        seconds_to_wait_for_the_ai_server_to_complete_objectives = 60
        time.sleep(seconds_to_wait_for_the_ai_server_to_complete_objectives)

    @classmethod
    def tearDownClass(cls) -> None:
        pass

    def test_contents_of_diary(self):
        # make sure the diary has contents as expected
        # test action name and action preconditions
        pass

    def test_large_ai_server_connections(self):
        # create a ton of actions, a ton of goals, and check for a ton of successful diary entries
        pass

    def test_continued_ai_operations_when_an_action_fails(self):
        # the ai should continue to run even when one of the registered actions fails
        pass

    def test_ai_executes_goals_in_priority_order(self):
        # the ai should prioritize goals and execute them in order
        pass

    def test_removing_remote_capability(self):
        # the ai should recognize when an action removes itself
        pass

    def test_not_enough_resources_to_achieve_a_goal(self):
        # if the ai does not have the proper registered resources to achieve a goal
        # it should record that it has no plan for that goal
        pass

    def test_goal_not_already_in_the_world(self):
        # if the ai encounters a goal not already in the world (in some state).
        # it should record that goal as unmet in the world
        pass

    def test_aimless_iterations(self):
        # the ai should be able to handle iterations with no goals
        pass
# Run the suite directly when not invoked through a test runner.
if __name__ == '__main__':
    unittest.main()
|
test_sys.py | # -*- coding: iso-8859-1 -*-
import unittest, test.test_support
import sys, cStringIO
class SysModuleTest(unittest.TestCase):
    """Tests for the ``sys`` module.

    NOTE: this is Python 2 code (``cStringIO``, ``__builtin__``,
    ``except ... , exc`` syntax, the ``thread`` module); it will not run
    under Python 3.
    """

    def test_original_displayhook(self):
        import __builtin__
        savestdout = sys.stdout
        out = cStringIO.StringIO()
        sys.stdout = out
        dh = sys.__displayhook__
        self.assertRaises(TypeError, dh)
        if hasattr(__builtin__, "_"):
            del __builtin__._
        dh(None)
        self.assertEqual(out.getvalue(), "")
        self.assert_(not hasattr(__builtin__, "_"))
        dh(42)
        self.assertEqual(out.getvalue(), "42\n")
        self.assertEqual(__builtin__._, 42)
        del sys.stdout
        self.assertRaises(RuntimeError, dh, 42)
        sys.stdout = savestdout

    def test_lost_displayhook(self):
        olddisplayhook = sys.displayhook
        del sys.displayhook
        code = compile("42", "<string>", "single")
        self.assertRaises(RuntimeError, eval, code)
        sys.displayhook = olddisplayhook

    def test_custom_displayhook(self):
        olddisplayhook = sys.displayhook
        def baddisplayhook(obj):
            raise ValueError
        sys.displayhook = baddisplayhook
        code = compile("42", "<string>", "single")
        self.assertRaises(ValueError, eval, code)
        sys.displayhook = olddisplayhook

    def test_original_excepthook(self):
        savestderr = sys.stderr
        err = cStringIO.StringIO()
        sys.stderr = err
        eh = sys.__excepthook__
        self.assertRaises(TypeError, eh)
        try:
            raise ValueError(42)
        except ValueError, exc:
            eh(*sys.exc_info())
        sys.stderr = savestderr
        self.assert_(err.getvalue().endswith("ValueError: 42\n"))

    # FIXME: testing the code for a lost or replaced excepthook in
    # Python/pythonrun.c::PyErr_PrintEx() is tricky.

    def test_exc_clear(self):
        self.assertRaises(TypeError, sys.exc_clear, 42)

        # Verify that exc_info is present and matches exc, then clear it, and
        # check that it worked.
        def clear_check(exc):
            typ, value, traceback = sys.exc_info()
            self.assert_(typ is not None)
            self.assert_(value is exc)
            self.assert_(traceback is not None)
            sys.exc_clear()
            typ, value, traceback = sys.exc_info()
            self.assert_(typ is None)
            self.assert_(value is None)
            self.assert_(traceback is None)

        def clear():
            try:
                raise ValueError, 42
            except ValueError, exc:
                clear_check(exc)

        # Raise an exception and check that it can be cleared
        clear()

        # Verify that a frame currently handling an exception is
        # unaffected by calling exc_clear in a nested frame.
        try:
            raise ValueError, 13
        except ValueError, exc:
            typ1, value1, traceback1 = sys.exc_info()
            clear()
            typ2, value2, traceback2 = sys.exc_info()
            self.assert_(typ1 is typ2)
            self.assert_(value1 is exc)
            self.assert_(value1 is value2)
            self.assert_(traceback1 is traceback2)

        # Check that an exception can be cleared outside of an except block
        clear_check(exc)

    def test_exit(self):
        self.assertRaises(TypeError, sys.exit, 42, 42)

        # call without argument
        try:
            sys.exit(0)
        except SystemExit, exc:
            self.assertEquals(exc.code, 0)
        except:
            self.fail("wrong exception")
        else:
            self.fail("no exception")

        # call with tuple argument with one entry
        # entry will be unpacked
        try:
            sys.exit(42)
        except SystemExit, exc:
            self.assertEquals(exc.code, 42)
        except:
            self.fail("wrong exception")
        else:
            self.fail("no exception")

        # call with integer argument
        try:
            sys.exit((42,))
        except SystemExit, exc:
            self.assertEquals(exc.code, 42)
        except:
            self.fail("wrong exception")
        else:
            self.fail("no exception")

        # call with string argument
        try:
            sys.exit("exit")
        except SystemExit, exc:
            self.assertEquals(exc.code, "exit")
        except:
            self.fail("wrong exception")
        else:
            self.fail("no exception")

        # call with tuple argument with two entries
        try:
            sys.exit((17, 23))
        except SystemExit, exc:
            self.assertEquals(exc.code, (17, 23))
        except:
            self.fail("wrong exception")
        else:
            self.fail("no exception")

        # test that the exit machinery handles SystemExits properly
        import subprocess
        # both unnormalized...
        rc = subprocess.call([sys.executable, "-c",
                              "raise SystemExit, 46"])
        self.assertEqual(rc, 46)
        # ... and normalized
        rc = subprocess.call([sys.executable, "-c",
                              "raise SystemExit(47)"])
        self.assertEqual(rc, 47)

    def test_getdefaultencoding(self):
        if test.test_support.have_unicode:
            self.assertRaises(TypeError, sys.getdefaultencoding, 42)
            # can't check more than the type, as the user might have changed it
            self.assert_(isinstance(sys.getdefaultencoding(), str))

    # testing sys.settrace() is done in test_trace.py
    # testing sys.setprofile() is done in test_profile.py

    def test_setcheckinterval(self):
        self.assertRaises(TypeError, sys.setcheckinterval)
        orig = sys.getcheckinterval()
        for n in 0, 100, 120, orig: # orig last to restore starting state
            sys.setcheckinterval(n)
            self.assertEquals(sys.getcheckinterval(), n)

    def test_recursionlimit(self):
        self.assertRaises(TypeError, sys.getrecursionlimit, 42)
        oldlimit = sys.getrecursionlimit()
        self.assertRaises(TypeError, sys.setrecursionlimit)
        self.assertRaises(ValueError, sys.setrecursionlimit, -42)
        sys.setrecursionlimit(10000)
        self.assertEqual(sys.getrecursionlimit(), 10000)
        sys.setrecursionlimit(oldlimit)

    def test_getwindowsversion(self):
        # Only available on Windows builds.
        if hasattr(sys, "getwindowsversion"):
            v = sys.getwindowsversion()
            self.assert_(isinstance(v, tuple))
            self.assertEqual(len(v), 5)
            self.assert_(isinstance(v[0], int))
            self.assert_(isinstance(v[1], int))
            self.assert_(isinstance(v[2], int))
            self.assert_(isinstance(v[3], int))
            self.assert_(isinstance(v[4], str))

    def test_dlopenflags(self):
        # Only available on platforms with dlopen support.
        if hasattr(sys, "setdlopenflags"):
            self.assert_(hasattr(sys, "getdlopenflags"))
            self.assertRaises(TypeError, sys.getdlopenflags, 42)
            oldflags = sys.getdlopenflags()
            self.assertRaises(TypeError, sys.setdlopenflags)
            sys.setdlopenflags(oldflags+1)
            self.assertEqual(sys.getdlopenflags(), oldflags+1)
            sys.setdlopenflags(oldflags)

    def test_refcount(self):
        self.assertRaises(TypeError, sys.getrefcount)
        c = sys.getrefcount(None)
        n = None
        self.assertEqual(sys.getrefcount(None), c+1)
        del n
        self.assertEqual(sys.getrefcount(None), c)
        if hasattr(sys, "gettotalrefcount"):
            self.assert_(isinstance(sys.gettotalrefcount(), int))

    def test_getframe(self):
        self.assertRaises(TypeError, sys._getframe, 42, 42)
        self.assertRaises(ValueError, sys._getframe, 2000000000)
        self.assert_(
            SysModuleTest.test_getframe.im_func.func_code \
            is sys._getframe().f_code
        )

    # sys._current_frames() is a CPython-only gimmick.
    def test_current_frames(self):
        have_threads = True
        try:
            import thread
        except ImportError:
            have_threads = False

        if have_threads:
            self.current_frames_with_threads()
        else:
            self.current_frames_without_threads()

    # Test sys._current_frames() in a WITH_THREADS build.
    def current_frames_with_threads(self):
        import threading, thread
        import traceback

        # Spawn a thread that blocks at a known place.  Then the main
        # thread does sys._current_frames(), and verifies that the frames
        # returned make sense.
        entered_g = threading.Event()
        leave_g = threading.Event()
        thread_info = []  # the thread's id

        def f123():
            g456()

        def g456():
            thread_info.append(thread.get_ident())
            entered_g.set()
            leave_g.wait()

        t = threading.Thread(target=f123)
        t.start()
        entered_g.wait()

        # At this point, t has finished its entered_g.set(), although it's
        # impossible to guess whether it's still on that line or has moved on
        # to its leave_g.wait().
        self.assertEqual(len(thread_info), 1)
        thread_id = thread_info[0]

        d = sys._current_frames()

        main_id = thread.get_ident()
        self.assert_(main_id in d)
        self.assert_(thread_id in d)

        # Verify that the captured main-thread frame is _this_ frame.
        frame = d.pop(main_id)
        self.assert_(frame is sys._getframe())

        # Verify that the captured thread frame is blocked in g456, called
        # from f123.  This is a litte tricky, since various bits of
        # threading.py are also in the thread's call stack.
        frame = d.pop(thread_id)
        stack = traceback.extract_stack(frame)
        for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
            if funcname == "f123":
                break
        else:
            self.fail("didn't find f123() on thread's call stack")

        self.assertEqual(sourceline, "g456()")

        # And the next record must be for g456().
        filename, lineno, funcname, sourceline = stack[i+1]
        self.assertEqual(funcname, "g456")
        self.assert_(sourceline in ["leave_g.wait()", "entered_g.set()"])

        # Reap the spawned thread.
        leave_g.set()
        t.join()

    # Test sys._current_frames() when thread support doesn't exist.
    def current_frames_without_threads(self):
        # Not much happens here:  there is only one thread, with artificial
        # "thread id" 0.
        d = sys._current_frames()
        self.assertEqual(len(d), 1)
        self.assert_(0 in d)
        self.assert_(d[0] is sys._getframe())

    def test_attributes(self):
        self.assert_(isinstance(sys.api_version, int))
        self.assert_(isinstance(sys.argv, list))
        self.assert_(sys.byteorder in ("little", "big"))
        self.assert_(isinstance(sys.builtin_module_names, tuple))
        self.assert_(isinstance(sys.copyright, basestring))
        self.assert_(isinstance(sys.exec_prefix, basestring))
        self.assert_(isinstance(sys.executable, basestring))
        self.assert_(isinstance(sys.hexversion, int))
        self.assert_(isinstance(sys.maxint, int))
        if test.test_support.have_unicode:
            self.assert_(isinstance(sys.maxunicode, int))
        self.assert_(isinstance(sys.platform, basestring))
        self.assert_(isinstance(sys.prefix, basestring))
        self.assert_(isinstance(sys.version, basestring))
        vi = sys.version_info
        self.assert_(isinstance(vi, tuple))
        self.assertEqual(len(vi), 5)
        self.assert_(isinstance(vi[0], int))
        self.assert_(isinstance(vi[1], int))
        self.assert_(isinstance(vi[2], int))
        self.assert_(vi[3] in ("alpha", "beta", "candidate", "final"))
        self.assert_(isinstance(vi[4], int))

    def test_43581(self):
        # Can't use sys.stdout, as this is a cStringIO object when
        # the test runs under regrtest.
        self.assert_(sys.__stdout__.encoding == sys.__stderr__.encoding)
def test_main():
    """Run the SysModuleTest suite via the regrtest driver."""
    test.test_support.run_unittest(SysModuleTest)

if __name__ == "__main__":
    test_main()
|
ml_model.py | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the strategy class."""
import threading
from pathlib import Path
from queue import Queue
import tensorflow as tf
from tensorflow import keras
from aea.skills.base import Model
DEFAULT_MODEL_CONFIG_PATH = str(Path("..", "..", "model.config").resolve())
class MLModel(Model):
    """A thread-backed Keras classifier skill component.

    Training runs on a dedicated background thread that consumes
    ``(X, y, fit_kwargs)`` tuples from ``data_queue``; the most recent
    trained weights are published under a lock so that ``predict`` and
    ``evaluate`` can rebuild an identical model on the caller's thread.
    """

    def __init__(self, **kwargs):
        """Initialize the machine learning model.

        :param model_config_path: optional path to the serialized model
            configuration; defaults to DEFAULT_MODEL_CONFIG_PATH.
        """
        self._model_config_path = kwargs.pop(
            "model_config_path", DEFAULT_MODEL_CONFIG_PATH
        )
        super().__init__(**kwargs)
        # TODO this at the moment does not work - need to compile the model according to the network configuration
        # A better alternative is to save/load in HDF5 format, but that might require some system level dependencies
        # https://keras.io/getting-started/faq/#how-can-i-install-hdf5-or-h5py-to-save-my-models-in-keras
        # self._model = keras.Model.from_config(json.load(open(self._model_config_path)))
        self._lock = threading.RLock()  # guards self._weights
        self._weights = None  # latest published weights; None until training_loop runs
        # TF1-style shared graph: the training thread and callers must build
        # their models in the same graph for weight exchange to work.
        self.graph = tf.get_default_graph()
        self.data_queue = Queue()  # (X, y, fit_kwargs) items; None is the shutdown sentinel
        self.training_thread = threading.Thread(target=self.training_loop)

    def setup(self) -> None:
        """Start the background training thread."""
        self.training_thread.start()

    def training_loop(self):
        """
        Start the training loop.

        Builds a fresh model, publishes its initial weights, then fits on
        every batch pulled from the queue until the None sentinel arrives.

        :return: None
        """
        with self.graph.as_default():
            model = self._make_model()
            self._set_weights(model.get_weights())

            while True:
                data = self.data_queue.get()
                if data is None:
                    # Shutdown sentinel pushed by teardown().
                    break

                X, y, kwargs = data
                model.fit(X, y, **kwargs)
                # Evaluated on the training batch itself — an optimistic metric.
                loss, acc = model.evaluate(X, y, verbose=2)
                self.context.logger.info("Loss: {}, Acc: {}".format(loss, acc))
                self._set_weights(model.get_weights())

    @staticmethod
    def _make_model():
        """Make the model (fixed 28x28 -> 10-class MNIST-style classifier)."""
        model = keras.Sequential(
            [
                keras.layers.Flatten(input_shape=(28, 28)),
                keras.layers.Dense(128, activation="relu"),
                keras.layers.Dense(10, activation="softmax"),
            ]
        )
        model.compile(
            optimizer="adam",
            loss="sparse_categorical_crossentropy",
            metrics=["accuracy"],
        )
        return model

    def _restore_model(self):
        """Build a fresh model carrying the latest published weights.

        Caller must hold ``self._lock`` and be inside ``self.graph.as_default()``.
        NOTE: before the first training pass the published weights are None,
        in which case ``set_weights`` will fail — callers should only use
        predict/evaluate after setup has run.
        """
        model = self._make_model()
        model.set_weights(self._get_weights())
        return model

    def _get_weights(self):
        """Get the weights, thread-safe."""
        with self._lock:
            return self._weights

    def _set_weights(self, weights):
        """Set the weights, thread-safe."""
        with self._lock:
            self._weights = weights

    def predict(self, *args, **kwargs):
        """Predict with a model rebuilt from the latest trained weights."""
        with self._lock:
            with self.graph.as_default():
                return self._restore_model().predict(*args, **kwargs)

    def evaluate(self, *args, **kwargs):
        """Evaluate a model rebuilt from the latest trained weights."""
        with self._lock:
            with self.graph.as_default():
                return self._restore_model().evaluate(*args, **kwargs)

    def save(self):
        """Save the model weights."""
        # TODO to implement.

    def update(self, X, y, epochs):
        """Queue a training batch for the background thread."""
        self.data_queue.put((X, y, dict(epochs=epochs)))

    def teardown(self) -> None:
        """Signal the training thread to stop and wait for it to exit."""
        self.data_queue.put(None)
        self.training_thread.join()
|
train.py | #!/usr/bin/env python
from worker import *
from network import *
import threading
from time import sleep
# Command-line configuration for the hierarchical navigation training run.
flags = tf.app.flags
FLAGS = flags.FLAGS
# Task / episode limits.
flags.DEFINE_boolean('is_approaching_policy', False, 'If learning approaching policy.')
flags.DEFINE_integer('max_episodes', 100000, 'Maximum episodes.')
flags.DEFINE_integer('max_episode_steps', 500, 'Maximum steps for each episode.')
flags.DEFINE_integer('max_lowlevel_episode_steps', 25,
                     'Maximum number of steps the robot can take during one episode in low-level policy.')
# Optimization hyper-parameters.
flags.DEFINE_integer('batch_size', 64,
                     'The size of replay memory used for training.')
flags.DEFINE_float('gamma', 0.99, 'Discount factor.')
flags.DEFINE_boolean('use_gt', False, 'If use ground truth detection.')
flags.DEFINE_integer('window_size', 10, 'The size of vision window.')
flags.DEFINE_integer('num_labels', 78, 'The size of option space.')
flags.DEFINE_integer('a_size', 6, 'The size of action space.')
flags.DEFINE_integer('history_steps', 4, 'The number of steps need to remember during training.')
flags.DEFINE_multi_float('er', [0.01, 10000, 0.01],
                         '[Initial exploration rate, anneal steps, final exploration rate]')
flags.DEFINE_float('highlevel_lr', 0.0001, 'Highlevel learning rate.')
flags.DEFINE_float('lowlevel_lr', 0.0001, 'Lowlevel learning rate.')
flags.DEFINE_string('vision_feature_pattern', '_deeplab_depth_logits_10', 'Which feature to use to represent vision.')
flags.DEFINE_string('depth_feature_pattern', '_deeplab_depth_depth1_10', 'Which feature to use to represent depth.')
flags.DEFINE_integer('replay_start_size', 0, 'The number of observations stored in the replay buffer before training.')
flags.DEFINE_integer('skip_frames', 1, 'The times for low-level action to repeat.')
flags.DEFINE_integer('highlevel_update_freq', 100, 'Highlevel network update frequency.')
flags.DEFINE_integer('lowlevel_update_freq', 10, 'Lowlevel network update frequency.')
flags.DEFINE_integer('target_update_freq', 100000, 'Target network update frequency.')
# BUG FIX: the help argument was a Python list, not a string; DEFINE_* expects
# a help *string* (see the 'er' flag above for the intended format).
flags.DEFINE_multi_float('epsilon', [1, 10000, 0.1],
                         '[Initial exploration rate, anneal steps, final exploration rate]')
# Checkpointing / curriculum options.
flags.DEFINE_boolean('load_model', False, 'If load previous trained model or not.')
flags.DEFINE_boolean('curriculum_training', False, 'If use curriculum training or not.')
flags.DEFINE_boolean('continuing_training', False, 'If continue training or not.')
flags.DEFINE_string('pretrained_model_path', '', 'The path to load pretrained model from.')
flags.DEFINE_string('model_path', '', 'The path to store or load model from.')
# Scene / target selection.
flags.DEFINE_integer('num_scenes', 1, 'The number of scenes used for training.')
flags.DEFINE_integer('num_targets', 6, 'The number of targets for each scene that are used for training ')
flags.DEFINE_integer('num_threads', 1, 'The number of threads to train one scene one target.')
flags.DEFINE_boolean('use_default_scenes', True, 'If use default scenes for training.')
flags.DEFINE_boolean('use_default_targets', True, 'If use default targets for training.')
flags.DEFINE_multi_string('default_scenes',['5cf0e1e9493994e483e985c436b9d3bc',
                                            '0c9a666391cc08db7d6ca1a926183a76',
                                            '00d9be7210856e638fa3b1addf2237d6',
                                            '0880799c157b4dff08f90db221d7f884'
                                            ],
                          'Default scenes')
flags.DEFINE_multi_string('default_targets',
                          ['music', 'television', 'table', 'stand', 'dressing_table', 'heater', 'sofa',
                           'tv_stand', 'bed', 'toilet', 'bathtub'
                           ],
                          'Default targets.')
flags.DEFINE_string('evaluate_file', '', '')
flags.DEFINE_integer('min_step_threshold', 0, 'The minimal steps to start with.')
def get_trainable_scenes_and_targets():
    """Collect the scenes, targets, starting points and target points to train on.

    Returns four parallel lists indexed by scene:
      scenes[i], targets[i] (list of target names), starting_points[i]
      (per-target starting point lists), target_points[i] (all target points).
    Relies on FLAGS plus helpers star-imported from worker/network
    (get_target_points, get_starting_points, cfg, class2id, ...).
    """
    if FLAGS.use_default_scenes:
        scenes = FLAGS.default_scenes
    else:
        scenes = json.load(open('%s/Environment/collected_houses.json' % cfg['codeDir'], 'r'))['houses']
    targets = []
    starting_points = []
    target_points = []
    for scene in scenes[:FLAGS.num_scenes]:
        scene_dir = '%s/Environment/houses/%s/' % (cfg['codeDir'], scene)
        # Target points always come from ground truth annotations.
        all_target_points = get_target_points(scene, class2id.keys(), use_gt=True)
        if FLAGS.use_default_targets:
            all_targets = FLAGS.default_targets
        else:
            if FLAGS.use_gt:
                all_targets = json.load(open('%s/targets_info_all.json' % scene_dir, 'r')).keys()
            else:
                all_targets = json.load(open('%s/targets_info_all_pred.json' % scene_dir, 'r')).keys()
        all_starting_points = get_starting_points(scene, all_targets, use_gt=FLAGS.use_gt) \
            if FLAGS.is_approaching_policy else get_starting_points_according_to_distance(scene, all_targets)
        scene_targets = []
        #scene_target_points = []
        scene_starting_points = []
        num_targets = 0
        for i,t in enumerate(all_targets):
            t_points = all_target_points[t]
            # A starting point that coincides with a target point is excluded.
            s_points = [p for p in all_starting_points[i] if p not in t_points]
            # Keep only targets that have both reachable target and start points.
            if len(t_points) != 0 and len(s_points) != 0:
                scene_targets.append(t)
                #scene_target_points.append(t_points)
                scene_starting_points.append(s_points)
                num_targets += 1
            if num_targets == FLAGS.num_targets: break
        if FLAGS.is_approaching_policy and FLAGS.curriculum_training:
            # Curriculum: order start points from easy (near) to hard (far).
            scene_starting_points = sort_starting_points_according_to_distance(scene, scene_targets, scene_starting_points)
        targets.append(scene_targets)
        starting_points.append(scene_starting_points)
        target_points.append(all_target_points)
    return scenes, targets, starting_points, target_points
def select_starting_points(starting_points, targets, min_steps):
    """Keep only starting points farther than FLAGS.min_step_threshold.

    For every scene/target pair, drop candidate starting points whose
    minimal-step distance to the target (looked up in min_steps by the
    stringified point) does not exceed the threshold. Returns the filtered
    structure with the same nesting as `starting_points`.
    """
    result = []
    for sid in range(len(targets)):
        steps_for_scene = min_steps[sid]
        per_scene = []
        for tid in range(len(targets[sid])):
            goal = targets[sid][tid]
            candidates = starting_points[sid][tid]
            kept = [
                point
                for point in candidates
                if steps_for_scene[str(point)][goal] > FLAGS.min_step_threshold
            ]
            per_scene.append(kept)
        result.append(per_scene)
    return result
def set_up():
    """Build the TF graph, environments and worker objects for training.

    Creates the shared ('global') low-level network plus one local network
    and Worker per thread, and loads per-scene minimal-step tables.
    Returns (graph, workers).
    """
    if not os.path.exists(FLAGS.model_path):
        os.makedirs(FLAGS.model_path)
    tf.reset_default_graph()
    graph = tf.Graph()
    with graph.as_default():
        # Shared counters incremented by workers across threads.
        global_episodes = tf.Variable(0, dtype=tf.int32, name='global_episodes', trainable=False)
        global_frames = tf.Variable(0, dtype=tf.int32, name='global_frames', trainable=False)
        # Parameter server copy of the network (scope 'global').
        lowlevel_network = Lowlevel_Network_full(window_size=FLAGS.window_size,
                                                 num_labels=FLAGS.num_labels,
                                                 action_size=FLAGS.a_size,
                                                 history_steps=FLAGS.history_steps,
                                                 scope='global')
        scenes, targets, starting_points, target_points = get_trainable_scenes_and_targets()
        envs = []
        min_steps = []
        for scene in scenes:
            env = Semantic_Environment(env=scene,
                                       use_gt=FLAGS.use_gt,
                                       vision_feature=FLAGS.vision_feature_pattern,
                                       depth_feature=FLAGS.depth_feature_pattern)
            envs.append(env)
            # Precomputed minimal steps from every point to every target.
            min_steps.append(
                json.load(open('%s/Environment/houses/%s/minimal_steps_1.json' % (cfg['codeDir'], scene), 'r')))
        # Filter out starting points closer than FLAGS.min_step_threshold.
        starting_points = select_starting_points(starting_points=starting_points,
                                                 targets=targets,
                                                 min_steps=min_steps)
        workers = []
        for i in range(FLAGS.num_threads):
            # Each worker trains its own local copy of the network.
            local_lowlevel_network = Lowlevel_Network_full(window_size=FLAGS.window_size,
                                                           num_labels=FLAGS.num_labels,
                                                           action_size=FLAGS.a_size,
                                                           history_steps=FLAGS.history_steps,
                                                           scope='local_%d'%i)
            worker = Worker(name=i,
                            envs=envs,
                            scenes=scenes,
                            targets=targets,
                            min_steps=min_steps,
                            starting_points=starting_points,
                            target_points=target_points,
                            lowlevel_network=local_lowlevel_network,
                            global_episodes=global_episodes,
                            global_frames=global_frames)
            workers.append(worker)
    return graph, workers
def train():
    """Launch one thread per worker and block until all of them finish."""
    graph, workers = set_up()
    with tf.Session(graph=graph) as sess:
        coord = tf.train.Coordinator()
        all_threads = []
        for worker in workers:
            # BUG FIX: the original used target=lambda: worker.work(sess).
            # The lambda captures the loop variable `worker` late, so a thread
            # that starts after the loop advances would run the wrong (or the
            # last) worker — only masked by the sleep below. Binding the
            # method and passing sess via args removes the race.
            thread = threading.Thread(target=worker.work, args=(sess,))
            thread.start()
            sleep(0.1)
            all_threads.append(thread)
        coord.join(all_threads)
if __name__ == '__main__':
    train()
|
run.py | import sys
import threading
def log(*args):
    """Write a diagnostic line to stderr (stdout is reserved for the protocol)."""
    sys.stderr.write(" ".join(str(item) for item in args) + "\n")
    sys.stderr.flush()
def read_message():
    """Read one framed message from stdin.

    Frame layout: 4 sender characters, a newline, then the payload up to
    the next newline. Returns (sender, payload).
    """
    # Accumulate exactly four characters of sender id.
    sender = ""
    while len(sender) < 4:
        sender += sys.stdin.read(1)
    sys.stdin.read(1)  # discard the newline that terminates the sender field
    payload = []
    while True:
        ch = sys.stdin.read(1)
        if ch == "\n":
            break
        payload.append(ch)
    return sender, "".join(payload)
def send_message(to, content):
    """Emit one framed message (recipient line, then payload line) and flush."""
    frame = to + "\n" + content + "\n"
    sys.stdout.write(frame)
    sys.stdout.flush()
log(f"INTR has started!")
# Open the same path once for reading and once for writing.
# NOTE(review): on a regular file, opening with "w" truncates it while fr is
# reading — this only makes sense if sys.argv[1] is a FIFO/pipe-like path;
# confirm against the launcher.
with open(sys.argv[1], "r") as fr, open(sys.argv[1], "w") as fw:
    def t1():
        # Background thread: record every inbound stdin message to fw.
        while True:
            sender, msg = read_message()
            fw.write(f"{sender} sent {repr(msg)}\n")
            fw.flush()
    threading.Thread(target=t1, daemon=True).start()
    # Main loop: forward each line read from fr to RLAY with a CONS prefix.
    while True:
        msg = fr.readline().strip()
        log("Got input", msg)
        send_message("RLAY", "CONS" + msg)
|
main.py | import requests, json
import matplotlib.pyplot as plt
import pandas as pd
import time
import os
import numpy as np
from argparse import ArgumentParser
from sklearn.linear_model import LogisticRegression
from sklearn.externals import joblib
from threading import Thread
from sklearn.ensemble import BaggingClassifier
from iqoptionapi.stable_api import IQ_Option
import userdata
import traceback
## get args
parser = ArgumentParser()
parser.add_argument("-a", "--asset", dest="asset", required=True,
                    help="Pass the asset:(EURUSD, AUDUSD, XEMUSD-l ...)", metavar="ASSET")
parser.add_argument("-i", "--interval", dest="interval", required=False,
                    help="Pass interval in seconds: [1,5,10,15,30,60,120,300,600,900,1800,3600,7200,14400,28800,43200,86400,604800,2592000,'all']", metavar="TIME")
parser.add_argument("-f", "--file-sufix", dest="filesufix", required=False,
                    help="Pass sufix to file ", metavar="SUFIX")
args = parser.parse_args()
asset = args.asset
fileSufix = ''
if args.filesufix is not None:
    fileSufix = args.filesufix
## AUTH
# Credentials come from the local userdata module.
user = userdata.mainUser
Iq= IQ_Option(user["username"],user["password"])
## SETUP ASSETS
# Asset symbol is split into its two currency legs (e.g. EURUSD -> EUR/USD).
assetFrom = asset[:3]
assetTo = asset[3:]
goal= asset
fileName= 'data/' + asset + fileSufix + '.csv'
isThereFile = os.path.isfile(fileName)
# Base learner + bagging ensemble trained asynchronously by save().
# NOTE(review): sklearn.externals.joblib is removed in modern scikit-learn;
# this pins the code to an old sklearn version.
lgr = LogisticRegression(random_state=0 , solver='lbfgs',multi_class='auto', max_iter=5000, verbose=1, tol=0.0001)
bagging = BaggingClassifier(lgr, max_samples=0.5, max_features=0.5)
## SETUP PARAMS
window = 360 # window Dimension for calculation of indicators
variance = 3.5 # Indicators window variance dimension
batch_size = 360 # Lot Memory Dimension to train the model ( features - X )
interval = args.interval # Interval between ticker queries on the server
dim = 360 # window Dimension for displaying the indicators
## ---------------------------------
# Global mutable state shared by the functions below.
X = []
Y = []
bets = 0
history_bid = []
history_ask = []
history_lowerBand = []
history_upperBand = []
history_buys = []
history_sells = []
history_signal = []
index_buys = []
index_sells = []
history = ["","","","","",""]
X_temp = [0,0,0,0,0,0]
epoch = 0
batch = []
buys = [0,0,0,0,0,0]
sells = [0,0,0,0,0,0]
signal_action = ['','','','','','']
# Create the CSV with a header the first time only.
if isThereFile:
    pass
else:
    record = open(fileName,"w")
    record.write("bid,ask\n")
    record.close()
fig = plt.figure(figsize=(10,10))
ax = fig.gca()
def resetMemory():
    """Clear the buy/sell markers accumulated by detect_cross()."""
    global history_buys, history_sells, index_buys, index_sells
    history_buys, index_buys = [], []
    history_sells, index_sells = [], []
def bollingerBands(bid, ask, window, variance):
    """Draw Bollinger bands for `bid` on the global axes and return them.

    Returns (lowerBand, upperBand) as rolling-window series when there is
    enough data; otherwise prints a warning and implicitly returns None.
    NOTE(review): a caller that unpacks the result would raise TypeError on
    the None path — callers appear to guard with len(bid) > window * 3 first.
    """
    if len(bid) > window:
        media = bid.rolling(window= window).mean()
        rolling_std = bid.rolling(window= window).std()
        upperBand = media + (rolling_std * variance)
        lowerBand = media - (rolling_std * variance)
        #ax.plot(media, '--', color = 'gray', alpha = 0.3)
        ax.plot(upperBand, '--', color = 'green', alpha = 0.5)
        ax.plot(lowerBand, '--', color = 'red', alpha = 0.2)
        #ax.scatter(len(ask),media[-1:], color = 'gray', alpha = 0.1)
        # Highlight the most recent band values at the right edge of the plot.
        ax.scatter(len(ask),upperBand[-1:], color = 'green', alpha = 0.1)
        ax.scatter(len(ask),lowerBand[-1:], color = 'red', alpha = 0.1)
        return lowerBand, upperBand
    else:
        print("Not enough data to create Bollinger bands")
def detect_cross(bid, ask, lowerBand, upperBand, index, spread):
    """Detect a Bollinger-band crossing for the newest tick.

    Appends the tick to the rolling histories (trimmed to the last `dim`
    entries) and compares the two most recent bids against the bands:
      * bid crossed back above the lower band -> 1 (buy)
      * bid crossed back below the upper band -> 2 (sell)
      * otherwise                             -> 0
    The signal is appended to history_signal and returned. `spread` is
    currently unused (percentSpread logic is commented out).
    """
    history_bid.append(bid)
    history_ask.append(ask)
    history_lowerBand.append(lowerBand)
    history_upperBand.append(upperBand)
    del history_bid[:-dim]
    del history_ask[:-dim]
    del history_lowerBand[:-dim]
    del history_upperBand[:-dim]
    # percentSpread = spread / 100
    if len(history_signal) > 1:
        # BUG FIX: the original compared one-element list *slices*
        # (history_bid[-1:] > history_lowerBand[-1:]), which only worked
        # because Python compares lists element-wise. Indexing the scalars is
        # equivalent here (the guard above ensures >= 2 history entries) and
        # far clearer.
        if history_bid[-1] > history_lowerBand[-1] and history_bid[-2] < history_lowerBand[-2]:
            history_buys.append(float(ask))
            index_buys.append(index)
            signal = 1
        elif history_bid[-1] < history_upperBand[-1] and history_bid[-2] > history_upperBand[-2]:
            history_sells.append(float(bid))
            index_sells.append(index)
            signal = 2
        else:
            signal = 0
    else:
        signal = 0
    # Local renamed from signal_action so it no longer shadows the global
    # signal_action list that main() indexes into.
    history_signal.append(signal)
    return signal
def plotsTrading(bid,ask, lowerBand, upperBand, spread):
    """Re-scan the last `window` ticks for band crossings and plot markers.

    Returns the signal (0/1/2) produced for the FINAL scanned index only.
    NOTE(review): the scan range uses the module-level `window`, not the
    (possibly scaled) window the bands were computed with — confirm intended.
    NOTE(review): main() passes the `spread` *function* object as this
    parameter; it is forwarded to detect_cross, which never uses it.
    """
    resetMemory()
    for i in range(len(bid)-(window), len(bid)):
        signal_action = detect_cross(float(bid[i]), float(ask[i]), float(lowerBand[i]), float(upperBand[i]), i, spread)
    if len(history_buys) > 0:
        ax.scatter(index_buys, history_buys, marker = '^', color = "green", label ="Buy")
        for c in range(len(index_buys)):
            ax.text(index_buys[c], history_buys[c], '- buy', color = "black", alpha = 0.8)
    if len(history_sells) > 0:
        ax.scatter(index_sells, history_sells, marker = 'v', color = "red", label = "Sell" )
        for v in range(len(index_sells)):
            ax.text(index_sells[v], history_sells[v], '- sell', color = "black", alpha = 0.8)
    return signal_action
def spread(bid,ask):
    """Return the bid/ask gap expressed as a percentage of the ask price."""
    one_percent = ask / 100
    gap = ask - bid
    return gap / one_percent
def get_tickers():
    """Fetch the latest realtime candles for `goal` and append bid/ask rows.

    Starts a candle stream on the IQ Option connection, waits one second for
    data, then appends one "bid,ask" CSV row per candle that carries ask/bid
    fields.
    """
    #size=[1,5,10,15,30,60,120,300,600,900,1800,3600,7200,14400,28800,43200,86400,604800,2592000,"all"]
    size =5
    if interval is not None:
        size=int(interval)
    maxdict=100
    Iq.start_candles_stream(goal,size,maxdict)
    time.sleep(1)
    cc=Iq.get_realtime_candles(goal,size)
    for k in cc:
        if "ask" in cc[k]:
            ask = cc[k]["ask"]
            bid = cc[k]["bid"]
            ## tempo = cc[k]["at"]
            print("ask: " + str(ask) + " bid: " + str(bid))
            # Append-only CSV; main() re-reads the whole file each cycle.
            record = open(fileName, "a")
            record.write(str(bid)+","+str(ask)+'\n')
            record.close()
def ema(values, period):
    """Numpy implementation of EMA.

    Builds an exponentially-decaying kernel of length `period`, convolves it
    with `values`, and back-fills the warm-up region (the first `period`
    entries) with the first fully-weighted output value.
    """
    kernel = np.exp(np.linspace(-1., 0., period))
    kernel = kernel / kernel.sum()
    out = np.convolve(values, kernel, mode='full')[:len(values)]
    out[:period] = out[period]
    return out
def main():
    """Redraw the chart from the CSV, compute signals and queue training data.

    Reads the full tick CSV, plots bid/ask plus six Bollinger-band variants,
    converts band crossings into simulated buy/sell trades, accumulates
    (features, label) pairs in the global X/Y, and every 50 epochs hands the
    data to save() on a background thread. Runs once per outer-loop cycle.
    """
    global epoch, history, bets
    df = pd.read_csv(fileName)
    if len(df) > 1:
        ax.clear()
        bid = df['bid']
        ask = df['ask']
        # tempo = pd.to_datetime(df['time'], unit="ns")
        # NOTE(review): `j` is never used below.
        j = window * 3
        diferenca = ask[-1:] - bid[-1:]
        porcentagem = spread(bid[-1:],ask[-1:])
        ax.text(len(ask) + 10, bid[-1:] + (diferenca/2), "Spread " + str(np.around(float(porcentagem),3)) + "%")
        # NOTE(review): `interval` is None when -i was not passed, which would
        # make this string concatenation raise TypeError — confirm -i is
        # effectively required.
        plt.title("TRAINNING - " + goal + " - " + interval)
        if len(bid) < window:
            ax.set_xlim(0, len(bid)+(len(bid)/4)+5)
        else:
            ax.set_xlim(len(bid)-window, len(bid)+100)
            bid_min = np.array(bid[-window:]).min()
            ask_max = np.array(ask[-window:]).max()
            ax.set_ylim(bid_min-(bid_min * .001),ask_max+(ask_max * .001))
        ax.plot(bid, label = "Bid - Sell "+ assetFrom + " " + str(np.around(float(bid[-1:]),8)), color = 'black', alpha = 0.5)
        ax.plot(ask, label = "Ask - Buy "+ assetFrom + " " + str(np.around(float(ask[-1:]),8)), color = 'gray', alpha = 0.5)
        plt.legend()
        ax.scatter(len(ask)-1,ask[-1:], color = 'black', alpha = 1)
        ax.scatter(len(bid)-1,bid[-1:], color = 'gray', alpha = 1)
        if len(bid) > window * 3:
            bid_mean = float(bid[-1:] / bid[0])
            ask_mean = float(ask[-1:] / ask[0])
            # Six band variants at window * {0.5, 1.0, ..., 3.0}.
            worthIt = 0.5
            for ind in range(0,6):
                lowerBand, upperBand = bollingerBands(bid, ask, int(window*worthIt), variance)
                # NOTE(review): the `spread` *function* object is passed here,
                # not a spread value; detect_cross ignores the argument.
                signal_action[ind] = plotsTrading(bid,ask, lowerBand, upperBand, spread)
                worthIt += .5
            del batch[:-batch_size - 10]
            batch.append([[signal_action[0]], [signal_action[1]],[signal_action[2]],[signal_action[3]], [signal_action[4]], [signal_action[5]], [bid_mean], [ask_mean]])
            if len(batch) >= batch_size:
                # Simulate one position per band variant and label the batches.
                for ind in range(0,6):
                    if signal_action[ind] == 1:
                        if history[ind] != "BUY":
                            buys[ind] = float(ask[-1:])
                            X_temp[ind] = batch[-batch_size:]
                            # print("--**--** BUY - ", str(float( buys[ind])))
                            bets += 1
                        elif history[ind] == "BUY":
                            # Repeated buy signal: label the previous entry 0.
                            X.append(X_temp[ind])
                            Y.append(0)
                            X_temp[ind] = batch[-batch_size:]
                            buys[ind] = float(ask[-1:])
                            epoch += 1
                        history[ind] = "BUY"
                    if signal_action[ind] == 2 and history[ind] == "BUY":
                        sells[ind] = float(bid[-1:])
                        epoch += 1
                        bets += 1
                        profit = float(float(sells[ind]) - float(buys[ind]))
                        # print("--**--** SELL ", str(float( sells[ind]))," - Lucro = US$ ", str(profit))
                        if profit > 0:
                            try:
                                X.append(X_temp[ind])
                                Y.append(np.array(1))
                                X.append(batch[-batch_size:])
                                Y.append(np.array(2))
                            except:
                                pass
                        if profit <= 0 or history[ind] =="SELL":
                            try:
                                X.append(X_temp[ind])
                                Y.append(np.array(0))
                                X.append(batch[-batch_size:])
                                Y.append(np.array(0))
                            except:
                                pass
                        history[ind] = "SELL"
            try:
                X_0 = np.array(X)
                X0 = X_0.reshape(len(Y),-1)
                y = np.array(Y)
            except:
                pass
            # NOTE(review): if the reshape above failed, X0/y may be unbound
            # (or stale) when the training thread is started below.
            if epoch % 50 == 0 and epoch > 0:
                tt = Thread(target=save, args=[lgr,epoch,X0,y])
                tt.start()
            if len(batch) < batch_size:
                print("Batch Total", len(batch))
    # print("Epoch - ", str(epoch))
    sleepFor = 5
    if interval is not None:
        sleepFor = int(interval)
    # plt.pause also services the GUI event loop while waiting.
    plt.pause(sleepFor)
# Cycle counter for the polling loop below.
volta = 1
def save(lgr,epoch,X0,y):
    # Fit the global bagging ensemble on the accumulated batches and persist it.
    # NOTE(review): the `lgr` parameter is unused — the ensemble was built from
    # the module-level lgr at import time.
    bagging.fit(X0,y)
    joblib.dump(bagging, "models/model-"+str(epoch)+".pkl", compress=3)
    # print("--*--* saved Model - model-"+str(epoch)+".pkl")
# Main polling loop: fetch fresh tickers, then redraw/train via main().
while True:
    print("--------------------------- ")
    print("Lances = ", bets)
    print("Tickers - ", volta)
    volta += 1
    try:
        get_tickers()
    except Exception as e:
        # Best-effort: log the server error and retry next cycle.
        # NOTE(review): the message says 5 seconds but the sleep is 3.
        print("Server Error - await 5 seconds.: " + str(e))
        print(traceback.format_exc())
        time.sleep(3)
        pass
    main()
|
test_csv.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import bz2
from datetime import date, datetime
from decimal import Decimal
import gc
import gzip
import io
import itertools
import os
import pickle
import shutil
import signal
import string
import tempfile
import threading
import time
import unittest
import weakref
import pytest
import numpy as np
import pyarrow as pa
from pyarrow.csv import (
open_csv, read_csv, ReadOptions, ParseOptions, ConvertOptions, ISO8601,
write_csv, WriteOptions, CSVWriter)
from pyarrow.tests import util
def generate_col_names():
    """Yield spreadsheet-style column names: 'a'..'z', then 'aa', 'ab', ..."""
    alphabet = string.ascii_lowercase
    for single in alphabet:
        yield single
    for hi in alphabet:
        for lo in alphabet:
            yield hi + lo
def make_random_csv(num_cols=2, num_rows=10, linesep='\r\n', write_names=True):
    """Build a deterministic random integer CSV plus its expected Table.

    Returns (csv_bytes, expected) where `expected` is the pyarrow Table the
    CSV should parse to. The RNG is seeded so results are reproducible.
    """
    rng = np.random.RandomState(42)
    arr = rng.randint(0, 1000, size=(num_cols, num_rows))
    names = list(itertools.islice(generate_col_names(), num_cols))
    buf = io.StringIO()
    if write_names:
        buf.write(",".join(names))
        buf.write(linesep)
    for row in arr.T:
        buf.write(",".join(str(value) for value in row))
        buf.write(linesep)
    payload = buf.getvalue().encode()
    columns = [pa.array(col, type=pa.int64()) for col in arr]
    expected = pa.Table.from_arrays(columns, names)
    return payload, expected
def make_empty_csv(column_names):
    """Return the encoded bytes of a CSV with a header row and no data rows."""
    header = ",".join(column_names) + "\n"
    return header.encode()
def check_options_class(cls, **attr_values):
    """
    Check setting and getting attributes of an *Options class.
    """
    opts = cls()
    for name, values in attr_values.items():
        # The first candidate value is also the expected default.
        assert getattr(opts, name) == values[0], \
            "incorrect default value for " + name
        for candidate in values:
            setattr(opts, name, candidate)
            assert getattr(opts, name) == candidate, "failed setting value"
    # Unknown attributes must be rejected rather than silently created.
    with pytest.raises(AttributeError):
        opts.zzz_non_existent = True
    # Check constructor named arguments
    non_defaults = {name: values[1] for name, values in attr_values.items()}
    opts = cls(**non_defaults)
    for name, value in non_defaults.items():
        assert getattr(opts, name) == value
# The various options classes need to be picklable for dataset
def check_options_class_pickling(cls, **attr_values):
    """Options instances must survive a pickle round-trip with attrs intact."""
    original = cls(**attr_values)
    payload = pickle.dumps(original, protocol=pickle.HIGHEST_PROTOCOL)
    restored = pickle.loads(payload)
    for name, expected in attr_values.items():
        assert getattr(restored, name) == expected
def test_read_options():
    """ReadOptions: attribute round-trips, pickling and validate() errors."""
    cls = ReadOptions
    opts = cls()
    check_options_class(cls, use_threads=[True, False],
                        skip_rows=[0, 3],
                        column_names=[[], ["ab", "cd"]],
                        autogenerate_column_names=[False, True],
                        encoding=['utf8', 'utf16'],
                        skip_rows_after_names=[0, 27])
    check_options_class_pickling(cls, use_threads=True,
                                 skip_rows=3,
                                 column_names=["ab", "cd"],
                                 autogenerate_column_names=False,
                                 encoding='utf16',
                                 skip_rows_after_names=27)
    # block_size has a positive platform default and is settable both ways.
    assert opts.block_size > 0
    opts.block_size = 12345
    assert opts.block_size == 12345
    opts = cls(block_size=1234)
    assert opts.block_size == 1234
    opts.validate()
    # Each invalid setting must be caught by validate() with a precise message.
    match = "ReadOptions: block_size must be at least 1: 0"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.block_size = 0
        opts.validate()
    match = "ReadOptions: skip_rows cannot be negative: -1"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.skip_rows = -1
        opts.validate()
    match = "ReadOptions: skip_rows_after_names cannot be negative: -1"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.skip_rows_after_names = -1
        opts.validate()
    match = "ReadOptions: autogenerate_column_names cannot be true when" \
            " column_names are provided"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.autogenerate_column_names = True
        opts.column_names = ('a', 'b')
        opts.validate()
def test_parse_options():
    """ParseOptions: attribute round-trips, pickling and validate() errors."""
    cls = ParseOptions
    check_options_class(cls, delimiter=[',', 'x'],
                        escape_char=[False, 'y'],
                        quote_char=['"', 'z', False],
                        double_quote=[True, False],
                        newlines_in_values=[False, True],
                        ignore_empty_lines=[True, False])
    check_options_class_pickling(cls, delimiter='x',
                                 escape_char='y',
                                 quote_char=False,
                                 double_quote=False,
                                 newlines_in_values=True,
                                 ignore_empty_lines=False)
    cls().validate()
    opts = cls()
    opts.delimiter = "\t"
    opts.validate()
    # CR/LF are line terminators and may not double as delimiter, quote or
    # escape characters; validate() must reject each case.
    match = "ParseOptions: delimiter cannot be \\\\r or \\\\n"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.delimiter = "\n"
        opts.validate()
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.delimiter = "\r"
        opts.validate()
    match = "ParseOptions: quote_char cannot be \\\\r or \\\\n"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.quote_char = "\n"
        opts.validate()
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.quote_char = "\r"
        opts.validate()
    match = "ParseOptions: escape_char cannot be \\\\r or \\\\n"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.escape_char = "\n"
        opts.validate()
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.escape_char = "\r"
        opts.validate()
def test_convert_options():
    """ConvertOptions: attribute round-trips, column_types coercion, defaults."""
    cls = ConvertOptions
    opts = cls()
    check_options_class(
        cls, check_utf8=[True, False],
        strings_can_be_null=[False, True],
        quoted_strings_can_be_null=[True, False],
        include_columns=[[], ['def', 'abc']],
        include_missing_columns=[False, True],
        auto_dict_encode=[False, True],
        timestamp_parsers=[[], [ISO8601, '%y-%m']])
    check_options_class_pickling(
        cls, check_utf8=False,
        strings_can_be_null=True,
        quoted_strings_can_be_null=False,
        include_columns=['def', 'abc'],
        include_missing_columns=False,
        auto_dict_encode=True,
        timestamp_parsers=[ISO8601, '%y-%m'])
    assert opts.auto_dict_max_cardinality > 0
    opts.auto_dict_max_cardinality = 99999
    assert opts.auto_dict_max_cardinality == 99999
    assert opts.column_types == {}
    # Pass column_types as mapping
    opts.column_types = {'b': pa.int16(), 'c': pa.float32()}
    assert opts.column_types == {'b': pa.int16(), 'c': pa.float32()}
    # String type names are accepted and normalized to DataType instances.
    opts.column_types = {'v': 'int16', 'w': 'null'}
    assert opts.column_types == {'v': pa.int16(), 'w': pa.null()}
    # Pass column_types as schema
    schema = pa.schema([('a', pa.int32()), ('b', pa.string())])
    opts.column_types = schema
    assert opts.column_types == {'a': pa.int32(), 'b': pa.string()}
    # Pass column_types as sequence
    opts.column_types = [('x', pa.binary())]
    assert opts.column_types == {'x': pa.binary()}
    with pytest.raises(TypeError, match='DataType expected'):
        opts.column_types = {'a': None}
    with pytest.raises(TypeError):
        opts.column_types = 0
    # null/true/false value lists have non-empty defaults and are replaceable.
    assert isinstance(opts.null_values, list)
    assert '' in opts.null_values
    assert 'N/A' in opts.null_values
    opts.null_values = ['xxx', 'yyy']
    assert opts.null_values == ['xxx', 'yyy']
    assert isinstance(opts.true_values, list)
    opts.true_values = ['xxx', 'yyy']
    assert opts.true_values == ['xxx', 'yyy']
    assert isinstance(opts.false_values, list)
    opts.false_values = ['xxx', 'yyy']
    assert opts.false_values == ['xxx', 'yyy']
    assert opts.timestamp_parsers == []
    opts.timestamp_parsers = [ISO8601]
    assert opts.timestamp_parsers == [ISO8601]
    # All of the above can also be supplied through the constructor.
    opts = cls(column_types={'a': pa.null()},
               null_values=['N', 'nn'], true_values=['T', 'tt'],
               false_values=['F', 'ff'], auto_dict_max_cardinality=999,
               timestamp_parsers=[ISO8601, '%Y-%m-%d'])
    assert opts.column_types == {'a': pa.null()}
    assert opts.null_values == ['N', 'nn']
    assert opts.false_values == ['F', 'ff']
    assert opts.true_values == ['T', 'tt']
    assert opts.auto_dict_max_cardinality == 999
    assert opts.timestamp_parsers == [ISO8601, '%Y-%m-%d']
def test_write_options():
    """WriteOptions: attribute round-trips, batch_size and validate() errors."""
    cls = WriteOptions
    opts = cls()
    check_options_class(
        cls, include_header=[True, False])
    # batch_size has a positive default and is settable both ways.
    assert opts.batch_size > 0
    opts.batch_size = 12345
    assert opts.batch_size == 12345
    opts = cls(batch_size=9876)
    assert opts.batch_size == 9876
    opts.validate()
    match = "WriteOptions: batch_size must be at least 1: 0"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.batch_size = 0
        opts.validate()
class BaseTestCSVRead:
    def read_bytes(self, b, **kwargs):
        # Convenience wrapper: wrap raw bytes in an Arrow buffer and parse
        # them with the subclass-provided read_csv implementation.
        return self.read_csv(pa.py_buffer(b), **kwargs)
    def check_names(self, table, names):
        # The table must contain exactly these columns, in this order.
        assert table.num_columns == len(names)
        assert table.column_names == names
    def test_file_object(self):
        """Binary file-like objects are accepted; text file objects are not."""
        data = b"a,b\n1,2\n"
        expected_data = {'a': [1], 'b': [2]}
        bio = io.BytesIO(data)
        table = self.read_csv(bio)
        assert table.to_pydict() == expected_data
        # Text files not allowed
        sio = io.StringIO(data.decode())
        with pytest.raises(TypeError):
            self.read_csv(sio)
    def test_header(self):
        """A header-only CSV yields a zero-row table with the header names."""
        rows = b"abc,def,gh\n"
        table = self.read_bytes(rows)
        assert isinstance(table, pa.Table)
        self.check_names(table, ["abc", "def", "gh"])
        assert table.num_rows == 0
    def test_bom(self):
        """A leading UTF-8 byte-order mark is stripped, not kept in the name."""
        rows = b"\xef\xbb\xbfa,b\n1,2\n"
        expected_data = {'a': [1], 'b': [2]}
        table = self.read_bytes(rows)
        assert table.to_pydict() == expected_data
    def test_one_chunk(self):
        # ARROW-7661: lack of newline at end of file should not produce
        # an additional chunk.
        rows = [b"a,b", b"1,2", b"3,4", b"56,78"]
        # Exercise every line ending, with and without a trailing terminator.
        for line_ending in [b'\n', b'\r', b'\r\n']:
            for file_ending in [b'', line_ending]:
                data = line_ending.join(rows) + file_ending
                table = self.read_bytes(data)
                assert len(table.to_batches()) == 1
                assert table.to_pydict() == {
                    "a": [1, 3, 56],
                    "b": [2, 4, 78],
                }
    def test_header_skip_rows(self):
        """skip_rows drops leading rows BEFORE the header is read."""
        rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
        opts = ReadOptions()
        opts.skip_rows = 1
        table = self.read_bytes(rows, read_options=opts)
        # The first remaining row becomes the header.
        self.check_names(table, ["ef", "gh"])
        assert table.to_pydict() == {
            "ef": ["ij", "mn"],
            "gh": ["kl", "op"],
        }
        opts.skip_rows = 3
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["mn", "op"])
        assert table.to_pydict() == {
            "mn": [],
            "op": [],
        }
        opts.skip_rows = 4
        with pytest.raises(pa.ArrowInvalid):
            # Not enough rows
            table = self.read_bytes(rows, read_options=opts)
        # Can skip rows with a different number of columns
        rows = b"abcd\n,,,,,\nij,kl\nmn,op\n"
        opts.skip_rows = 2
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["ij", "kl"])
        assert table.to_pydict() == {
            "ij": ["mn"],
            "kl": ["op"],
        }
    def test_skip_rows_after_names(self):
        """skip_rows_after_names drops data rows AFTER the header is read."""
        rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
        opts = ReadOptions()
        opts.skip_rows_after_names = 1
        table = self.read_bytes(rows, read_options=opts)
        # Header is preserved; only data rows are skipped.
        self.check_names(table, ["ab", "cd"])
        assert table.to_pydict() == {
            "ab": ["ij", "mn"],
            "cd": ["kl", "op"],
        }
        opts.skip_rows_after_names = 3
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["ab", "cd"])
        assert table.to_pydict() == {
            "ab": [],
            "cd": [],
        }
        # Skipping more rows than exist is not an error (unlike skip_rows).
        opts.skip_rows_after_names = 4
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["ab", "cd"])
        assert table.to_pydict() == {
            "ab": [],
            "cd": [],
        }
        # Can skip rows with a different number of columns
        rows = b"abcd\n,,,,,\nij,kl\nmn,op\n"
        opts.skip_rows_after_names = 2
        opts.column_names = ["f0", "f1"]
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["f0", "f1"])
        assert table.to_pydict() == {
            "f0": ["ij", "mn"],
            "f1": ["kl", "op"],
        }
        opts = ReadOptions()
        # Can skip rows with new lines in the value
        rows = b'ab,cd\n"e\nf","g\n\nh"\n"ij","k\nl"\nmn,op'
        opts.skip_rows_after_names = 2
        parse_opts = ParseOptions()
        parse_opts.newlines_in_values = True
        table = self.read_bytes(rows, read_options=opts,
                                parse_options=parse_opts)
        self.check_names(table, ["ab", "cd"])
        assert table.to_pydict() == {
            "ab": ["mn"],
            "cd": ["op"],
        }
        # Can skip rows when block ends in middle of quoted value
        opts.skip_rows_after_names = 2
        opts.block_size = 26
        table = self.read_bytes(rows, read_options=opts,
                                parse_options=parse_opts)
        self.check_names(table, ["ab", "cd"])
        assert table.to_pydict() == {
            "ab": ["mn"],
            "cd": ["op"],
        }
        opts = ReadOptions()
        # Can skip rows that are beyond the first block without lexer
        rows, expected = make_random_csv(num_cols=5, num_rows=1000)
        opts.skip_rows_after_names = 900
        opts.block_size = len(rows) / 11
        table = self.read_bytes(rows, read_options=opts)
        assert table.schema == expected.schema
        assert table.num_rows == 100
        table_dict = table.to_pydict()
        for name, values in expected.to_pydict().items():
            assert values[900:] == table_dict[name]
        # Can skip rows that are beyond the first block with lexer
        table = self.read_bytes(rows, read_options=opts,
                                parse_options=parse_opts)
        assert table.schema == expected.schema
        assert table.num_rows == 100
        table_dict = table.to_pydict()
        for name, values in expected.to_pydict().items():
            assert values[900:] == table_dict[name]
        # Skip rows and skip rows after names
        rows, expected = make_random_csv(num_cols=5, num_rows=200,
                                         write_names=False)
        opts = ReadOptions()
        opts.skip_rows = 37
        opts.skip_rows_after_names = 41
        opts.column_names = expected.schema.names
        table = self.read_bytes(rows, read_options=opts,
                                parse_options=parse_opts)
        assert table.schema == expected.schema
        assert (table.num_rows ==
                expected.num_rows - opts.skip_rows -
                opts.skip_rows_after_names)
        table_dict = table.to_pydict()
        for name, values in expected.to_pydict().items():
            assert (values[opts.skip_rows + opts.skip_rows_after_names:] ==
                    table_dict[name])
def test_header_column_names(self):
    """With explicit column_names, the first CSV line is data, not a header."""
    data = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
    read_opts = ReadOptions()
    read_opts.column_names = ["x", "y"]
    table = self.read_bytes(data, read_options=read_opts)
    self.check_names(table, ["x", "y"])
    assert table.to_pydict() == {
        "x": ["ab", "ef", "ij", "mn"],
        "y": ["cd", "gh", "kl", "op"],
    }
    # Skipping all but the last row still yields that row as data
    read_opts.skip_rows = 3
    table = self.read_bytes(data, read_options=read_opts)
    self.check_names(table, ["x", "y"])
    assert table.to_pydict() == {"x": ["mn"], "y": ["op"]}
    # Skipping every row yields an empty table with the given names
    read_opts.skip_rows = 4
    table = self.read_bytes(data, read_options=read_opts)
    self.check_names(table, ["x", "y"])
    assert table.to_pydict() == {"x": [], "y": []}
    # Not enough rows to skip
    read_opts.skip_rows = 5
    with pytest.raises(pa.ArrowInvalid):
        self.read_bytes(data, read_options=read_opts)
    # Unexpected number of columns
    read_opts.skip_rows = 0
    read_opts.column_names = ["x", "y", "z"]
    with pytest.raises(pa.ArrowInvalid,
                       match="Expected 3 columns, got 2"):
        self.read_bytes(data, read_options=read_opts)
    # Skipped rows may have a different number of columns
    data = b"abcd\n,,,,,\nij,kl\nmn,op\n"
    read_opts.skip_rows = 2
    read_opts.column_names = ["x", "y"]
    table = self.read_bytes(data, read_options=read_opts)
    self.check_names(table, ["x", "y"])
    assert table.to_pydict() == {
        "x": ["ij", "mn"],
        "y": ["kl", "op"],
    }
def test_header_autogenerate_column_names(self):
    """autogenerate_column_names yields f0, f1, ... and keeps row 1 as data."""
    data = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
    read_opts = ReadOptions()
    read_opts.autogenerate_column_names = True
    table = self.read_bytes(data, read_options=read_opts)
    self.check_names(table, ["f0", "f1"])
    assert table.to_pydict() == {
        "f0": ["ab", "ef", "ij", "mn"],
        "f1": ["cd", "gh", "kl", "op"],
    }
    # Skipping rows leaves the autogenerated names intact
    read_opts.skip_rows = 3
    table = self.read_bytes(data, read_options=read_opts)
    self.check_names(table, ["f0", "f1"])
    assert table.to_pydict() == {"f0": ["mn"], "f1": ["op"]}
    # Not enough rows remain, so the column count cannot be inferred
    read_opts.skip_rows = 4
    with pytest.raises(pa.ArrowInvalid):
        self.read_bytes(data, read_options=read_opts)
def test_include_columns(self):
    """include_columns selects which columns appear, and in what order."""
    data = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
    conv_opts = ConvertOptions()
    conv_opts.include_columns = ['ab']
    table = self.read_bytes(data, convert_options=conv_opts)
    self.check_names(table, ["ab"])
    assert table.to_pydict() == {"ab": ["ef", "ij", "mn"]}
    # Output order follows include_columns, not the CSV column order
    conv_opts.include_columns = ['cd', 'ab']
    table = self.read_bytes(data, convert_options=conv_opts)
    expected_schema = pa.schema([('cd', pa.string()),
                                 ('ab', pa.string())])
    assert table.schema == expected_schema
    assert table.to_pydict() == {
        "cd": ["gh", "kl", "op"],
        "ab": ["ef", "ij", "mn"],
    }
    # Requesting a column absent from the CSV file raises by default
    conv_opts.include_columns = ['xx', 'ab', 'yy']
    with pytest.raises(KeyError,
                       match="Column 'xx' in include_columns "
                             "does not exist in CSV file"):
        self.read_bytes(data, convert_options=conv_opts)
def test_include_missing_columns(self):
    """include_missing_columns=True materializes absent include_columns
    entries as all-null columns instead of raising KeyError."""
    rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
    read_options = ReadOptions()
    convert_options = ConvertOptions()
    convert_options.include_columns = ['xx', 'ab', 'yy']
    convert_options.include_missing_columns = True
    table = self.read_bytes(rows, read_options=read_options,
                            convert_options=convert_options)
    # Missing columns come back as null-typed, all-null columns
    schema = pa.schema([('xx', pa.null()),
                        ('ab', pa.string()),
                        ('yy', pa.null())])
    assert table.schema == schema
    assert table.to_pydict() == {
        "xx": [None, None, None],
        "ab": ["ef", "ij", "mn"],
        "yy": [None, None, None],
    }
    # Combining with `column_names`: "yy" now names a real CSV column,
    # while "cd" becomes the missing one
    read_options.column_names = ["xx", "yy"]
    convert_options.include_columns = ["yy", "cd"]
    table = self.read_bytes(rows, read_options=read_options,
                            convert_options=convert_options)
    schema = pa.schema([('yy', pa.string()),
                        ('cd', pa.null())])
    assert table.schema == schema
    assert table.to_pydict() == {
        "yy": ["cd", "gh", "kl", "op"],
        "cd": [None, None, None, None],
    }
    # And with `column_types` as well: a typed missing column gets the
    # requested type (still all-null)
    convert_options.column_types = {"yy": pa.binary(),
                                    "cd": pa.int32()}
    table = self.read_bytes(rows, read_options=read_options,
                            convert_options=convert_options)
    schema = pa.schema([('yy', pa.binary()),
                        ('cd', pa.int32())])
    assert table.schema == schema
    assert table.to_pydict() == {
        "yy": [b"cd", b"gh", b"kl", b"op"],
        "cd": [None, None, None, None],
    }
def test_simple_ints(self):
    """Columns of integer literals are inferred as int64."""
    data = b"a,b,c\n1,2,3\n4,5,6\n"
    table = self.read_bytes(data)
    expected_schema = pa.schema([(name, pa.int64())
                                 for name in ('a', 'b', 'c')])
    assert table.schema == expected_schema
    assert table.to_pydict() == {
        'a': [1, 4],
        'b': [2, 5],
        'c': [3, 6],
    }
def test_simple_varied(self):
    """Mixed-content columns infer float, int, string and bool types."""
    data = b"a,b,c,d\n1,2,3,0\n4.0,-5,foo,True\n"
    table = self.read_bytes(data)
    expected_schema = pa.schema([('a', pa.float64()),
                                 ('b', pa.int64()),
                                 ('c', pa.string()),
                                 ('d', pa.bool_())])
    assert table.schema == expected_schema
    assert table.to_pydict() == {
        'a': [1.0, 4.0],
        'b': [2, -5],
        'c': ["3", "foo"],
        'd': [False, True],
    }
def test_simple_nulls(self):
    """Type inference in the presence of null spellings and non-utf8 data."""
    # Infer various kinds of data, with nulls
    rows = (b"a,b,c,d,e,f\n"
            b"1,2,,,3,N/A\n"
            b"nan,-5,foo,,nan,TRUE\n"
            b"4.5,#N/A,nan,,\xff,false\n")
    table = self.read_bytes(rows)
    # Column e contains invalid utf-8 (\xff) so it is inferred as binary;
    # column d is entirely empty and stays null-typed
    schema = pa.schema([('a', pa.float64()),
                        ('b', pa.int64()),
                        ('c', pa.string()),
                        ('d', pa.null()),
                        ('e', pa.binary()),
                        ('f', pa.bool_())])
    assert table.schema == schema
    assert table.to_pydict() == {
        'a': [1.0, None, 4.5],
        'b': [2, -5, None],
        'c': ["", "foo", "nan"],
        'd': [None, None, None],
        'e': [b"3", b"nan", b"\xff"],
        'f': [None, True, False],
    }
def test_simple_timestamps(self):
    """Timestamp columns infer second or nanosecond resolution."""
    data = (b"a,b,c\n"
            b"1970,1970-01-01 00:00:00,1970-01-01 00:00:00.123\n"
            b"1989,1989-07-14 01:00:00,1989-07-14 01:00:00.123456\n")
    table = self.read_bytes(data)
    # Bare years stay integers; fractional seconds push 'c' to ns
    expected_schema = pa.schema([('a', pa.int64()),
                                 ('b', pa.timestamp('s')),
                                 ('c', pa.timestamp('ns'))])
    assert table.schema == expected_schema
    assert table.to_pydict() == {
        'a': [1970, 1989],
        'b': [datetime(1970, 1, 1), datetime(1989, 7, 14, 1)],
        'c': [datetime(1970, 1, 1, 0, 0, 0, 123000),
              datetime(1989, 7, 14, 1, 0, 0, 123456)],
    }
def test_timestamp_parsers(self):
    """timestamp_parsers controls which columns convert to timestamps."""
    # Infer timestamps with custom parsers
    rows = b"a,b\n1970/01/01,1980-01-01 00\n1970/01/02,1980-01-02 00\n"
    opts = ConvertOptions()
    # Default parsers: only the ISO-like column b converts
    table = self.read_bytes(rows, convert_options=opts)
    schema = pa.schema([('a', pa.string()),
                        ('b', pa.timestamp('s'))])
    assert table.schema == schema
    assert table.to_pydict() == {
        'a': ['1970/01/01', '1970/01/02'],
        'b': [datetime(1980, 1, 1), datetime(1980, 1, 2)],
    }
    # A single custom parser replaces the defaults: now only column a converts
    opts.timestamp_parsers = ['%Y/%m/%d']
    table = self.read_bytes(rows, convert_options=opts)
    schema = pa.schema([('a', pa.timestamp('s')),
                        ('b', pa.string())])
    assert table.schema == schema
    assert table.to_pydict() == {
        'a': [datetime(1970, 1, 1), datetime(1970, 1, 2)],
        'b': ['1980-01-01 00', '1980-01-02 00'],
    }
    # Adding ISO8601 alongside the custom parser converts both columns
    opts.timestamp_parsers = ['%Y/%m/%d', ISO8601]
    table = self.read_bytes(rows, convert_options=opts)
    schema = pa.schema([('a', pa.timestamp('s')),
                        ('b', pa.timestamp('s'))])
    assert table.schema == schema
    assert table.to_pydict() == {
        'a': [datetime(1970, 1, 1), datetime(1970, 1, 2)],
        'b': [datetime(1980, 1, 1), datetime(1980, 1, 2)],
    }
def test_dates(self):
    """Dates default to date32; explicit column_types can override."""
    data = b"a,b\n1970-01-01,1970-01-02\n1971-01-01,1971-01-02\n"
    table = self.read_bytes(data)
    assert table.schema == pa.schema([('a', pa.date32()),
                                      ('b', pa.date32())])
    assert table.to_pydict() == {
        'a': [date(1970, 1, 1), date(1971, 1, 1)],
        'b': [date(1970, 1, 2), date(1971, 1, 2)],
    }
    # Explicitly request date types
    conv_opts = ConvertOptions()
    conv_opts.column_types = {'a': pa.date32(), 'b': pa.date64()}
    table = self.read_bytes(data, convert_options=conv_opts)
    assert table.schema == pa.schema([('a', pa.date32()),
                                      ('b', pa.date64())])
    assert table.to_pydict() == {
        'a': [date(1970, 1, 1), date(1971, 1, 1)],
        'b': [date(1970, 1, 2), date(1971, 1, 2)],
    }
    # Explicitly request timestamp types instead
    conv_opts = ConvertOptions()
    conv_opts.column_types = {'a': pa.timestamp('s'),
                              'b': pa.timestamp('ms')}
    table = self.read_bytes(data, convert_options=conv_opts)
    assert table.schema == pa.schema([('a', pa.timestamp('s')),
                                      ('b', pa.timestamp('ms'))])
    assert table.to_pydict() == {
        'a': [datetime(1970, 1, 1), datetime(1971, 1, 1)],
        'b': [datetime(1970, 1, 2), datetime(1971, 1, 2)],
    }
def test_auto_dict_encode(self):
    """auto_dict_encode dictionary-encodes string columns, subject to
    auto_dict_max_cardinality and utf-8 checking."""
    opts = ConvertOptions(auto_dict_encode=True)
    rows = "a,b\nab,1\ncdé,2\ncdé,3\nab,4".encode()
    table = self.read_bytes(rows, convert_options=opts)
    schema = pa.schema([('a', pa.dictionary(pa.int32(), pa.string())),
                        ('b', pa.int64())])
    expected = {
        'a': ["ab", "cdé", "cdé", "ab"],
        'b': [1, 2, 3, 4],
    }
    assert table.schema == schema
    assert table.to_pydict() == expected
    # Cardinality exactly at the limit (2 distinct values) => still encoded
    opts.auto_dict_max_cardinality = 2
    table = self.read_bytes(rows, convert_options=opts)
    assert table.schema == schema
    assert table.to_pydict() == expected
    # Cardinality above max => plain-encoded
    opts.auto_dict_max_cardinality = 1
    table = self.read_bytes(rows, convert_options=opts)
    assert table.schema == pa.schema([('a', pa.string()),
                                      ('b', pa.int64())])
    assert table.to_pydict() == expected
    # With invalid UTF8, not checked
    opts.auto_dict_max_cardinality = 50
    opts.check_utf8 = False
    rows = b"a,b\nab,1\ncd\xff,2\nab,3"
    # validate_full=False: the column claims utf8 but holds \xff
    table = self.read_bytes(rows, convert_options=opts,
                            validate_full=False)
    assert table.schema == schema
    # Inspect dictionary values directly: 2 distinct entries
    dict_values = table['a'].chunk(0).dictionary
    assert len(dict_values) == 2
    assert dict_values[0].as_py() == "ab"
    assert dict_values[1].as_buffer() == b"cd\xff"
    # With invalid UTF8, checked => the column falls back to binary
    opts.check_utf8 = True
    table = self.read_bytes(rows, convert_options=opts)
    schema = pa.schema([('a', pa.dictionary(pa.int32(), pa.binary())),
                        ('b', pa.int64())])
    expected = {
        'a': [b"ab", b"cd\xff", b"ab"],
        'b': [1, 2, 3],
    }
    assert table.schema == schema
    assert table.to_pydict() == expected
def test_custom_nulls(self):
    """null_values, strings_can_be_null and quoted_strings_can_be_null
    together decide which cells convert to null."""
    # Infer nulls with custom values
    opts = ConvertOptions(null_values=['Xxx', 'Zzz'])
    rows = b"""a,b,c,d\nZzz,"Xxx",1,2\nXxx,#N/A,,Zzz\n"""
    table = self.read_bytes(rows, convert_options=opts)
    # Custom null_values replace the defaults, so '#N/A' stays a string
    schema = pa.schema([('a', pa.null()),
                        ('b', pa.string()),
                        ('c', pa.string()),
                        ('d', pa.int64())])
    assert table.schema == schema
    assert table.to_pydict() == {
        'a': [None, None],
        'b': ["Xxx", "#N/A"],
        'c': ["1", ""],
        'd': [2, None],
    }
    # With strings_can_be_null, null spellings convert in string columns too
    opts = ConvertOptions(null_values=['Xxx', 'Zzz'],
                          strings_can_be_null=True)
    table = self.read_bytes(rows, convert_options=opts)
    assert table.to_pydict() == {
        'a': [None, None],
        'b': [None, "#N/A"],
        'c': ["1", ""],
        'd': [2, None],
    }
    # Quoted cells ("Xxx") are exempted when quoted_strings_can_be_null off
    opts.quoted_strings_can_be_null = False
    table = self.read_bytes(rows, convert_options=opts)
    assert table.to_pydict() == {
        'a': [None, None],
        'b': ["Xxx", "#N/A"],
        'c': ["1", ""],
        'd': [2, None],
    }
    # Empty null_values list: nothing converts to null at all
    opts = ConvertOptions(null_values=[])
    rows = b"a,b\n#N/A,\n"
    table = self.read_bytes(rows, convert_options=opts)
    schema = pa.schema([('a', pa.string()),
                        ('b', pa.string())])
    assert table.schema == schema
    assert table.to_pydict() == {
        'a': ["#N/A"],
        'b': [""],
    }
def test_custom_bools(self):
    """Custom true_values/false_values drive boolean inference."""
    conv_opts = ConvertOptions(true_values=['T', 'yes'],
                               false_values=['F', 'no'])
    data = (b"a,b,c\n"
            b"True,T,t\n"
            b"False,F,f\n"
            b"True,yes,yes\n"
            b"False,no,no\n"
            b"N/A,N/A,N/A\n")
    table = self.read_bytes(data, convert_options=conv_opts)
    # Only column b uses the custom spellings throughout, so only
    # column b becomes boolean (with N/A converting to null)
    assert table.schema == pa.schema([('a', pa.string()),
                                      ('b', pa.bool_()),
                                      ('c', pa.string())])
    assert table.to_pydict() == {
        'a': ["True", "False", "True", "False", "N/A"],
        'b': [True, False, True, False, None],
        'c': ["t", "f", "yes", "no", "N/A"],
    }
def test_column_types(self):
    """column_types overrides inference per column; it accepts type
    names, DataType objects, or a whole Schema."""
    # Ask for specific column types in ConvertOptions
    opts = ConvertOptions(column_types={'b': 'float32',
                                        'c': 'string',
                                        'd': 'boolean',
                                        'e': pa.decimal128(11, 2),
                                        'zz': 'null'})
    rows = b"a,b,c,d,e\n1,2,3,true,1.0\n4,-5,6,false,0\n"
    table = self.read_bytes(rows, convert_options=opts)
    # 'a' falls back to inference; 'zz' names no CSV column and is ignored
    schema = pa.schema([('a', pa.int64()),
                        ('b', pa.float32()),
                        ('c', pa.string()),
                        ('d', pa.bool_()),
                        ('e', pa.decimal128(11, 2))])
    expected = {
        'a': [1, 4],
        'b': [2.0, -5.0],
        'c': ["3", "6"],
        'd': [True, False],
        'e': [Decimal("1.00"), Decimal("0.00")]
    }
    assert table.schema == schema
    assert table.to_pydict() == expected
    # Pass column_types as schema
    opts = ConvertOptions(
        column_types=pa.schema([('b', pa.float32()),
                                ('c', pa.string()),
                                ('d', pa.bool_()),
                                ('e', pa.decimal128(11, 2)),
                                ('zz', pa.bool_())]))
    table = self.read_bytes(rows, convert_options=opts)
    assert table.schema == schema
    assert table.to_pydict() == expected
    # One of the columns in column_types fails converting
    rows = b"a,b,c,d,e\n1,XXX,3,true,5\n4,-5,6,false,7\n"
    with pytest.raises(pa.ArrowInvalid) as exc:
        self.read_bytes(rows, convert_options=opts)
    err = str(exc.value)
    # The error message pinpoints the failing column and offending value
    assert "In CSV column #1: " in err
    assert "CSV conversion error to float: invalid value 'XXX'" in err
def test_column_types_dict(self):
    """Dictionary-encoded column types can be requested in ConvertOptions."""
    type_pairs = [
        ('a', pa.dictionary(pa.int32(), pa.utf8())),
        ('b', pa.dictionary(pa.int32(), pa.int64())),
        ('c', pa.dictionary(pa.int32(), pa.decimal128(11, 2))),
        ('d', pa.dictionary(pa.int32(), pa.large_utf8()))]
    conv_opts = ConvertOptions(column_types=dict(type_pairs))
    data = (b"a,b,c,d\n"
            b"abc,123456,1.0,zz\n"
            b"defg,123456,0.5,xx\n"
            b"abc,N/A,1.0,xx\n")
    table = self.read_bytes(data, convert_options=conv_opts)
    assert table.schema == pa.schema(type_pairs)
    assert table.to_pydict() == {
        'a': ["abc", "defg", "abc"],
        'b': [123456, 123456, None],
        'c': [Decimal("1.00"), Decimal("0.50"), Decimal("1.00")],
        'd': ["zz", "xx", "xx"],
    }
    # An index type other than int32 is not supported
    type_pairs[0] = ('a', pa.dictionary(pa.int8(), pa.utf8()))
    conv_opts = ConvertOptions(column_types=dict(type_pairs))
    with pytest.raises(NotImplementedError):
        self.read_bytes(data, convert_options=conv_opts)
def test_column_types_with_column_names(self):
    """When column_names is also given, names in column_types refer to
    the renamed columns, not the CSV's own first line."""
    data = b"a,b\nc,d\ne,f\n"
    read_opts = ReadOptions(column_names=['x', 'y'])
    conv_opts = ConvertOptions(column_types={'x': pa.binary()})
    table = self.read_bytes(data, read_options=read_opts,
                            convert_options=conv_opts)
    assert table.schema == pa.schema([('x', pa.binary()),
                                      ('y', pa.string())])
    assert table.to_pydict() == {
        'x': [b'a', b'c', b'e'],
        'y': ['b', 'd', 'f'],
    }
def test_no_ending_newline(self):
    """A file without a trailing newline still parses its last row."""
    data = b"a,b,c\n1,2,3\n4,5,6"
    table = self.read_bytes(data)
    assert table.to_pydict() == {
        'a': [1, 4],
        'b': [2, 5],
        'c': [3, 6],
    }
def test_trivial(self):
    """Degenerate input: a bit pointless, but it shouldn't crash."""
    table = self.read_bytes(b",\n\n")
    assert table.to_pydict() == {'': []}
def test_empty_lines(self):
    """Empty lines are ignored by default, kept as null rows otherwise."""
    data = b"a,b\n\r1,2\r\n\r\n3,4\r\n"
    table = self.read_bytes(data)
    assert table.to_pydict() == {
        'a': [1, 3],
        'b': [2, 4],
    }
    # Keep empty lines: they appear as all-null rows
    parse_opts = ParseOptions(ignore_empty_lines=False)
    table = self.read_bytes(data, parse_options=parse_opts)
    assert table.to_pydict() == {
        'a': [None, 1, None, 3],
        'b': [None, 2, None, 4],
    }
    # With skip_rows=2 the header becomes the "1,2" line
    read_opts = ReadOptions(skip_rows=2)
    table = self.read_bytes(data, parse_options=parse_opts,
                            read_options=read_opts)
    assert table.to_pydict() == {
        '1': [None, 3],
        '2': [None, 4],
    }
def test_invalid_csv(self):
    """Malformed CSV raises ArrowInvalid with a descriptive message."""
    bad = b"a,b,c\n1,2\n4,5,6\n"
    with pytest.raises(pa.ArrowInvalid, match="Expected 3 columns, got 2"):
        self.read_bytes(bad)
    bad = b"a,b,c\n1,2,3\n4"
    with pytest.raises(pa.ArrowInvalid, match="Expected 3 columns, got 1"):
        self.read_bytes(bad)
    # Inputs containing no data at all
    for bad in (b"", b"\n", b"\r\n", b"\r", b"\n\n"):
        with pytest.raises(pa.ArrowInvalid, match="Empty CSV file"):
            self.read_bytes(bad)
def test_options_delimiter(self):
    """The ParseOptions delimiter changes how fields are split."""
    data = b"a;b,c\nde,fg;eh\n"
    # Default delimiter: the comma
    table = self.read_bytes(data)
    assert table.to_pydict() == {
        'a;b': ['de'],
        'c': ['fg;eh'],
    }
    # Split on semicolons instead
    table = self.read_bytes(data, parse_options=ParseOptions(delimiter=';'))
    assert table.to_pydict() == {
        'a': ['de,fg'],
        'b,c': ['eh'],
    }
def test_small_random_csv(self):
    """Round-trip a small randomly generated CSV."""
    data, expected = make_random_csv(num_cols=2, num_rows=10)
    table = self.read_bytes(data)
    assert table.schema == expected.schema
    assert table.equals(expected)
    assert table.to_pydict() == expected.to_pydict()
def test_stress_block_sizes(self):
    """Many small block sizes stress the block-stitching logic."""
    csv_base, expected = make_random_csv(num_cols=2, num_rows=500)
    # Also exercise input without a trailing newline
    for csv in (csv_base, csv_base.rstrip(b'\r\n')):
        for block_size in (11, 12, 13, 17, 37, 111):
            read_opts = ReadOptions(block_size=block_size)
            table = self.read_bytes(csv, read_options=read_opts)
            assert table.schema == expected.schema
            if not table.equals(expected):
                # Produce a more readable diff than Table.equals
                assert table.to_pydict() == expected.to_pydict()
def test_stress_convert_options_blowup(self):
    # ARROW-6481: A convert_options with a very large number of columns
    # should not blow memory and CPU time.
    try:
        # Prefer per-thread CPU time; fall back to wall-clock time
        # where thread_time is unavailable
        clock = time.thread_time
    except AttributeError:
        clock = time.time
    num_columns = 10000
    col_names = ["K{}".format(i) for i in range(num_columns)]
    csv = make_empty_csv(col_names)
    t1 = clock()
    # Give explicit types to every other column only
    convert_options = ConvertOptions(
        column_types={k: pa.string() for k in col_names[::2]})
    table = self.read_bytes(csv, convert_options=convert_options)
    dt = clock() - t1
    # Check that processing time didn't blow up.
    # This is a conservative check (it takes less than 300 ms
    # in debug mode on my local machine).
    assert dt <= 10.0
    # Check result
    assert table.num_columns == num_columns
    assert table.num_rows == 0
    assert table.column_names == col_names
def test_cancellation(self):
    """SIGINT during a long read raises KeyboardInterrupt whose context
    is an ArrowCancelled carrying the signal number."""
    # Signal-based cancellation only works on the main thread
    if (threading.current_thread().ident !=
            threading.main_thread().ident):
        pytest.skip("test only works from main Python thread")
    # Skips test if not available
    raise_signal = util.get_raise_signal()
    # Make the interruptible workload large enough to not finish
    # before the interrupt comes, even in release mode on fast machines
    large_csv = b"a,b,c\n" + b"1,2,3\n" * 200_000_000

    def signal_from_thread():
        # Give read_bytes() a head start before interrupting
        time.sleep(0.2)
        raise_signal(signal.SIGINT)

    t1 = time.time()
    try:
        try:
            t = threading.Thread(target=signal_from_thread)
            with pytest.raises(KeyboardInterrupt) as exc_info:
                t.start()
                self.read_bytes(large_csv)
        finally:
            t.join()
    except KeyboardInterrupt:
        # In case KeyboardInterrupt didn't interrupt `self.read_bytes`
        # above, at least prevent it from stopping the test suite
        self.fail("KeyboardInterrupt didn't interrupt CSV reading")
    dt = time.time() - t1
    # Interruption should have arrived timely, well before a full read
    assert dt <= 1.0
    e = exc_info.value.__context__
    assert isinstance(e, pa.ArrowCancelled)
    assert e.signum == signal.SIGINT
def test_cancellation_disabled(self):
    """ARROW-12622: reading must not segfault when the cancelling
    signal handler is absent (e.g. when reading off the main thread)."""
    worker = threading.Thread(
        target=lambda: self.read_bytes(b"f64\n0.1"))
    worker.start()
    worker.join()
class TestSerialCSVRead(BaseTestCSVRead, unittest.TestCase):
    """Run the base CSV read tests with single-threaded reads."""

    def read_csv(self, *args, validate_full=True, **kwargs):
        # Force serial reading, then validate the resulting table
        read_options = kwargs.setdefault('read_options', ReadOptions())
        read_options.use_threads = False
        table = read_csv(*args, **kwargs)
        table.validate(full=validate_full)
        return table

    def test_row_numbers_in_errors(self):
        """ Row numbers are only correctly counted in serial reads """
        csv, _ = make_random_csv(4, 100, write_names=True)
        read_options = ReadOptions()
        # Several blocks, to exercise counting across block boundaries.
        # NOTE(review): this is a float; presumably block_size accepts
        # or coerces it -- confirm
        read_options.block_size = len(csv) / 3
        convert_options = ConvertOptions()
        convert_options.column_types = {"a": pa.int32(), "d": pa.int32()}

        # Test without skip_rows and column names in the csv
        csv_bad_columns = csv + b"1,2\r\n"
        with pytest.raises(pa.ArrowInvalid,
                           match="Row #102: Expected 4 columns, got 2"):
            self.read_bytes(csv_bad_columns, read_options=read_options,
                            convert_options=convert_options)

        csv_bad_type = csv + b"a,b,c,d\r\n"
        message_value = ("In CSV column #0: Row #102: " +
                         "CSV conversion error to int32: invalid value 'a'")
        with pytest.raises(pa.ArrowInvalid, match=message_value):
            self.read_bytes(csv_bad_type, read_options=read_options,
                            convert_options=convert_options)

        # Long offending rows are truncated to 96 bytes in the message
        long_row = (b"this is a long row" * 15) + b",3\r\n"
        csv_bad_columns_long = csv + long_row
        message_long = ("Row #102: Expected 4 columns, got 2: " +
                        long_row[0:96].decode("utf-8") + " ...")
        with pytest.raises(pa.ArrowInvalid, match=message_long):
            self.read_bytes(csv_bad_columns_long, read_options=read_options,
                            convert_options=convert_options)

        # Test skipping rows after the names: reported row numbers
        # stay the same as above
        read_options.skip_rows_after_names = 47
        with pytest.raises(pa.ArrowInvalid,
                           match="Row #102: Expected 4 columns, got 2"):
            self.read_bytes(csv_bad_columns, read_options=read_options,
                            convert_options=convert_options)
        with pytest.raises(pa.ArrowInvalid, match=message_value):
            self.read_bytes(csv_bad_type, read_options=read_options,
                            convert_options=convert_options)
        with pytest.raises(pa.ArrowInvalid, match=message_long):
            self.read_bytes(csv_bad_columns_long, read_options=read_options,
                            convert_options=convert_options)
        read_options.skip_rows_after_names = 0

        # Test without skip_rows and column names not in the csv
        # (one less line in the file, so the bad row is row #101)
        csv, _ = make_random_csv(4, 100, write_names=False)
        read_options.column_names = ["a", "b", "c", "d"]
        csv_bad_columns = csv + b"1,2\r\n"
        with pytest.raises(pa.ArrowInvalid,
                           match="Row #101: Expected 4 columns, got 2"):
            self.read_bytes(csv_bad_columns, read_options=read_options,
                            convert_options=convert_options)

        csv_bad_columns_long = csv + long_row
        message_long = ("Row #101: Expected 4 columns, got 2: " +
                        long_row[0:96].decode("utf-8") + " ...")
        with pytest.raises(pa.ArrowInvalid, match=message_long):
            self.read_bytes(csv_bad_columns_long, read_options=read_options,
                            convert_options=convert_options)

        csv_bad_type = csv + b"a,b,c,d\r\n"
        message_value = ("In CSV column #0: Row #101: " +
                         "CSV conversion error to int32: invalid value 'a'")
        with pytest.raises(pa.ArrowInvalid, match=message_value):
            self.read_bytes(csv_bad_type, read_options=read_options,
                            convert_options=convert_options)

        # Test with skip_rows and column names not in the csv
        read_options.skip_rows = 23
        with pytest.raises(pa.ArrowInvalid,
                           match="Row #101: Expected 4 columns, got 2"):
            self.read_bytes(csv_bad_columns, read_options=read_options,
                            convert_options=convert_options)
        with pytest.raises(pa.ArrowInvalid, match=message_value):
            self.read_bytes(csv_bad_type, read_options=read_options,
                            convert_options=convert_options)
class TestParallelCSVRead(BaseTestCSVRead, unittest.TestCase):
    """Run the base CSV read tests with multi-threaded reads."""

    def read_csv(self, *args, validate_full=True, **kwargs):
        # Force threaded reading, then validate the resulting table
        opts = kwargs.setdefault('read_options', ReadOptions())
        opts.use_threads = True
        result = read_csv(*args, **kwargs)
        result.validate(full=validate_full)
        return result
@pytest.mark.parametrize('use_threads', [False, True])
class TestStreamingCSVRead:
    """Tests for open_csv() streaming reads, in both threading modes."""

    def open_bytes(self, b, use_threads, **kwargs):
        # Wrap a bytes payload in an Arrow buffer and open it
        return self.open_csv(pa.py_buffer(b), use_threads, **kwargs)

    def open_csv(self, b, use_threads, *args, **kwargs):
        # Open a streaming reader with the requested threading mode
        read_options = kwargs.setdefault('read_options', ReadOptions())
        read_options.use_threads = use_threads
        return open_csv(b, *args, **kwargs)

    def check_reader(self, reader, expected_schema, expected_data):
        # Validate the schema, then each batch against expected_data
        # (one dict per expected batch)
        assert reader.schema == expected_schema
        batches = list(reader)
        assert len(batches) == len(expected_data)
        for batch, expected_batch in zip(batches, expected_data):
            batch.validate(full=True)
            assert batch.schema == expected_schema
            assert batch.to_pydict() == expected_batch

    def test_file_object(self, use_threads):
        data = b"a,b\n1,2\n3,4\n"
        expected_data = {'a': [1, 3], 'b': [2, 4]}
        bio = io.BytesIO(data)
        reader = self.open_csv(bio, use_threads)
        expected_schema = pa.schema([('a', pa.int64()),
                                     ('b', pa.int64())])
        self.check_reader(reader, expected_schema, [expected_data])

    def test_header(self, use_threads):
        # Header only: null-typed columns and no data batches
        rows = b"abc,def,gh\n"
        reader = self.open_bytes(rows, use_threads)
        expected_schema = pa.schema([('abc', pa.null()),
                                     ('def', pa.null()),
                                     ('gh', pa.null())])
        self.check_reader(reader, expected_schema, [])

    def test_inference(self, use_threads):
        # Inference is done on first block
        rows = b"a,b\n123,456\nabc,de\xff\ngh,ij\n"
        expected_schema = pa.schema([('a', pa.string()),
                                     ('b', pa.binary())])
        read_options = ReadOptions()
        read_options.block_size = len(rows)
        reader = self.open_bytes(rows, use_threads, read_options=read_options)
        self.check_reader(reader, expected_schema,
                          [{'a': ['123', 'abc', 'gh'],
                            'b': [b'456', b'de\xff', b'ij']}])
        # Smaller block: same inferred schema, data split over two batches
        read_options.block_size = len(rows) - 1
        reader = self.open_bytes(rows, use_threads, read_options=read_options)
        self.check_reader(reader, expected_schema,
                          [{'a': ['123', 'abc'],
                            'b': [b'456', b'de\xff']},
                           {'a': ['gh'],
                            'b': [b'ij']}])

    def test_inference_failure(self, use_threads):
        # Inference on first block, then conversion failure on second block
        rows = b"a,b\n123,456\nabc,de\xff\ngh,ij\n"
        read_options = ReadOptions()
        read_options.block_size = len(rows) - 7
        reader = self.open_bytes(rows, use_threads, read_options=read_options)
        expected_schema = pa.schema([('a', pa.int64()),
                                     ('b', pa.int64())])
        assert reader.schema == expected_schema
        assert reader.read_next_batch().to_pydict() == {
            'a': [123], 'b': [456]
        }
        # Second block: non-integer values no longer fit the schema
        with pytest.raises(ValueError,
                           match="CSV conversion error to int64"):
            reader.read_next_batch()
        # EOF
        with pytest.raises(StopIteration):
            reader.read_next_batch()

    def test_invalid_csv(self, use_threads):
        # CSV errors on first block
        rows = b"a,b\n1,2,3\n4,5\n6,7\n"
        read_options = ReadOptions()
        read_options.block_size = 10
        with pytest.raises(pa.ArrowInvalid,
                           match="Expected 2 columns, got 3"):
            reader = self.open_bytes(
                rows, use_threads, read_options=read_options)
        # CSV errors on second block
        rows = b"a,b\n1,2\n3,4,5\n6,7\n"
        read_options.block_size = 8
        reader = self.open_bytes(rows, use_threads, read_options=read_options)
        assert reader.read_next_batch().to_pydict() == {'a': [1], 'b': [2]}
        with pytest.raises(pa.ArrowInvalid,
                           match="Expected 2 columns, got 3"):
            reader.read_next_batch()
        # Cannot continue after a parse error
        with pytest.raises(StopIteration):
            reader.read_next_batch()

    def test_options_delimiter(self, use_threads):
        # The delimiter option changes how fields are split
        rows = b"a;b,c\nde,fg;eh\n"
        reader = self.open_bytes(rows, use_threads)
        expected_schema = pa.schema([('a;b', pa.string()),
                                     ('c', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'a;b': ['de'],
                            'c': ['fg;eh']}])
        opts = ParseOptions(delimiter=';')
        reader = self.open_bytes(rows, use_threads, parse_options=opts)
        expected_schema = pa.schema([('a', pa.string()),
                                     ('b,c', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'a': ['de,fg'],
                            'b,c': ['eh']}])

    def test_no_ending_newline(self, use_threads):
        # No \n after last line
        rows = b"a,b,c\n1,2,3\n4,5,6"
        reader = self.open_bytes(rows, use_threads)
        expected_schema = pa.schema([('a', pa.int64()),
                                     ('b', pa.int64()),
                                     ('c', pa.int64())])
        self.check_reader(reader, expected_schema,
                          [{'a': [1, 4],
                            'b': [2, 5],
                            'c': [3, 6]}])

    def test_empty_file(self, use_threads):
        with pytest.raises(ValueError, match="Empty CSV file"):
            self.open_bytes(b"", use_threads)

    def test_column_options(self, use_threads):
        # With column_names
        rows = b"1,2,3\n4,5,6"
        read_options = ReadOptions()
        read_options.column_names = ['d', 'e', 'f']
        reader = self.open_bytes(rows, use_threads, read_options=read_options)
        expected_schema = pa.schema([('d', pa.int64()),
                                     ('e', pa.int64()),
                                     ('f', pa.int64())])
        self.check_reader(reader, expected_schema,
                          [{'d': [1, 4],
                            'e': [2, 5],
                            'f': [3, 6]}])
        # With include_columns: output follows include_columns order
        convert_options = ConvertOptions()
        convert_options.include_columns = ['f', 'e']
        reader = self.open_bytes(rows, use_threads, read_options=read_options,
                                 convert_options=convert_options)
        expected_schema = pa.schema([('f', pa.int64()),
                                     ('e', pa.int64())])
        self.check_reader(reader, expected_schema,
                          [{'e': [2, 5],
                            'f': [3, 6]}])
        # With column_types
        convert_options.column_types = {'e': pa.string()}
        reader = self.open_bytes(rows, use_threads, read_options=read_options,
                                 convert_options=convert_options)
        expected_schema = pa.schema([('f', pa.int64()),
                                     ('e', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'e': ["2", "5"],
                            'f': [3, 6]}])
        # Missing columns in include_columns raise by default
        convert_options.include_columns = ['g', 'f', 'e']
        with pytest.raises(
                KeyError,
                match="Column 'g' in include_columns does not exist"):
            reader = self.open_bytes(rows, use_threads,
                                     read_options=read_options,
                                     convert_options=convert_options)
        # ... unless include_missing_columns is enabled
        convert_options.include_missing_columns = True
        reader = self.open_bytes(rows, use_threads, read_options=read_options,
                                 convert_options=convert_options)
        expected_schema = pa.schema([('g', pa.null()),
                                     ('f', pa.int64()),
                                     ('e', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'g': [None, None],
                            'e': ["2", "5"],
                            'f': [3, 6]}])
        # A typed missing column gets the requested type (still all-null)
        convert_options.column_types = {'e': pa.string(), 'g': pa.float64()}
        reader = self.open_bytes(rows, use_threads, read_options=read_options,
                                 convert_options=convert_options)
        expected_schema = pa.schema([('g', pa.float64()),
                                     ('f', pa.int64()),
                                     ('e', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'g': [None, None],
                            'e': ["2", "5"],
                            'f': [3, 6]}])

    def test_encoding(self, use_threads):
        # latin-1 (invalid utf-8)
        rows = b"a,b\nun,\xe9l\xe9phant"
        read_options = ReadOptions()
        reader = self.open_bytes(rows, use_threads, read_options=read_options)
        # Without a declared encoding, non-utf8 column b is read as binary
        expected_schema = pa.schema([('a', pa.string()),
                                     ('b', pa.binary())])
        self.check_reader(reader, expected_schema,
                          [{'a': ["un"],
                            'b': [b"\xe9l\xe9phant"]}])
        read_options.encoding = 'latin1'
        reader = self.open_bytes(rows, use_threads, read_options=read_options)
        expected_schema = pa.schema([('a', pa.string()),
                                     ('b', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'a': ["un"],
                            'b': ["éléphant"]}])
        # utf-16 (little-endian, with \xff\xfe BOM)
        rows = (b'\xff\xfea\x00,\x00b\x00\n\x00u\x00n\x00,'
                b'\x00\xe9\x00l\x00\xe9\x00p\x00h\x00a\x00n\x00t\x00')
        read_options.encoding = 'utf16'
        reader = self.open_bytes(rows, use_threads, read_options=read_options)
        expected_schema = pa.schema([('a', pa.string()),
                                     ('b', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'a': ["un"],
                            'b': ["éléphant"]}])

    def test_small_random_csv(self, use_threads):
        csv, expected = make_random_csv(num_cols=2, num_rows=10)
        reader = self.open_bytes(csv, use_threads)
        table = reader.read_all()
        assert table.schema == expected.schema
        assert table.equals(expected)
        assert table.to_pydict() == expected.to_pydict()

    def test_stress_block_sizes(self, use_threads):
        # Test a number of small block sizes to stress block stitching
        csv_base, expected = make_random_csv(num_cols=2, num_rows=500)
        block_sizes = [19, 21, 23, 26, 37, 111]
        csvs = [csv_base, csv_base.rstrip(b'\r\n')]
        for csv in csvs:
            for block_size in block_sizes:
                # Need at least two lines for type inference
                assert csv[:block_size].count(b'\n') >= 2
                read_options = ReadOptions(block_size=block_size)
                reader = self.open_bytes(
                    csv, use_threads, read_options=read_options)
                table = reader.read_all()
                assert table.schema == expected.schema
                if not table.equals(expected):
                    # Better error output
                    assert table.to_pydict() == expected.to_pydict()

    def test_batch_lifetime(self, use_threads):
        gc.collect()
        old_allocated = pa.total_allocated_bytes()

        # Memory occupation should not grow with CSV file size
        def check_one_batch(reader, expected):
            batch = reader.read_next_batch()
            assert batch.to_pydict() == expected

        rows = b"10,11\n12,13\n14,15\n16,17\n"
        read_options = ReadOptions()
        read_options.column_names = ['a', 'b']
        read_options.block_size = 6
        reader = self.open_bytes(rows, use_threads, read_options=read_options)
        check_one_batch(reader, {'a': [10], 'b': [11]})
        allocated_after_first_batch = pa.total_allocated_bytes()
        # Consuming each batch should release the previous one's memory
        check_one_batch(reader, {'a': [12], 'b': [13]})
        assert pa.total_allocated_bytes() <= allocated_after_first_batch
        check_one_batch(reader, {'a': [14], 'b': [15]})
        assert pa.total_allocated_bytes() <= allocated_after_first_batch
        check_one_batch(reader, {'a': [16], 'b': [17]})
        assert pa.total_allocated_bytes() <= allocated_after_first_batch
        with pytest.raises(StopIteration):
            reader.read_next_batch()
        assert pa.total_allocated_bytes() == old_allocated
        reader = None
        assert pa.total_allocated_bytes() == old_allocated
class BaseTestCompressedCSVRead:
    """Shared harness for reading compressed CSV files from disk."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='arrow-csv-test-')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def read_csv(self, csv_path):
        # Skip (rather than fail) when the codec isn't available
        try:
            return read_csv(csv_path)
        except pa.ArrowNotImplementedError as e:
            pytest.skip(str(e))

    def test_random_csv(self):
        contents, expected = make_random_csv(num_cols=2, num_rows=100)
        path = os.path.join(self.tmpdir, self.csv_filename)
        self.write_file(path, contents)
        result = self.read_csv(path)
        result.validate(full=True)
        assert result.schema == expected.schema
        assert result.equals(expected)
        assert result.to_pydict() == expected.to_pydict()
class TestGZipCSVRead(BaseTestCompressedCSVRead, unittest.TestCase):
    """Gzip-compressed CSV reads, including concatenated gzip members."""
    csv_filename = "compressed.csv.gz"
    def write_file(self, path, contents):
        with gzip.open(path, 'wb', 3) as f:
            f.write(contents)
    def test_concatenated(self):
        # ARROW-5974: a gzip file consisting of several members (appended
        # streams) must be read as one continuous CSV stream.
        csv_path = os.path.join(self.tmpdir, self.csv_filename)
        with gzip.open(csv_path, 'wb', 3) as f:
            f.write(b"ab,cd\nef,gh\n")
        with gzip.open(csv_path, 'ab', 3) as f:
            f.write(b"ij,kl\nmn,op\n")
        table = self.read_csv(csv_path)
        assert table.to_pydict() == {
            'ab': ['ef', 'ij', 'mn'],
            'cd': ['gh', 'kl', 'op'],
        }
class TestBZ2CSVRead(BaseTestCompressedCSVRead, unittest.TestCase):
    """Bz2-compressed CSV reads (write side goes through BZ2File)."""
    csv_filename = "compressed.csv.bz2"
    def write_file(self, path, contents):
        with bz2.BZ2File(path, 'w') as f:
            f.write(contents)
def test_read_csv_does_not_close_passed_file_handles():
    # ARROW-4823: reading must leave caller-owned file objects open.
    handle = io.BytesIO(b"a,b,c\n1,2,3\n4,5,6")
    read_csv(handle)
    assert handle.closed is False
def test_write_read_round_trip():
    """Tables and record batches survive a CSV write/read round trip,
    via the one-shot writer as well as the incremental CSVWriter."""
    t = pa.Table.from_arrays([[1, 2, 3], ["a", "b", "c"]], ["c1", "c2"])
    record_batch = t.to_batches(max_chunksize=4)[0]
    for data in [t, record_batch]:
        # Test with header
        buf = io.BytesIO()
        write_csv(data, buf, WriteOptions(include_header=True))
        buf.seek(0)
        assert t == read_csv(buf)
        # Test without header
        buf = io.BytesIO()
        write_csv(data, buf, WriteOptions(include_header=False))
        buf.seek(0)
        # Without a header row the column names must be supplied explicitly.
        read_options = ReadOptions(column_names=t.column_names)
        assert t == read_csv(buf, read_options=read_options)
    # Test with writer
    for read_options, write_options in [
        (None, WriteOptions(include_header=True)),
        (ReadOptions(column_names=t.column_names),
         WriteOptions(include_header=False)),
    ]:
        buf = io.BytesIO()
        with CSVWriter(buf, t.schema, write_options=write_options) as writer:
            writer.write_table(t)
        buf.seek(0)
        assert t == read_csv(buf, read_options=read_options)
        buf = io.BytesIO()
        with CSVWriter(buf, t.schema, write_options=write_options) as writer:
            # Writing one-row batches must produce the identical file.
            for batch in t.to_batches(max_chunksize=1):
                writer.write_batch(batch)
        buf.seek(0)
        assert t == read_csv(buf, read_options=read_options)
def test_read_csv_reference_cycle():
    # ARROW-13187: the Table returned by read_csv must not participate in
    # a reference cycle, i.e. refcounting alone should free it.
    def inner():
        buf = io.BytesIO(b"a,b,c\n1,2,3\n4,5,6")
        table = read_csv(buf)
        return weakref.ref(table)
    with util.disabled_gc():
        wr = inner()
    # With the cyclic GC disabled, the weakref must already be dead.
    assert wr() is None
|
multi_threads.py | from threading import Thread
class MultiThreads:
    """Run queued threads in batches of at most ``max_threads``.

    Usage: queue targets with :meth:`add`, then call :meth:`start`.
    Threads are started in order; after every ``max_threads`` starts (and
    once more at the end) all started threads are joined.
    """
    # Class-level names kept for backward compatibility with code that
    # reads them; real state is per-instance (set in __init__).
    threads = None
    max_threads = 4
    to_run = None

    def __init__(self):
        self.threads = []
        self.to_run = []

    def add(self, target: callable, args: tuple):
        """Queue a daemon thread that will run ``target(*args)``."""
        self.threads.append(Thread(target=target, args=args, daemon=True))

    def _run_processes(self, callback: callable = None, n: int = None):
        """Join all started threads when *n* is falsy (a batch boundary).

        Preserves the original contract: *callback* fires once per thread
        in ``to_run`` on every call, whether or not a join happened.
        """
        for t in self.to_run:
            if not n:
                t.join()
            # Was `callback is not None and callback()` — a boolean
            # expression used purely for its side effect; made explicit.
            if callback is not None:
                callback()

    def start(self, callback: callable = None):
        """Start every queued thread, joining after each full batch and
        once more when all threads have been started."""
        for n, t in enumerate(self.threads):  # starting all threads
            t.start()
            self.to_run.append(t)
            # (n + 1) % max_threads == 0 marks a completed batch.
            self._run_processes(callback, (n + 1) % self.max_threads)
        self._run_processes(callback)
        self.threads = []
|
main.py | import threading
from typing import Dict
from commands import COMMANDS
from loggers import logger
from pika import BlockingConnection, ConnectionParameters
from pika.exceptions import (
AMQPChannelError,
AMQPConnectionError,
ConnectionClosedByBroker,
)
from pydantic import ValidationError
from schemas import Package, Storage
from settings import QUEUE_NAME, RABBITMQ_HOST, THREADS
storage: Dict[str, str] = Storage()
lock = threading.Lock()
def on_message_callback(ch, method, _properties, body):
    """Handle one delivery: parse the body, dispatch to a command, ack.

    The message is acked only after the command ran successfully; on a
    validation failure the function returns without acking.
    """
    t_id = threading.get_ident()
    try:
        package = Package.parse_raw(body)
        logger.debug(f"Thread-{t_id}: {package}")
    except ValidationError:
        # Malformed payload: log and bail out (no ack).
        logger.error(f"Invalid message format: {body}")
        return
    try:
        processor = COMMANDS[package.command]
        params = dict(package)
        # Commands receive the shared storage alongside the package fields.
        params["storage"] = storage
        # storage is shared across consumer threads; serialize mutations.
        with lock:
            processor(**params)
        ch.basic_ack(delivery_tag=method.delivery_tag)
    except KeyError:
        # NOTE(review): unknown commands are never acked, so the broker can
        # redeliver them — confirm that is the intended behavior.
        logger.error(f"Command is not implemented: {package.command}")
def consume() -> None:
    """Consume QUEUE_NAME forever, reconnecting on recoverable AMQP errors.

    Runs in its own thread; breaks out of the loop only on errors that are
    not worth retrying (broker-initiated close, channel error).
    """
    t_id = threading.get_ident()
    while True:
        try:
            connection = BlockingConnection(
                ConnectionParameters(RABBITMQ_HOST)
            )
            channel = connection.channel()
            channel.queue_declare(queue=QUEUE_NAME)
            channel.basic_consume(QUEUE_NAME, on_message_callback)
            logger.debug(f"Thread-{t_id}: Consuming started")
            # Blocks until the connection/channel fails or is closed.
            channel.start_consuming()
        except ConnectionClosedByBroker:
            # Deliberate close by the broker: stop this consumer thread.
            logger.error(f"Thread-{t_id}: Connection was closed by broker")
            break
        except AMQPChannelError:
            logger.error(f"Thread-{t_id}: Channel error")
            break
        except AMQPConnectionError:
            # Transient network problem: loop around and reconnect.
            logger.debug(f"Thread-{t_id}: Recovering connection")
            continue
if __name__ == "__main__":
    # One consumer per thread. Threads are non-daemon, so the process stays
    # alive as long as any consumer loop is still running.
    for _ in range(THREADS):
        new_thread = threading.Thread(target=consume)
        new_thread.start()
|
shell2restapi.py | #!/usr/bin/env python
##########################################################################
#
# Name: Shell to REST API
__version__ = '1.5'
__author__ = 'Grigol Sirbiladze'
# E-mail: grigoli@gmail.com
# Date: 10/2019
# License: MIT
#
##########################################################################
import json
import subprocess
import signal
from time import sleep
from datetime import datetime
from textwrap import dedent
from threading import Thread
from sys import exit, version_info, argv
from os.path import dirname, splitext, basename, join
from argparse import ArgumentParser, RawTextHelpFormatter
if version_info[0] < 3:
from SocketServer import ThreadingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
else:
from socketserver import ThreadingMixIn
from http.server import HTTPServer, BaseHTTPRequestHandler
def dprint(string):
    """Print *string* prefixed with the current local timestamp."""
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S%Z')
    print("{}:> {}".format(stamp, string))
class MThreadHTTPServer(ThreadingMixIn, HTTPServer):
    """Threaded HTTP Server.

    FIX: ThreadingMixIn must come *before* HTTPServer in the base list so
    its process_request override (one thread per request) is found first in
    the MRO; with the reversed order BaseServer.process_request wins and
    every request is handled serially.
    """
class ShellToWebHandler(BaseHTTPRequestHandler):
    """HTTP handler exposing command statuses as JSON.

    ``cmd_status_store`` is injected as a *class* attribute by
    SafeShellToWebServer before the server starts serving.
    """
    def __init__(self, request, client_address, server):
        BaseHTTPRequestHandler.__init__(self, request, client_address, server)
        # Since GET gets initialized after, we won't have "cmd_status_store"
        # instantiated here. It has to be done from outside (the base
        # __init__ above already handled the request; this is a fallback).
        self.cmd_status_store = {}
    @staticmethod
    def jresponse(data):
        """ Returns a JSON structured string (dicts pass through, anything
            else is wrapped as {"data": str(...)}) """
        return json.dumps(data) if type(data) is dict else json.dumps({"data" : str(data)})
    def response_header(self, response=200, complete_response=True, headers={}):
        """ Response Header Assembler
        -------------------------
        response: HTTP response code (Default: 200)
        headers: dictionary of headers {"Key1":"Value1", "Key2":"Value2", ...} (Default: Empty)
        complete_response: Finalize header response True/False (Default: True)
        """
        self.send_response(response)
        self.send_header('Content-Type', 'application/json')
        # self.send_header('Python-Verion-Info', str(version_info))
        if type(headers) is dict:
            for hk, hv in headers.items(): self.send_header(hk, str(hv))
        if complete_response: self.end_headers()
    def do_GET(self):
        """ Serve the command index (unknown paths) or one command's
            latest recorded status. """
        if not hasattr(self, 'cmd_status_store') or self.path not in self.cmd_status_store.keys():
            # dprint("cmd_status_store: %s" % self.cmd_status_store)
            response_content = self.jresponse({
                "name" : "Shell to REST API",
                "version": __version__,
                "commands_list" : list(self.cmd_status_store.keys())
            })
        else:
            response_content = self.jresponse(self.cmd_status_store[self.path].get('status', {}))
        self.response_header()
        self.wfile.write(response_content.encode())
class SafeShellToWebServer(object):
    """ SafeShellToWebServer

    Periodically runs the configured shell commands in background threads
    and serves their latest output over HTTP as JSON. Shuts down cleanly
    on SIGINT/SIGTERM.
    """
    def __init__(self, configuration, server_address='', port=8989):
        # *configuration* maps URL paths to command descriptors; the same
        # dict doubles as the status store read by the HTTP handler.
        if not configuration or type(configuration) is not dict:
            raise(Exception("Shell commands are missing (Invalid parameter 'configuration=%s')" % (str(configuration))))
        self.config_and_status = configuration
        self.web_2_shell_handler = ShellToWebHandler
        self.web_2_shell_server = MThreadHTTPServer((server_address, port), self.web_2_shell_handler)
        # Injected as a class attribute because handlers are instantiated
        # per request and take no extra constructor arguments.
        self.web_2_shell_handler.cmd_status_store = self.config_and_status
        self._thread_list = set()
        self._running = True
    def execute_command(self, timeout, command, *arguments):
        """ Execute shell command

        Runs `command arguments...`, polling for completion for up to
        *timeout* seconds (or until shutdown), and returns a decoded
        (stdout, stderr) tuple. Timeouts kill the child and are reported
        through the stderr channel.
        """
        def pwait(ep, seconds=5):
            # Poll the child every 0.2 s (5 polls per second).
            steps = 0
            max_step = seconds * 5
            while ep.poll() is None and steps<max_step and self._running:
                sleep(0.2)
                steps+=1
            if ep.poll() is None:
                # Still running after the timeout (or shutdown): kill it.
                ep.kill()
                cmdln = ' '.join(ep.args)
                raise(Exception('%s\nCOMMAND TIMEOUT' % (cmdln)))
            return ep.communicate()
        exec_command, out, err = [], b'',b''
        exec_command.append(command)
        if len(arguments)>0: exec_command.extend(arguments)
        try:
            ep = subprocess.Popen(exec_command, bufsize=0, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
            out, err = pwait(ep, timeout)
        except Exception as e:
            # Launch/timeout failures are folded into stderr output.
            err_msg = """command: "%s"\narguments: %s\nfailed with:%s"""%(command, arguments, str(e))
            err = bytes(err_msg.encode('utf8'))
        finally:
            return (out.decode('utf8'), err.decode('utf8'))
    def _command_dedicated_process(self, cmdpath):
        """ Run all commands and propagate status """
        cmd = self.config_and_status[cmdpath]
        # Timeout can be overwritten by setting up overwrite_timeout
        # If command wants more than 60 seconds
        timeout = cmd.get('timeout', 5) if cmd.get('timeout', 5) < 60 else cmd.get('overwrite_timeout', 60)
        interval = cmd.get('interval', 5)
        command = cmd.get('command')
        arguments = cmd.get('arguments', [])
        # Make sure all are string
        arguments = [ str(a) for a in arguments ]
        def wait(seconds=5):
            # Interruptible sleep: returns early when shutting down.
            steps = 0
            max_step = seconds*5
            while steps<max_step and self._running:
                sleep(0.2)
                steps+=1
        dprint("Starting executing '%s(%s)', timeout: %s, interval: %s" % (cmdpath, command, timeout, interval))
        while self._running:
            (out, err) = self.execute_command(timeout, command, *arguments)
            # Prefer structured (JSON) output; fall back to a line list.
            try:
                out = json.loads(out)
            except:
                out = out.split('\n')
            try:
                err = json.loads(err)
            except:
                err = err.split('\n')
            cmd['status'] = { 'message' : out, 'error' : err}
            if interval >= 0:
                wait(interval)
            else:
                # A negative interval means "run once".
                break
        dprint("End executing '%s(%s)' " % (cmdpath, command))
    def _start_command_threads(self):
        """ Run each command in a dedicated thread """
        for cmdpath in self.config_and_status.keys():
            thrd = Thread(target=self._command_dedicated_process, args=(cmdpath,), name=cmdpath)
            self._thread_list.add(thrd)
            thrd.start()
    @property
    def thread_list(self):
        # Names of the command threads currently tracked.
        return set([ tl.name for tl in self._thread_list ])
    def _count_live_threads(self):
        """ Clear stack from stopped threads and return live count"""
        stopped_threads = [ thrd for thrd in self._thread_list if not thrd.is_alive() ]
        for thrd in stopped_threads: self._thread_list.remove(thrd)
        # Return live threads count
        return len(self._thread_list)
    def _signal_handler(self, signum, frame):
        """ Signals process stopper """
        dprint('Shutting down server ...')
        self._running = False
        if hasattr(self.web_2_shell_server, 'socket'):
            self.web_2_shell_server.socket.close()
        self.web_2_shell_server.shutdown()
        # Busy-wait until every command thread notices _running == False.
        while self._count_live_threads() > 0: sleep(0.01)
    def _register_signals(self):
        """ Start listening to OS signals """
        signal.signal(signal.SIGINT, self._signal_handler)
        signal.signal(signal.SIGTERM, self._signal_handler)
    def run_server(self):
        """ Run Server """
        try:
            self._register_signals()
            self._start_command_threads()
            # Serve HTTP from a background thread; the main thread idles
            # below so it can keep receiving OS signals.
            Thread(target=self.web_2_shell_server.serve_forever).start()
            dprint("HTTP Server is listening on '%s:%s' ..." % (self.web_2_shell_server.server_address))
        except Exception as e:
            dprint("Unable to start HTTP Server ...\n ---> %s\n ---> Exiting"%e)
            exit(1)
        while self._running: sleep(0.2)
def get_cli_parameters():
    """Build the CLI parser: bind address, port and a JSON configuration
    given inline or via a ``file://`` path (default: <script name>.json)."""
    parser = ArgumentParser(usage="%(prog)s", formatter_class=RawTextHelpFormatter)
    parser.add_argument("-a", "--server-address", default="0.0.0.0", help="Server address (default: %(default)s)")
    parser.add_argument("-p", "--port", default=8989, type=int, help="Server port (default: %(default)s)")
    parser.add_argument("-c", "--config", default="file://%s.json" % join(dirname(argv[0]), splitext(basename(argv[0]))[0]),
                        help=dedent("""\
                            JSON format String or configuration file (default: %(default)s)
                            Configuration format example:
                            {
                                "/path1" : {
                                    "interval": (default: 5), # If it's set to less than 0, command will be executed only once
                                    "timeout": (default: 5),  # Not more than 1 minute (60 seconds)
                                    "command" : "cmd",
                                    "arguments" : ["arg1", "arg2", "arg3", ...]
                                },
                                "/path2" : {
                                    "command" : "cmd",
                                    "arguments" : ["arg1", "arg2", "arg3", ...]
                                }
                            }
                            """)
                        )
    return parser
if __name__ == '__main__':
    parser = get_cli_parameters()
    arguments = parser.parse_args()
    try:
        # A "file://" prefix selects a JSON config file; otherwise the
        # option value itself is parsed as inline JSON.
        if arguments.config[:7] == 'file://':
            with open(arguments.config[7:]) as config: configuration = json.load(config)
        else:
            configuration = json.loads(arguments.config)
        SafeShellToWebServer(configuration=configuration, server_address=arguments.server_address, port=arguments.port).run_server()
    except Exception as e:
        # Any startup failure (bad JSON, bind error, ...) prints the error
        # followed by usage help.
        dprint(e)
        parser.print_help()
|
WeaveBluezMgr.py | #
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# BLE Central support for Weave Device Manager via BlueZ APIs.
#
import abc
import dbus
import dbus.service
import dbus.mainloop.glib
import gc
import logging
import os
import pprint
import subprocess
import shlex
import sys
import threading
import time
import traceback
import uuid
import Queue
import optparse
from optparse import OptionParser, Option, OptionValueError
from ctypes import *
try:
from gi.repository import GObject
except:
from pgi.repository import GObject
from WeaveBleUtility import *
from WeaveBleUtility import _VoidPtrToUUIDString
from WeaveBleUtility import _VoidPtrToByteArray
from WeaveBleBase import WeaveBleBase
weave_service = uuid.UUID('0000FEAF-0000-1000-8000-00805F9B34FB')
weave_tx = uuid.UUID('18EE2EF5-263D-4559-959F-4F9C429F9D11')
weave_rx = uuid.UUID('18EE2EF5-263D-4559-959F-4F9C429F9D12')
weave_service_short = uuid.UUID('0000FEAF-0000-0000-0000-000000000000')
BLUEZ_NAME = 'org.bluez'
ADAPTER_INTERFACE = BLUEZ_NAME + '.Adapter1'
DEVICE_INTERFACE = BLUEZ_NAME + '.Device1'
SERVICE_INTERFACE = BLUEZ_NAME + '.GattService1'
CHARACTERISTIC_INTERFACE = BLUEZ_NAME + '.GattCharacteristic1'
DBUS_PROPERTIES = 'org.freedesktop.DBus.Properties'
bleScanConnectGuardSec = 2.0
bleStatusTransitionTimeoutSec = 5.0
bleScanDefaultTimeoutSec = 10.0
bleConnectTimeoutSec = 15.0
bleDisConnectTimeoutSec = 10.0
bleSeviceDiscoveryTimeoutSec = 5.0
bleCharDiscoveryTimeoutSec = 5.0
bleSubscribeTimeoutSec = 5.0
bleWriteCharacteristicTimeoutSec = 10.0
bleIdleDelta = 0.1
secondsToMilliseconds= 1000
def get_bluez_objects(bluez, bus, interface, prefix_path):
    """Collect proxies for every BlueZ managed object implementing *interface*.

    Returns a list of {'object': <dbus proxy>, 'path': <object path>} dicts
    for objects whose path starts with *prefix_path*; an empty list when any
    argument is None.

    FIX: the original named a local variable ``slice``, shadowing the
    builtin, and indexed the (path, interfaces) pair as item[0]/item[1].
    NOTE: .iteritems() is kept deliberately — this module is Python 2
    (it uses print statements and the Queue module elsewhere).
    """
    results = []
    if bluez is None or bus is None or interface is None or prefix_path is None:
        return results
    for path, interfaces in bluez.GetManagedObjects().iteritems():
        # Skip objects that do not implement the requested interface.
        if not interfaces.get(interface):
            continue
        if path.startswith(prefix_path):
            results.append({
                'object': bus.get_object(BLUEZ_NAME, path),
                'path': path,
            })
    return results
class BluezDbusAdapter():
    """Wrapper around a BlueZ org.bluez.Adapter1 D-Bus object.

    Provides discovery control, property accessors and PropertiesChanged
    signal plumbing for one Bluetooth adapter.
    """
    def __init__(self, bluez_obj, bluez, bus, logger=None):
        self.logger = logger if logger else logging.getLogger('WeaveBLEMgr')
        self.object = bluez_obj
        self.adapter = dbus.Interface(bluez_obj, ADAPTER_INTERFACE)
        self.adapter_properties = dbus.Interface(bluez_obj, DBUS_PROPERTIES)
        # Set when the 'Discovering' property flips (see the signal handler).
        self.adapter_event = threading.Event()
        self.bluez = bluez
        self.bus = bus
        self.path = self.adapter.object_path
        self.signalReceiver = None

    def __del__(self):
        self.destroy()

    def destroy(self):
        """Drop all D-Bus references so this wrapper can be collected."""
        self.logger.debug("destroy adapter")
        self.adapter_unregister_signal()
        self.adapter = None
        self.adapter_properties = None
        self.adapter_event.clear()
        self.bluez = None
        self.bus = None
        self.object = None
        self.path = None
        self.signalReceiver = None

    def adapter_register_signal(self):
        """Subscribe to PropertiesChanged for this adapter (idempotent)."""
        if self.signalReceiver is None:
            self.logger.debug("add adapter signal")
            self.signalReceiver = self.bus.add_signal_receiver(self.adapter_on_prop_changed_cb,
                                                               bus_name=BLUEZ_NAME,
                                                               dbus_interface=DBUS_PROPERTIES,
                                                               signal_name="PropertiesChanged",
                                                               path=self.path)

    def adapter_unregister_signal(self):
        """Remove the PropertiesChanged subscription, if any."""
        if self.signalReceiver is not None:
            self.logger.debug(" remove adapter signal")
            self.bus.remove_signal_receiver(self.signalReceiver,
                                            signal_name="PropertiesChanged",
                                            dbus_interface="org.freedesktop.DBus.Properties")
            # FIX: clear the handle so a later adapter_register_signal()
            # actually re-subscribes (mirrors BluezDbusGattCharacteristic).
            self.signalReceiver = None

    def adapter_on_prop_changed_cb(self, interface, changed_properties, invalidated_properties):
        """PropertiesChanged handler: wake waiters on 'Discovering' changes."""
        if len(changed_properties) == 0:
            self.logger.debug( "changed_properties is empty")
            return
        if len(invalidated_properties) > 0:
            self.logger.debug( "invalidated_properties is not empty %s" % str(invalidated_properties))
            return
        if interface == ADAPTER_INTERFACE:
            if 'Discovering' in changed_properties:
                self.adapter_event.set()

    def adapter_bg_scan(self, enable):
        """Start (enable=True) or stop BLE discovery and wait for the
        'Discovering' property change to confirm the transition."""
        self.adapter_event.clear()
        action_flag = False
        try:
            if enable:
                if not self.Discovering:
                    action_flag = True
                    self.logger.info( "scanning started")
                    self.adapter.StartDiscovery()
                else:
                    self.logger.info("it has started scanning")
            else:
                if self.Discovering:
                    action_flag = True
                    self.adapter.StopDiscovery()
                    self.logger.info("scanning stopped")
                else:
                    # FIX: was a bare Python 2 `print` statement; every other
                    # status message in this class goes through the logger.
                    self.logger.info("it has stopped scanning")
            if action_flag:
                if not self.adapter_event.wait(bleStatusTransitionTimeoutSec):
                    if enable:
                        self.logger.debug("scan start error")
                    else:
                        self.logger.debug("scan stop error")
            self.adapter_event.clear()
        except dbus.exceptions.DBusException as ex:
            self.adapter_event.clear()
            self.logger.debug(str(ex))
        except:
            self.logger.debug(traceback.format_exc())

    @property
    def Address(self):
        """The adapter's Bluetooth MAC address, or None on D-Bus error."""
        try:
            result = self.adapter_properties.Get(ADAPTER_INTERFACE, 'Address')
            return result
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return None
        except:
            self.logger.debug(traceback.format_exc())
            return None

    @property
    def UUIDs(self):
        """Service UUIDs advertised by the adapter, or None on error."""
        try:
            return self.adapter_properties.Get(ADAPTER_INTERFACE, 'UUIDs')
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return None
        except:
            self.logger.debug(traceback.format_exc())
            return None

    def SetDiscoveryFilter(self, dict):
        # NOTE: the parameter name shadows the builtin `dict`; kept as-is
        # to avoid breaking callers that pass it by keyword.
        try:
            self.adapter.SetDiscoveryFilter(dict)
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
        except:
            self.logger.debug(traceback.format_exc())

    @property
    def Discovering(self):
        """Whether the adapter is currently scanning (False on error)."""
        try:
            result = self.adapter_properties.Get(ADAPTER_INTERFACE, 'Discovering')
            return bool(result)
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return False
        except:
            self.logger.debug(traceback.format_exc())
            return False

    def DiscoverableTimeout(self, timeoutSec):
        """Set the DiscoverableTimeout property; False on error."""
        try:
            result = self.adapter_properties.Set(ADAPTER_INTERFACE, 'DiscoverableTimeout', timeoutSec)
            return bool(result)
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return False
        except:
            self.logger.debug(traceback.format_exc())
            return False

    def Powered(self, enable):
        """Power the adapter on/off via the Powered property; False on error."""
        try:
            result = self.adapter_properties.Set(ADAPTER_INTERFACE, 'Powered', enable)
            return bool(result)
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return False
        except:
            self.logger.debug(traceback.format_exc())
            return False

    def find_devices(self, uuids):
        """Return known devices advertising any of the given service *uuids*."""
        devices = [BluezDbusDevice(p['object'], self.bluez, self.bus, self.logger) for p in get_bluez_objects(self.bluez, self.bus, DEVICE_INTERFACE, self.path)]
        found = []
        for device in devices:
            # FIX: device.uuids returns None on a D-Bus error; guard so the
            # scan does not die with a TypeError on one bad device.
            for i in (device.uuids or []):
                if i in uuids:
                    found.append(device)
                    break
        return found

    def clear_adapter(self):
        """Disconnect and remove every device known to this adapter
        (best effort — individual failures are ignored)."""
        devices = [BluezDbusDevice(p['object'], self.bluez, self.bus, self.logger) for p in get_bluez_objects(self.bluez, self.bus, DEVICE_INTERFACE, self.path)]
        for device in devices:
            try:
                if device.Connected:
                    device.device_bg_connect(False)
                self.adapter.RemoveDevice(device.device.object_path)
            except:
                pass
class BluezDbusDevice():
    """Wrapper around a BlueZ org.bluez.Device1 D-Bus object (a peripheral).

    Exposes connect/disconnect, GATT service discovery and read-only
    property accessors; every accessor returns None/False on D-Bus errors.
    """
    def __init__(self, bluez_obj, bluez, bus, logger=None):
        self.logger = logger if logger else logging.getLogger('WeaveBLEMgr')
        self.object = bluez_obj
        self.device = dbus.Interface(bluez_obj, DEVICE_INTERFACE)
        self.device_properties = dbus.Interface(bluez_obj, DBUS_PROPERTIES)
        self.path = self.device.object_path
        # Set when the 'Connected' property flips (see the signal handler).
        self.device_event = threading.Event()
        # Derive a stable id from the device name when available, otherwise
        # fall back to a random UUID.
        if self.Name:
            self.device_id = uuid.uuid3(uuid.NAMESPACE_DNS, self.Name.encode('utf-8'))
        else:
            self.device_id = uuid.uuid4()
        self.bluez = bluez
        self.bus = bus
        self.signalReceiver = None
        self.path = self.device.object_path
    def __del__(self):
        self.destroy()
    def destroy(self):
        """Drop all D-Bus references so this wrapper can be collected."""
        self.logger.debug("destroy device")
        self.device_unregister_signal()
        self.device = None
        self.device_properties = None
        self.device_event = None
        self.device_id = None
        self.bluez = None
        self.bus = None
        self.object = None
        self.signalReceiver = None
    def device_register_signal(self):
        # Subscribe to PropertiesChanged for this device (idempotent).
        if self.signalReceiver is None:
            self.logger.debug("add device signal")
            self.signalReceiver = self.bus.add_signal_receiver(self.device_on_prop_changed_cb,
                                                               bus_name=BLUEZ_NAME,
                                                               dbus_interface=DBUS_PROPERTIES,
                                                               signal_name="PropertiesChanged",
                                                               path=self.path)
    def device_unregister_signal(self):
        # NOTE(review): signalReceiver is not reset to None here (unlike
        # BluezDbusGattCharacteristic), so a later device_register_signal()
        # call would be skipped — confirm whether that is intended.
        if self.signalReceiver is not None:
            self.logger.debug("remove device signal")
            self.bus.remove_signal_receiver(self.signalReceiver,
                                            signal_name="PropertiesChanged",
                                            dbus_interface=DBUS_PROPERTIES)
    def device_on_prop_changed_cb(self, interface, changed_properties, invalidated_properties):
        # PropertiesChanged handler: wake waiters on 'Connected' changes.
        if len(changed_properties) == 0:
            self.logger.debug( "changed_properties is empty")
            return
        if len(invalidated_properties) > 0:
            self.logger.debug( "invalidated_properties is not empty %s" % str(invalidated_properties))
            return
        if interface == DEVICE_INTERFACE:
            if 'Connected' in changed_properties:
                self.device_event.set()
    def device_bg_connect(self, enable):
        """Connect (enable=True) or disconnect, then wait for the
        'Connected' property change to confirm the transition."""
        # Guard delay between a scan and the connect/disconnect attempt.
        time.sleep(bleScanConnectGuardSec)
        action_flag = False
        self.device_event.clear()
        try:
            if enable:
                if not self.Connected:
                    action_flag = True
                    self.device.Connect()
                    self.logger.info("BLE connecting")
                else:
                    self.logger.info("BLE has connected")
            else:
                if self.Connected:
                    action_flag = True
                    self.device.Disconnect()
                    self.logger.info("BLE disconnected")
                else:
                    self.logger.info("BLE has disconnected")
            if action_flag:
                if not self.device_event.wait(bleStatusTransitionTimeoutSec):
                    if enable:
                        self.logger.info("BLE connect error")
                    else:
                        self.logger.info("BLE disconnect error")
            self.device_event.clear()
        except dbus.exceptions.DBusException as ex:
            self.device_event.clear()
            self.logger.info(str(ex))
        except:
            self.logger.debug(traceback.format_exc())
    def service_discover(self, gatt_dic):
        """Poll until services are resolved and return the first GATT
        service whose UUID is listed in gatt_dic['services'], else None."""
        self.logger.info('Discovering services')
        try:
            expired = time.time() + bleSeviceDiscoveryTimeoutSec
            while time.time() < expired:
                if self.ServicesResolved:
                    services = [BluezDbusGattService(p['object'], self.bluez, self.bus, self.logger) for p in get_bluez_objects(self.bluez, self.bus, SERVICE_INTERFACE, self.path)]
                    for service in services:
                        if service.uuid in gatt_dic['services']:
                            self.logger.info("Service discovering success")
                            return service
                time.sleep(bleIdleDelta)
            self.logger.error("Service discovering fail")
            return None
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return None
        except:
            self.logger.debug(traceback.format_exc())
            return None
    @property
    def uuids(self):
        # Advertised service UUIDs, normalized to full uuid.UUID objects.
        try:
            uuids = self.device_properties.Get(DEVICE_INTERFACE, 'UUIDs')
            uuid_result = []
            for i in uuids:
                # Expand 16-bit short UUIDs; note the zero base used here
                # matches weave_service_short above (not the standard
                # Bluetooth base UUID).
                if len(str(i)) == 4:
                    uuid_normal = '0000%s-0000-0000-0000-000000000000' % i
                else:
                    uuid_normal = i
                uuid_result.append(uuid.UUID(str(uuid_normal)))
            return uuid_result
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return None
        except:
            self.logger.debug(traceback.format_exc())
            return None
    @property
    def Address(self):
        # Bluetooth MAC address, or None on error.
        try:
            return self.device_properties.Get(DEVICE_INTERFACE, 'Address')
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return None
        except:
            self.logger.debug(traceback.format_exc())
            return None
    @property
    def Name(self):
        # Advertised device name, or None on error.
        try:
            name = self.device_properties.Get(DEVICE_INTERFACE, 'Name')
            return name
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return None
        except:
            self.logger.debug(traceback.format_exc())
            return None
    @property
    def Connected(self):
        # Connection state as a bool (False on error).
        try:
            result = self.device_properties.Get(DEVICE_INTERFACE, 'Connected')
            return bool(result)
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return False
        except:
            self.logger.debug(traceback.format_exc())
            return False
    @property
    def TxPower(self):
        try:
            return self.device_properties.Get(DEVICE_INTERFACE, 'TxPower')
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return None
        except:
            self.logger.debug(traceback.format_exc())
            return None
    @property
    def RSSI(self):
        try:
            result = self.device_properties.Get(DEVICE_INTERFACE, 'RSSI')
            return result
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return None
        except:
            self.logger.debug(traceback.format_exc())
            return None
    @property
    def Adapter(self):
        # Object path of the owning adapter, or None on error.
        try:
            return self.device_properties.Get(DEVICE_INTERFACE, 'Adapter')
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return None
        except:
            self.logger.debug(traceback.format_exc())
            return None
    @property
    def ServiceData(self):
        try:
            return self.device_properties.Get(DEVICE_INTERFACE, 'ServiceData')
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return None
        except:
            self.logger.debug(traceback.format_exc())
            return None
    @property
    def ServicesResolved(self):
        # True once BlueZ has finished GATT service resolution.
        try:
            result = self.device_properties.Get(DEVICE_INTERFACE, 'ServicesResolved')
            return bool(result)
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return False
        except:
            self.logger.debug(traceback.format_exc())
            return False
class BluezDbusGattService():
    """Wrapper around a BlueZ org.bluez.GattService1 D-Bus object.

    Property accessors return None/False on D-Bus errors.
    """
    def __init__(self, bluez_obj, bluez, bus, logger=None):
        self.logger = logger if logger else logging.getLogger('WeaveBLEMgr')
        self.object = bluez_obj
        self.service = dbus.Interface(bluez_obj, SERVICE_INTERFACE)
        self.service_properties = dbus.Interface(bluez_obj, DBUS_PROPERTIES)
        self.bluez = bluez
        self.bus = bus
        self.path = self.service.object_path
    def __del__(self):
        self.destroy()
    def destroy(self):
        """Drop all D-Bus references so this wrapper can be collected."""
        self.logger.debug("destroy GattService")
        self.service = None
        self.service_properties = None
        self.bluez = None
        self.bus = None
        self.object = None
        self.path = None
    @property
    def uuid(self):
        # Service UUID as a uuid.UUID, or None on error.
        try:
            result = uuid.UUID(str(self.service_properties.Get(SERVICE_INTERFACE, 'UUID')))
            return result
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return None
        except:
            self.logger.debug(traceback.format_exc())
            return None
    @property
    def Primary(self):
        # Whether this is a primary service (False on error).
        try:
            result =bool(self.service_properties.Get(SERVICE_INTERFACE, 'Primary'))
            return result
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return False
        except:
            self.logger.debug(traceback.format_exc())
            return False
    @property
    def Device(self):
        # Object path of the device owning this service, or None on error.
        try:
            result = self.service_properties.Get(SERVICE_INTERFACE, 'Device')
            return result
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return None
        except:
            self.logger.debug(traceback.format_exc())
            return None
    def find_characteristic(self, uuid):
        """Poll (up to bleCharDiscoveryTimeoutSec) for a characteristic of
        this service with the given UUID; return it or None."""
        try:
            expired = time.time() + bleCharDiscoveryTimeoutSec
            while time.time() < expired:
                characteristics = [BluezDbusGattCharacteristic(p['object'], self.bluez, self.bus, self.logger) for p in get_bluez_objects(self.bluez, self.bus, CHARACTERISTIC_INTERFACE, self.path)]
                for characteristic in characteristics:
                    if characteristic.uuid == uuid:
                        return characteristic
                time.sleep(bleIdleDelta)
            self.logger.error("Char discovering fail")
            return None
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return None
        except:
            self.logger.debug(traceback.format_exc())
            return None
class BluezDbusGattCharacteristic():
    """Wrapper around a BlueZ org.bluez.GattCharacteristic1 D-Bus object.

    Supports asynchronous writes and Value-change notifications; the
    ``received`` callback is invoked with each new Value.
    """
    def __init__(self, bluez_obj, bluez, bus, logger=None):
        self.logger = logger if logger else logging.getLogger('WeaveBLEMgr')
        self.object = bluez_obj
        self.characteristic = dbus.Interface(bluez_obj, CHARACTERISTIC_INTERFACE)
        self.characteristic_properties = dbus.Interface(bluez_obj, DBUS_PROPERTIES)
        # Notification callback, set by StartNotify.
        self.received = None
        self.path = self.characteristic.object_path
        self.bluez = bluez
        self.bus = bus
        self.signalReceiver = None
    def __del__(self):
        self.destroy()
    def destroy(self):
        """Drop all D-Bus references so this wrapper can be collected."""
        self.logger.debug("destroy GattCharacteristic")
        self.gattCharacteristic_unregister_signal()
        self.characteristic = None
        self.object = None
        self.characteristic_properties = None
        self.received = None
        self.bluez = None
        self.bus = None
        self.path = None
        self.signalReceiver = None
    def gattCharacteristic_register_signal(self):
        # Subscribe to PropertiesChanged for this characteristic (idempotent).
        if not self.signalReceiver:
            self.logger.debug("add GattCharacteristic signal")
            self.signalReceiver = self.bus.add_signal_receiver(self.gatt_on_characteristic_changed_cb,
                                                               bus_name=BLUEZ_NAME,
                                                               dbus_interface=DBUS_PROPERTIES,
                                                               signal_name="PropertiesChanged",
                                                               path=self.path)
    def gattCharacteristic_unregister_signal(self):
        # Remove the subscription and clear the handle so a later
        # register call re-subscribes.
        if self.signalReceiver:
            self.logger.debug("remove GattCharacteristic signal")
            self.bus.remove_signal_receiver(self.signalReceiver,
                                            bus_name=BLUEZ_NAME,
                                            signal_name="PropertiesChanged",
                                            dbus_interface=DBUS_PROPERTIES,
                                            path=self.path)
            self.signalReceiver = None
    def gatt_on_characteristic_changed_cb(self, interface, changed_properties, invalidated_properties):
        # Forward new Value payloads to the registered callback, if any.
        self.logger.debug("property change in" + str(self.characteristic) + str(changed_properties))
        if len(changed_properties) == 0:
            return
        if len(invalidated_properties) > 0:
            return
        if interface == CHARACTERISTIC_INTERFACE:
            if 'Value' in changed_properties:
                if self.received:
                    self.received(changed_properties['Value'])
    def WriteValue(self, value, options, reply_handler, error_handler, timeout):
        """Asynchronously write *value*; results arrive via the handlers."""
        try:
            self.characteristic.WriteValue(value, options, reply_handler=reply_handler, error_handler=error_handler, timeout=timeout)
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
        except:
            self.logger.debug(traceback.format_exc())
    @property
    def uuid(self):
        # Characteristic UUID as a uuid.UUID, or None on error.
        try:
            result = uuid.UUID(str(self.characteristic_properties.Get(CHARACTERISTIC_INTERFACE, 'UUID')))
            return result
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return None
        except:
            self.logger.debug(traceback.format_exc())
            return None
    def StartNotify(self, cbfunct, reply_handler, error_handler, timeout):
        """Enable notifications, routing each new Value to *cbfunct*."""
        try:
            if not cbfunct:
                self.logger.info("please provide the notify callback function")
            self.received = cbfunct
            self.gattCharacteristic_register_signal()
            self.characteristic.StartNotify(reply_handler=reply_handler, error_handler=error_handler, timeout=timeout)
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
        except:
            self.logger.debug(traceback.format_exc())
    def StopNotify(self, reply_handler, error_handler, timeout):
        """Disable notifications and detach the callback."""
        try:
            self.logger.debug("stopping notifying")
            self.characteristic.StopNotify(reply_handler=reply_handler, error_handler=error_handler, timeout=timeout)
            self.gattCharacteristic_unregister_signal()
            self.received = None
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
        except:
            self.logger.debug(traceback.format_exc())
    @property
    def Notifying(self):
        # Whether notifications are currently enabled (False on error).
        try:
            result = self.characteristic_properties.Get(CHARACTERISTIC_INTERFACE, 'Notifying')
            return bool(result)
        except dbus.exceptions.DBusException as ex:
            self.logger.debug(str(ex))
            return False
        except:
            self.logger.debug(traceback.format_exc())
            return False
class BluezManager(WeaveBleBase):
    def __init__(self, devMgr, logger=None):
        """Wire the Weave device manager's BLE callbacks to BlueZ over D-Bus."""
        if logger:
            self.logger = logger
        else:
            self.logger = logging.getLogger('WeaveBLEMgr')
            logging.basicConfig(
                level=logging.INFO,
                format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
        self.scan_quiet= False
        self.peripheral_list = []
        self.weave_queue = Queue.Queue()
        self.Gmainloop = None
        self.daemon_thread = None
        self.adapter = None
        # GLib/dbus threading must be initialized before the GLib main loop
        # is run from a background thread.
        dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
        GObject.threads_init()
        dbus.mainloop.glib.threads_init()
        self.bus = dbus.SystemBus()
        self.bluez = dbus.Interface(self.bus.get_object(BLUEZ_NAME, '/'), 'org.freedesktop.DBus.ObjectManager')
        self.target = None
        self.service = None
        self.orig_input_hook = None
        self.hookFuncPtr = None
        self.connect_state = False
        self.loop_condition = True
        self.tx = None
        self.rx = None
        self.setInputHook(self.readlineCB)
        self.devMgr = devMgr
        self.devMgr.SetBlockingCB(self.devMgrCB)
        # Closures forwarding the device manager's BLE hooks to this object.
        def HandleBleEventCB():
            return self.GetBleEvent()
        def HandleBleWriteCharCB(connObj, svcId, charId, buffer, length):
            return self.WriteBleCharacteristic(connObj, svcId, charId, buffer, length)
        def HandleBleSubscribeCB(connObj, svcId, charId, subscribe):
            return self.SubscribeBleCharacteristic(connObj, svcId, charId, subscribe)
        def HandleBleCloseCB(connObj):
            return self.CloseBle(connObj)
        self.devMgr.SetBleEventCB(HandleBleEventCB)
        self.devMgr.SetBleWriteCharCB(HandleBleWriteCharCB)
        self.devMgr.SetBleSubscribeCharCB(HandleBleSubscribeCB)
        self.devMgr.SetBleCloseCB(HandleBleCloseCB)
def __del__(self):
self.disconnect()
self.setInputHook(self.orig_input_hook)
self.devMgr.SetBlockingCB(None)
self.devMgr.SetBleEventCB(None)
def ble_adapter_select(self, identifier=None):
if self.adapter:
self.adapter.destroy()
self.adapter = None
self.adapter = self.get_adapter_by_addr(identifier)
self.adapter.adapter_register_signal()
self.adapter.Powered(False)
self.adapter.Powered(True)
def ble_adapter_print(self):
try:
adapters = [BluezDbusAdapter(p['object'], self.bluez, self.bus, self.logger) for p in get_bluez_objects(self.bluez, self.bus, ADAPTER_INTERFACE, '/org/bluez')]
for i in range(len(adapters)):
self.logger.info("adapter %s = %s" % (i, adapters[i].Address))
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
def get_adapter_by_addr(self, identifier):
try:
adapters = [BluezDbusAdapter(p['object'], self.bluez, self.bus, self.logger) for p in get_bluez_objects(self.bluez, self.bus, ADAPTER_INTERFACE, '/org/bluez')]
if identifier is None:
return adapters[0]
if len(adapters) > 0:
for adapter in adapters:
if str(adapter.Address).upper() == str(identifier).upper():
return adapter
self.logger.info("adapter %s cannot be found, expect the ble mac address" % (identifier))
return None
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
def stop_thread(self, userData):
self.logger.info("stop_thread")
self.timeout_happen = True
self.loop_condition = False
self.Gmainloop.quit()
return False
def runLoopUntil(self, target=None, **kwargs):
if target:
self.daemon_thread = threading.Thread(target=self.running_thread, args=(target, kwargs))
self.daemon_thread.daemon = True
self.daemon_thread.start()
self.timeout_happen = False
if kwargs and 'timeout' in kwargs:
self.source_id = GObject.timeout_add(kwargs['timeout'] * secondsToMilliseconds, self.stop_thread, None)
try:
self.Gmainloop = GObject.MainLoop()
self.Gmainloop.run()
if kwargs and 'timeout' in kwargs:
if not self.timeout_happen:
GObject.source_remove(self.source_id)
except KeyboardInterrupt:
self.loop_condition = False
self.Gmainloop.quit()
sys.exit(1)
def running_thread(self, target, kwargs):
try:
while not self.Gmainloop or not self.Gmainloop.is_running():
time.sleep(0.00001)
target(**kwargs)
except Exception, err:
traceback.print_exc()
finally:
self.Gmainloop.quit()
def setInputHook(self, hookFunc):
"""Set the PyOS_InputHook to call the specific function."""
hookFunctionType = CFUNCTYPE(None)
self.hookFuncPtr = hookFunctionType(hookFunc)
pyos_inputhook_ptr = c_void_p.in_dll(pythonapi, "PyOS_InputHook")
# save the original so that on del we can revert it back to the way it was.
self.orig_input_hook = cast(pyos_inputhook_ptr.value, PYFUNCTYPE(c_int))
# set the new hook. readLine will call this periodically as it polls for input.
pyos_inputhook_ptr.value = cast(self.hookFuncPtr, c_void_p).value
def runIdleLoop(self, **kwargs):
time.sleep(0)
pass
def devMgrCB(self):
self.runLoopUntil(self.runIdleLoop)
def readlineCB(self):
self.runLoopUntil(self.runIdleLoop)
if self.orig_input_hook:
self.orig_input_hook()
def Usage(self, cmd):
line = "USAGE: "
if cmd == "scan":
line += "ble-scan [-t <timeout>] [<name>|<identifier>] [-q quiet]"
elif cmd == "scan-connect":
line += "ble-scan-connect [-t <timeout>] <name> [-q quiet]"
self.logger.info(line)
def scan_connect(self, line):
""" API to perform both scan and connect operations in one call."""
args = self.ParseInputLine(line, "scan-connect")
if not args:
return False
if not self.adapter:
self.logger.info("use default adapter")
self.ble_adapter_select()
self.scan_quiet= args[1]
self.scan(line)
if self.target:
return self.connect(args[2])
else:
self.logger.info("Failed to scan device named: " + args[2] + ". Connection skipped.")
return False
def ParseInputLine(self, line, cmd=None):
if cmd == "scan" or cmd == "scan-connect":
args = shlex.split(line)
optParser = OptionParser(usage=optparse.SUPPRESS_USAGE)
optParser.add_option("-t", "--timeout", action="store", dest="timeout", type="float", default=bleScanDefaultTimeoutSec)
optParser.add_option("-q", "--quiet", action="store_true", dest="quiet")
try:
(options, remainingArgs) = optParser.parse_args(args)
except SystemExit:
self.Usage(cmd)
return None
if len(remainingArgs) > 1:
self.Usage(cmd)
return None
name = None
if len(remainingArgs):
name = str(remainingArgs[0])
elif cmd == "scan-connect":
self.Usage(cmd)
return None
return (options.timeout, options.quiet, name)
else:
args = shlex.split(line)
optParser = OptionParser(usage=optparse.SUPPRESS_USAGE)
try:
(options, remainingArgs) = optParser.parse_args(args)
except SystemExit:
return None
return remainingArgs
def dump_scan_result(self, device):
self.logger.info("{0:<10}{1}".format("Name =", device.Name))
self.logger.info("{0:<10}{1}".format("ID =", device.device_id))
self.logger.info("{0:<10}{1}".format("RSSI =", device.RSSI))
self.logger.info("{0:<10}{1}".format("address =", device.Address))
self.logger.info("ADV data: " + ("".join([str(i) for i in dict(device.ServiceData).keys()])) if device.ServiceData else '')
self.logger.info("")
def scan_bg_implementation(self, **kwargs):
self.adapter.clear_adapter()
with self.weave_queue.mutex:
self.weave_queue.queue.clear()
self.adapter.adapter_bg_scan(True)
found = False
identifier = kwargs['identifier']
while True:
if not self.loop_condition:
break
self.peripheral_list = self.adapter.find_devices([weave_service, weave_service_short])
for device in self.peripheral_list:
try:
if not self.scan_quiet:
# display all scanned results
self.dump_scan_result(device)
if device.Name == identifier or str(device.Address).upper() == str(identifier.upper()):
if self.scan_quiet:
# only display the scanned target's info when quiet
self.dump_scan_result(device)
self.target = device
found = True
break
except:
pass
if found:
break
time.sleep(bleIdleDelta)
self.adapter.adapter_bg_scan(False)
self.Gmainloop.quit()
def scan(self, line):
args = self.ParseInputLine(line, "scan")
if not args:
return False
self.target = None
if not self.adapter:
self.logger.info("use default adapter")
self.ble_adapter_select()
del self.peripheral_list[:]
self.scan_quiet= args[1]
self.loop_condition = True
self.runLoopUntil(self.scan_bg_implementation, timeout=args[0], identifier=args[2])
return True
def weaveServieCharConnect(self):
gatt_dic={'services': [weave_service, weave_service_short], 'chars': [weave_tx, weave_rx]}
self.service = self.target.service_discover(gatt_dic)
if self.service is None:
self.logger.info("weave service cannot be found")
return False
self.rx = self.service.find_characteristic(weave_rx)
if self.rx is None:
self.logger.info("weave rx char cannot be found")
return False
self.tx = self.service.find_characteristic(weave_tx)
if self.tx is None:
self.logger.info("weave tx char cannot be found")
self.connect_state = False
return False
return True
def connect_bg_implementation(self, **kwargs):
self.connect_state = False
if self.adapter is None:
self.logger.info("adapter is not configured")
return
self.target.device_register_signal()
self.target.device_bg_connect(True)
if self.weaveServieCharConnect():
self.logger.info("connect success")
self.connect_state = True
else:
self.logger.info("connect fail")
self.connect_state = False
def disconnect_bg_implementation(self, **kwargs):
if self.target:
self.target.device_bg_connect(False)
if self.tx:
self.tx.destroy()
self.tx = None
if self.rx:
self.rx.destroy()
self.rx = None
if self.service:
self.service.destroy()
self.service = None
def connect(self, identifier):
self.loop_condition = True
found = False
self.logger.info("trying to connect to " + identifier)
for p in self.peripheral_list:
p_id = str(p.device_id)
p_name = str(p.Name)
p_address = str(p.Address)
self.logger.debug(p_id + " vs " + str(identifier))
self.logger.debug(p_name + " vs " + str(identifier))
self.logger.debug(p_address + " vs " + str(identifier))
if p_id == str(identifier) or p_name == str(identifier) or p_address.upper() == str(identifier).upper():
self.target = p
found = True
break
if found:
self.runLoopUntil(self.connect_bg_implementation, identifier = identifier, timeout=bleConnectTimeoutSec)
if self.connect_state:
return True
else:
return False
else:
print "device cannot be found"
return False
def disconnect(self):
self.loop_condition = True
self.runLoopUntil(self.disconnect_bg_implementation)
for i in range(2):
n = gc.collect()
self.logger.debug("Unreached objects: %d", n)
self.logger.debug("Final Garbage:")
self.logger.debug(pprint.pformat(gc.garbage))
def WriteCharactertisticSuccessCB(self, *args):
self.logger.debug("write complete")
if self.devMgr:
txEvent = BleTxEvent(charId=self.charId_tx, svcId=self.svcId_tx, status=True)
self.weave_queue.put(txEvent)
self.devMgr.DriveBleIO()
def WriteCharactertisticErrorCB(self, *args):
self.logger.debug("write fail, error:" + repr(args))
if self.devMgr:
txEvent = BleTxEvent(charId=self.charId_tx, svcId=self.svcId_tx, status=False)
self.weave_queue.put(txEvent)
self.devMgr.DriveBleIO()
def WriteBleCharacteristic(self, connObj, svcId, charId, buffer, length):
self.logger.debug("write start")
result = False
if self.target and self.target.Connected:
converted_data = str(_VoidPtrToByteArray(buffer, length))
self.charId_tx = bytearray(uuid.UUID(str(_VoidPtrToUUIDString(charId, 16))).bytes)
self.svcId_tx = bytearray(uuid.UUID(str(_VoidPtrToUUIDString(svcId, 16))).bytes)
self.tx.WriteValue(dbus.Array([dbus.Byte(ord(i)) for i in converted_data], 'y'),
options="",
reply_handler=self.WriteCharactertisticSuccessCB,
error_handler=self.WriteCharactertisticErrorCB,
timeout=bleWriteCharacteristicTimeoutSec)
result = True
else:
self.logger.warning("WARNING: peripheral is no longer connected.")
return result
def receivedNotificationCB(self, data):
self.logger.debug("received data")
bytes = bytearray(data)
if self.devMgr:
rxEvent = BleRxEvent(charId=self.charId_rx, svcId=self.svcId_rx, buffer=bytes)
self.weave_queue.put(rxEvent)
self.devMgr.DriveBleIO()
def subscribeSuccessCb(self, *args):
self.logger.debug("subscribe complete")
if self.rx.Notifying:
success = True
else:
success = False
operation = BleSubscribeOperation_Subscribe
if self.devMgr:
subscribeEvent = BleSubscribeEvent(charId=self.charId_rx,
svcId=self.svcId_rx,
status=success,
operation=operation)
self.weave_queue.put(subscribeEvent)
self.devMgr.DriveBleIO()
def subscribeErrorCb(self, *args):
self.logger.error("subscribe fail, error:" + repr(args))
success = False
operation = BleSubscribeOperation_Subscribe
if self.devMgr:
subscribeEvent = BleSubscribeEvent(charId=self.charId_rx,
svcId=self.svcId_rx,
status=success,
operation=operation)
self.weave_queue.put(subscribeEvent)
self.devMgr.DriveBleIO()
def unsubscribeSuccessCb(self, *args):
self.logger.debug("unsubscribe complete")
success = True
operation = BleSubscribeOperation_Unsubscribe
if self.devMgr:
subscribeEvent = BleSubscribeEvent(charId=self.charId_rx,
svcId=self.svcId_rx,
status=success,
operation=operation)
self.weave_queue.put(subscribeEvent)
self.devMgr.DriveBleIO()
def unsubscribeErrorCb(self, *args):
self.logger.error("unsubscribe fail, error:" + repr(args))
success = False
operation = BleSubscribeOperation_Unsubscribe
if self.devMgr:
subscribeEvent = BleSubscribeEvent(charId=self.charId_rx,
svcId=self.svcId_rx,
status=success,
operation=operation)
self.weave_queue.put(subscribeEvent)
self.devMgr.DriveBleIO()
def SubscribeBleCharacteristic(self, connObj, svcId, charId, subscribe):
result = False
self.charId_rx = bytearray(uuid.UUID(_VoidPtrToUUIDString(charId, 16)).bytes)
self.svcId_rx = bytearray(uuid.UUID(str(_VoidPtrToUUIDString(svcId, 16))).bytes)
if self.target and self.target.Connected:
try:
if subscribe:
self.logger.debug("try to subscribe")
self.rx.StartNotify(cbfunct=self.receivedNotificationCB,
reply_handler=self.subscribeSuccessCb,
error_handler=self.subscribeErrorCb,
timeout=bleSubscribeTimeoutSec)
else:
self.logger.debug("try to unsubscribe")
self.rx.StopNotify(reply_handler=self.unsubscribeSuccessCb,
error_handler=self.unsubscribeErrorCb,
timeout=bleSubscribeTimeoutSec)
except:
self.logger.debug(traceback.format_exc())
self.logger.debug("(un)subscribe error")
result = True
else:
self.logger.warning("WARNING: peripheral is no longer connected.")
return result
def GetBleEvent(self):
""" Called by WeaveDeviceMgr.py on behalf of Weave to retrieve a queued message."""
if not self.weave_queue.empty():
ev = self.weave_queue.get()
if isinstance(ev, BleRxEvent):
eventStruct = BleRxEventStruct.fromBleRxEvent(ev)
return cast( pointer(eventStruct), c_void_p).value
elif isinstance(ev, BleTxEvent):
eventStruct = BleTxEventStruct.fromBleTxEvent(ev)
return cast( pointer(eventStruct), c_void_p).value
elif isinstance(ev, BleSubscribeEvent):
eventStruct = BleSubscribeEventStruct.fromBleSubscribeEvent(ev)
return cast( pointer(eventStruct), c_void_p).value
elif isinstance(ev, BleDisconnectEvent):
eventStruct = BleDisconnectEventStruct.fromBleDisconnectEvent(ev)
return cast( pointer(eventStruct), c_void_p).value
return None
def ble_debug_log(self, line):
args = self.ParseInputLine(line)
if int(args[0]) == 1:
self.logger.setLevel(logging.DEBUG)
self.logger.debug("current logging level is debug")
else:
self.logger.setLevel(logging.INFO)
self.logger.debug("current logging level is info")
return True
def CloseBle(self, connObj):
""" Called by Weave to close the BLE connection."""
# Workaround: comment out disconnect because of hang when close, plz call disconnect explicitly after close
# Need to fix it
# self.disconnect()
if self.devMgr:
dcEvent = BleDisconnectEvent(BLE_ERROR_REMOTE_DEVICE_DISCONNECTED)
self.weave_queue.put(dcEvent)
self.devMgr.DriveBleIO()
return True
|
test_submit_multithreaded.py | import cv2
import numpy as np
import pandas as pd
import threading
import Queue
import io
import tensorflow as tf
from tensorflow.python.lib.io import file_io
from tqdm import tqdm
import params
rles = []
def test(output_dir, weights_dir, test_dir):
    """
    Tests the given model and generates a csv containing segmentation masks compressed using rle
    Parameters:
    output_dir -- directory path to store output csv
    weights_dir -- directory path to load weights of given model
    test_dir -- directory path containing test images
    """
    batch_size = params.batch_size
    model = params.model_factory()
    # Copy file from gcs to local directory
    with file_io.FileIO(weights_dir, mode='r') as input_f:
        with file_io.FileIO('weights.hdf5', mode='w+') as output_f:
            output_f.write(input_f.read())
    model.load_weights(filepath='weights.hdf5')
    # Capture the default graph so the predictor thread can run the model
    # (Keras/TF graphs are not implicitly shared across threads).
    graph = tf.get_default_graph()
    with file_io.FileIO(test_dir + '/sample_submission.csv', mode='r') as f:
        csv_bytes = f.read()
    df_test = pd.read_csv(io.BytesIO(csv_bytes))
    # Strip the '.jpg' extension to get bare image ids.
    ids_test = df_test['img'].map(lambda s: s.split('.')[0])
    names = []
    for id in ids_test:
        names.append('{}.jpg'.format(id))
    # Bounded queue: the loader blocks when the predictor falls behind.
    q_size = 10
    q = Queue.Queue(maxsize=q_size)
    t1 = threading.Thread(target=data_loader, name='DataLoader', args=(q, ids_test, test_dir))
    t2 = threading.Thread(target=predictor, name='Predictor', args=(q, len(ids_test), graph, model))
    print('Predicting on {} samples with batch_size = {}...'.format(len(ids_test), batch_size))
    t1.start()
    t2.start()
    # Wait for both threads to finish
    t1.join()
    t2.join()
    print("Generating submission file...")
    # 'rles' is the module-level list the predictor thread fills in order.
    df = pd.DataFrame({'img': names, 'rle_mask': rles})
    # Copy submission file to gcs
    df.to_csv('submission.csv.gz', index=False, compression='gzip')
    with file_io.FileIO('submission.csv.gz', mode='r') as input_f:
        with file_io.FileIO(output_dir + '/submission.csv.gz', mode='w+') as output_f:
            output_f.write(input_f.read())
# https://www.kaggle.com/stainsby/fast-tested-rle
def run_length_encode(mask):
    """
    Encodes given mask using run length encoding.

    Returns a space-separated string of (start, length) pairs with
    1-indexed pixel positions, the format Kaggle submissions expect.

    Fix: the previous version produced malformed (odd-length) RLE when a
    run of foreground touched the first or last pixel of the flattened
    mask; padding with background sentinels handles the borders while
    producing identical output for interior masks.
    """
    inds = mask.flatten()
    # Sentinel zeros guarantee every run has both a rising and a falling edge.
    padded = np.concatenate(([0], inds, [0]))
    # Indices where the value changes mark run starts and ends (1-indexed).
    runs = np.where(padded[1:] != padded[:-1])[0] + 1
    # Convert every second entry from "end position" to "run length".
    runs[1::2] = runs[1::2] - runs[::2]
    rle = ' '.join([str(r) for r in runs])
    return rle
def data_loader(q, ids_test, test_dir):
    """
    Loads images, puts them in a batch, and adds them to a queue.

    Fix: 'input_size' was re-read from the params module inside the
    per-image inner loop even though it is already hoisted before the
    batch loop; the redundant lookup is removed.

    Parameters:
    q -- bounded Queue the predictor thread consumes from
    ids_test -- pandas Series of image ids (without extension)
    test_dir -- directory (GCS or local) containing '<id>.jpg' images
    """
    batch_size = params.batch_size
    input_size = params.input_size
    for start in range(0, len(ids_test), batch_size):
        x_batch = []
        end = min(start + batch_size, len(ids_test))
        ids_test_batch = ids_test[start:end]
        for idx in ids_test_batch.values:
            with file_io.FileIO(test_dir + '/{}.jpg'.format(idx), mode='r') as f:
                image_bytes = f.read()
            # Decode the JPEG bytes in memory (no temp file needed).
            nparr = np.fromstring(image_bytes, np.uint8)
            img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
            img = cv2.resize(img, (input_size, input_size))
            x_batch.append(img)
        # Normalize pixel values into [0, 1] for the network.
        x_batch = np.array(x_batch, np.float32) / 255
        q.put(x_batch)
def predictor(q, len_ids_test, graph, model):
    """
    Predicts segmentation masks of given images.

    Consumes batches from the queue, runs the model, and appends the
    run-length-encoded mask of every image to the module-level 'rles'
    list in batch order.
    """
    batch_size = params.batch_size
    orig_width = params.orig_width
    orig_height = params.orig_height
    for i in tqdm(range(0, len_ids_test, batch_size)):
        x_batch = q.get()
        # Run inference inside the graph captured on the main thread.
        with graph.as_default():
            preds = model.predict_on_batch(x_batch)
        # Drop the singleton channel axis: (N, H, W, 1) -> (N, H, W).
        preds = np.squeeze(preds, axis=3)
        for pred in preds:
            # Resize the network output back to the original image size.
            prob = cv2.resize(pred, (orig_width, orig_height))
            mask = prob > params.threshold
            rle = run_length_encode(mask)
            rles.append(rle)
if __name__ == '__main__':
    import argparse

    # Command-line entry point: wire GCS/local paths into test().
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--job-dir',
        help='GCS or local paths to test data',
        required=True)
    parser.add_argument(
        '--weights-dir',
        help='GCS location or local paths to weights',
        required=True)
    parser.add_argument(
        '--output-dir',
        help='GCS location to write output',
        required=True)
    cli = parser.parse_args()

    # Same call as before: output dir, weights dir, then the job dir
    # (which holds the test data).
    test(cli.output_dir, cli.weights_dir, cli.job_dir)
|
thread_unsafe.py | # author: leisurexi
# date: 2021/1/24
# file name: thread_unsafe.py
# Python 线程不安全示例
import threading
# Shared counter both threads mutate (intentionally unprotected to
# demonstrate the race).
n = 0


def foo():
    """Increment the shared global counter 50000 times."""
    global n
    for _ in range(50000):
        n = n + 1
if __name__ == '__main__':
    # Two threads increment the shared counter concurrently; because
    # 'n += 1' is a read-modify-write and not atomic, the printed total
    # may be less than 100000 — that lost-update is the point of the demo.
    thread1 = threading.Thread(target=foo)
    thread2 = threading.Thread(target=foo)
    thread1.start()
    thread2.start()
    # Wait for both workers before reading the result.
    thread1.join()
    thread2.join()
    print(n)
|
ProxyRefreshSchedule.py | # -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
-------------------------------------------------
File Name: ProxyRefreshSchedule.py
Description : 代理定时刷新
Author : JHao
date: 2016/12/4
-------------------------------------------------
Change Activity:
2016/12/4: 代理定时刷新
2017/03/06: 使用LogHandler添加日志
2017/04/26: raw_proxy_queue验证通过但useful_proxy_queue中已经存在的代理不在放入
-------------------------------------------------
"""
import sys
import time
import logging
from threading import Thread
# 使用后台调度,不使用阻塞式~
from apscheduler.schedulers.background import BackgroundScheduler as Sch
sys.path.append('../')
from Util.utilFunction import validUsefulProxy
from Manager.ProxyManager import ProxyManager
from Util.LogHandler import LogHandler
__author__ = 'JHao'
logging.basicConfig()
class ProxyRefreshSchedule(ProxyManager):
    """
    Periodic proxy refresher: validates proxies from the raw queue and
    promotes the usable ones into the useful queue.
    """
    def __init__(self):
        ProxyManager.__init__(self)
        # Dedicated log channel for the refresh schedule.
        self.log = LogHandler('refresh_schedule')

    def validProxy(self):
        """
        Validate the proxies in raw_proxy_queue and put the usable ones
        into useful_proxy_queue.
        :return:
        """
        self.db.changeTable(self.raw_proxy_queue)
        raw_proxy_item = self.db.pop()
        self.log.info('ProxyRefreshSchedule: %s start validProxy' % time.ctime())
        # Cache the proxies already in the useful queue so duplicates are
        # not re-validated (cuts down repeated work).
        remaining_proxies = self.getAll()
        while raw_proxy_item:
            raw_proxy = raw_proxy_item.get('proxy')
            if isinstance(raw_proxy, bytes):
                # Python 3 compatibility: the store may return bytes.
                raw_proxy = raw_proxy.decode('utf8')
            if (raw_proxy not in remaining_proxies) and validUsefulProxy(raw_proxy):
                self.db.changeTable(self.useful_proxy_queue)
                self.db.put(raw_proxy)
                self.log.info('ProxyRefreshSchedule: %s validation pass' % raw_proxy)
            else:
                self.log.info('ProxyRefreshSchedule: %s validation fail' % raw_proxy)
            # Switch back to the raw queue and fetch the next candidate.
            self.db.changeTable(self.raw_proxy_queue)
            raw_proxy_item = self.db.pop()
            remaining_proxies = self.getAll()
        self.log.info('ProxyRefreshSchedule: %s validProxy complete' % time.ctime())
def refreshPool():
    """Run one proxy-validation pass on a fresh scheduler instance."""
    worker = ProxyRefreshSchedule()
    worker.validProxy()
def batch_refresh(process_num=30):
    """Validate newly fetched proxies using *process_num* daemon threads,
    waiting for all of them to finish."""
    workers = []
    for _ in range(process_num):
        worker = Thread(target=refreshPool, args=())
        worker.daemon = True
        worker.start()
        workers.append(worker)
    # Block until every validation worker has drained its share.
    for worker in workers:
        worker.join()
def fetch_all():
    """Fetch a fresh batch of proxies into the raw pool."""
    manager = ProxyRefreshSchedule()
    manager.refresh()
def run():
    """Start the background scheduler and keep the process alive.

    Fetches new proxies every 5 minutes and re-validates the pool every
    minute; one fetch is performed immediately at startup.
    """
    scheduler = Sch()
    scheduler.add_job(fetch_all, 'interval', minutes=5)
    scheduler.add_job(batch_refresh, "interval", minutes=1)
    scheduler.start()
    fetch_all()
    # Jobs run on background threads; idle here forever so they keep running.
    while True:
        time.sleep(1)
# Run the proxy refresh scheduler when executed as a script.
if __name__ == '__main__':
    run()
|
clang_format.py | #!/usr/bin/env python
"""
A script that provides:
1. Ability to grab binaries where possible from LLVM.
2. Ability to download binaries from MongoDB cache for clang-format.
3. Validates clang-format is the right version.
4. Has support for checking which files are to be checked.
5. Supports validating and updating a set of files to the right coding style.
"""
from __future__ import print_function, absolute_import
import Queue
import difflib
import glob
import itertools
import os
import re
import shutil
import string
import subprocess
import sys
import tarfile
import tempfile
import threading
import time
import urllib2
from distutils import spawn
from optparse import OptionParser
from multiprocessing import cpu_count
# Get relative imports to work when the package is not installed on the PYTHONPATH.
if __name__ == "__main__" and __package__ is None:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(os.path.realpath(__file__)))))
from buildscripts import moduleconfig
##############################################################################
#
# Constants for clang-format
#
#
# Expected version of clang-format
# (compared against the output of 'clang-format --version')
CLANG_FORMAT_VERSION = "3.8.0"
CLANG_FORMAT_SHORT_VERSION = "3.8"
# Name of clang-format as a binary
CLANG_FORMAT_PROGNAME = "clang-format"
# URL location of the "cached" copy of clang-format to download
# for users which do not have clang-format installed
CLANG_FORMAT_HTTP_LINUX_CACHE = "https://s3.amazonaws.com/boxes.10gen.com/build/clang-format-3.8-rhel55.tar.gz"
CLANG_FORMAT_HTTP_DARWIN_CACHE = "https://s3.amazonaws.com/boxes.10gen.com/build/clang%2Bllvm-3.8.0-x86_64-apple-darwin.tar.xz"
# Path in the tarball to the clang-format binary
# ($version and $tar_path are filled in by get_tar_path)
CLANG_FORMAT_SOURCE_TAR_BASE = string.Template("clang+llvm-$version-$tar_path/bin/" + CLANG_FORMAT_PROGNAME)
# Path to the modules in the mongodb source tree
# Has to match the string in SConstruct
MODULE_DIR = "src/mongo/db/modules"
##############################################################################
def callo(args):
    """Run *args* as a subprocess and return its captured stdout."""
    output = subprocess.check_output(args)
    return output
def get_tar_path(version, tar_path):
    """Build the in-tarball path of the clang-format binary for the given
    LLVM release version and platform tarball suffix."""
    return CLANG_FORMAT_SOURCE_TAR_BASE.substitute(version=version, tar_path=tar_path)
def extract_clang_format(tar_path):
    """Extract only the clang-format binary from the given tarball into
    the current working directory (preserving its in-archive path)."""
    # Extract just the clang-format binary
    # On OSX, we shell out to tar because tarfile doesn't support xz compression
    if sys.platform == 'darwin':
        subprocess.call(['tar', '-xzf', tar_path, '*clang-format*'])
    # Otherwise we use tarfile because some versions of tar don't support wildcards without
    # a special flag
    else:
        tarfp = tarfile.open(tar_path)
        for name in tarfp.getnames():
            if name.endswith('clang-format'):
                tarfp.extract(name)
        tarfp.close()
def get_clang_format_from_cache_and_extract(url, tarball_ext):
    """Get clang-format from mongodb's cache and extract the tarball.

    Downloads the tarball from *url* into the system temp directory
    (retrying up to 5 times) and extracts the clang-format binary.

    Fix: the urllib2 response was never closed, leaking a socket on
    every attempt; the response is now closed even when the local
    write fails.
    """
    dest_dir = tempfile.gettempdir()
    temp_tar_file = os.path.join(dest_dir, "temp.tar" + tarball_ext)
    # Download from file
    print("Downloading clang-format %s from %s, saving to %s" % (CLANG_FORMAT_VERSION,
            url, temp_tar_file))
    # Retry download up to 5 times.
    num_tries = 5
    for attempt in range(num_tries):
        try:
            resp = urllib2.urlopen(url)
            try:
                with open(temp_tar_file, 'wb') as f:
                    f.write(resp.read())
            finally:
                # Always release the HTTP connection, including on write errors.
                resp.close()
            break
        except urllib2.URLError:
            # Re-raise only once all retries are exhausted.
            if attempt == num_tries - 1:
                raise
            continue
    extract_clang_format(temp_tar_file)
def get_clang_format_from_darwin_cache(dest_file):
    """Download the darwin clang-format build from MongoDB's S3 cache
    (not llvm.org — see CLANG_FORMAT_HTTP_DARWIN_CACHE), unpack the
    tarball, and move clang-format to *dest_file*.
    """
    get_clang_format_from_cache_and_extract(CLANG_FORMAT_HTTP_DARWIN_CACHE, ".xz")
    # Destination Path
    shutil.move(get_tar_path(CLANG_FORMAT_VERSION, "x86_64-apple-darwin"), dest_file)
def get_clang_format_from_linux_cache(dest_file):
    """Get clang-format from mongodb's cache, unpack the tarball, and
    move the binary to *dest_file*.
    """
    get_clang_format_from_cache_and_extract(CLANG_FORMAT_HTTP_LINUX_CACHE, ".gz")
    # Destination Path
    # (the linux tarball places the binary under build/bin/)
    shutil.move("build/bin/clang-format", dest_file)
class ClangFormat(object):
    """Class encapsulates finding a suitable copy of clang-format,
    and linting/formating an individual file.

    Search order: explicit path argument, MONGO_CLANG_FORMAT env var,
    PATH (version-suffixed names first), Windows Program Files, and
    finally a download into *cache_dir*.
    """
    def __init__(self, path, cache_dir):
        self.path = None
        clang_format_progname_ext = ""
        if sys.platform == "win32":
            clang_format_progname_ext += ".exe"
        # Check the clang-format the user specified
        if path is not None:
            if os.path.isfile(path):
                self.path = path
            else:
                print("WARNING: Could not find clang-format %s" % (path))
        # Check the environment variable
        if "MONGO_CLANG_FORMAT" in os.environ:
            self.path = os.environ["MONGO_CLANG_FORMAT"]
            if self.path and not self._validate_version():
                self.path = None
        # Check the users' PATH environment variable now
        if self.path is None:
            # Check for various versions staring with binaries with version specific suffixes in the
            # user's path
            programs = [
                CLANG_FORMAT_PROGNAME + "-" + CLANG_FORMAT_VERSION,
                CLANG_FORMAT_PROGNAME + "-" + CLANG_FORMAT_SHORT_VERSION,
                CLANG_FORMAT_PROGNAME,
            ]
            if sys.platform == "win32":
                for i in range(len(programs)):
                    programs[i] += '.exe'
            for program in programs:
                self.path = spawn.find_executable(program)
                if self.path:
                    if not self._validate_version():
                        self.path = None
                    else:
                        break
            # If Windows, try to grab it from Program Files
            # Check both native Program Files and WOW64 version
            if sys.platform == "win32":
                programfiles = [
                    os.environ["ProgramFiles"],
                    os.environ["ProgramFiles(x86)"],
                ]
                for programfile in programfiles:
                    win32bin = os.path.join(programfile, "LLVM\\bin\\clang-format.exe")
                    if os.path.exists(win32bin):
                        self.path = win32bin
                        break
        # Have not found it yet, download it from the web
        if self.path is None:
            if not os.path.isdir(cache_dir):
                os.makedirs(cache_dir)
            self.path = os.path.join(cache_dir, CLANG_FORMAT_PROGNAME + "-" + CLANG_FORMAT_VERSION + clang_format_progname_ext)
            # Download a new version if the cache is empty or stale
            if not os.path.isfile(self.path) or not self._validate_version():
                if sys.platform.startswith("linux"):
                    get_clang_format_from_linux_cache(self.path)
                elif sys.platform == "darwin":
                    get_clang_format_from_darwin_cache(self.path)
                else:
                    print("ERROR: clang-format.py does not support downloading clang-format " +
                        " on this platform, please install clang-format " + CLANG_FORMAT_VERSION)
        # Validate we have the correct version
        # We only can fail here if the user specified a clang-format binary and it is the wrong
        # version
        if not self._validate_version():
            print("ERROR: exiting because of previous warning.")
            sys.exit(1)
        # Serializes diff output from concurrent lint workers.
        self.print_lock = threading.Lock()

    def _validate_version(self):
        """Validate clang-format is the expected version
        """
        cf_version = callo([self.path, "--version"])
        if CLANG_FORMAT_VERSION in cf_version:
            return True
        print("WARNING: clang-format found in path, but incorrect version found at " +
            self.path + " with version: " + cf_version)
        return False

    def _lint(self, file_name, print_diff):
        """Check the specified file has the correct format.

        Returns True when the file already matches clang-format's output;
        when print_diff is set, a unified diff is printed on mismatch.
        """
        with open(file_name, 'rb') as original_text:
            original_file = original_text.read()
        # Get formatted file as clang-format would format the file
        formatted_file = callo([self.path, "--style=file", file_name])
        if original_file != formatted_file:
            if print_diff:
                original_lines = original_file.splitlines()
                formatted_lines = formatted_file.splitlines()
                result = difflib.unified_diff(original_lines, formatted_lines)
                # Take a lock to ensure diffs do not get mixed when printed to the screen
                with self.print_lock:
                    print("ERROR: Found diff for " + file_name)
                    print("To fix formatting errors, run %s --style=file -i %s" %
                        (self.path, file_name))
                    for line in result:
                        print(line.rstrip())
            return False
        return True

    def lint(self, file_name):
        """Check the specified file has the correct format
        """
        return self._lint(file_name, print_diff=True)

    def format(self, file_name):
        """Update the format of the specified file in place.

        Returns True when the file is (or has been made) correctly
        formatted."""
        if self._lint(file_name, print_diff=False):
            return True
        # Update the file with clang-format
        formatted = not subprocess.call([self.path, "--style=file", "-i", file_name])
        # Version 3.8 generates files like foo.cpp~RF83372177.TMP when it formats foo.cpp
        # on Windows, we must clean these up
        if sys.platform == "win32":
            glob_pattern = file_name + "*.TMP"
            for fglob in glob.glob(glob_pattern):
                os.unlink(fglob)
        return formatted
def parallel_process(items, func):
    """Run a set of work items to completion.

    Fans *items* out to one worker thread per CPU, applying *func* to
    each. Returns False as soon as any call returns falsy (remaining
    work is abandoned), True when every item succeeded.
    """
    try:
        cpus = cpu_count()
    except NotImplementedError:
        cpus = 1
    task_queue = Queue.Queue()
    # Use a list so that worker function will capture this variable
    pp_event = threading.Event()
    pp_result = [True]
    pp_lock = threading.Lock()
    def worker():
        """Worker thread to process work items in parallel
        """
        # pp_event doubles as the "stop" flag: it is set on queue
        # exhaustion or on the first failure.
        while not pp_event.is_set():
            try:
                item = task_queue.get_nowait()
            except Queue.Empty:
                # if the queue is empty, exit the worker thread
                pp_event.set()
                return
            try:
                ret = func(item)
            finally:
                # Tell the queue we finished with the item
                task_queue.task_done()
            # Return early if we fail, and signal we are done
            if not ret:
                with pp_lock:
                    pp_result[0] = False
                pp_event.set()
                return
    # Enqueue all the work we want to process
    for item in items:
        task_queue.put(item)
    # Process all the work
    threads = []
    for cpu in range(cpus):
        thread = threading.Thread(target=worker)
        thread.daemon = True
        thread.start()
        threads.append(thread)
    # Wait for the threads to finish
    # Loop with a timeout so that we can process Ctrl-C interrupts
    # Note: On Python 2.6 wait always returns None so we check is_set also,
    # This works because we only set the event once, and never reset it
    while not pp_event.wait(1) and not pp_event.is_set():
        time.sleep(1)
    for thread in threads:
        thread.join()
    return pp_result[0]
def get_base_dir():
    """Get the base directory for mongo repo.

    This script assumes that it is running in buildscripts/, and uses
    that to find the base directory.

    Fix: the bare 'except:' also swallowed KeyboardInterrupt and
    SystemExit; only the failures git can actually produce are caught.
    """
    try:
        return subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).rstrip()
    except (subprocess.CalledProcessError, OSError):
        # We are not in a valid git directory (CalledProcessError) or git
        # is not installed (OSError). Use the script path instead.
        return os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def get_repos():
    """Return a Repo for the mongo tree plus one for every enterprise
    module directory found under MODULE_DIR."""
    base_dir = get_base_dir()
    # Get a list of modules
    # TODO: how do we filter rocks, does it matter?
    mongo_modules = moduleconfig.discover_module_directories(
        os.path.join(base_dir, MODULE_DIR), None)
    paths = [os.path.join(base_dir, MODULE_DIR, module) for module in mongo_modules]
    paths.append(base_dir)
    repos = []
    for repo_path in paths:
        repos.append(Repo(repo_path))
    return repos
class Repo(object):
"""Class encapsulates all knowledge about a git repository, and its metadata
to run clang-format.
"""
def __init__(self, path):
    # path: where the caller located the repo; root: git's own notion of
    # the top-level directory (queried once, may differ if path is nested).
    self.path = path
    self.root = self._get_root()
def _callgito(self, args):
    """Run git against this repository and return its captured output."""
    # '--git-dir' + '--work-tree' emulate 'git -C <path>' so git versions
    # older than 1.8.5 are still supported regardless of the current
    # working directory.
    base_cmd = ['git', '--git-dir', os.path.join(self.path, ".git"),
                '--work-tree', self.path]
    return callo(base_cmd + args)
def _callgit(self, args):
"""Call git for this repository without capturing output
This is designed to be used when git returns non-zero exit codes.
"""
# These two flags are the equivalent of -C in newer versions of Git
# but we use these to support versions pre 1.8.5 but it depends on the command
# and what the current directory is
return subprocess.call(['git', '--git-dir', os.path.join(self.path, ".git"),
'--work-tree', self.path] + args)
def _get_local_dir(self, path):
"""Get a directory path relative to the git root directory
"""
if os.path.isabs(path):
return os.path.relpath(path, self.root)
return path
def get_candidates(self, candidates):
"""Get the set of candidate files to check by querying the repository
Returns the full path to the file for clang-format to consume.
"""
if candidates is not None and len(candidates) > 0:
candidates = [self._get_local_dir(f) for f in candidates]
valid_files = list(set(candidates).intersection(self.get_candidate_files()))
else:
valid_files = list(self.get_candidate_files())
# Get the full file name here
valid_files = [os.path.normpath(os.path.join(self.root, f)) for f in valid_files]
return valid_files
def get_root(self):
"""Get the root directory for this repository
"""
return self.root
def _get_root(self):
"""Gets the root directory for this repository from git
"""
gito = self._callgito(['rev-parse', '--show-toplevel'])
return gito.rstrip()
def _git_ls_files(self, cmd):
"""Run git-ls-files and filter the list of files to a valid candidate list
"""
gito = self._callgito(cmd)
# This allows us to pick all the interesting files
# in the mongo and mongo-enterprise repos
file_list = [line.rstrip()
for line in gito.splitlines()
if (line.startswith("jstests") or line.startswith("src"))
and not line.startswith("src/third_party/")
and not line.startswith("src/mongo/gotools/")]
files_match = re.compile('\\.(h|cpp|js)$')
file_list = [a for a in file_list if files_match.search(a)]
return file_list
def get_candidate_files(self):
"""Query git to get a list of all files in the repo to consider for analysis
"""
return self._git_ls_files(["ls-files", "--cached"])
def get_working_tree_candidate_files(self):
"""Query git to get a list of all files in the working tree to consider for analysis
"""
return self._git_ls_files(["ls-files", "--cached", "--others"])
def get_working_tree_candidates(self):
"""Get the set of candidate files to check by querying the repository
Returns the full path to the file for clang-format to consume.
"""
valid_files = list(self.get_working_tree_candidate_files())
# Get the full file name here
valid_files = [os.path.normpath(os.path.join(self.root, f)) for f in valid_files]
# Filter out files that git thinks exist but were removed.
valid_files = [f for f in valid_files if os.path.exists(f)]
return valid_files
def is_detached(self):
"""Is the current working tree in a detached HEAD state?
"""
# symbolic-ref returns 1 if the repo is in a detached HEAD state
return self._callgit(["symbolic-ref", "--quiet", "HEAD"])
def is_ancestor(self, parent, child):
"""Is the specified parent hash an ancestor of child hash?
"""
# merge base returns 0 if parent is an ancestor of child
return not self._callgit(["merge-base", "--is-ancestor", parent, child])
def is_commit(self, sha1):
"""Is the specified hash a valid git commit?
"""
# cat-file -e returns 0 if it is a valid hash
return not self._callgit(["cat-file", "-e", "%s^{commit}" % sha1])
def is_working_tree_dirty(self):
"""Does the current working tree have changes?
"""
# diff returns 1 if the working tree has local changes
return self._callgit(["diff", "--quiet"])
def does_branch_exist(self, branch):
"""Does the branch exist?
"""
# rev-parse returns 0 if the branch exists
return not self._callgit(["rev-parse", "--verify", branch])
def get_merge_base(self, commit):
"""Get the merge base between 'commit' and HEAD
"""
return self._callgito(["merge-base", "HEAD", commit]).rstrip()
def get_branch_name(self):
"""Get the current branch name, short form
This returns "master", not "refs/head/master"
Will not work if the current branch is detached
"""
branch = self.rev_parse(["--abbrev-ref", "HEAD"])
if branch == "HEAD":
raise ValueError("Branch is currently detached")
return branch
def add(self, command):
"""git add wrapper
"""
return self._callgito(["add"] + command)
def checkout(self, command):
"""git checkout wrapper
"""
return self._callgito(["checkout"] + command)
def commit(self, command):
"""git commit wrapper
"""
return self._callgito(["commit"] + command)
def diff(self, command):
"""git diff wrapper
"""
return self._callgito(["diff"] + command)
def log(self, command):
"""git log wrapper
"""
return self._callgito(["log"] + command)
def rev_parse(self, command):
"""git rev-parse wrapper
"""
return self._callgito(["rev-parse"] + command).rstrip()
def rm(self, command):
"""git rm wrapper
"""
return self._callgito(["rm"] + command)
def show(self, command):
"""git show wrapper
"""
return self._callgito(["show"] + command)
def get_list_from_lines(lines):
    """Split a multi-line string into a list of right-stripped lines."""
    return list(map(str.rstrip, lines.splitlines()))
def get_files_to_check_working_tree():
    """Get a list of files to check from the working tree.

    This will pick up files not managed by git.
    """
    candidates = []
    for repo in get_repos():
        candidates.extend(repo.get_working_tree_candidates())
    return candidates
def get_files_to_check():
    """Get a list of files that need to be checked,
    based on which files are managed by git.
    """
    candidates = []
    for repo in get_repos():
        candidates.extend(repo.get_candidates(None))
    return candidates
def get_files_to_check_from_patch(patches):
    """Take patch files generated by git diff, and scan them for a list of files to check.

    'patches' is an iterable of patch file paths; returns the files named in
    any patch that are valid clang-format candidates in some repo.
    """
    candidates = []

    # Matches the "diff --git a/<path> b/<path>" header that precedes each
    # file's hunks in a git-generated patch.
    check = re.compile(r"^diff --git a\/([\w\/\.\-]+) b\/[\w\/\.\-]+")

    lines = []
    for patch in patches:
        # BUG FIX: open in text mode. The old "rb" mode yielded bytes lines,
        # and matching a str pattern against bytes raises TypeError on
        # Python 3. The regex is anchored at line start, so universal-newline
        # translation does not change what it matches.
        with open(patch, "r") as infile:
            lines += infile.readlines()

    candidates = [check.match(line).group(1) for line in lines if check.match(line)]

    repos = get_repos()

    valid_files = list(
        itertools.chain.from_iterable([r.get_candidates(candidates) for r in repos]))

    return valid_files
def _get_build_dir():
    """Return scons' build directory, used to cache a downloaded clang-format."""
    base_dir = get_base_dir()
    return os.path.join(base_dir, "build")
def _lint_files(clang_format, files):
    """Lint a list of files with clang-format; exits non-zero on any mismatch."""
    runner = ClangFormat(clang_format, _get_build_dir())

    abs_files = [os.path.abspath(f) for f in files]
    if not parallel_process(abs_files, runner.lint):
        print("ERROR: Code Style does not match coding style")
        sys.exit(1)
def lint_patch(clang_format, infile):
    """Lint patch command entry point."""
    files_to_check = get_files_to_check_from_patch(infile)

    # A patch may touch only files we do not need to check; that is fine.
    if files_to_check:
        _lint_files(clang_format, files_to_check)
def lint(clang_format):
    """Lint files command entry point."""
    _lint_files(clang_format, get_files_to_check())

    return True
def lint_all(clang_format):
    """Lint files command entry point based on the working tree."""
    _lint_files(clang_format, get_files_to_check_working_tree())

    return True
def _format_files(clang_format, files):
    """Format a list of files with clang-format; exits non-zero on failure."""
    runner = ClangFormat(clang_format, _get_build_dir())

    abs_files = [os.path.abspath(f) for f in files]
    if not parallel_process(abs_files, runner.format):
        print("ERROR: failed to format files")
        sys.exit(1)
def format_func(clang_format):
    """Format files command entry point."""
    _format_files(clang_format, get_files_to_check())
def reformat_branch(clang_format, commit_prior_to_reformat, commit_after_reformat):
    """Reformat a branch made before a clang-format run.

    Replays every commit the user made after 'commit_prior_to_reformat' on
    top of 'commit_after_reformat', clang-formatting each commit's files,
    and leaves the result on a new branch named "<branch>-reformatted".
    The original branch is left unchanged. Raises ValueError when any
    precondition (valid commits, clean tree, correct merge base, ...) fails.
    """
    clang_format = ClangFormat(clang_format, _get_build_dir())

    if os.getcwd() != get_base_dir():
        raise ValueError("reformat-branch must be run from the repo root")

    if not os.path.exists("buildscripts/clang_format.py"):
        raise ValueError("reformat-branch is only supported in the mongo repo")

    repo = Repo(get_base_dir())

    # Validate that the user passed valid commits.
    if not repo.is_commit(commit_prior_to_reformat):
        raise ValueError("Commit Prior to Reformat '%s' is not a valid commit in this repo" %
                         commit_prior_to_reformat)

    if not repo.is_commit(commit_after_reformat):
        raise ValueError("Commit After Reformat '%s' is not a valid commit in this repo" %
                         commit_after_reformat)

    if not repo.is_ancestor(commit_prior_to_reformat, commit_after_reformat):
        raise ValueError(("Commit Prior to Reformat '%s' is not a valid ancestor of Commit After" +
                          " Reformat '%s' in this repo") % (commit_prior_to_reformat, commit_after_reformat))

    # Validate the user is on a local branch that has the right merge base.
    if repo.is_detached():
        raise ValueError("You must not run this script in a detached HEAD state")

    # Validate the user has no pending changes.
    if repo.is_working_tree_dirty():
        raise ValueError("Your working tree has pending changes. You must have a clean working tree before proceeding.")

    merge_base = repo.get_merge_base(commit_prior_to_reformat)

    if not merge_base == commit_prior_to_reformat:
        raise ValueError("Please rebase to '%s' and resolve all conflicts before running this script" % (commit_prior_to_reformat))

    # We assume the target branch is master; it could be a different branch if needed for testing.
    merge_base = repo.get_merge_base("master")

    if not merge_base == commit_prior_to_reformat:
        raise ValueError("This branch appears to already have advanced too far through the merge process")

    # Everything looks good, so let's start going through all the commits.
    branch_name = repo.get_branch_name()
    new_branch = "%s-reformatted" % branch_name

    if repo.does_branch_exist(new_branch):
        raise ValueError("The branch '%s' already exists. Please delete the branch '%s', or rename the current branch." % (new_branch, new_branch))

    # Oldest first, so replay order matches the original history.
    commits = get_list_from_lines(repo.log(["--reverse", "--pretty=format:%H", "%s..HEAD" % commit_prior_to_reformat]))

    previous_commit_base = commit_after_reformat

    # Only these source types are clang-formatted.
    files_match = re.compile('\\.(h|cpp|js)$')

    # Go through all the commits the user made on the local branch and migrate to a new branch
    # that is based on post_reformat commits instead.
    for commit_hash in commits:
        repo.checkout(["--quiet", commit_hash])

        deleted_files = []

        # Format each of the files by checking out just a single commit from the user's branch.
        commit_files = get_list_from_lines(repo.diff(["HEAD~", "--name-only"]))

        for commit_file in commit_files:
            # Format each file needed if it was not deleted.
            if not os.path.exists(commit_file):
                print("Skipping file '%s' since it has been deleted in commit '%s'" % (
                    commit_file, commit_hash))
                deleted_files.append(commit_file)
                continue

            if files_match.search(commit_file):
                clang_format.format(commit_file)
            else:
                print("Skipping file '%s' since it is not a file clang_format should format" %
                      commit_file)

        # Check if anything needed reformatting, and if so amend the commit.
        if not repo.is_working_tree_dirty():
            print("Commit %s needed no reformatting" % commit_hash)
        else:
            repo.commit(["--all", "--amend", "--no-edit"])

        # Rebase our new commit on top of the post-reformat commit.
        previous_commit = repo.rev_parse(["HEAD"])

        # Checkout the new branch with the reformatted commits.
        # Note: we will not name it as a branch until we are done with all commits on the local branch.
        repo.checkout(["--quiet", previous_commit_base])

        # Copy each file from the reformatted commit on top of the post-reformat base.
        diff_files = get_list_from_lines(repo.diff(["%s~..%s" % (previous_commit, previous_commit),
                                                    "--name-only"]))

        for diff_file in diff_files:
            # If the file was deleted in the commit we are reformatting, we need to delete it again.
            if diff_file in deleted_files:
                repo.rm([diff_file])
                continue

            # The file has been added or modified, continue as normal.
            file_contents = repo.show(["%s:%s" % (previous_commit, diff_file)])

            root_dir = os.path.dirname(diff_file)
            if root_dir and not os.path.exists(root_dir):
                os.makedirs(root_dir)

            with open(diff_file, "w+") as new_file:
                new_file.write(file_contents)

            repo.add([diff_file])

        # Create a new commit onto the clang-formatted branch.
        repo.commit(["--reuse-message=%s" % previous_commit])

        previous_commit_base = repo.rev_parse(["HEAD"])

    # Create a new branch to mark the hashes we have been using.
    repo.checkout(["-b", new_branch])

    print("reformat-branch is done running.\n")
    print("A copy of your branch has been made named '%s', and formatted with clang-format.\n" % new_branch)
    print("The original branch has been left unchanged.")
    print("The next step is to rebase the new branch on 'master'.")
def usage():
    """Print a short usage summary listing the supported sub-commands."""
    commands = "lint, lint-all, lint-patch, format, reformat-branch"
    print("clang-format.py supports 5 commands [ %s]." % commands)
def main():
    """Main entry point.

    Parses the command line and dispatches to one of the five sub-commands:
    lint, lint-all, lint-patch, format, reformat-branch.
    """
    parser = OptionParser()
    parser.add_option("-c", "--clang-format", type="string", dest="clang_format")

    (options, args) = parser.parse_args(args=sys.argv)

    if len(args) > 1:
        command = args[1]

        if command == "lint":
            lint(options.clang_format)
        elif command == "lint-all":
            lint_all(options.clang_format)
        elif command == "lint-patch":
            lint_patch(options.clang_format, args[2:])
        elif command == "format":
            format_func(options.clang_format)
        elif command == "reformat-branch":
            # BUG FIX: args contains the program name and the command, so two
            # positional parameters require len(args) >= 4. The old check of
            # "len(args) < 3" let a single parameter through and then crashed
            # with an IndexError on args[3] instead of printing the error.
            if len(args) < 4:
                print("ERROR: reformat-branch takes two parameters: commit_prior_to_reformat commit_after_reformat")
                return

            reformat_branch(options.clang_format, args[2], args[3])
        else:
            usage()
    else:
        usage()


if __name__ == "__main__":
    main()
|
engine.py | """
"""
import logging
from logging import Logger
import smtplib
import os
from abc import ABC
from datetime import datetime
from email.message import EmailMessage
from queue import Empty, Queue
from threading import Thread
from typing import Any, Sequence, Type, Dict, List, Optional
from vnpy.event import Event, EventEngine
from .app import BaseApp
from .event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION,
EVENT_ACCOUNT,
EVENT_CONTRACT,
EVENT_LOG
)
from .gateway import BaseGateway
from .object import (
CancelRequest,
LogData,
OrderRequest,
SubscribeRequest,
HistoryRequest,
OrderData,
BarData,
TickData,
TradeData,
PositionData,
AccountData,
ContractData,
Exchange
)
from .setting import SETTINGS
from .utility import get_folder_path, TRADER_DIR
class MainEngine:
    """
    Acts as the core of VN Trader.

    Owns the event engine, all gateways, all function engines and all apps,
    and routes requests (orders, subscriptions, queries) to the right gateway.
    """

    def __init__(self, event_engine: EventEngine = None):
        """Create the core engine, reusing an external EventEngine if given."""
        if event_engine:
            self.event_engine: EventEngine = event_engine
        else:
            self.event_engine = EventEngine()
        self.event_engine.start()

        self.gateways: Dict[str, BaseGateway] = {}
        self.engines: Dict[str, BaseEngine] = {}
        self.apps: Dict[str, BaseApp] = {}
        self.exchanges: List[Exchange] = []

        os.chdir(TRADER_DIR)    # Change working directory
        self.init_engines()     # Initialize function engines

    def add_engine(self, engine_class: Any) -> "BaseEngine":
        """
        Add function engine.
        """
        engine = engine_class(self, self.event_engine)
        self.engines[engine.engine_name] = engine
        return engine

    def add_gateway(self, gateway_class: Type[BaseGateway]) -> BaseGateway:
        """
        Add gateway.
        """
        gateway = gateway_class(self.event_engine)
        self.gateways[gateway.gateway_name] = gateway

        # Add gateway supported exchanges into engine
        for exchange in gateway.exchanges:
            if exchange not in self.exchanges:
                self.exchanges.append(exchange)

        return gateway

    def add_app(self, app_class: Type[BaseApp]) -> "BaseEngine":
        """
        Add app, and register the engine class that drives it.
        """
        app = app_class()
        self.apps[app.app_name] = app

        engine = self.add_engine(app.engine_class)
        return engine

    def init_engines(self) -> None:
        """
        Init all built-in function engines.
        """
        self.add_engine(LogEngine)
        self.add_engine(OmsEngine)
        self.add_engine(EmailEngine)

    def write_log(self, msg: str, source: str = "") -> None:
        """
        Put log event with specific message.
        """
        log = LogData(msg=msg, gateway_name=source)
        event = Event(EVENT_LOG, log)
        self.event_engine.put(event)

    def get_gateway(self, gateway_name: str) -> BaseGateway:
        """
        Return gateway object by name (None if not found, with a log message).
        """
        gateway = self.gateways.get(gateway_name, None)
        if not gateway:
            self.write_log(f"找不到底层接口:{gateway_name}")
        return gateway

    def get_engine(self, engine_name: str) -> "BaseEngine":
        """
        Return engine object by name (None if not found, with a log message).
        """
        engine = self.engines.get(engine_name, None)
        if not engine:
            self.write_log(f"找不到引擎:{engine_name}")
        return engine

    def get_default_setting(self, gateway_name: str) -> Optional[Dict[str, Any]]:
        """
        Get default setting dict of a specific gateway.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            return gateway.get_default_setting()
        return None

    def get_all_gateway_names(self) -> List[str]:
        """
        Get all names of gateways added in main engine.
        """
        return list(self.gateways.keys())

    def get_all_apps(self) -> List[BaseApp]:
        """
        Get all app objects.
        """
        return list(self.apps.values())

    def get_all_exchanges(self) -> List[Exchange]:
        """
        Get all exchanges supported by the added gateways.
        """
        return self.exchanges

    def connect(self, setting: dict, gateway_name: str) -> None:
        """
        Start connection of a specific gateway.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            gateway.connect(setting)

    def subscribe(self, req: SubscribeRequest, gateway_name: str) -> None:
        """
        Subscribe tick data update of a specific gateway.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            gateway.subscribe(req)

    def send_order(self, req: OrderRequest, gateway_name: str) -> str:
        """
        Send new order request to a specific gateway.

        Returns the vt_orderid from the gateway, or "" if the gateway
        is not found.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            return gateway.send_order(req)
        else:
            return ""

    def cancel_order(self, req: CancelRequest, gateway_name: str) -> None:
        """
        Send cancel order request to a specific gateway.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            gateway.cancel_order(req)

    def send_orders(self, reqs: Sequence[OrderRequest], gateway_name: str) -> List[str]:
        """
        Batch version of send_order: returns one vt_orderid per request
        (empty strings when the gateway is not found).
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            return gateway.send_orders(reqs)
        else:
            return ["" for req in reqs]

    def cancel_orders(self, reqs: Sequence[CancelRequest], gateway_name: str) -> None:
        """
        Batch version of cancel_order.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            gateway.cancel_orders(reqs)

    def query_history(self, req: HistoryRequest, gateway_name: str) -> Optional[List[BarData]]:
        """
        Query bar history data from a specific gateway.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            return gateway.query_history(req)
        else:
            return None

    def close(self) -> None:
        """
        Make sure every gateway and app is closed properly before
        programme exit.
        """
        for engine in self.engines.values():
            engine.close()

        # Stop event engine first to prevent new timer event.
        self.event_engine.stop()

        for gateway in self.gateways.values():
            gateway.close()
class BaseEngine(ABC):
    """
    Abstract class for implementing a function engine.
    """

    def __init__(
        self,
        main_engine: MainEngine,
        event_engine: EventEngine,
        engine_name: str,
    ):
        """Store the engine references and the unique engine name."""
        self.main_engine = main_engine
        self.event_engine = event_engine
        self.engine_name = engine_name

    def close(self):
        """Hook for subclasses to release resources on shutdown."""
        pass
class LogEngine(BaseEngine):
    """
    Processes log event and output with logging module.
    """

    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """Configure the "VN Trader" logger from the global SETTINGS."""
        super(LogEngine, self).__init__(main_engine, event_engine, "log")

        # When logging is disabled, skip all setup: no handlers are added
        # and EVENT_LOG is never registered (the attributes below then do
        # not exist on the instance).
        if not SETTINGS["log.active"]:
            return

        self.level: int = SETTINGS["log.level"]

        self.logger: Logger = logging.getLogger("VN Trader")
        self.logger.setLevel(self.level)

        self.formatter = logging.Formatter(
            "%(asctime)s  %(levelname)s: %(message)s"
        )

        # Always add a NullHandler so logging never warns about a
        # handler-less logger even when console/file output is off.
        self.add_null_handler()

        if SETTINGS["log.console"]:
            self.add_console_handler()

        if SETTINGS["log.file"]:
            self.add_file_handler()

        self.register_event()

    def add_null_handler(self) -> None:
        """
        Add null handler for logger.
        """
        null_handler = logging.NullHandler()
        self.logger.addHandler(null_handler)

    def add_console_handler(self) -> None:
        """
        Add console output of log.
        """
        console_handler = logging.StreamHandler()
        console_handler.setLevel(self.level)
        console_handler.setFormatter(self.formatter)
        self.logger.addHandler(console_handler)

    def add_file_handler(self) -> None:
        """
        Add file output of log, one file per day under the "log" folder.
        """
        today_date = datetime.now().strftime("%Y%m%d")
        filename = f"vt_{today_date}.log"
        log_path = get_folder_path("log")
        file_path = log_path.joinpath(filename)

        file_handler = logging.FileHandler(
            file_path, mode="a", encoding="utf8"
        )
        file_handler.setLevel(self.level)
        file_handler.setFormatter(self.formatter)
        self.logger.addHandler(file_handler)

    def register_event(self) -> None:
        """Listen for EVENT_LOG events from the event engine."""
        self.event_engine.register(EVENT_LOG, self.process_log_event)

    def process_log_event(self, event: Event) -> None:
        """
        Process log event: forward the LogData to the logging module.
        """
        log = event.data
        self.logger.log(log.level, log.msg)
class OmsEngine(BaseEngine):
    """
    Provides order management system function for VN Trader.

    Caches the latest tick/order/trade/position/account/contract data
    received from gateway events and exposes query helpers for them.
    """

    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """Set up the data caches and hook into the event engine."""
        super(OmsEngine, self).__init__(main_engine, event_engine, "oms")

        # Latest data keyed by vt_* identifier.
        self.ticks: Dict[str, TickData] = {}
        self.orders: Dict[str, OrderData] = {}
        self.trades: Dict[str, TradeData] = {}
        self.positions: Dict[str, PositionData] = {}
        self.accounts: Dict[str, AccountData] = {}
        self.contracts: Dict[str, ContractData] = {}

        # Subset of self.orders that are still active (cancellable).
        self.active_orders: Dict[str, OrderData] = {}

        self.add_function()
        self.register_event()

    def add_function(self) -> None:
        """Add query function to main engine.

        Note: these helpers are monkey-patched onto the MainEngine instance
        so callers can use e.g. main_engine.get_tick(...) directly.
        """
        self.main_engine.get_tick = self.get_tick
        self.main_engine.get_order = self.get_order
        self.main_engine.get_trade = self.get_trade
        self.main_engine.get_position = self.get_position
        self.main_engine.get_account = self.get_account
        self.main_engine.get_contract = self.get_contract
        self.main_engine.get_all_ticks = self.get_all_ticks
        self.main_engine.get_all_orders = self.get_all_orders
        self.main_engine.get_all_trades = self.get_all_trades
        self.main_engine.get_all_positions = self.get_all_positions
        self.main_engine.get_all_accounts = self.get_all_accounts
        self.main_engine.get_all_contracts = self.get_all_contracts
        self.main_engine.get_all_active_orders = self.get_all_active_orders

    def register_event(self) -> None:
        """Subscribe to every data event the OMS caches."""
        self.event_engine.register(EVENT_TICK, self.process_tick_event)
        self.event_engine.register(EVENT_ORDER, self.process_order_event)
        self.event_engine.register(EVENT_TRADE, self.process_trade_event)
        self.event_engine.register(EVENT_POSITION, self.process_position_event)
        self.event_engine.register(EVENT_ACCOUNT, self.process_account_event)
        self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)

    def process_tick_event(self, event: Event) -> None:
        """Cache the latest tick for its vt_symbol."""
        tick = event.data
        self.ticks[tick.vt_symbol] = tick

    def process_order_event(self, event: Event) -> None:
        """Cache the order and maintain the active-order subset."""
        order = event.data
        self.orders[order.vt_orderid] = order

        # If order is active, then update data in dict.
        if order.is_active():
            self.active_orders[order.vt_orderid] = order
        # Otherwise, pop inactive order from in dict
        elif order.vt_orderid in self.active_orders:
            self.active_orders.pop(order.vt_orderid)

    def process_trade_event(self, event: Event) -> None:
        """Cache the trade for its vt_tradeid."""
        trade = event.data
        self.trades[trade.vt_tradeid] = trade

    def process_position_event(self, event: Event) -> None:
        """Cache the position for its vt_positionid."""
        position = event.data
        self.positions[position.vt_positionid] = position

    def process_account_event(self, event: Event) -> None:
        """Cache the account for its vt_accountid."""
        account = event.data
        self.accounts[account.vt_accountid] = account

    def process_contract_event(self, event: Event) -> None:
        """Cache the contract for its vt_symbol."""
        contract = event.data
        self.contracts[contract.vt_symbol] = contract

    def get_tick(self, vt_symbol: str) -> Optional[TickData]:
        """
        Get latest market tick data by vt_symbol.
        """
        return self.ticks.get(vt_symbol, None)

    def get_order(self, vt_orderid: str) -> Optional[OrderData]:
        """
        Get latest order data by vt_orderid.
        """
        return self.orders.get(vt_orderid, None)

    def get_trade(self, vt_tradeid: str) -> Optional[TradeData]:
        """
        Get trade data by vt_tradeid.
        """
        return self.trades.get(vt_tradeid, None)

    def get_position(self, vt_positionid: str) -> Optional[PositionData]:
        """
        Get latest position data by vt_positionid.
        """
        return self.positions.get(vt_positionid, None)

    def get_account(self, vt_accountid: str) -> Optional[AccountData]:
        """
        Get latest account data by vt_accountid.
        """
        return self.accounts.get(vt_accountid, None)

    def get_contract(self, vt_symbol: str) -> Optional[ContractData]:
        """
        Get contract data by vt_symbol.
        """
        return self.contracts.get(vt_symbol, None)

    def get_all_ticks(self) -> List[TickData]:
        """
        Get all tick data.
        """
        return list(self.ticks.values())

    def get_all_orders(self) -> List[OrderData]:
        """
        Get all order data.
        """
        return list(self.orders.values())

    def get_all_trades(self) -> List[TradeData]:
        """
        Get all trade data.
        """
        return list(self.trades.values())

    def get_all_positions(self) -> List[PositionData]:
        """
        Get all position data.
        """
        return list(self.positions.values())

    def get_all_accounts(self) -> List[AccountData]:
        """
        Get all account data.
        """
        return list(self.accounts.values())

    def get_all_contracts(self) -> List[ContractData]:
        """
        Get all contract data.
        """
        return list(self.contracts.values())

    def get_all_active_orders(self, vt_symbol: str = "") -> List[OrderData]:
        """
        Get all active orders by vt_symbol.

        If vt_symbol is empty, return all active orders.
        """
        if not vt_symbol:
            return list(self.active_orders.values())
        else:
            active_orders = [
                order
                for order in self.active_orders.values()
                if order.vt_symbol == vt_symbol
            ]
            return active_orders
class EmailEngine(BaseEngine):
    """
    Provides email sending function for VN Trader.

    Emails are queued and delivered from a background thread using the
    SMTP settings in the global SETTINGS dict.
    """

    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """Create the (not yet started) worker thread and message queue."""
        super(EmailEngine, self).__init__(main_engine, event_engine, "email")

        # The worker thread is created up front but only started lazily by
        # the first send_email() call.
        self.thread: Thread = Thread(target=self.run)
        self.queue: Queue = Queue()
        self.active: bool = False

        # Expose send_email directly on the main engine for convenience.
        self.main_engine.send_email = self.send_email

    def send_email(self, subject: str, content: str, receiver: str = "") -> None:
        """Queue an email for asynchronous delivery."""
        # Start email engine when sending first email.
        if not self.active:
            self.start()

        # Use default receiver if not specified.
        if not receiver:
            receiver = SETTINGS["email.receiver"]

        msg = EmailMessage()
        msg["From"] = SETTINGS["email.sender"]
        msg["To"] = receiver
        msg["Subject"] = subject
        msg.set_content(content)

        self.queue.put(msg)

    def run(self) -> None:
        """Worker loop: pop queued messages and deliver them over SMTP-SSL."""
        while self.active:
            try:
                # 1-second timeout so the loop re-checks self.active and can
                # exit promptly once close() clears the flag.
                msg = self.queue.get(block=True, timeout=1)

                with smtplib.SMTP_SSL(
                    SETTINGS["email.server"], SETTINGS["email.port"]
                ) as smtp:
                    smtp.login(
                        SETTINGS["email.username"], SETTINGS["email.password"]
                    )
                    smtp.send_message(msg)
            except Empty:
                # No message within the timeout; loop again.
                pass

    def start(self) -> None:
        """Mark the engine active and start the worker thread."""
        self.active = True
        self.thread.start()

    def close(self) -> None:
        """Stop the worker thread (no-op if it was never started)."""
        if not self.active:
            return

        self.active = False
        self.thread.join()
|
runDataRecording.py | # encoding: UTF-8
from __future__ import print_function
import multiprocessing
from time import sleep
from datetime import datetime, time
from negociant.event import EventEngine2
from negociant.trader.vtEvent import EVENT_LOG, EVENT_ERROR
from negociant.trader.vtEngine import MainEngine, LogEngine
from negociant.trader.gateway import ctpGateway
from negociant.trader.gateway.ctpGateway import CtpAccount
from negociant.trader.app import dataRecorder
#----------------------------------------------------------------------
def processErrorEvent(event):
    """
    Handle error events by printing them.

    After every login the counterparty re-pushes all errors generated that
    day, so these are printed rather than written to the log file.
    """
    error = event.dict_['data']
    print(u'错误代码:%s,错误信息:%s' %(error.errorID, error.errorMsg))
#----------------------------------------------------------------------
def runChildProcess(ctpAccount):
    """Child-process entry point: run the market-data recorder.

    ctpAccount: CTP account/connection settings passed in by the parent.
    Runs forever; the parent process terminates it outside trading hours.
    """
    print('-'*20)

    # Create the log engine.
    le = LogEngine()
    le.setLogLevel(le.LEVEL_INFO)
    le.addConsoleHandler()
    le.info(u'启动行情记录运行子进程')

    ee = EventEngine2()
    le.info(u'事件引擎创建成功')

    me = MainEngine(ee)
    me.addGateway(ctpGateway, ctpAccount)
    me.addApp(dataRecorder)
    le.info(u'主引擎创建成功')

    ee.register(EVENT_LOG, le.processLogEvent)
    ee.register(EVENT_ERROR, processErrorEvent)
    le.info(u'注册日志事件监听')

    me.connect('CTP')
    le.info(u'连接CTP接口')

    dr = me.getApp(dataRecorder.appName)
    dr.initAll()

    # Keep the process alive; recording happens on the engines' own threads.
    while True:
        sleep(1)
#----------------------------------------------------------------------
def runParentProcess(CTPFile='CTP_connect'):
    """Parent-process entry point: supervise the recorder child process.

    Starts the child during day/night trading sessions and terminates it
    outside them, polling every 5 seconds.
    """
    # Create the log engine.
    le = LogEngine()
    le.setLogLevel(le.LEVEL_INFO)
    le.addConsoleHandler()
    le.info(u'启动行情记录守护父进程')

    # Day-session start and stop times.
    DAY_START = time(8, 57)
    DAY_END = time(15, 18)
    # Night-session start and stop times (the session crosses midnight).
    NIGHT_START = time(20, 57)
    NIGHT_END = time(2, 40)

    p = None        # child process handle
    ctpAccount = CtpAccount(CTPFile)

    while True:
        currentTime = datetime.now().time()
        recording = False

        # Determine whether we are inside a trading-session window.
        if ((currentTime >= DAY_START and currentTime <= DAY_END) or
            (currentTime >= NIGHT_START) or
            (currentTime <= NIGHT_END)):
            recording = True

        # Filter out the weekend windows: all of Sunday (weekday 6), Saturday
        # after Friday's night session ends, and early Monday before the day
        # session starts (weekday(): Monday == 0 ... Sunday == 6).
        if ((datetime.today().weekday() == 6) or
            (datetime.today().weekday() == 5 and currentTime > NIGHT_END) or
            (datetime.today().weekday() == 0 and currentTime < DAY_START)):
            recording = False

        # Inside a recording window: make sure the child process is running.
        if recording and p is None:
            le.info(u'启动子进程')
            p = multiprocessing.Process(target=runChildProcess, args=(ctpAccount,))
            p.start()
            le.info(u'子进程启动成功')

        # Outside a recording window: shut the child process down.
        if not recording and p is not None:
            le.info(u'关闭子进程')
            p.terminate()
            p.join()
            p = None
            le.info(u'子进程关闭成功')

        sleep(5)


if __name__ == '__main__':
    # runChildProcess(None)
    runParentProcess()
|
detection_input.py | from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
import mxnet as mx
from six.moves.queue import Queue
from threading import Thread
from operator_py.cython.bbox import bbox_overlaps_cython
from operator_py.bbox_transform import nonlinear_transform as bbox_transform
class DetectionAugmentation(object):
    """Base class for detection-pipeline augmentations.

    Subclasses override apply() and mutate input_record in place.
    """

    def __init__(self):
        # No shared state; exists so subclasses can chain to super().__init__().
        pass

    def apply(self, input_record):
        # Base implementation is a no-op; subclasses transform the record.
        pass
class ReadRoiRecord(DetectionAugmentation):
    """
    input: image_url, str
           gt_url, str
    output: image, ndarray(h, w, rgb)
            image_raw_meta, tuple(h, w)
            gt, any
    """

    def __init__(self, gt_select):
        super(ReadRoiRecord, self).__init__()
        # gt_select: names of ground-truth fields to load. Currently unused
        # while the pickle-based loading below stays commented out.
        self.gt_select = gt_select

    def apply(self, input_record):
        # cv2 loads images as BGR; reverse the channel axis to store RGB.
        image = cv2.imread(input_record["image_url"], cv2.IMREAD_COLOR)
        input_record["image"] = image[:, :, ::-1]

        # TODO: remove this compatibility method
        # Append the class id as a 5th column so gt_bbox becomes (n, 5).
        input_record["gt_bbox"] = np.concatenate([input_record["gt_bbox"],
                                                  input_record["gt_class"].reshape(-1, 1)],
                                                 axis=1)

        # gt_dict = pkl.load(input_record["gt_url"])
        # for s in self.gt_select:
        #     input_record[s] = gt_dict[s]
class Norm2DImage(DetectionAugmentation):
    """
    input: image, ndarray(h, w, rgb)
    output: image, ndarray(h, w, rgb)
    """

    def __init__(self, pNorm):
        super(Norm2DImage, self).__init__()
        self.p = pNorm  # type: NormParam

    def apply(self, input_record):
        params = self.p

        # Standardize in float32: subtract the channel mean, divide by std.
        normalized = input_record["image"].astype(np.float32)
        normalized -= params.mean
        normalized /= params.std

        input_record["image"] = normalized
class Resize2DImageBbox(DetectionAugmentation):
    """
    input: image, ndarray(h, w, rgb)
           gt_bbox, ndarray(n, 5)
    output: image, ndarray(h', w', rgb)
            im_info, tuple(h', w', scale)
            gt_bbox, ndarray(n, 5)
    """

    def __init__(self, pResize):
        super(Resize2DImageBbox, self).__init__()
        self.p = pResize  # type: ResizeParam

    def apply(self, input_record):
        p = self.p
        image = input_record["image"]
        gt_bbox = input_record["gt_bbox"].astype(np.float32)

        h, w = image.shape[:2]
        shorter, longer = min(h, w), max(h, w)
        # Scale so the short side reaches p.short unless the long side would
        # then exceed p.long, in which case the long side constrains it.
        scale = min(p.short / shorter, p.long / longer)

        input_record["image"] = cv2.resize(
            image, None, None, scale, scale, interpolation=cv2.INTER_LINEAR)

        # Scale the boxes along with the image, then clamp them so they
        # cannot overflow the resized image extents.
        gt_bbox[:, :4] = gt_bbox[:, :4] * scale
        if h < w:
            x_max, y_max = p.long, p.short
        else:
            x_max, y_max = p.short, p.long
        gt_bbox[:, [0, 2]] = np.clip(gt_bbox[:, [0, 2]], 0, x_max)
        gt_bbox[:, [1, 3]] = np.clip(gt_bbox[:, [1, 3]], 0, y_max)
        input_record["gt_bbox"] = gt_bbox

        # Round exactly as opencv does when it computes the output size.
        input_record["im_info"] = (round(h * scale), round(w * scale), scale)
class Resize2DImageBboxByRoidb(DetectionAugmentation):
    """
    input: image, ndarray(h, w, rgb)
           gt_bbox, ndarray(n, 5)
    output: image, ndarray(h', w', rgb)
            im_info, tuple(h', w', scale)
            gt_bbox, ndarray(n, 5)
    """

    def __init__(self):
        super(Resize2DImageBboxByRoidb, self).__init__()

        # Placeholder parameter holder; the real target sizes are filled in
        # per record inside apply().
        class ResizeParam:
            long = None
            short = None

        self.resize_aug = Resize2DImageBbox(ResizeParam)

    def apply(self, input_record):
        # Per-record target sizes come from the roidb entry itself.
        resize_params = self.resize_aug.p
        resize_params.long = input_record["resize_long"]
        resize_params.short = input_record["resize_short"]
        self.resize_aug.apply(input_record)
class RandResize2DImageBbox(DetectionAugmentation):
    """
    Resizes with a (long, short) pair drawn uniformly at random from the
    configured candidate lists (scale jitter augmentation).

    input: image, ndarray(h, w, rgb)
           gt_bbox, ndarray(n, 4)
    output: image, ndarray(h', w', rgb)
            im_info, tuple(h', w', scale)
            gt_bbox, ndarray(n, 4)
    """

    def __init__(self, pRandResize):
        super(RandResize2DImageBbox, self).__init__()
        self.p = pRandResize

        # Mutable parameter holder; filled in per record inside apply().
        class ResizeParam:
            long = None
            short = None

        self.resize_aug = Resize2DImageBbox(ResizeParam)

    def apply(self, input_record):
        # Pick one candidate index; long/short lists are paired by index.
        pick = np.random.randint(len(self.p.long_ranges))
        self.resize_aug.p.long = self.p.long_ranges[pick]
        self.resize_aug.p.short = self.p.short_ranges[pick]
        self.resize_aug.apply(input_record)
class Flip2DImageBbox(DetectionAugmentation):
    """
    Horizontally flips the image and gt boxes when the record is marked
    as flipped.

    input: image, ndarray(h, w, rgb)
           gt_bbox, ndarray(n, 4)
    output: image, ndarray(h, w, rgb)
            gt_bbox, ndarray(n, 4)
    """

    def __init__(self):
        super(Flip2DImageBbox, self).__init__()

    def apply(self, input_record):
        # Guard clause: untouched unless the roidb flagged this record.
        if not input_record["flipped"]:
            return
        image = input_record["image"]
        bbox = input_record["gt_bbox"]
        width = image.shape[1]
        # Reverse the column axis to mirror the image.
        input_record["image"] = image[:, ::-1]
        mirrored = bbox.copy()
        # x1/x2 swap roles when mirrored around the vertical center line.
        mirrored[:, 0] = (width - 1) - bbox[:, 2]
        mirrored[:, 2] = (width - 1) - bbox[:, 0]
        input_record["gt_bbox"] = mirrored
class RandCrop2DImageBbox(DetectionAugmentation):
    """
    Crops a (crop_h, crop_w) window from the image -- either roughly
    centered on a randomly chosen gt box ("center" mode) or at a uniformly
    random position ("random" mode) -- and remaps gt boxes into the crop.
    """
    def __init__(self, pCrop):
        super(RandCrop2DImageBbox, self).__init__()
        self.p = pCrop
        assert pCrop.mode in ["center", "random"], "The {} crop mode is not supported".format(pCrop.mode)
    def apply(self, input_record):
        p = self.p
        image = input_record["image"]
        gt_bbox = input_record["gt_bbox"]
        # Orient the crop window to match the image orientation.
        if image.shape[0] >= image.shape[1]:
            crop_w, crop_h = p.short, p.long
        else:
            crop_w, crop_h = p.long, p.short
        # Never crop larger than the image itself.
        crop_w = min(crop_w, image.shape[1])
        crop_h = min(crop_h, image.shape[0])
        # "center" mode falls through to the random branch when there is
        # no gt box to center on.
        if p.mode == "center" and gt_bbox.shape[0] > 0:
            # random select a box as cropping center
            rand_index = np.random.randint(gt_bbox.shape[0])
            box = gt_bbox[rand_index, :]
            # decide start point
            ctr_x = (box[2] + box[0]) / 2.0
            ctr_y = (box[3] + box[1]) / 2.0
            # Jitter the center so crops are not perfectly box-aligned.
            noise_h = np.random.randint(-10, 10)
            noise_w = np.random.randint(-30, 30)
            start_h = int(round(ctr_y - crop_h / 2)) + noise_h
            start_w = int(round(ctr_x - crop_w / 2)) + noise_w
            end_h = start_h + crop_h
            end_w = start_w + crop_w
            # prevent crop cross border
            # Shift the whole window back inside the image, preserving size.
            if start_h < 0:
                off = -start_h
                start_h += off
                end_h += off
            if start_w < 0:
                off = -start_w
                start_w += off
                end_w += off
            if end_h > image.shape[0]:
                off = end_h - image.shape[0]
                end_h -= off
                start_h -= off
            if end_w > image.shape[1]:
                off = end_w - image.shape[1]
                end_w -= off
                start_w -= off
        else:
            # random crop from image
            start_h = np.random.randint(0, image.shape[0] - crop_h + 1)
            start_w = np.random.randint(0, image.shape[1] - crop_w + 1)
            end_h = start_h + crop_h
            end_w = start_w + crop_w
        assert start_h >= 0 and start_w >= 0 and end_h <= image.shape[0] and end_w <= image.shape[1]
        # crop then resize
        im_cropped = image[start_h:end_h, start_w:end_w, :]
        # transform ground truth
        # Keep only boxes whose center lies strictly inside the crop.
        ctrs_x = (gt_bbox[:, 2] + gt_bbox[:, 0]) / 2.0
        ctrs_y = (gt_bbox[:, 3] + gt_bbox[:, 1]) / 2.0
        keep = np.where((ctrs_y > start_h) & (ctrs_x > start_w) & (ctrs_y < end_h) & (ctrs_x < end_w))
        gt_bbox = gt_bbox[keep]
        # Shift the surviving boxes into crop coordinates and clip to the
        # crop bounds (fancy indexing above made gt_bbox a fresh copy, so
        # the in-place ops do not touch the original record array).
        gt_bbox[:, [0, 2]] -= start_w
        gt_bbox[:, [1, 3]] -= start_h
        gt_bbox[:, [0, 2]] = np.clip(gt_bbox[:, [0, 2]], 0, crop_w - 1)
        gt_bbox[:, [1, 3]] = np.clip(gt_bbox[:, [1, 3]], 0, crop_h - 1)
        input_record["image"] = im_cropped
        input_record["gt_bbox"] = gt_bbox
        input_record["im_info"] = (crop_h, crop_w, input_record["im_info"][2])
class Pad2DImageBbox(DetectionAugmentation):
    """
    Zero-pads the image to the fixed (long, short) canvas and pads the gt
    boxes to a fixed count so records can be batched.

    input: image, ndarray(h, w, rgb)
           gt_bbox, ndarray(n, 5)
    output: image, ndarray(h, w, rgb)
            gt_bbox, ndarray(max_num_gt, 5)
    """

    def __init__(self, pPad):
        super(Pad2DImageBbox, self).__init__()
        self.p = pPad  # type: PadParam

    def apply(self, input_record):
        p = self.p
        img = input_record["image"]
        boxes = input_record["gt_bbox"]
        h, w = img.shape[:2]
        # Portrait records pad to (long, short); landscape to (short, long).
        if h >= w:
            canvas_shape = (p.long, p.short, 3)
        else:
            canvas_shape = (p.short, p.long, 3)
        canvas = np.zeros(canvas_shape, dtype=np.float32)
        canvas[:h, :w] = img
        # Pad gt boxes to max_num_gt rows; unused slots are marked with -1.
        boxes_padded = np.full(shape=(p.max_num_gt, 5), fill_value=-1, dtype=np.float32)
        boxes_padded[:len(boxes)] = boxes
        input_record["image"] = canvas
        input_record["gt_bbox"] = boxes_padded
class ConvertImageFromHwcToChw(DetectionAugmentation):
    """Transposes the image from HWC to CHW layout (framework convention)."""

    def __init__(self):
        super(ConvertImageFromHwcToChw, self).__init__()

    def apply(self, input_record):
        # (h, w, c) -> (c, h, w)
        hwc = input_record["image"]
        input_record["image"] = hwc.transpose((2, 0, 1))
class AnchorTarget2D(DetectionAugmentation):
    """
    Builds RPN training targets (classification labels, bbox regression
    targets and weights) for a fixed anchor grid.

    input: image_meta: tuple(h, w, scale)
           gt_bbox, ndarray(max_num_gt, 5)
    output: anchor_label, ndarray(num_anchor * 2, h, w)
            anchor_bbox_target, ndarray(num_anchor * 4, h, w)
            anchor_bbox_weight, ndarray(num_anchor * 4, h, w)
    """
    def __init__(self, pAnchor):
        super(AnchorTarget2D, self).__init__()
        self.p = pAnchor  # type: AnchorTarget2DParam
        # Lazily built, cached anchor grids (see the properties below).
        self.__base_anchor = None
        self.__v_all_anchor = None
        self.__h_all_anchor = None
        self.__num_anchor = None
        # When True, anchor subsampling becomes deterministic (first-k
        # instead of random choice) to ease debugging.
        self.DEBUG = False
    @property
    def base_anchor(self):
        """One anchor per (aspect, scale) pair, centered on the origin cell."""
        if self.__base_anchor is not None:
            return self.__base_anchor
        p = self.p
        # Start from a stride x stride box anchored at the origin.
        base_anchor = np.array([0, 0, p.generate.stride - 1, self.p.generate.stride - 1])
        w = base_anchor[2] - base_anchor[0] + 1
        h = base_anchor[3] - base_anchor[1] + 1
        x_ctr = base_anchor[0] + 0.5 * (w - 1)
        y_ctr = base_anchor[1] + 0.5 * (h - 1)
        # Pick (w, h) pairs that keep the cell area while matching each
        # aspect ratio, then scale them by each configured scale.
        w_ratios = np.round(np.sqrt(w * h / p.generate.aspects))
        h_ratios = np.round(w_ratios * p.generate.aspects)
        ws = (np.outer(w_ratios, p.generate.scales)).reshape(-1)
        hs = (np.outer(h_ratios, p.generate.scales)).reshape(-1)
        base_anchor = np.stack(
            [x_ctr - 0.5 * (ws - 1),
             y_ctr - 0.5 * (hs - 1),
             x_ctr + 0.5 * (ws - 1),
             y_ctr + 0.5 * (hs - 1)],
            axis=1)
        self.__base_anchor = base_anchor
        return self.__base_anchor
    @property
    def v_all_anchor(self):
        """Full anchor grid for vertical (portrait, h >= w) images."""
        if self.__v_all_anchor is not None:
            return self.__v_all_anchor
        p = self.p
        # Feature-map grid: short columns, long rows for portrait layout.
        shift_x = np.arange(0, p.generate.short, dtype=np.float32) * p.generate.stride
        shift_y = np.arange(0, p.generate.long, dtype=np.float32) * p.generate.stride
        grid_x, grid_y = np.meshgrid(shift_x, shift_y)
        grid_x, grid_y = grid_x.reshape(-1), grid_y.reshape(-1)
        grid = np.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        # Broadcast every base anchor onto every grid cell.
        all_anchor = grid[:, None, :] + self.base_anchor[None, :, :]
        all_anchor = all_anchor.reshape(-1, 4)
        self.__v_all_anchor = all_anchor
        self.__num_anchor = all_anchor.shape[0]
        return self.__v_all_anchor
    @property
    def h_all_anchor(self):
        """Full anchor grid for horizontal (landscape, h < w) images."""
        if self.__h_all_anchor is not None:
            return self.__h_all_anchor
        p = self.p
        # Mirror of v_all_anchor: long columns, short rows.
        shift_x = np.arange(0, p.generate.long, dtype=np.float32) * p.generate.stride
        shift_y = np.arange(0, p.generate.short, dtype=np.float32) * p.generate.stride
        grid_x, grid_y = np.meshgrid(shift_x, shift_y)
        grid_x, grid_y = grid_x.reshape(-1), grid_y.reshape(-1)
        grid = np.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        all_anchor = grid[:, None, :] + self.base_anchor[None, :, :]
        all_anchor = all_anchor.reshape(-1, 4)
        self.__h_all_anchor = all_anchor
        self.__num_anchor = all_anchor.shape[0]
        return self.__h_all_anchor
    @v_all_anchor.setter
    def v_all_anchor(self, value):
        self.__v_all_anchor = value
        self.__num_anchor = value.shape[0]
    @h_all_anchor.setter
    def h_all_anchor(self, value):
        self.__h_all_anchor = value
        self.__num_anchor = value.shape[0]
    def _assign_label_to_anchor(self, valid_anchor, gt_bbox, neg_thr, pos_thr, min_pos_thr):
        """Returns (cls_label, argmax_overlaps): per-anchor 1/0/-1 labels
        (fg/bg/ignore) and the index of each anchor's best-matching gt."""
        num_anchor = valid_anchor.shape[0]
        cls_label = np.full(shape=(num_anchor,), fill_value=-1, dtype=np.float32)
        if len(gt_bbox) > 0:
            # num_anchor x num_gt
            overlaps = bbox_overlaps_cython(valid_anchor.astype(np.float32, copy=False), gt_bbox.astype(np.float32, copy=False))
            max_overlaps = overlaps.max(axis=1)
            argmax_overlaps = overlaps.argmax(axis=1)
            gt_max_overlaps = overlaps.max(axis=0)
            # TODO: speed up this
            # TODO: fix potentially assigning wrong anchors as positive
            # A correct implementation is given as
            # gt_argmax_overlaps = np.where((overlaps.transpose() == gt_max_overlaps[:, None]) &
            #                               (overlaps.transpose() >= min_pos_thr))[1]
            # NOTE(review): the broadcast below compares every overlap
            # against every gt's max, so anchors matching SOME gt's max
            # (not their own column's) can be promoted -- known flaw that
            # the TODO above documents; kept for reproducibility.
            gt_argmax_overlaps = np.where((overlaps == gt_max_overlaps) &
                                          (overlaps >= min_pos_thr))[0]
            # anchor class
            cls_label[max_overlaps < neg_thr] = 0
            # fg label: for each gt, anchor with highest overlap
            cls_label[gt_argmax_overlaps] = 1
            # fg label: above threshold IoU
            cls_label[max_overlaps >= pos_thr] = 1
        else:
            # No gt in this image: everything is background.
            cls_label[:] = 0
            argmax_overlaps = np.zeros(shape=(num_anchor, ))
        return cls_label, argmax_overlaps
    def _sample_anchor(self, label, num, fg_fraction):
        """In-place subsamples fg/bg anchors down to the per-image budget,
        marking the surplus as ignored (-1)."""
        num_fg = int(fg_fraction * num)
        fg_inds = np.where(label == 1)[0]
        if len(fg_inds) > num_fg:
            disable_inds = np.random.choice(fg_inds, size=(len(fg_inds) - num_fg), replace=False)
            if self.DEBUG:
                disable_inds = fg_inds[:(len(fg_inds) - num_fg)]
            label[disable_inds] = -1
        # Background fills whatever budget the (possibly reduced) fg left.
        num_bg = num - np.sum(label == 1)
        bg_inds = np.where(label == 0)[0]
        if len(bg_inds) > num_bg:
            disable_inds = np.random.choice(bg_inds, size=(len(bg_inds) - num_bg), replace=False)
            if self.DEBUG:
                disable_inds = bg_inds[:(len(bg_inds) - num_bg)]
            label[disable_inds] = -1
    def _cal_anchor_target(self, label, valid_anchor, gt_bbox, anchor_label):
        """Returns (reg_target, reg_weight); only fg anchors get non-zero
        regression targets/weights."""
        num_anchor = valid_anchor.shape[0]
        reg_target = np.zeros(shape=(num_anchor, 4), dtype=np.float32)
        reg_weight = np.zeros(shape=(num_anchor, 4), dtype=np.float32)
        fg_index = np.where(label == 1)[0]
        if len(fg_index) > 0:
            reg_target[fg_index] = bbox_transform(valid_anchor[fg_index], gt_bbox[anchor_label[fg_index], :4])
            reg_weight[fg_index, :] = 1.0
        return reg_target, reg_weight
    def _gather_valid_anchor(self, image_info):
        """Returns (valid_index, valid_anchor): anchors that lie within the
        image bounds (plus the allowed border)."""
        h, w = image_info[:2]
        # Pick the grid matching the image orientation.
        all_anchor = self.v_all_anchor if h >= w else self.h_all_anchor
        allowed_border = self.p.assign.allowed_border
        valid_index = np.where((all_anchor[:, 0] >= -allowed_border) &
                               (all_anchor[:, 1] >= -allowed_border) &
                               (all_anchor[:, 2] < w + allowed_border) &
                               (all_anchor[:, 3] < h + allowed_border))[0]
        return valid_index, all_anchor[valid_index]
    def _scatter_valid_anchor(self, valid_index, cls_label, reg_target, reg_weight):
        """Scatters per-valid-anchor results back onto the full anchor grid;
        out-of-image anchors stay ignored (-1) with zero targets."""
        num_anchor = self.__num_anchor
        all_cls_label = np.full(shape=(num_anchor,), fill_value=-1, dtype=np.float32)
        all_reg_target = np.zeros(shape=(num_anchor, 4), dtype=np.float32)
        all_reg_weight = np.zeros(shape=(num_anchor, 4), dtype=np.float32)
        all_cls_label[valid_index] = cls_label
        all_reg_target[valid_index] = reg_target
        all_reg_weight[valid_index] = reg_weight
        return all_cls_label, all_reg_target, all_reg_weight
    def apply(self, input_record):
        """Computes rpn_cls_label / rpn_reg_target / rpn_reg_weight for one
        record and stores them back into input_record."""
        p = self.p
        im_info = input_record["im_info"]
        gt_bbox = input_record["gt_bbox"]
        assert isinstance(gt_bbox, np.ndarray)
        assert gt_bbox.dtype == np.float32
        # Drop the -1 padding rows added by Pad2DImageBbox.
        valid = np.where(gt_bbox[:, 0] != -1)[0]
        gt_bbox = gt_bbox[valid]
        # Strip the class column if present; only coordinates are needed.
        if gt_bbox.shape[1] == 5:
            gt_bbox = gt_bbox[:, :4]
        valid_index, valid_anchor = self._gather_valid_anchor(im_info)
        cls_label, anchor_label = \
            self._assign_label_to_anchor(valid_anchor, gt_bbox,
                                         p.assign.neg_thr, p.assign.pos_thr, p.assign.min_pos_thr)
        self._sample_anchor(cls_label, p.sample.image_anchor, p.sample.pos_fraction)
        reg_target, reg_weight = self._cal_anchor_target(cls_label, valid_anchor, gt_bbox, anchor_label)
        cls_label, reg_target, reg_weight = \
            self._scatter_valid_anchor(valid_index, cls_label, reg_target, reg_weight)
        h, w = im_info[:2]
        if h >= w:
            fh, fw = p.generate.long, p.generate.short
        else:
            fh, fw = p.generate.short, p.generate.long
        # Reshape from (fh*fw*A, ...) row-major layout into CHW-style blobs.
        input_record["rpn_cls_label"] = cls_label.reshape((fh, fw, -1)).transpose(2, 0, 1).reshape(-1)
        input_record["rpn_reg_target"] = reg_target.reshape((fh, fw, -1)).transpose(2, 0, 1)
        input_record["rpn_reg_weight"] = reg_weight.reshape((fh, fw, -1)).transpose(2, 0, 1)
        return input_record["rpn_cls_label"], \
               input_record["rpn_reg_target"], \
               input_record["rpn_reg_weight"]
class RenameRecord(DetectionAugmentation):
    """Renames record keys according to a {old_key: new_key} mapping."""

    def __init__(self, mapping):
        super(RenameRecord, self).__init__()
        self.mapping = mapping

    def apply(self, input_record):
        for old_key, new_key in self.mapping.items():
            # Assign first, delete second -- so an identity mapping
            # (old_key == new_key) removes the key, as before.
            value = input_record[old_key]
            input_record[new_key] = value
            del input_record[old_key]
class Loader(mx.io.DataIter):
    """
    Loader is now a 3-thread design,
    Loader.next is called in the main thread,
    multiple worker threads are responsible for performing transform,
    a collector thread is responsible for converting numpy array to mxnet array.
    """
    def __init__(self, roidb, transform, data_name, label_name, batch_size=1,
                 shuffle=False, num_worker=None, num_collector=None,
                 worker_queue_depth=None, collector_queue_depth=None, kv=None):
        """
        This Iter will provide roi data to Fast R-CNN network
        :param roidb: must be preprocessed
        :param batch_size:
        :param shuffle: bool
        :return: Loader
        """
        super(Loader, self).__init__(batch_size=batch_size)
        if kv:
            (self.rank, self.num_worker) = (kv.rank, kv.num_workers)
        else:
            (self.rank, self.num_worker) = (0, 1)
        # data processing utilities
        self.transform = transform
        # save parameters as properties
        self.roidb = roidb
        self.shuffle = shuffle
        # infer properties from roidb
        self.index = np.arange(len(roidb))
        # decide data and label names
        self.data_name = data_name
        self.label_name = label_name
        # status variable for synchronization between get_data and get_label
        self._cur = 0
        self.data = None
        self.label = None
        # multi-thread settings
        # NOTE(review): this overwrites the kv-derived self.num_worker set
        # above (distributed worker count) with the thread count parameter
        # of the same name -- confirm the shadowing is intentional.
        self.num_worker = num_worker
        self.num_collector = num_collector
        self.index_queue = Queue()
        self.data_queue = Queue(maxsize=worker_queue_depth)
        self.result_queue = Queue(maxsize=collector_queue_depth)
        self.workers = None
        self.collectors = None
        # get first batch to fill in provide_data and provide_label
        self._thread_start()
        self.load_first_batch()
        self.reset()
    @property
    def total_record(self):
        # Number of records in whole batches (remainder is dropped).
        return len(self.index) // self.batch_size * self.batch_size
    @property
    def provide_data(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data)]
    @property
    def provide_label(self):
        return [(k, v.shape) for k, v in zip(self.label_name, self.label)]
    def _insert_queue(self):
        # Enqueue one index array per full batch; partial tails are skipped.
        for i in range(0, len(self.index), self.batch_size):
            batch_index = self.index[i:i + self.batch_size]
            if len(batch_index) == self.batch_size:
                self.index_queue.put(batch_index)
    def _thread_start(self):
        # Daemon threads so an abrupt main-thread exit does not hang.
        self.workers = \
            [Thread(target=self.worker, args=[self.roidb, self.index_queue, self.data_queue])
             for _ in range(self.num_worker)]
        for worker in self.workers:
            worker.daemon = True
            worker.start()
        self.collectors = [Thread(target=self.collector, args=[]) for _ in range(self.num_collector)]
        for c in self.collectors:
            c.daemon = True
            c.start()
    def reset(self):
        self._cur = 0
        if self.shuffle:
            np.random.shuffle(self.index)
        self._insert_queue()
    def iter_next(self):
        return self._cur + self.batch_size <= len(self.index)
    def load_first_batch(self):
        # Prime the pipeline once so provide_data/provide_label are known.
        self.index_queue.put(range(self.batch_size))
        self.next()
    def load_batch(self):
        self._cur += self.batch_size
        result = self.result_queue.get()
        return result
    def next(self):
        if self.iter_next():
            # print("[worker] %d" % self.data_queue.qsize())
            # print("[collector] %d" % self.result_queue.qsize())
            result = self.load_batch()
            self.data = result.data
            self.label = result.label
            return result
        else:
            raise StopIteration
    def worker(self, roidb, index_queue, data_queue):
        """Worker-thread body: apply the transform pipeline to each record
        of a batch and push the stacked numpy arrays downstream."""
        while True:
            batch_index = index_queue.get()
            records = []
            for index in batch_index:
                # Copy so transforms never mutate the shared roidb entry.
                roi_record = roidb[index].copy()
                for trans in self.transform:
                    trans.apply(roi_record)
                records.append(roi_record)
            data_batch = {}
            for name in self.data_name + self.label_name:
                data_batch[name] = np.stack([r[name] for r in records])
            data_queue.put(data_batch)
    def collector(self):
        """Collector-thread body: convert numpy batches to mx.nd arrays and
        wrap them into DataBatch objects for the main thread."""
        while True:
            record = self.data_queue.get()
            data = [mx.nd.array(record[name]) for name in self.data_name]
            label = [mx.nd.array(record[name]) for name in self.label_name]
            provide_data = [(k, v.shape) for k, v in zip(self.data_name, data)]
            provide_label = [(k, v.shape) for k, v in zip(self.label_name, label)]
            data_batch = mx.io.DataBatch(data=data,
                                         label=label,
                                         provide_data=provide_data,
                                         provide_label=provide_label)
            self.result_queue.put(data_batch)
class SequentialLoader(mx.io.DataIter):
    """Chains several DataIters, exhausting them one after another."""
    def __init__(self, iters):
        super(SequentialLoader, self).__init__()
        self.iters = iters
        # Parallel flags: exhausted[i] is True once iters[i] raised
        # StopIteration in the current epoch.
        self.exhausted = [False] * len(iters)
    def __getattr__(self, attr):
        # delegate unknown keys to underlying iterators
        # NOTE(review): once every iterator is exhausted, index(False)
        # raises ValueError rather than AttributeError -- confirm callers
        # never probe attributes in that state.
        first_non_empty_idx = self.exhausted.index(False)
        first_non_empty_iter = self.iters[first_non_empty_idx]
        return getattr(first_non_empty_iter, attr)
    def next(self):
        # Drain the first non-exhausted iterator; fall through to the next
        # one when it stops, and stop only when all are exhausted.
        while True:
            if all(self.exhausted):
                raise StopIteration
            first_non_empty_idx = self.exhausted.index(False)
            first_non_empty_iter = self.iters[first_non_empty_idx]
            try:
                result = first_non_empty_iter.next()
                return result
            except StopIteration:
                self.exhausted[first_non_empty_idx] = True
    def reset(self):
        for it in self.iters:
            it.reset()
        self.exhausted = [False] * len(self.iters)
    @property
    def provide_data(self):
        return self.iters[0].provide_data
    @property
    def provide_label(self):
        return self.iters[0].provide_label
class AnchorLoader(mx.io.DataIter):
    """Aspect-ratio-grouped loader.

    Vertical (h >= w) and horizontal records are served by separate Loader
    instances chained by a SequentialLoader, so each batch holds images of
    a single orientation. When a kvstore is given, the roidb is partitioned
    across ranks.
    """

    def __init__(self, roidb, transform, data_name, label_name, batch_size=1,
                 shuffle=False, num_worker=12, num_collector=4, worker_queue_depth=4,
                 collector_queue_depth=4, kv=None):
        super(AnchorLoader, self).__init__(batch_size=batch_size)
        v_roidb, h_roidb = self.roidb_aspect_group(roidb)
        if kv:
            rank, num_rank = kv.rank, kv.num_workers
        else:
            rank, num_rank = 0, 1
        if num_rank > 1:
            # Evenly partition each orientation across ranks; the first
            # `remain` ranks each pick up one leftover record.
            v_part = len(v_roidb) // num_rank
            v_remain = len(v_roidb) % num_rank
            v_roidb_part = v_roidb[rank * v_part:(rank + 1) * v_part]
            # BUG FIX: the remainder slice must be guarded. With remain == 0
            # the old expression roidb[-0:] was the WHOLE list, handing every
            # rank one duplicated record whenever the roidb divided evenly.
            if v_remain > 0:
                v_roidb_part += v_roidb[-v_remain:][rank:rank + 1]
            h_part = len(h_roidb) // num_rank
            h_remain = len(h_roidb) % num_rank
            h_roidb_part = h_roidb[rank * h_part:(rank + 1) * h_part]
            if h_remain > 0:
                h_roidb_part += h_roidb[-h_remain:][rank:rank + 1]
        else:
            v_roidb_part = v_roidb
            h_roidb_part = h_roidb
        loaders = []
        # Construct a loader per orientation only if it can fill at least
        # one whole batch.
        if len(h_roidb_part) >= batch_size:
            h_loader = Loader(roidb=h_roidb_part,
                              transform=transform,
                              data_name=data_name,
                              label_name=label_name,
                              batch_size=batch_size,
                              shuffle=shuffle,
                              num_worker=num_worker,
                              num_collector=num_collector,
                              worker_queue_depth=worker_queue_depth,
                              collector_queue_depth=collector_queue_depth,
                              kv=kv)
            loaders.append(h_loader)
        if len(v_roidb_part) >= batch_size:
            v_loader = Loader(roidb=v_roidb_part,
                              transform=transform,
                              data_name=data_name,
                              label_name=label_name,
                              batch_size=batch_size,
                              shuffle=shuffle,
                              num_worker=num_worker,
                              num_collector=num_collector,
                              worker_queue_depth=worker_queue_depth,
                              collector_queue_depth=collector_queue_depth,
                              kv=kv)
            loaders.append(v_loader)
        assert len(loaders) > 0, "at least one loader should be constructed"
        self.__loader = SequentialLoader(loaders)

    @property
    def total_record(self):
        return sum([it.total_record for it in self.__loader.iters])

    def __len__(self):
        return self.total_record

    def __getattr__(self, attr):
        # delegate unknown keys to underlying iterators
        return getattr(self.__loader, attr)

    def next(self):
        return self.__loader.next()

    def reset(self):
        return self.__loader.reset()

    @staticmethod
    def roidb_aspect_group(roidb):
        """Splits roidb into (vertical, horizontal) record lists by h >= w."""
        v_roidb, h_roidb = [], []
        for roirec in roidb:
            if roirec["h"] >= roirec["w"]:
                v_roidb.append(roirec)
            else:
                h_roidb.append(roirec)
        return v_roidb, h_roidb
def visualize_anchor_loader(batch_data):
    """Debug helper: draw the first sample's gt boxes on its image."""
    # data[0] holds the image batch in CHW; bring sample 0 back to HWC.
    canvas = batch_data.data[0][0].asnumpy().astype(np.uint8).transpose((1, 2, 0)).copy()
    # data[2] holds the (padded) gt boxes of the same sample.
    boxes = batch_data.data[2][0].asnumpy().astype(np.int32)
    for b in boxes:
        cv2.rectangle(canvas, tuple(b[:2]), tuple(b[2:4]), color=(0, 255, 0))
    cv2.imshow("image", canvas)
    cv2.waitKey()
def visualize_anchor_loader_old(batch_data):
    """Debug helper for the old layout: gt boxes live in label[3]."""
    canvas = batch_data.data[0][0].asnumpy().astype(np.uint8).transpose((1, 2, 0)).copy()
    boxes = batch_data.label[3][0].asnumpy().astype(np.int32)
    for b in boxes:
        cv2.rectangle(canvas, tuple(b[:2]), tuple(b[2:4]), color=(0, 255, 0))
    cv2.imshow("image", canvas)
    cv2.waitKey()
def visualize_original_input(roirec):
    """Debug helper: draw a roidb record's gt boxes on the raw image file."""
    canvas = cv2.imread(roirec["image_url"], cv2.IMREAD_COLOR)
    for b in roirec["gt_bbox"]:
        cv2.rectangle(canvas, tuple(b[:2]), tuple(b[2:4]), color=(0, 255, 0))
    cv2.imshow("image", canvas)
    cv2.waitKey()
|
transactions.py | #! /usr/bin/env python
#
# ===============================================================
# Description: Testing Weaver serializability and transaction
# correctness.
#
# Created: 2014-01-24 16:18:27
#
# Author: Ayush Dubey, dubey@cs.cornell.edu
#
# Copyright (C) 2013, Cornell University, see the LICENSE file
# for licensing agreement
# ===============================================================
#
import sys
try:
import weaver.client as client
except ImportError:
import client
import time
import random
import threading
from test_base import test_graph
from sets import Set
num_clients = 2
num_vts = 1
num_nodes = 10000
edge_factor = 1
edge_cnt = 0
init_tx_sz = 100
assert (num_nodes % init_tx_sz == 0)
assert (int(edge_factor * num_nodes) % init_tx_sz == 0)
wr_loops = 20
rd_loops = 10
clients = []
n_hndls = []
n_hndls_cpy = []
created_edges = Set([])
deleted_edges = Set([])
graph = {}
gmutex = threading.Lock() # graph DS mutex
pmutex = threading.Lock() # printing mutex
# if odd_even = -1, then don't care
# if odd_even = 0, then even
# if odd_even = 1, then odd
def get_random_idx(odd_even=-1):
    """Return a random node index: any (-1), even (0) or odd (1) parity;
    -1 is returned for an unrecognized selector (error)."""
    if odd_even == 0:
        return random.randrange(0, num_nodes, 2)
    if odd_even == 1:
        return random.randrange(1, num_nodes, 2)
    if odd_even == -1:
        return random.randrange(0, num_nodes)
    return -1  # error
# check if n2 already a nbr of n1
def check_repeat(n1, n2, edges):
    """Return True if n2 is already a neighbor of n1, either in the shared
    graph mirror or in the pending `edges` set."""
    if any(nbr == n2 for (nbr, _) in graph[n1]):
        return True
    return (n1, n2) in edges
# create graph edges
# after transaction has finished:
# there is no edge (n1, n2) such that n1 != n2 mod 2
# there are no self-loops
# there maybe parallel edges
def create_edges(c, n_edges):
    """Create n_edges same-parity edges within a single transaction.

    Random edges are created freely; the mixed-parity ones are deleted again
    inside the SAME transaction, so the committed state only ever gains
    edges between nodes of equal parity (the invariant reach_requests
    checks).
    """
    global edge_cnt
    tx_id = c.begin_tx()
    edges = []
    with gmutex:
        while n_edges > 0:
            n1 = get_random_idx()
            n2 = get_random_idx()
            while n2 == n1:
                n2 = get_random_idx()
            e_hndl = c.create_edge(tx_id, n_hndls[n1], n_hndls[n2])
            created_edges.add(e_hndl)
            edges.append((n1, n2, e_hndl))
            # Only same-parity edges count towards the requested total.
            if n1 % 2 == n2 % 2:
                n_edges -= 1
        for e in edges:
            if e[0] % 2 == e[1] % 2:
                edge_cnt += 1
            else:
                # Mixed-parity edge: undo it inside the same transaction.
                assert e[2] not in deleted_edges
                assert e[2] in created_edges
                c.delete_edge(tx_id, e[2], n_hndls[e[0]])
                deleted_edges.add(e[2])
    assert c.end_tx(tx_id)
    with gmutex:
        # Record the surviving edges in the local mirror of the graph.
        for e in edges:
            if e[0] % 2 == e[1] % 2:
                graph[e[0]].append((e[1], e[2]))
# delete n_edges randomly chosen edges
def delete_edges(c, n_edges):
    """Delete n_edges randomly chosen edges within a single transaction,
    keeping the local graph mirror and bookkeeping sets in sync."""
    global edge_cnt
    tx_id = c.begin_tx()
    with gmutex:
        for i in range(n_edges):
            # Pick a node that still has at least one outgoing edge.
            num_nbrs = 0
            while num_nbrs == 0:
                n = get_random_idx()
                num_nbrs = len(graph[n])
            e_idx = random.randrange(0, num_nbrs)
            e_hndl = ((graph[n])[e_idx])[1]
            assert e_hndl not in deleted_edges
            assert e_hndl in created_edges
            c.delete_edge(tx_id, e_hndl, n_hndls[n])
            deleted_edges.add(e_hndl)
            # Mirror the deletion in the local graph copy.
            del (graph[n])[e_idx]
            edge_cnt -= 1
            assert len(graph[n]) == (num_nbrs-1)
    assert c.end_tx(tx_id)
# if n1 != n2 mod 2, then !reachable(n1, n2)
def reach_requests(c, n_reqs):
    """Issue reachability queries: opposite-parity pairs must never be
    reachable (asserted); same-parity pairs merely exercise the server."""
    pos_reqs = []
    neg_reqs = []
    for i in range(n_reqs):
        # Same-parity pair: reachability is possible but not guaranteed.
        n1 = get_random_idx()
        n2 = get_random_idx(n1 % 2)
        while n2 == n1:
            n2 = get_random_idx(n1 % 2)
        pos_reqs.append((n_hndls_cpy[n1], n_hndls_cpy[n2]))
        # Opposite-parity pair: unreachable by construction (create_edges
        # only ever commits same-parity edges).
        n1 = get_random_idx()
        n2 = get_random_idx(1 if n1 % 2 == 0 else 0)
        neg_reqs.append((n_hndls_cpy[n1], n_hndls_cpy[n2]))
    rp = client.ReachParams()
    for r in pos_reqs:
        rp.dest = r[1]
        prog_args = [(r[0], rp)]
        # NOTE(review): positive responses are deliberately not asserted --
        # same parity does not imply connectivity.
        response = c.run_reach_program(prog_args)
    for r in neg_reqs:
        rp.dest = r[1]
        prog_args = [(r[0], rp)]
        response = c.run_reach_program(prog_args)
        assert (not response.reachable)
# create initial graph
def init_graph(c):
    """Create num_nodes nodes and the initial same-parity edges, committing
    a transaction every init_tx_sz operations (counts divide evenly per the
    module-level asserts)."""
    global edge_cnt
    global n_hndls_cpy
    with gmutex:
        for i in range(num_nodes):
            # Open a fresh transaction at the start of each chunk...
            if i % init_tx_sz == 0:
                tx_id = c.begin_tx()
            n = c.create_node(tx_id)
            n_hndls.append(n)
            graph[i] = []
            # ...and commit it at the end of the chunk.
            if i % init_tx_sz == (init_tx_sz-1):
                assert c.end_tx(tx_id)
        # Snapshot the handles for the reader threads.
        n_hndls_cpy = n_hndls[:]
        for i in range(int(edge_factor * num_nodes)):
            if i % init_tx_sz == 0:
                tx_id = c.begin_tx()
            # Endpoints must share parity; no self-loops.
            n1 = get_random_idx()
            n2 = get_random_idx(n1 % 2)
            while n2 == n1: # no self-loops
                n2 = get_random_idx(n1 % 2)
            assert n1 < len(n_hndls)
            assert n2 < len(n_hndls)
            e = c.create_edge(tx_id, n_hndls[n1], n_hndls[n2])
            created_edges.add(e)
            graph[n1].append((n2, e))
            edge_cnt += 1
            if i % init_tx_sz == (init_tx_sz-1):
                assert c.end_tx(tx_id)
def write_loop(c, tid):
    """Writer-thread body: alternately create and delete 100 edges per
    iteration for wr_loops iterations."""
    for i in range(wr_loops):
        create_edges(c, 100)
        delete_edges(c, 100)
        with pmutex:
            # Python 2 print statement (this script targets Python 2).
            print str(tid) + ': Done write loop ' + str(i)
def read_loop(c, tid):
    """Reader-thread body: issue 20 reachability checks per iteration for
    rd_loops iterations."""
    for i in range(rd_loops):
        reach_requests(c, 20)
        with pmutex:
            # Python 2 print statement (this script targets Python 2).
            print str(tid) + ': Read loop ' + str(i)
# Entry point: create the clients, build the initial graph with the first
# client, then run the first half of the clients as writers and the second
# half as readers, concurrently.
print 'Going to create clients'
for i in range(num_clients):
    clients.append(client.Client(client._CLIENT_ID + i, i % num_vts))
print 'Created clients'
init_graph(clients[0])
print 'Created graph'
threads = []
for i in range(num_clients/2):
    t = threading.Thread(target=write_loop, args=(clients[i], i))
    t.start()
    threads.append(t)
for i in range(num_clients/2, num_clients):
    t = threading.Thread(target=read_loop, args=(clients[i], i))
    t.start()
    threads.append(t)
# Wait for all workers to finish before exiting.
for t in threads:
    t.join()
|
data_upload.py | """
Anonymizes and uploads DNS and flow data to cloud.
"""
import time
import datetime
import threading
import utils
import requests
import json
import server_config
from host_state import HostState
import traceback
UPLOAD_INTERVAL = 5
class DataUploader(object):
    """Background thread that periodically anonymizes and uploads the host
    state's pending DNS/flow/device data to the cloud server."""

    def __init__(self, host_state):
        assert isinstance(host_state, HostState)
        self._host_state = host_state
        # Protects self._active against concurrent start()/stop().
        self._lock = threading.Lock()
        self._active = True
        self._thread = threading.Thread(target=self._upload_thread)
        self._thread.daemon = True
        self._last_upload_ts = time.time()

    def _upload_thread(self):
        """Thread body: initialize, then upload every UPLOAD_INTERVAL
        seconds while inspection is active."""
        # Loop until initialized
        while True:
            if utils.safe_run(self._upload_initialization):
                break
            time.sleep(2)
        with self._host_state.lock:
            self._host_state.has_consent = True
        self._update_ui_status(
            'Continuously analyzing your network.\n'
        )
        # Continuously upload data
        while True:
            # If /is_inspecting_traffic was called too long ago, exit.
            with self._host_state.lock:
                last_ui_contact_ts = self._host_state.last_ui_contact_ts
                if last_ui_contact_ts:
                    time_delta = time.time() - last_ui_contact_ts
                    if time_delta > 15 and \
                            not self._host_state.persistent_mode:
                        self._host_state.quit = True
                        return
            if not self._host_state.is_inspecting():
                # Paused: drop pending data so it is not uploaded later.
                self._update_ui_status('Paused inspection.')
                with self._host_state.lock:
                    self._clear_host_state_pending_data()
                time.sleep(2)
                continue
            time.sleep(UPLOAD_INTERVAL)
            with self._lock:
                if not self._active:
                    return
            utils.safe_run(self._upload_data)

    def _upload_initialization(self):
        """Returns True if successfully initialized."""
        # Send client's timezone to server
        ts = time.time()
        utc_offset = int(
            (datetime.datetime.fromtimestamp(ts) -
                datetime.datetime.utcfromtimestamp(ts)).total_seconds()
        )
        utc_offset_url = server_config.UTC_OFFSET_URL.format(
            user_key=self._host_state.user_key,
            offset_seconds=utc_offset
        )
        utils.log('[DATA] Update UTC offset:', utc_offset_url)
        status = requests.get(utc_offset_url).text.strip()
        utils.log('[DATA] Update UTC offset status:', status)
        return 'SUCCESS' == status

    def _clear_host_state_pending_data(self):
        """Resets every pending-data container on the host state.
        Caller must hold self._host_state.lock."""
        self._host_state.pending_dhcp_dict = {}
        self._host_state.pending_resolver_dict = {}
        self._host_state.pending_dns_dict = {}
        self._host_state.pending_flow_dict = {}
        self._host_state.pending_ua_dict = {}
        self._host_state.pending_tls_dict_list = []
        self._host_state.pending_netdisco_dict = {}

    def _prepare_upload_data(self):
        """Returns (window_duration, a dictionary of data to post)."""
        window_duration = time.time() - self._last_upload_ts
        # Remove all pending tasks
        with self._host_state.lock:
            dns_dict = self._host_state.pending_dns_dict
            dhcp_dict = self._host_state.pending_dhcp_dict
            resolver_dict = self._host_state.pending_resolver_dict
            flow_dict = self._host_state.pending_flow_dict
            ua_dict = self._host_state.pending_ua_dict
            ip_mac_dict = self._host_state.ip_mac_dict
            tls_dict_list = self._host_state.pending_tls_dict_list
            netdisco_dict = self._host_state.pending_netdisco_dict
            self._clear_host_state_pending_data()
            self._last_upload_ts = time.time()
        # Turn IP -> MAC dict into device_id -> (ip, device_oui) dict, ignoring
        # gateway's IP.
        device_dict = {}
        for (ip, mac) in ip_mac_dict.iteritems():
            # Never include the gateway
            if ip == self._host_state.gateway_ip:
                continue
            device_id = utils.get_device_id(mac, self._host_state)
            oui = utils.get_oui(mac)
            device_dict[device_id] = (ip, oui)
        # Process flow_stats
        for flow_key in flow_dict:
            flow_stats = flow_dict[flow_key]
            # Compute unique byte count during this window using seq number
            for direction in ('inbound', 'outbound'):
                flow_stats[direction + '_tcp_seq_range'] = get_seq_diff(
                    flow_stats[direction + '_tcp_seq_min_max']
                )
                flow_stats[direction + '_tcp_ack_range'] = get_seq_diff(
                    flow_stats[direction + '_tcp_ack_min_max']
                )
                # We use the original byte count or the sequence number as the
                # final byte count (whichever is larger), although we should
                # note the caveats of using TCP seq numbers to estimate flow
                # size in packet_processor.py.
                flow_stats[direction + '_byte_count'] = max(
                    flow_stats[direction + '_byte_count'],
                    flow_stats[direction + '_tcp_seq_range']
                )
            # Fill in missing byte count (e.g., due to failure of ARP spoofing)
            if flow_stats['inbound_byte_count'] == 0:
                outbound_seq_diff = flow_stats['outbound_tcp_ack_range']
                if outbound_seq_diff:
                    flow_stats['inbound_byte_count'] = outbound_seq_diff
            if flow_stats['outbound_byte_count'] == 0:
                inbound_seq_diff = flow_stats['inbound_tcp_ack_range']
                if inbound_seq_diff:
                    flow_stats['outbound_byte_count'] = inbound_seq_diff
            # Keep only the byte count fields
            flow_dict[flow_key] = {
                'inbound_byte_count': flow_stats['inbound_byte_count'],
                'outbound_byte_count': flow_stats['outbound_byte_count'],
                'syn_originator': flow_stats['syn_originator']
            }
        return (window_duration, {
            'dns_dict': jsonify_dict(dns_dict),
            'flow_dict': jsonify_dict(flow_dict),
            'device_dict': jsonify_dict(device_dict),
            'ua_dict': jsonify_dict(ua_dict),
            'dhcp_dict': jsonify_dict(dhcp_dict),
            'resolver_dict': jsonify_dict(resolver_dict),
            'client_version': self._host_state.client_version,
            'tls_dict_list': json.dumps(tls_dict_list),
            'netdisco_dict': jsonify_dict(netdisco_dict),
            'duration': str(window_duration),
            'client_ts': str(int(time.time()))
        })

    def _upload_data(self):
        """POSTs the prepared data to the server, retrying up to 5 times
        with quadratic backoff, then reports the throughput to the UI."""
        # Prepare POST
        user_key = self._host_state.user_key
        url = server_config.SUBMIT_URL.format(user_key=user_key)
        (window_duration, post_data) = self._prepare_upload_data()
        if window_duration < 1:
            return
        # Try uploading across 5 attempts
        for attempt in range(5):
            status_text = 'Uploading data to cloud...\n'
            if attempt > 0:
                status_text += ' (Attempt {} of 5)'.format(attempt + 1)
            self._update_ui_status(status_text)
            utils.log('[UPLOAD]', status_text)
            # Upload data via POST
            response = requests.post(url, data=post_data).text
            utils.log('[UPLOAD] Gets back server response:', response)
            # Update whitelist
            try:
                response_dict = json.loads(response)
                if response_dict['status'] == 'success':
                    with self._host_state.lock:
                        self._host_state.device_whitelist = \
                            response_dict['inspected_devices']
                    break
            except Exception:
                utils.log('[UPLOAD] Failed. Retrying:', traceback.format_exc())
            # Quadratic backoff before the next attempt.
            time.sleep((attempt + 1) ** 2)
        # Report stats to UI
        with self._host_state.lock:
            byte_count = self._host_state.byte_count
            self._host_state.byte_count = 0
        self._update_ui_status(
            'Currently analyzing ' +
            '{:,}'.format(int(byte_count * 8.0 / 1000.0 / window_duration)) +
            ' Kbps of traffic.'
        )
        utils.log(
            '[UPLOAD] Total bytes in past epoch:',
            byte_count
        )

    def _update_ui_status(self, value):
        """Publishes a status string to the UI via the host state."""
        utils.log('[DATA] Update UI:', value)
        with self._host_state.lock:
            self._host_state.status_text = value

    def start(self):
        """Starts the background upload thread."""
        with self._lock:
            self._active = True
        self._thread.start()
        utils.log('[Data] Start uploading data.')

    def stop(self):
        """Signals the thread to stop and waits for it to finish."""
        utils.log('[Data] Stopping.')
        with self._lock:
            self._active = False
        self._thread.join()
        utils.log('[Data] Stopped.')
def get_seq_diff(seq_tuple):
    """Returns the difference between two TCP sequence numbers.

    Missing (None) or zero endpoints mean there was no usable sequence
    data, so None is returned. Wrap-around of the 32-bit sequence space is
    compensated once.
    """
    lo, hi = seq_tuple
    if lo is None or hi is None or lo == 0 or hi == 0:
        return None
    delta = hi - lo
    # Seq numbers wrap at 2**32; a negative delta means a wrap happened.
    return delta + 2 ** 32 if delta < 0 else delta
def jsonify_dict(input_dict):
    """Serialize `input_dict` to a JSON string.

    JSON objects only allow string keys and cannot encode sets, so tuple
    keys are replaced with their JSON-encoded string form and set values
    are converted to lists before dumping.

    Args:
        input_dict: dict whose keys may be tuples and whose values may
            be sets.

    Returns:
        A JSON string encoding the transformed dict.  (The original
        docstring claimed a dict was returned; it is a string.)
    """
    output_dict = {}
    # .items() instead of the Python-2-only .iteritems() so this helper
    # also runs under Python 3 (it behaves identically under Python 2).
    for (k, v) in input_dict.items():
        if isinstance(k, tuple):
            k = json.dumps(k)
        if isinstance(v, set):
            v = list(v)
        output_dict[k] = v
    return json.dumps(output_dict)
|
app.py | import json
import logging
import multiprocessing as mp
import os
from logging.handlers import QueueHandler
from typing import Dict, List
import sys
import signal
import yaml
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
from peewee_migrate import Router
from playhouse.sqlite_ext import SqliteExtDatabase
from playhouse.sqliteq import SqliteQueueDatabase
from frigate.config import FrigateConfig
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
from frigate.edgetpu import EdgeTPUProcess
from frigate.events import EventProcessor, EventCleanup, VideoConverter
from frigate.http import create_app
from frigate.log import log_process, root_configurer
from frigate.models import Event
from frigate.mqtt import create_mqtt_client
from frigate.object_processing import TrackedObjectProcessor
from frigate.record import RecordingMaintainer
from frigate.stats import StatsEmitter, stats_init
from frigate.video import capture_camera, track_camera
from frigate.watchdog import FrigateWatchdog
from frigate.zeroconf import broadcast_zeroconf
logger = logging.getLogger(__name__)
class FrigateApp():
    """Top-level Frigate application object.

    Wires together configuration, logging, the MQTT client, the SQLite
    event database, detector and camera worker processes, and the
    auxiliary maintainer threads.  `start()` boots everything and then
    blocks serving the web UI; `stop()` tears it all down.
    """

    def __init__(self):
        # Set once at shutdown to tell every worker thread/process to exit.
        self.stop_event = mp.Event()
        # Populated by init_config().
        self.config: FrigateConfig = None
        # Frames awaiting inference are pushed here for the detectors.
        self.detection_queue = mp.Queue()
        self.detectors: Dict[str, EdgeTPUProcess] = {}
        # Per-camera events the detectors set when results are ready.
        self.detection_out_events: Dict[str, mp.Event] = {}
        # Shared-memory segments handed to the detectors (released in stop()).
        self.detection_shms: List[mp.shared_memory.SharedMemory] = []
        self.log_queue = mp.Queue()
        # Per-camera dict of shared counters/queues, filled by init_config().
        self.camera_metrics = {}
        self.video_queue = mp.Queue()

    def set_environment_vars(self):
        """Export the config's environment_vars into this process."""
        for key, value in self.config.environment_vars.items():
            os.environ[key] = value

    def ensure_dirs(self):
        """Create record/clips/cache directories and the optional tmpfs cache."""
        for d in [RECORD_DIR, CLIPS_DIR, CACHE_DIR]:
            if not os.path.exists(d) and not os.path.islink(d):
                logger.info(f"Creating directory: {d}")
                os.makedirs(d)
            else:
                logger.debug(f"Skipping directory: {d}")

        tmpfs_size = self.config.clips.tmpfs_cache_size
        if tmpfs_size:
            logger.info(f"Creating tmpfs of size {tmpfs_size}")
            rc = os.system(f"mount -t tmpfs -o size={tmpfs_size} tmpfs {CACHE_DIR}")
            if rc != 0:
                logger.error(f"Failed to create tmpfs, error code: {rc}")

    def init_logger(self):
        """Start the logging process and route this process's records to it."""
        self.log_process = mp.Process(target=log_process, args=(self.log_queue,), name='log_process')
        self.log_process.daemon = True
        self.log_process.start()
        root_configurer(self.log_queue)

    def init_config(self):
        """Load the config file and allocate per-camera shared metric values."""
        config_file = os.environ.get('CONFIG_FILE', '/config/config.yml')
        self.config = FrigateConfig(config_file=config_file)

        for camera_name in self.config.cameras.keys():
            # create camera_metrics
            self.camera_metrics[camera_name] = {
                'camera_fps': mp.Value('d', 0.0),
                'skipped_fps': mp.Value('d', 0.0),
                'process_fps': mp.Value('d', 0.0),
                'detection_enabled': mp.Value('i', self.config.cameras[camera_name].detect.enabled),
                'detection_fps': mp.Value('d', 0.0),
                'detection_frame': mp.Value('d', 0.0),
                'read_start': mp.Value('d', 0.0),
                'ffmpeg_pid': mp.Value('i', 0),
                'frame_queue': mp.Queue(maxsize=2),
            }

    def check_config(self):
        """Warn about clips/record/rtmp settings that conflict with input roles."""
        for name, camera in self.config.cameras.items():
            assigned_roles = list(set([r for i in camera.ffmpeg.inputs for r in i.roles]))
            if not camera.clips.enabled and 'clips' in assigned_roles:
                logger.warning(f"Camera {name} has clips assigned to an input, but clips is not enabled.")
            elif camera.clips.enabled and not 'clips' in assigned_roles:
                logger.warning(f"Camera {name} has clips enabled, but clips is not assigned to an input.")

            if not camera.record.enabled and 'record' in assigned_roles:
                logger.warning(f"Camera {name} has record assigned to an input, but record is not enabled.")
            elif camera.record.enabled and not 'record' in assigned_roles:
                logger.warning(f"Camera {name} has record enabled, but record is not assigned to an input.")

            if not camera.rtmp.enabled and 'rtmp' in assigned_roles:
                logger.warning(f"Camera {name} has rtmp assigned to an input, but rtmp is not enabled.")
            elif camera.rtmp.enabled and not 'rtmp' in assigned_roles:
                logger.warning(f"Camera {name} has rtmp enabled, but rtmp is not assigned to an input.")

    def set_log_levels(self):
        """Apply configured log levels; quiet the websocket handler by default."""
        logging.getLogger().setLevel(self.config.logger.default)
        for log, level in self.config.logger.logs.items():
            logging.getLogger(log).setLevel(level)

        if not 'geventwebsocket.handler' in self.config.logger.logs:
            logging.getLogger('geventwebsocket.handler').setLevel('ERROR')

    def init_queues(self):
        """Create the inter-process queues for the event and frame pipelines."""
        # Queues for clip processing
        self.event_queue = mp.Queue()
        self.event_processed_queue = mp.Queue()

        # Queue for cameras to push tracked objects to
        self.detected_frames_queue = mp.Queue(maxsize=len(self.config.cameras.keys())*2)

    def init_database(self):
        """Run migrations, then open the queued SQLite database and bind models."""
        migrate_db = SqliteExtDatabase(self.config.database.path)

        # Run migrations
        # peewee_migrate installs its own log handlers; drop them so all
        # output flows through our logging process instead.
        del(logging.getLogger('peewee_migrate').handlers[:])
        router = Router(migrate_db)
        router.run()

        migrate_db.close()

        self.db = SqliteQueueDatabase(self.config.database.path)
        models = [Event]
        self.db.bind(models)

    def init_stats(self):
        """Snapshot initial process/camera stats for the stats emitter."""
        self.stats_tracking = stats_init(self.camera_metrics, self.detectors)

    def init_web_server(self):
        """Build the Flask app serving the HTTP API and web UI."""
        self.flask_app = create_app(self.config, self.db, self.stats_tracking, self.detected_frames_processor, self.mqtt_client)

    def init_mqtt(self):
        """Connect the MQTT client used for events and camera control."""
        self.mqtt_client = create_mqtt_client(self.config, self.camera_metrics)

    def start_detectors(self):
        """Allocate per-camera shared memory and spawn the detector processes."""
        model_shape = (self.config.model.height, self.config.model.width)
        for name in self.config.cameras.keys():
            self.detection_out_events[name] = mp.Event()
            # Input: one packed RGB frame at model resolution.
            shm_in = mp.shared_memory.SharedMemory(name=name, create=True, size=self.config.model.height*self.config.model.width*3)
            # Output: up to 20 detections of 6 float32 values each.
            shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}", create=True, size=20*6*4)
            self.detection_shms.append(shm_in)
            self.detection_shms.append(shm_out)

        for name, detector in self.config.detectors.items():
            if detector.type == 'cpu':
                self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, 'cpu', detector.num_threads)
            if detector.type == 'edgetpu':
                self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, detector.device, detector.num_threads)

    def start_detected_frames_processor(self):
        """Start the thread that consumes detections and publishes events."""
        self.detected_frames_processor = TrackedObjectProcessor(self.config, self.mqtt_client, self.config.mqtt.topic_prefix,
            self.detected_frames_queue, self.event_queue, self.event_processed_queue, self.stop_event)
        self.detected_frames_processor.start()

    def start_camera_processors(self):
        """Spawn one object-tracking process per configured camera."""
        model_shape = (self.config.model.height, self.config.model.width)
        for name, config in self.config.cameras.items():
            camera_process = mp.Process(target=track_camera, name=f"camera_processor:{name}", args=(name, config, model_shape,
                self.detection_queue, self.detection_out_events[name], self.detected_frames_queue,
                self.camera_metrics[name]))
            camera_process.daemon = True
            self.camera_metrics[name]['process'] = camera_process
            camera_process.start()
            logger.info(f"Camera processor started for {name}: {camera_process.pid}")

    def start_camera_capture_processes(self):
        """Spawn one ffmpeg frame-capture process per configured camera."""
        for name, config in self.config.cameras.items():
            capture_process = mp.Process(target=capture_camera, name=f"camera_capture:{name}", args=(name, config,
                self.camera_metrics[name]))
            capture_process.daemon = True
            self.camera_metrics[name]['capture_process'] = capture_process
            capture_process.start()
            logger.info(f"Capture process started for {name}: {capture_process.pid}")

    def start_event_processor(self):
        """Start the thread that turns detections into clip events."""
        self.event_processor = EventProcessor(self.config, self.camera_metrics, self.event_queue, self.event_processed_queue, self.stop_event, self.video_queue)
        self.event_processor.start()

    def start_event_cleanup(self):
        """Start the thread that expires old events per retention config."""
        self.event_cleanup = EventCleanup(self.config, self.stop_event)
        self.event_cleanup.start()

    def start_video_converter(self):
        """Start the thread that converts queued clips to their final format."""
        self.video_converter = VideoConverter(self.config, self.stop_event, self.video_queue)
        self.video_converter.start()

    def start_recording_maintainer(self):
        """Start the thread that maintains 24/7 recording segments."""
        self.recording_maintainer = RecordingMaintainer(self.config, self.stop_event)
        self.recording_maintainer.start()

    def start_stats_emitter(self):
        """Start the thread that periodically publishes stats over MQTT."""
        self.stats_emitter = StatsEmitter(self.config, self.stats_tracking, self.mqtt_client, self.config.mqtt.topic_prefix, self.stop_event)
        self.stats_emitter.start()

    def start_watchdog(self):
        """Start the watchdog that restarts stalled detector processes."""
        self.frigate_watchdog = FrigateWatchdog(self.detectors, self.stop_event)
        self.frigate_watchdog.start()

    def start(self):
        """Boot every subsystem, then block serving the web UI until exit.

        Config/startup failures terminate the logging process and exit
        with status 1.
        """
        self.init_logger()
        try:
            # Config errors get their own message before the generic handler.
            try:
                self.init_config()
            except Exception as e:
                print(f"Error parsing config: {e}")
                self.log_process.terminate()
                sys.exit(1)
            self.set_environment_vars()
            self.ensure_dirs()
            self.check_config()
            self.set_log_levels()
            self.init_queues()
            self.init_database()
            self.init_mqtt()
        except Exception as e:
            print(e)
            self.log_process.terminate()
            sys.exit(1)
        self.start_detectors()
        self.start_detected_frames_processor()
        self.start_camera_processors()
        self.start_camera_capture_processes()
        self.init_stats()
        self.init_web_server()
        self.start_event_processor()
        self.start_video_converter()
        self.start_event_cleanup()
        self.start_recording_maintainer()
        self.start_stats_emitter()
        self.start_watchdog()
        # self.zeroconf = broadcast_zeroconf(self.config.mqtt.client_id)

        def receiveSignal(signalNumber, frame):
            # SIGTERM (e.g. from `docker stop`) triggers a clean shutdown.
            self.stop()
            sys.exit()

        signal.signal(signal.SIGTERM, receiveSignal)

        server = pywsgi.WSGIServer(('127.0.0.1', 5001), self.flask_app, handler_class=WebSocketHandler)
        server.serve_forever()

        self.stop()

    def stop(self):
        """Signal all workers to stop, join them, and release shared memory."""
        logger.info(f"Stopping...")
        self.stop_event.set()

        self.detected_frames_processor.join()
        self.event_processor.join()
        self.event_cleanup.join()
        self.video_converter.join()
        self.recording_maintainer.join()
        self.stats_emitter.join()
        self.frigate_watchdog.join()

        self.db.stop()

        for detector in self.detectors.values():
            detector.stop()

        # Unlink (not just close) the shared-memory segments so the OS
        # reclaims them; otherwise they leak across restarts.
        while len(self.detection_shms) > 0:
            shm = self.detection_shms.pop()
            shm.close()
            shm.unlink()
|
test_sockets.py | import os, multiprocessing, subprocess
from runner import BrowserCore, path_from_root
from tools.shared import *
def clean_pids(pids):
    """Terminate every process in `pids`.

    First sends SIGTERM to each live pid (polite, gives servers a chance
    to reap their children), sleeps one second, then sends SIGKILL to
    whatever is still alive.

    Fixes two issues in the original:
    - `kill_pids` used `break` when a pid was already gone, which
      silently skipped every remaining pid in the list; it now uses
      `continue` so the rest are still signalled.
    - Python-2-only syntax (`except OSError, e`, `print` statements) is
      replaced with forms valid on both Python 2 and 3.
    """
    import signal, errno

    def pid_exists(pid):
        """Best-effort liveness probe via signal 0 (sends no signal)."""
        try:
            # NOTE: may just kill the process in Windows
            os.kill(pid, 0)
        except OSError as e:
            # EPERM means the process exists but belongs to someone else.
            return e.errno == errno.EPERM
        else:
            return True

    def kill_pids(pids, sig):
        for pid in pids:
            if not pid_exists(pid):
                # Already gone; move on to the next pid (was `break`,
                # which abandoned the remaining pids).
                continue
            print('[killing %d]' % pid)
            try:
                os.kill(pid, sig)
                print('[kill succeeded]')
            except:
                print('[kill fail]')

    # ask nicely (to try and catch the children)
    kill_pids(pids, signal.SIGTERM)
    time.sleep(1)
    # extreme prejudice, may leave children
    kill_pids(pids, signal.SIGKILL)
def make_relay_server(port1, port2):
    """Spawn the socket_relay.py helper bridging `port1` <-> `port2`.

    Returns the Popen handle so the caller can terminate it later.
    """
    # sys.stderr.write is valid on both Python 2 and 3, unlike the
    # original Python-2-only `print >> sys.stderr` statement.
    sys.stderr.write('creating relay server on ports %d,%d\n' % (port1, port2))
    proc = Popen([PYTHON, path_from_root('tests', 'sockets', 'socket_relay.py'), str(port1), str(port2)])
    return proc
class WebsockifyServerHarness:
    """Context manager running a natively-compiled test server behind a
    websockify proxy, so browser WebSocket clients can reach a raw TCP
    server during a test."""

    def __init__(self, filename, args, listen_port):
        self.pids = []  # every spawned process, for cleanup in __exit__
        self.filename = filename
        self.listen_port = listen_port
        # websockify listens on listen_port and forwards to the native
        # server one port below it.
        self.target_port = listen_port-1
        self.args = args or []

    def __enter__(self):
        import socket, websockify

        # compile the server
        # NOTE empty filename support is a hack to support
        # the current test_enet
        if self.filename:
            Popen([CLANG_CC, path_from_root('tests', self.filename), '-o', 'server', '-DSOCKK=%d' % self.target_port] + get_clang_native_args() + self.args).communicate()
            process = Popen([os.path.abspath('server')])
            self.pids.append(process.pid)

        # start the websocket proxy
        print >> sys.stderr, 'running websockify on %d, forward to tcp %d' % (self.listen_port, self.target_port)
        wsp = websockify.WebSocketProxy(verbose=True, listen_port=self.listen_port, target_host="127.0.0.1", target_port=self.target_port, run_once=True)
        self.websockify = multiprocessing.Process(target=wsp.start_server)
        self.websockify.start()
        self.pids.append(self.websockify.pid)
        print '[Websockify on process %s]' % str(self.pids[-2:])

    def __exit__(self, *args, **kwargs):
        # try to kill the websockify proxy gracefully
        if self.websockify.is_alive():
            self.websockify.terminate()
        self.websockify.join()

        # clean up any processes we started
        clean_pids(self.pids)
class CompiledServerHarness:
    """Context manager that compiles a test server to JavaScript with emcc
    and runs it under Node.js for the duration of a test."""

    def __init__(self, filename, args, listen_port):
        self.pids = []  # spawned processes, cleaned up in __exit__
        self.filename = filename
        self.listen_port = listen_port
        self.args = args or []

    def __enter__(self):
        # assuming this is only used for WebSocket tests at the moment, validate that
        # the ws module is installed
        child = Popen(NODE_JS + ['-e', 'require("ws");'])
        child.communicate()
        assert child.returncode == 0, 'ws module for Node.js not installed. Please run \'npm install\' from %s' % EMSCRIPTEN_ROOT

        # compile the server
        Popen([PYTHON, EMCC, path_from_root('tests', self.filename), '-o', 'server.js', '-DSOCKK=%d' % self.listen_port] + self.args).communicate()
        process = Popen(NODE_JS + ['server.js'])
        self.pids.append(process.pid)

    def __exit__(self, *args, **kwargs):
        # clean up any processes we started
        clean_pids(self.pids)
# always run these tests last
# make sure to use different ports in each one because it takes a while for the processes to be cleaned up
# NOTE all datagram tests are temporarily disabled, as
# we can't truly test datagram sockets until we have
# proper listen server support.
def filter_harnesses(harnesses):
    """Drop every WebsockifyServerHarness entry from `harnesses`.

    Entries may be bare harnesses or (harness, datagram) tuples.

    XXX avoid websockify for now due to intermittent errors. see issue #2700

    Returns a list.  The original used `filter(...)`, which returns a
    list on Python 2 but a one-shot lazy iterator on Python 3; a list
    comprehension preserves the Python 2 behavior on both versions.
    """
    return [
        harness for harness in harnesses
        if (harness[0].__class__ if type(harness) is tuple else harness.__class__)
        is not WebsockifyServerHarness
    ]
class sockets(BrowserCore):
    """Browser-based BSD-socket tests.

    Pure computation tests (inet_*) run directly; echo/partial/enet tests
    compile a client with emcc and run it against a server harness
    (WebsockifyServerHarness or CompiledServerHarness) for the duration
    of the test.  Each test uses distinct ports because harness processes
    take a while to be cleaned up.
    """
    emcc_args = []

    def test_inet(self):
        """htonl/htons/ntohl/ntohs byte swapping and inet_addr parsing."""
        src = r'''
          #include <stdio.h>
          #include <arpa/inet.h>

          int main() {
            printf("*%x,%x,%x,%x,%x,%x*\n", htonl(0xa1b2c3d4), htonl(0xfe3572e0), htonl(0x07abcdf0), htons(0xabcd), ntohl(0x43211234), ntohs(0xbeaf));
            in_addr_t i = inet_addr("190.180.10.78");
            printf("%x\n", i);
            return 0;
          }
        '''
        self.do_run(src, '*d4c3b2a1,e07235fe,f0cdab07,cdab,34122143,afbe*\n4e0ab4be\n')

    def test_inet2(self):
        """inet_ntoa / inet_aton round-trip."""
        src = r'''
          #include <stdio.h>
          #include <arpa/inet.h>

          int main() {
            struct in_addr x, x2;
            int *y = (int*)&x;
            *y = 0x12345678;
            printf("%s\n", inet_ntoa(x));
            int r = inet_aton(inet_ntoa(x), &x2);
            printf("%s\n", inet_ntoa(x2));
            return 0;
          }
        '''
        self.do_run(src, '120.86.52.18\n120.86.52.18\n')

    def test_inet3(self):
        """inet_ntop for AF_INET."""
        src = r'''
          #include <stdio.h>
          #include <arpa/inet.h>
          #include <sys/socket.h>
          int main() {
            char dst[64];
            struct in_addr x, x2;
            int *y = (int*)&x;
            *y = 0x12345678;
            printf("%s\n", inet_ntop(AF_INET,&x,dst,sizeof dst));
            int r = inet_aton(inet_ntoa(x), &x2);
            printf("%s\n", inet_ntop(AF_INET,&x2,dst,sizeof dst));
            return 0;
          }
        '''
        self.do_run(src, '120.86.52.18\n120.86.52.18\n')

    def test_inet4(self):
        """inet_pton / inet_ntop round-trip over many IPv6 forms, including
        malformed inputs that must produce no output."""
        src = r'''
          #include <stdio.h>
          #include <arpa/inet.h>
          #include <sys/socket.h>

          void test(char *test_addr){
              char str[40];
              struct in6_addr addr;
              unsigned char *p = (unsigned char*)&addr;
              int ret;
              ret = inet_pton(AF_INET6,test_addr,&addr);
              if(ret == -1) return;
              if(ret == 0) return;
              if(inet_ntop(AF_INET6,&addr,str,sizeof(str)) == NULL ) return;
              printf("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x - %s\n",
                   p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9],p[10],p[11],p[12],p[13],p[14],p[15],str);
          }
          int main(){
              test("::");
              test("::1");
              test("::1.2.3.4");
              test("::17.18.19.20");
              test("::ffff:1.2.3.4");
              test("1::ffff");
              test("::255.255.255.255");
              test("0:ff00:1::");
              test("0:ff::");
              test("abcd::");
              test("ffff::a");
              test("ffff::a:b");
              test("ffff::a:b:c");
              test("ffff::a:b:c:d");
              test("ffff::a:b:c:d:e");
              test("::1:2:0:0:0");
              test("0:0:1:2:3::");
              test("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff");
              test("1::255.255.255.255");

              //below should fail and not produce results..
              test("1.2.3.4");
              test("");
              test("-");
          }
        '''
        self.do_run(src,
            "0000:0000:0000:0000:0000:0000:0000:0000 - ::\n"
            "0000:0000:0000:0000:0000:0000:0000:0001 - ::1\n"
            "0000:0000:0000:0000:0000:0000:0102:0304 - ::1.2.3.4\n"
            "0000:0000:0000:0000:0000:0000:1112:1314 - ::17.18.19.20\n"
            "0000:0000:0000:0000:0000:ffff:0102:0304 - ::ffff:1.2.3.4\n"
            "0001:0000:0000:0000:0000:0000:0000:ffff - 1::ffff\n"
            "0000:0000:0000:0000:0000:0000:ffff:ffff - ::255.255.255.255\n"
            "0000:ff00:0001:0000:0000:0000:0000:0000 - 0:ff00:1::\n"
            "0000:00ff:0000:0000:0000:0000:0000:0000 - 0:ff::\n"
            "abcd:0000:0000:0000:0000:0000:0000:0000 - abcd::\n"
            "ffff:0000:0000:0000:0000:0000:0000:000a - ffff::a\n"
            "ffff:0000:0000:0000:0000:0000:000a:000b - ffff::a:b\n"
            "ffff:0000:0000:0000:0000:000a:000b:000c - ffff::a:b:c\n"
            "ffff:0000:0000:0000:000a:000b:000c:000d - ffff::a:b:c:d\n"
            "ffff:0000:0000:000a:000b:000c:000d:000e - ffff::a:b:c:d:e\n"
            "0000:0000:0000:0001:0002:0000:0000:0000 - ::1:2:0:0:0\n"
            "0000:0000:0001:0002:0003:0000:0000:0000 - 0:0:1:2:3::\n"
            "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff - ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff\n"
            "0001:0000:0000:0000:0000:0000:ffff:ffff - 1::ffff:ffff\n"
        )

    def test_getaddrinfo(self):
        """getaddrinfo smoke test."""
        self.emcc_args=[]
        self.do_run(open(path_from_root('tests', 'sockets', 'test_getaddrinfo.c')).read(), 'success')

    def test_getnameinfo(self):
        """getnameinfo smoke test."""
        self.do_run(open(path_from_root('tests', 'sockets', 'test_getnameinfo.c')).read(), 'success')

    def test_gethostbyname(self):
        """gethostbyname smoke test."""
        self.do_run(open(path_from_root('tests', 'sockets', 'test_gethostbyname.c')).read(), 'success')

    def test_getprotobyname(self):
        """getprotobyname smoke test."""
        self.do_run(open(path_from_root('tests', 'sockets', 'test_getprotobyname.c')).read(), 'success')

    def test_sockets_echo(self):
        """Echo client against each harness (TCP and datagram variants)."""
        sockets_include = '-I'+path_from_root('tests', 'sockets')

        # Websockify-proxied servers can't run dgram tests
        harnesses = [
            (WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 49160), 0),
            (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 49161), 0),
            (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 49162), 1),
            # The following forces non-NULL addr and addlen parameters for the accept call
            (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1'], 49163), 0)
        ]
        harnesses = filter_harnesses(harnesses)

        for harness, datagram in harnesses:
            with harness:
                self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, sockets_include])

    def test_sockets_async_echo(self):
        """Async (non-blocking connect) echo variants, plus a deliberate
        connection failure to exercise the error callback."""
        # Run with ./runner.py sockets.test_sockets_async_echo
        sockets_include = '-I'+path_from_root('tests', 'sockets')

        # Websockify-proxied servers can't run dgram tests
        harnesses = [
            (WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_ASYNC=1'], 49165), 0),
            (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ASYNC=1'], 49166), 0),
            (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1', '-DTEST_ASYNC=1'], 49167), 1),
            # The following forces non-NULL addr and addlen parameters for the accept call
            (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1', '-DTEST_ASYNC=1'], 49168), 0)
        ]
        #harnesses = filter_harnesses(harnesses)

        for harness, datagram in harnesses:
            with harness:
                self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, '-DTEST_ASYNC=1', sockets_include])

        # Deliberately attempt a connection on a port that will fail to test the error callback and getsockopt
        self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=49169', '-DTEST_ASYNC=1', sockets_include])

    def test_sockets_echo_bigdata(self):
        """Echo a ~128KB message to exercise buffering/fragmentation."""
        sockets_include = '-I'+path_from_root('tests', 'sockets')

        # generate a large string literal to use as our message
        message = ''
        for i in range(256*256*2):
            message += str(unichr(ord('a') + (i % 26)))

        # re-write the client test with this literal (it's too big to pass via command line)
        input_filename = path_from_root('tests', 'sockets', 'test_sockets_echo_client.c')
        input = open(input_filename).read()
        output = input.replace('#define MESSAGE "pingtothepong"', '#define MESSAGE "%s"' % message)

        harnesses = [
            (WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 49170), 0),
            (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 49171), 0),
            (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 49172), 1)
        ]
        harnesses = filter_harnesses(harnesses)

        for harness, datagram in harnesses:
            with harness:
                self.btest(output, expected='0', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram], force_c=True)

    def test_sockets_partial(self):
        """Server sends data in pieces; client must sum partial reads to 165."""
        for harness in [
            WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_partial_server.c'), [], 49180),
            CompiledServerHarness(os.path.join('sockets', 'test_sockets_partial_server.c'), [], 49181)
        ]:
            with harness:
                self.btest(os.path.join('sockets', 'test_sockets_partial_client.c'), expected='165', args=['-DSOCKK=%d' % harness.listen_port])

    def test_sockets_select_server_down(self):
        """select() behavior when the server goes away."""
        for harness in [
            WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_select_server_down_server.c'), [], 49190),
            CompiledServerHarness(os.path.join('sockets', 'test_sockets_select_server_down_server.c'), [], 49191)
        ]:
            with harness:
                self.btest(os.path.join('sockets', 'test_sockets_select_server_down_client.c'), expected='266', args=['-DSOCKK=%d' % harness.listen_port])

    def test_sockets_select_server_closes_connection_rw(self):
        """select() read/write behavior when the server closes after echoing."""
        sockets_include = '-I'+path_from_root('tests', 'sockets')

        for harness in [
            WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DCLOSE_CLIENT_AFTER_ECHO'], 49200),
            CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DCLOSE_CLIENT_AFTER_ECHO'], 49201)
        ]:
            with harness:
                self.btest(os.path.join('sockets', 'test_sockets_select_server_closes_connection_client_rw.c'), expected='266', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port])

    def test_enet(self):
        """Build the enet library with emconfigure/emmake and run its
        client against a compiled server."""
        # this is also a good test of raw usage of emconfigure and emmake
        try_delete(self.in_dir('enet'))
        shutil.copytree(path_from_root('tests', 'enet'), self.in_dir('enet'))
        pwd = os.getcwd()
        os.chdir(self.in_dir('enet'))
        Popen([PYTHON, path_from_root('emconfigure'), './configure']).communicate()
        Popen([PYTHON, path_from_root('emmake'), 'make']).communicate()
        enet = [self.in_dir('enet', '.libs', 'libenet.a'), '-I'+path_from_root('tests', 'enet', 'include')]
        os.chdir(pwd)

        for harness in [
            CompiledServerHarness(os.path.join('sockets', 'test_enet_server.c'), enet, 49210)
        ]:
            with harness:
                self.btest(os.path.join('sockets', 'test_enet_client.c'), expected='0', args=enet + ['-DSOCKK=%d' % harness.listen_port])

    # This test is no longer in use for WebSockets as we can't truly emulate
    # a server in the browser (in the past, there were some hacks to make it
    # somewhat work, but those have been removed). However, with WebRTC it
    # should be able to resurrect this test.
    # def test_enet_in_browser(self):
    #   try_delete(self.in_dir('enet'))
    #   shutil.copytree(path_from_root('tests', 'enet'), self.in_dir('enet'))
    #   pwd = os.getcwd()
    #   os.chdir(self.in_dir('enet'))
    #   Popen([PYTHON, path_from_root('emconfigure'), './configure']).communicate()
    #   Popen([PYTHON, path_from_root('emmake'), 'make']).communicate()
    #   enet = [self.in_dir('enet', '.libs', 'libenet.a'), '-I'+path_from_root('tests', 'enet', 'include')]
    #   os.chdir(pwd)
    #   Popen([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_enet_server.c'), '-o', 'server.html', '-DSOCKK=2235'] + enet).communicate()
    #   with WebsockifyServerHarness('', [], 2235, 2234):
    #     with WebsockifyServerHarness('', [], 2237, 2236):
    #       pids = []
    #       try:
    #         proc = make_relay_server(2234, 2236)
    #         pids.append(proc.pid)
    #         self.btest(os.path.join('sockets', 'test_enet_client.c'), expected='0', args=['-DSOCKK=2237', '-DUSE_IFRAME=1'] + enet)
    #       finally:
    #         clean_pids(pids);

    def zzztest_webrtc(self): # XXX see src/settings.js, this is disabled pending investigation
        """Host/peer WebRTC handshake through a local broker (disabled)."""
        host_src = 'webrtc_host.c'
        peer_src = 'webrtc_peer.c'

        host_outfile = 'host.html'
        peer_outfile = 'peer.html'

        host_filepath = path_from_root('tests', 'sockets', host_src)
        temp_host_filepath = os.path.join(self.get_dir(), os.path.basename(host_src))
        with open(host_filepath) as f: host_src = f.read()
        with open(temp_host_filepath, 'w') as f: f.write(self.with_report_result(host_src))

        peer_filepath = path_from_root('tests', 'sockets', peer_src)
        temp_peer_filepath = os.path.join(self.get_dir(), os.path.basename(peer_src))
        with open(peer_filepath) as f: peer_src = f.read()
        with open(temp_peer_filepath, 'w') as f: f.write(self.with_report_result(peer_src))

        open(os.path.join(self.get_dir(), 'host_pre.js'), 'w').write('''
          var Module = {
            webrtc: {
              broker: 'http://localhost:8182',
              session: undefined,
              onpeer: function(peer, route) {
                window.open('http://localhost:8888/peer.html?' + route);
                // iframe = document.createElement("IFRAME");
                // iframe.setAttribute("src", "http://localhost:8888/peer.html?" + route);
                // iframe.style.display = "none";
                // document.body.appendChild(iframe);
                peer.listen();
              },
              onconnect: function(peer) {
              },
              ondisconnect: function(peer) {
              },
              onerror: function(error) {
                console.error(error);
              }
            },
          };
        ''')

        open(os.path.join(self.get_dir(), 'peer_pre.js'), 'w').write('''
          var Module = {
            webrtc: {
              broker: 'http://localhost:8182',
              session: window.location.toString().split('?')[1],
              onpeer: function(peer, route) {
                peer.connect(Module['webrtc']['session']);
              },
              onconnect: function(peer) {
              },
              ondisconnect: function(peer) {
                // Calling window.close() from this handler hangs my browser, so run it in the next turn
                setTimeout(window.close, 0);
              },
              onerror: function(error) {
                console.error(error);
              }
            }
          };
        ''')

        Popen([PYTHON, EMCC, temp_host_filepath, '-o', host_outfile] + ['-s', 'GL_TESTING=1', '--pre-js', 'host_pre.js', '-s', 'SOCKET_WEBRTC=1', '-s', 'SOCKET_DEBUG=1']).communicate()
        Popen([PYTHON, EMCC, temp_peer_filepath, '-o', peer_outfile] + ['-s', 'GL_TESTING=1', '--pre-js', 'peer_pre.js', '-s', 'SOCKET_WEBRTC=1', '-s', 'SOCKET_DEBUG=1']).communicate()

        # note: you may need to run this manually yourself, if npm is not in the path, or if you need a version that is not in the path
        Popen(['npm', 'install', path_from_root('tests', 'sockets', 'p2p')]).communicate()
        broker = Popen(NODE_JS + [path_from_root('tests', 'sockets', 'p2p', 'broker', 'p2p-broker.js')])

        expected = '1'
        self.run_browser(host_outfile, '.', ['/report_result?' + e for e in expected])

        broker.kill();

    def test_nodejs_sockets_echo(self):
        """Run the echo client under Node.js, covering default, compile-time
        and runtime WebSocket subprotocol/url configuration."""
        # This test checks that sockets work when the client code is run in Node.js
        # Run with ./runner.py sockets.test_nodejs_sockets_echo
        if not NODE_JS in JS_ENGINES:
            return self.skip('node is not present')

        sockets_include = '-I'+path_from_root('tests', 'sockets')

        harnesses = [
            (WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59160), 0),
            (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 59162), 0),
            (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 59164), 1)
        ]
        harnesses = filter_harnesses(harnesses)

        # Basic test of node client against both a Websockified and compiled echo server.
        for harness, datagram in harnesses:
            with harness:
                Popen([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, '-DREPORT_RESULT=int dummy'], stdout=PIPE, stderr=PIPE).communicate()

                out = run_js('client.js', engine=NODE_JS, full_output=True)
                self.assertContained('do_msg_read: read 14 bytes', out)

        # Test against a Websockified server with compile time configured WebSocket subprotocol. We use a Websockified
        # server because as long as the subprotocol list contains binary it will configure itself to accept binary
        # This test also checks that the connect url contains the correct subprotocols.
        print "\nTesting compile time WebSocket configuration.\n"
        for harness in filter_harnesses([
            WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59166)
        ]):
            with harness:
                Popen([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '-s', 'SOCKET_DEBUG=1', '-s', 'WEBSOCKET_SUBPROTOCOL="base64, binary"', '-DSOCKK=59166', '-DREPORT_RESULT=int dummy'], stdout=PIPE, stderr=PIPE).communicate()

                out = run_js('client.js', engine=NODE_JS, full_output=True)
                self.assertContained('do_msg_read: read 14 bytes', out)
                self.assertContained(['connect: ws://127.0.0.1:59166, base64,binary', 'connect: ws://127.0.0.1:59166/, base64,binary'], out)

        # Test against a Websockified server with runtime WebSocket configuration. We specify both url and subprotocol.
        # In this test we have *deliberately* used the wrong port '-DSOCKK=12345' to configure the echo_client.c, so
        # the connection would fail without us specifying a valid WebSocket URL in the configuration.
        print "\nTesting runtime WebSocket configuration.\n"
        for harness in filter_harnesses([
            WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59168)
        ]):
            with harness:
                open(os.path.join(self.get_dir(), 'websocket_pre.js'), 'w').write('''
                  var Module = {
                    websocket: {
                      url: 'ws://localhost:59168/testA/testB',
                      subprotocol: 'text, base64, binary',
                    }
                  };
                ''')

                Popen([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '--pre-js', 'websocket_pre.js', '-s', 'SOCKET_DEBUG=1', '-DSOCKK=12345', '-DREPORT_RESULT=int dummy'], stdout=PIPE, stderr=PIPE).communicate()

                out = run_js('client.js', engine=NODE_JS, full_output=True)
                self.assertContained('do_msg_read: read 14 bytes', out)
                self.assertContained('connect: ws://localhost:59168/testA/testB, text,base64,binary', out)
|
pipeline_ops_test.py | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.core.pipeline_ops."""
import copy
import os
import threading
import time
from absl.testing import parameterized
from absl.testing.absltest import mock
import tensorflow as tf
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import async_pipeline_task_gen
from tfx.orchestration.experimental.core import pipeline_ops
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import sync_pipeline_task_gen
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core import test_utils
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import status as status_lib
from ml_metadata.proto import metadata_store_pb2
def _test_pipeline(pipeline_id,
                   execution_mode: pipeline_pb2.Pipeline.ExecutionMode = (
                       pipeline_pb2.Pipeline.ASYNC)):
  """Builds a minimal Pipeline IR proto for use in tests.

  Args:
    pipeline_id: Id assigned to `pipeline_info.id` of the result.
    execution_mode: ASYNC (default) or SYNC. A SYNC pipeline additionally
      gets a fixed pipeline_run_id of 'run0' in its runtime spec.

  Returns:
    A `pipeline_pb2.Pipeline` populated with the given id and mode.
  """
  result = pipeline_pb2.Pipeline()
  result.pipeline_info.id = pipeline_id
  result.execution_mode = execution_mode
  # Sync pipelines require a run id; tests always use the same one.
  is_sync = execution_mode == pipeline_pb2.Pipeline.SYNC
  if is_sync:
    result.runtime_spec.pipeline_run_id.field_value.string_value = 'run0'
  return result
class PipelineOpsTest(test_utils.TfxTest, parameterized.TestCase):
  """Integration-style tests for `pipeline_ops` against a sqlite-backed MLMD.

  Each test opens `self._mlmd_connection` and drives the public pipeline_ops
  API (start/stop pipeline, start/stop node, orchestrate) while asserting on
  the resulting `pstate.PipelineState` / node states and task-queue contents.
  """

  def setUp(self):
    """Creates a per-test sqlite MLMD instance under the test's temp dir."""
    super(PipelineOpsTest, self).setUp()
    pipeline_root = os.path.join(
        os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
        self.id())

    # Makes sure multiple connections within a test always connect to the same
    # MLMD instance.
    metadata_path = os.path.join(pipeline_root, 'metadata', 'metadata.db')
    self._metadata_path = metadata_path
    connection_config = metadata.sqlite_metadata_connection_config(
        metadata_path)
    connection_config.sqlite.SetInParent()
    self._mlmd_connection = metadata.Metadata(
        connection_config=connection_config)

  @parameterized.named_parameters(
      dict(testcase_name='async', pipeline=_test_pipeline('pipeline1')),
      dict(
          testcase_name='sync',
          pipeline=_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC)))
  def test_initiate_pipeline_start(self, pipeline):
    """Starting a pipeline: fresh start, duplicate start, restart after stop."""
    with self._mlmd_connection as m:
      # Initiate a pipeline start.
      with pipeline_ops.initiate_pipeline_start(m, pipeline) as pipeline_state1:
        self.assertProtoPartiallyEquals(
            pipeline, pipeline_state1.pipeline, ignored_fields=['runtime_spec'])
        self.assertEqual(metadata_store_pb2.Execution.NEW,
                         pipeline_state1.get_pipeline_execution_state())

      # Initiate another pipeline start.
      pipeline2 = _test_pipeline('pipeline2')
      with pipeline_ops.initiate_pipeline_start(m,
                                                pipeline2) as pipeline_state2:
        self.assertEqual(pipeline2, pipeline_state2.pipeline)
        self.assertEqual(metadata_store_pb2.Execution.NEW,
                         pipeline_state2.get_pipeline_execution_state())

      # Error if attempted to initiate when old one is active.
      with self.assertRaises(status_lib.StatusNotOkError) as exception_context:
        pipeline_ops.initiate_pipeline_start(m, pipeline)
      self.assertEqual(status_lib.Code.ALREADY_EXISTS,
                       exception_context.exception.code)

      # Fine to initiate after the previous one is inactive.
      with pipeline_state1:
        pipeline_state1.set_pipeline_execution_state(
            metadata_store_pb2.Execution.COMPLETE)
      with pipeline_ops.initiate_pipeline_start(m, pipeline) as pipeline_state3:
        self.assertEqual(metadata_store_pb2.Execution.NEW,
                         pipeline_state3.get_pipeline_execution_state())

  @parameterized.named_parameters(
      dict(testcase_name='async', pipeline=_test_pipeline('pipeline1')),
      dict(
          testcase_name='sync',
          pipeline=_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC)))
  def test_stop_pipeline_non_existent_or_inactive(self, pipeline):
    """Stopping a non-existent or already-inactive pipeline raises NOT_FOUND."""
    with self._mlmd_connection as m:
      # Stop pipeline without creating one.
      with self.assertRaises(status_lib.StatusNotOkError) as exception_context:
        pipeline_ops.stop_pipeline(m,
                                   task_lib.PipelineUid.from_pipeline(pipeline))
      self.assertEqual(status_lib.Code.NOT_FOUND,
                       exception_context.exception.code)

      # Initiate pipeline start and mark it completed.
      pipeline_ops.initiate_pipeline_start(m, pipeline)
      pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
      with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
        pipeline_state.initiate_stop(status_lib.Status(code=status_lib.Code.OK))
        pipeline_state.set_pipeline_execution_state(
            metadata_store_pb2.Execution.COMPLETE)

      # Try to initiate stop again.
      with self.assertRaises(status_lib.StatusNotOkError) as exception_context:
        pipeline_ops.stop_pipeline(m, pipeline_uid)
      self.assertEqual(status_lib.Code.NOT_FOUND,
                       exception_context.exception.code)

  @parameterized.named_parameters(
      dict(testcase_name='async', pipeline=_test_pipeline('pipeline1')),
      dict(
          testcase_name='sync',
          pipeline=_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC)))
  def test_stop_pipeline_wait_for_inactivation(self, pipeline):
    """stop_pipeline blocks until a background thread inactivates execution."""
    with self._mlmd_connection as m:
      pipeline_state = pipeline_ops.initiate_pipeline_start(m, pipeline)

      def _inactivate(pipeline_state):
        # Sleep so stop_pipeline is already waiting when the state flips.
        time.sleep(2.0)
        with pipeline_ops._PIPELINE_OPS_LOCK:
          with pipeline_state:
            pipeline_state.set_pipeline_execution_state(
                metadata_store_pb2.Execution.COMPLETE)

      thread = threading.Thread(target=_inactivate, args=(pipeline_state,))
      thread.start()

      # Returns once the execution becomes inactive (well within the timeout).
      pipeline_ops.stop_pipeline(
          m, task_lib.PipelineUid.from_pipeline(pipeline), timeout_secs=10.0)

      thread.join()

  @parameterized.named_parameters(
      dict(testcase_name='async', pipeline=_test_pipeline('pipeline1')),
      dict(
          testcase_name='sync',
          pipeline=_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC)))
  def test_stop_pipeline_wait_for_inactivation_timeout(self, pipeline):
    """stop_pipeline raises DEADLINE_EXCEEDED when nothing inactivates it."""
    with self._mlmd_connection as m:
      pipeline_ops.initiate_pipeline_start(m, pipeline)

      with self.assertRaisesRegex(
          status_lib.StatusNotOkError,
          'Timed out.*waiting for execution inactivation.'
      ) as exception_context:
        pipeline_ops.stop_pipeline(
            m, task_lib.PipelineUid.from_pipeline(pipeline), timeout_secs=1.0)
      self.assertEqual(status_lib.Code.DEADLINE_EXCEEDED,
                       exception_context.exception.code)

  def test_stop_node_no_active_executions(self):
    """stop_node marks an idle node STOPPING; initiate_node_start restarts it."""
    pipeline = pipeline_pb2.Pipeline()
    self.load_proto_from_text(
        os.path.join(
            os.path.dirname(__file__), 'testdata', 'async_pipeline.pbtxt'),
        pipeline)
    pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
    node_uid = task_lib.NodeUid(node_id='my_trainer', pipeline_uid=pipeline_uid)
    with self._mlmd_connection as m:
      pstate.PipelineState.new(m, pipeline)
      pipeline_ops.stop_node(m, node_uid)
      pipeline_state = pstate.PipelineState.load(m, pipeline_uid)

      # The node state should be STOPPING even when node is inactive to prevent
      # future triggers.
      with pipeline_state:
        node_state = pipeline_state.get_node_state(node_uid)
        self.assertEqual(status_lib.Code.CANCELLED, node_state.status.code)
        self.assertEqual(pstate.NodeState.STOPPING, node_state.state)

      # Restart node.
      pipeline_state = pipeline_ops.initiate_node_start(m, node_uid)
      with pipeline_state:
        node_state = pipeline_state.get_node_state(node_uid)
        self.assertEqual(pstate.NodeState.STARTING, node_state.state)

  def test_stop_node_wait_for_inactivation(self):
    """stop_node waits for an active execution to complete, then STOPPING."""
    pipeline = pipeline_pb2.Pipeline()
    self.load_proto_from_text(
        os.path.join(
            os.path.dirname(__file__), 'testdata', 'async_pipeline.pbtxt'),
        pipeline)
    trainer = pipeline.nodes[2].pipeline_node
    test_utils.fake_component_output(
        self._mlmd_connection, trainer, active=True)
    pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
    node_uid = task_lib.NodeUid(node_id='my_trainer', pipeline_uid=pipeline_uid)
    with self._mlmd_connection as m:
      pstate.PipelineState.new(m, pipeline)

      def _inactivate(execution):
        # Flip the fake execution to COMPLETE while stop_node is waiting.
        time.sleep(2.0)
        with pipeline_ops._PIPELINE_OPS_LOCK:
          execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
          m.store.put_executions([execution])

      execution = task_gen_utils.get_executions(m, trainer)[0]
      thread = threading.Thread(
          target=_inactivate, args=(copy.deepcopy(execution),))
      thread.start()

      pipeline_ops.stop_node(m, node_uid, timeout_secs=5.0)
      thread.join()

      with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
        node_state = pipeline_state.get_node_state(node_uid)
        self.assertEqual(pstate.NodeState.STOPPING, node_state.state)

      # Restart node.
      with pipeline_ops.initiate_node_start(m, node_uid) as pipeline_state:
        node_state = pipeline_state.get_node_state(node_uid)
        self.assertEqual(pstate.NodeState.STARTING, node_state.state)

  def test_stop_node_wait_for_inactivation_timeout(self):
    """stop_node times out on a stuck execution but still marks node stopping."""
    pipeline = pipeline_pb2.Pipeline()
    self.load_proto_from_text(
        os.path.join(
            os.path.dirname(__file__), 'testdata', 'async_pipeline.pbtxt'),
        pipeline)
    trainer = pipeline.nodes[2].pipeline_node
    test_utils.fake_component_output(
        self._mlmd_connection, trainer, active=True)
    pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
    node_uid = task_lib.NodeUid(node_id='my_trainer', pipeline_uid=pipeline_uid)
    with self._mlmd_connection as m:
      pstate.PipelineState.new(m, pipeline)
      with self.assertRaisesRegex(
          status_lib.StatusNotOkError,
          'Timed out.*waiting for execution inactivation.'
      ) as exception_context:
        pipeline_ops.stop_node(m, node_uid, timeout_secs=1.0)
      self.assertEqual(status_lib.Code.DEADLINE_EXCEEDED,
                       exception_context.exception.code)

      # Even if `wait_for_inactivation` times out, the node should be in state
      # STOPPING or STOPPED to prevent future triggers.
      with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
        node_state = pipeline_state.get_node_state(node_uid)
        self.assertIn(node_state.state,
                      (pstate.NodeState.STOPPING, pstate.NodeState.STOPPED))

  @mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
  @mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
  def test_orchestrate_active_pipelines(self, mock_async_task_gen,
                                        mock_sync_task_gen):
    """orchestrate runs task generation for all active pipelines in order."""
    with self._mlmd_connection as m:
      # Sync and async active pipelines.
      async_pipelines = [
          _test_pipeline('pipeline1'),
          _test_pipeline('pipeline2'),
      ]
      sync_pipelines = [
          _test_pipeline('pipeline3', pipeline_pb2.Pipeline.SYNC),
          _test_pipeline('pipeline4', pipeline_pb2.Pipeline.SYNC),
      ]

      for pipeline in async_pipelines + sync_pipelines:
        pipeline_ops.initiate_pipeline_start(m, pipeline)

      # Active executions for active async pipelines.
      mock_async_task_gen.return_value.generate.side_effect = [
          [
              test_utils.create_exec_node_task(
                  task_lib.NodeUid(
                      pipeline_uid=task_lib.PipelineUid.from_pipeline(
                          async_pipelines[0]),
                      node_id='Transform'))
          ],
          [
              test_utils.create_exec_node_task(
                  task_lib.NodeUid(
                      pipeline_uid=task_lib.PipelineUid.from_pipeline(
                          async_pipelines[1]),
                      node_id='Trainer'))
          ],
      ]

      # Active executions for active sync pipelines.
      mock_sync_task_gen.return_value.generate.side_effect = [
          [
              test_utils.create_exec_node_task(
                  task_lib.NodeUid(
                      pipeline_uid=task_lib.PipelineUid.from_pipeline(
                          sync_pipelines[0]),
                      node_id='Trainer'))
          ],
          [
              test_utils.create_exec_node_task(
                  task_lib.NodeUid(
                      pipeline_uid=task_lib.PipelineUid.from_pipeline(
                          sync_pipelines[1]),
                      node_id='Validator'))
          ],
      ]

      task_queue = tq.TaskQueue()
      pipeline_ops.orchestrate(m, task_queue,
                               service_jobs.DummyServiceJobManager())

      self.assertEqual(2, mock_async_task_gen.return_value.generate.call_count)
      self.assertEqual(2, mock_sync_task_gen.return_value.generate.call_count)

      # Verify that tasks are enqueued in the expected order.
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertTrue(task_lib.is_exec_node_task(task))
      self.assertEqual(
          test_utils.create_node_uid('pipeline1', 'Transform'), task.node_uid)
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertTrue(task_lib.is_exec_node_task(task))
      self.assertEqual(
          test_utils.create_node_uid('pipeline2', 'Trainer'), task.node_uid)
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertTrue(task_lib.is_exec_node_task(task))
      self.assertEqual(
          test_utils.create_node_uid('pipeline3', 'Trainer'), task.node_uid)
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertTrue(task_lib.is_exec_node_task(task))
      self.assertEqual(
          test_utils.create_node_uid('pipeline4', 'Validator'), task.node_uid)
      self.assertTrue(task_queue.is_empty())

  @parameterized.parameters(
      _test_pipeline('pipeline1'),
      _test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC))
  @mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
  @mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
  @mock.patch.object(task_gen_utils, 'generate_task_from_active_execution')
  def test_stop_initiated_pipelines(self, pipeline, mock_gen_task_from_active,
                                    mock_async_task_gen, mock_sync_task_gen):
    """orchestrate cancels nodes/services of a stop-initiated pipeline."""
    with self._mlmd_connection as m:
      pipeline.nodes.add().pipeline_node.node_info.id = 'ExampleGen'
      pipeline.nodes.add().pipeline_node.node_info.id = 'Transform'
      pipeline.nodes.add().pipeline_node.node_info.id = 'Trainer'
      pipeline.nodes.add().pipeline_node.node_info.id = 'Evaluator'

      mock_service_job_manager = mock.create_autospec(
          service_jobs.ServiceJobManager, instance=True)
      mock_service_job_manager.is_pure_service_node.side_effect = (
          lambda _, node_id: node_id == 'ExampleGen')
      mock_service_job_manager.is_mixed_service_node.side_effect = (
          lambda _, node_id: node_id == 'Transform')

      pipeline_ops.initiate_pipeline_start(m, pipeline)
      with pstate.PipelineState.load(
          m, task_lib.PipelineUid.from_pipeline(pipeline)) as pipeline_state:
        pipeline_state.initiate_stop(
            status_lib.Status(code=status_lib.Code.CANCELLED))
        pipeline_execution_id = pipeline_state.execution_id

      task_queue = tq.TaskQueue()

      # For the stop-initiated pipeline, "Transform" execution task is in queue,
      # "Trainer" has an active execution in MLMD but no task in queue,
      # "Evaluator" has no active execution.
      task_queue.enqueue(
          test_utils.create_exec_node_task(
              task_lib.NodeUid(
                  pipeline_uid=task_lib.PipelineUid.from_pipeline(pipeline),
                  node_id='Transform')))
      transform_task = task_queue.dequeue()  # simulates task being processed
      mock_gen_task_from_active.side_effect = [
          test_utils.create_exec_node_task(
              node_uid=task_lib.NodeUid(
                  pipeline_uid=task_lib.PipelineUid.from_pipeline(pipeline),
                  node_id='Trainer'),
              is_cancelled=True), None, None, None, None
      ]

      pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)

      # There are no active pipelines so these shouldn't be called.
      mock_async_task_gen.assert_not_called()
      mock_sync_task_gen.assert_not_called()

      # stop_node_services should be called for ExampleGen which is a pure
      # service node.
      mock_service_job_manager.stop_node_services.assert_called_once_with(
          mock.ANY, 'ExampleGen')
      mock_service_job_manager.reset_mock()

      task_queue.task_done(transform_task)  # Pop out transform task.

      # CancelNodeTask for the "Transform" ExecNodeTask should be next.
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertTrue(task_lib.is_cancel_node_task(task))
      self.assertEqual('Transform', task.node_uid.node_id)

      # ExecNodeTask (with is_cancelled=True) for "Trainer" is next.
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertTrue(task_lib.is_exec_node_task(task))
      self.assertEqual('Trainer', task.node_uid.node_id)
      self.assertTrue(task.is_cancelled)

      self.assertTrue(task_queue.is_empty())

      mock_gen_task_from_active.assert_has_calls([
          mock.call(
              m,
              pipeline_state.pipeline,
              pipeline.nodes[2].pipeline_node,
              mock.ANY,
              is_cancelled=True),
          mock.call(
              m,
              pipeline_state.pipeline,
              pipeline.nodes[3].pipeline_node,
              mock.ANY,
              is_cancelled=True)
      ])
      self.assertEqual(2, mock_gen_task_from_active.call_count)

      # Pipeline execution should continue to be active since active node
      # executions were found in the last call to `orchestrate`.
      [execution] = m.store.get_executions_by_id([pipeline_execution_id])
      self.assertTrue(execution_lib.is_execution_active(execution))

      # Call `orchestrate` again; this time there are no more active node
      # executions so the pipeline should be marked as cancelled.
      pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)
      self.assertTrue(task_queue.is_empty())
      [execution] = m.store.get_executions_by_id([pipeline_execution_id])
      self.assertEqual(metadata_store_pb2.Execution.CANCELED,
                       execution.last_known_state)

      # stop_node_services should be called on both ExampleGen and Transform
      # which are service nodes.
      mock_service_job_manager.stop_node_services.assert_has_calls(
          [mock.call(mock.ANY, 'ExampleGen'),
           mock.call(mock.ANY, 'Transform')],
          any_order=True)

  @parameterized.parameters(
      _test_pipeline('pipeline1'),
      _test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC))
  @mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
  @mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
  @mock.patch.object(task_gen_utils, 'generate_task_from_active_execution')
  def test_active_pipelines_with_stopped_nodes(self, pipeline,
                                               mock_gen_task_from_active,
                                               mock_async_task_gen,
                                               mock_sync_task_gen):
    """orchestrate cancels individually-stopped nodes of an active pipeline."""
    if pipeline.execution_mode == pipeline_pb2.Pipeline.SYNC:
      mock_task_gen = mock_sync_task_gen
    else:
      mock_task_gen = mock_async_task_gen

    with self._mlmd_connection as m:
      pipeline.nodes.add().pipeline_node.node_info.id = 'ExampleGen'
      pipeline.nodes.add().pipeline_node.node_info.id = 'Transform'
      pipeline.nodes.add().pipeline_node.node_info.id = 'Trainer'
      pipeline.nodes.add().pipeline_node.node_info.id = 'Evaluator'

      mock_service_job_manager = mock.create_autospec(
          service_jobs.ServiceJobManager, instance=True)
      mock_service_job_manager.is_pure_service_node.side_effect = (
          lambda _, node_id: node_id == 'ExampleGen')

      example_gen_node_uid = task_lib.NodeUid.from_pipeline_node(
          pipeline, pipeline.nodes[0].pipeline_node)

      transform_node_uid = task_lib.NodeUid.from_pipeline_node(
          pipeline, pipeline.nodes[1].pipeline_node)
      transform_task = test_utils.create_exec_node_task(
          node_uid=transform_node_uid)

      trainer_node_uid = task_lib.NodeUid.from_pipeline_node(
          pipeline, pipeline.nodes[2].pipeline_node)
      trainer_task = test_utils.create_exec_node_task(node_uid=trainer_node_uid)

      evaluator_node_uid = task_lib.NodeUid.from_pipeline_node(
          pipeline, pipeline.nodes[3].pipeline_node)
      evaluator_task = test_utils.create_exec_node_task(
          node_uid=evaluator_node_uid)
      cancelled_evaluator_task = test_utils.create_exec_node_task(
          node_uid=evaluator_node_uid, is_cancelled=True)

      pipeline_ops.initiate_pipeline_start(m, pipeline)
      with pstate.PipelineState.load(
          m, task_lib.PipelineUid.from_pipeline(pipeline)) as pipeline_state:
        # Stop example-gen, trainer and evaluator.
        with pipeline_state.node_state_update_context(
            example_gen_node_uid) as node_state:
          node_state.update(pstate.NodeState.STOPPING,
                            status_lib.Status(code=status_lib.Code.CANCELLED))
        with pipeline_state.node_state_update_context(
            trainer_node_uid) as node_state:
          node_state.update(pstate.NodeState.STOPPING,
                            status_lib.Status(code=status_lib.Code.CANCELLED))
        with pipeline_state.node_state_update_context(
            evaluator_node_uid) as node_state:
          node_state.update(pstate.NodeState.STOPPING,
                            status_lib.Status(code=status_lib.Code.ABORTED))

      task_queue = tq.TaskQueue()

      # Simulate a new transform execution being triggered.
      mock_task_gen.return_value.generate.return_value = [transform_task]
      # Simulate ExecNodeTask for trainer already present in the task queue.
      task_queue.enqueue(trainer_task)
      # Simulate Evaluator having an active execution in MLMD.
      mock_gen_task_from_active.side_effect = [evaluator_task]

      pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)
      self.assertEqual(1, mock_task_gen.return_value.generate.call_count)

      # stop_node_services should be called on example-gen which is a pure
      # service node.
      mock_service_job_manager.stop_node_services.assert_called_once_with(
          mock.ANY, 'ExampleGen')

      # Verify that tasks are enqueued in the expected order:

      # Pre-existing trainer task.
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertEqual(trainer_task, task)

      # CancelNodeTask for trainer.
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertTrue(task_lib.is_cancel_node_task(task))
      self.assertEqual(trainer_node_uid, task.node_uid)

      # ExecNodeTask with is_cancelled=True for evaluator.
      task = task_queue.dequeue()
      task_queue.task_done(task)
      # NOTE(review): assertTrue(x, msg) always passes for a truthy first
      # argument; this was most likely meant to be
      # assertEqual(cancelled_evaluator_task, task). Verify and fix.
      self.assertTrue(cancelled_evaluator_task, task)

      # ExecNodeTask for newly triggered transform node.
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertEqual(transform_task, task)

      # No more tasks.
      self.assertTrue(task_queue.is_empty())

  @mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
  def test_handling_finalize_pipeline_task(self, task_gen):
    """A FinalizePipelineTask from the generator initiates pipeline stop."""
    with self._mlmd_connection as m:
      pipeline = _test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC)
      pipeline_ops.initiate_pipeline_start(m, pipeline)
      pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
      finalize_reason = status_lib.Status(
          code=status_lib.Code.ABORTED, message='foo bar')
      task_gen.return_value.generate.side_effect = [
          [
              task_lib.FinalizePipelineTask(
                  pipeline_uid=pipeline_uid, status=finalize_reason)
          ],
      ]

      task_queue = tq.TaskQueue()
      pipeline_ops.orchestrate(m, task_queue,
                               service_jobs.DummyServiceJobManager())
      task_gen.return_value.generate.assert_called_once()
      self.assertTrue(task_queue.is_empty())

      # Load pipeline state and verify stop initiation.
      with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
        self.assertEqual(finalize_reason,
                         pipeline_state.stop_initiated_reason())

  @mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
  def test_handling_finalize_node_task(self, task_gen):
    """A FinalizeNodeTask from the generator moves that node to STOPPING."""
    with self._mlmd_connection as m:
      pipeline = _test_pipeline('pipeline1')
      pipeline.nodes.add().pipeline_node.node_info.id = 'Transform'
      pipeline.nodes.add().pipeline_node.node_info.id = 'Trainer'
      pipeline_ops.initiate_pipeline_start(m, pipeline)
      pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
      finalize_reason = status_lib.Status(
          code=status_lib.Code.ABORTED, message='foo bar')
      transform_node_uid = task_lib.NodeUid(
          pipeline_uid=pipeline_uid, node_id='Transform')
      trainer_node_uid = task_lib.NodeUid(
          pipeline_uid=pipeline_uid, node_id='Trainer')
      task_gen.return_value.generate.side_effect = [
          [
              test_utils.create_exec_node_task(transform_node_uid),
              task_lib.FinalizeNodeTask(
                  node_uid=trainer_node_uid, status=finalize_reason)
          ],
      ]

      task_queue = tq.TaskQueue()
      pipeline_ops.orchestrate(m, task_queue,
                               service_jobs.DummyServiceJobManager())
      task_gen.return_value.generate.assert_called_once()
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertTrue(task_lib.is_exec_node_task(task))
      self.assertEqual(transform_node_uid, task.node_uid)

      # Load pipeline state and verify trainer node state.
      with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
        node_state = pipeline_state.get_node_state(trainer_node_uid)
        self.assertEqual(pstate.NodeState.STOPPING, node_state.state)
        self.assertEqual(finalize_reason, node_state.status)

  def test_to_status_not_ok_error_decorator(self):
    """_to_status_not_ok_error wraps arbitrary errors, passes through its own."""

    @pipeline_ops._to_status_not_ok_error
    def fn1():
      raise RuntimeError('test error 1')

    @pipeline_ops._to_status_not_ok_error
    def fn2():
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.ALREADY_EXISTS, message='test error 2')

    # Generic exceptions become StatusNotOkError with code UNKNOWN.
    with self.assertRaisesRegex(status_lib.StatusNotOkError,
                                'test error 1') as ctxt:
      fn1()
    self.assertEqual(status_lib.Code.UNKNOWN, ctxt.exception.code)

    # Pre-existing StatusNotOkError keeps its original code.
    with self.assertRaisesRegex(status_lib.StatusNotOkError,
                                'test error 2') as ctxt:
      fn2()
    self.assertEqual(status_lib.Code.ALREADY_EXISTS, ctxt.exception.code)

  @parameterized.parameters(
      _test_pipeline('pipeline1'),
      _test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC))
  @mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
  @mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
  def test_executor_node_stop_then_start_flow(self, pipeline,
                                              mock_async_task_gen,
                                              mock_sync_task_gen):
    """Executor node lifecycle: STOPPING -> STOPPED -> STARTED across rounds."""
    service_job_manager = service_jobs.DummyServiceJobManager()
    with self._mlmd_connection as m:
      pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
      pipeline.nodes.add().pipeline_node.node_info.id = 'Trainer'
      trainer_node_uid = task_lib.NodeUid.from_pipeline_node(
          pipeline, pipeline.nodes[0].pipeline_node)

      # Start pipeline and stop trainer.
      pipeline_ops.initiate_pipeline_start(m, pipeline)
      with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
        with pipeline_state.node_state_update_context(
            trainer_node_uid) as node_state:
          node_state.update(pstate.NodeState.STOPPING,
                            status_lib.Status(code=status_lib.Code.CANCELLED))

      task_queue = tq.TaskQueue()

      # Simulate ExecNodeTask for trainer already present in the task queue.
      trainer_task = test_utils.create_exec_node_task(node_uid=trainer_node_uid)
      task_queue.enqueue(trainer_task)

      pipeline_ops.orchestrate(m, task_queue, service_job_manager)

      # Dequeue pre-existing trainer task.
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertEqual(trainer_task, task)

      # Dequeue CancelNodeTask for trainer.
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertTrue(task_lib.is_cancel_node_task(task))
      self.assertEqual(trainer_node_uid, task.node_uid)

      self.assertTrue(task_queue.is_empty())

      # Node stays STOPPING while its ExecNodeTask is still outstanding.
      with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
        node_state = pipeline_state.get_node_state(trainer_node_uid)
        self.assertEqual(pstate.NodeState.STOPPING, node_state.state)
        self.assertEqual(status_lib.Code.CANCELLED, node_state.status.code)

      pipeline_ops.orchestrate(m, task_queue, service_job_manager)

      # Second round with no outstanding task transitions node to STOPPED.
      with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
        node_state = pipeline_state.get_node_state(trainer_node_uid)
        self.assertEqual(pstate.NodeState.STOPPED, node_state.state)
        self.assertEqual(status_lib.Code.CANCELLED, node_state.status.code)

      pipeline_ops.initiate_node_start(m, trainer_node_uid)
      pipeline_ops.orchestrate(m, task_queue, service_job_manager)

      with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
        node_state = pipeline_state.get_node_state(trainer_node_uid)
        self.assertEqual(pstate.NodeState.STARTED, node_state.state)

  @parameterized.parameters(
      _test_pipeline('pipeline1'),
      _test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC))
  @mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
  @mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
  def test_pure_service_node_stop_then_start_flow(self, pipeline,
                                                  mock_async_task_gen,
                                                  mock_sync_task_gen):
    """Pure service node lifecycle: services stopped, then node restarted."""
    with self._mlmd_connection as m:
      pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
      pipeline.nodes.add().pipeline_node.node_info.id = 'ExampleGen'

      mock_service_job_manager = mock.create_autospec(
          service_jobs.ServiceJobManager, instance=True)
      mock_service_job_manager.is_pure_service_node.return_value = True

      example_gen_node_uid = task_lib.NodeUid.from_pipeline_node(
          pipeline, pipeline.nodes[0].pipeline_node)

      pipeline_ops.initiate_pipeline_start(m, pipeline)
      with pstate.PipelineState.load(
          m, task_lib.PipelineUid.from_pipeline(pipeline)) as pipeline_state:
        with pipeline_state.node_state_update_context(
            example_gen_node_uid) as node_state:
          node_state.update(pstate.NodeState.STOPPING,
                            status_lib.Status(code=status_lib.Code.CANCELLED))

      task_queue = tq.TaskQueue()

      pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)

      # stop_node_services should be called for ExampleGen which is a pure
      # service node.
      mock_service_job_manager.stop_node_services.assert_called_once_with(
          mock.ANY, 'ExampleGen')

      with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
        node_state = pipeline_state.get_node_state(example_gen_node_uid)
        self.assertEqual(pstate.NodeState.STOPPED, node_state.state)
        self.assertEqual(status_lib.Code.CANCELLED, node_state.status.code)

      pipeline_ops.initiate_node_start(m, example_gen_node_uid)
      pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)

      with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
        node_state = pipeline_state.get_node_state(example_gen_node_uid)
        self.assertEqual(pstate.NodeState.STARTED, node_state.state)

  @parameterized.parameters(
      _test_pipeline('pipeline1'),
      _test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC))
  @mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
  @mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
  def test_mixed_service_node_stop_then_start_flow(self, pipeline,
                                                   mock_async_task_gen,
                                                   mock_sync_task_gen):
    """Mixed service node: ExecNodeTask cancelled first, then services stopped."""
    with self._mlmd_connection as m:
      pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
      pipeline.nodes.add().pipeline_node.node_info.id = 'Transform'

      mock_service_job_manager = mock.create_autospec(
          service_jobs.ServiceJobManager, instance=True)
      mock_service_job_manager.is_pure_service_node.return_value = False
      mock_service_job_manager.is_mixed_service_node.return_value = True

      transform_node_uid = task_lib.NodeUid.from_pipeline_node(
          pipeline, pipeline.nodes[0].pipeline_node)

      pipeline_ops.initiate_pipeline_start(m, pipeline)
      with pstate.PipelineState.load(
          m, task_lib.PipelineUid.from_pipeline(pipeline)) as pipeline_state:
        # Stop Transform.
        with pipeline_state.node_state_update_context(
            transform_node_uid) as node_state:
          node_state.update(pstate.NodeState.STOPPING,
                            status_lib.Status(code=status_lib.Code.CANCELLED))

      task_queue = tq.TaskQueue()

      # Simulate ExecNodeTask for Transform already present in the task queue.
      transform_task = test_utils.create_exec_node_task(
          node_uid=transform_node_uid)
      task_queue.enqueue(transform_task)

      pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)

      # stop_node_services should not be called as there was an active
      # ExecNodeTask for Transform which is a mixed service node.
      mock_service_job_manager.stop_node_services.assert_not_called()

      # Dequeue pre-existing transform task.
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertEqual(transform_task, task)

      # Dequeue CancelNodeTask for transform.
      task = task_queue.dequeue()
      task_queue.task_done(task)
      self.assertTrue(task_lib.is_cancel_node_task(task))
      self.assertEqual(transform_node_uid, task.node_uid)

      with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
        node_state = pipeline_state.get_node_state(transform_node_uid)
        self.assertEqual(pstate.NodeState.STOPPING, node_state.state)
        self.assertEqual(status_lib.Code.CANCELLED, node_state.status.code)

      pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)

      # stop_node_services should be called for Transform which is a mixed
      # service node and corresponding ExecNodeTask has been dequeued.
      mock_service_job_manager.stop_node_services.assert_called_once_with(
          mock.ANY, 'Transform')

      with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
        node_state = pipeline_state.get_node_state(transform_node_uid)
        self.assertEqual(pstate.NodeState.STOPPED, node_state.state)
        self.assertEqual(status_lib.Code.CANCELLED, node_state.status.code)

      pipeline_ops.initiate_node_start(m, transform_node_uid)
      pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)

      with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
        node_state = pipeline_state.get_node_state(transform_node_uid)
        self.assertEqual(pstate.NodeState.STARTED, node_state.state)
if __name__ == '__main__':
  # Run the tests via the TensorFlow test runner.
  tf.test.main()
|
connection_pool.py | import redis
import threading
import time
import unittest
class ConnectionPoolTestCase(unittest.TestCase):
    """Integration tests for redis connection pooling.

    NOTE(review): these tests require a redis server listening on
    localhost:6379 with databases 9 and 10 available for test data.
    """

    def test_multiple_connections(self):
        """Clients sharing a pool and the same target share one connection."""
        # 2 clients to the same host/port/db/pool should use the same
        # connection.
        pool = redis.ConnectionPool()
        r1 = redis.Redis(host='localhost', port=6379, db=9,
                         connection_pool=pool)
        r2 = redis.Redis(host='localhost', port=6379, db=9,
                         connection_pool=pool)
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        self.assertEqual(r1.connection, r2.connection)

        # If one of them switches, they should have separate connection
        # objects.
        r2.select(db=10, host='localhost', port=6379)
        self.assertNotEqual(r1.connection, r2.connection)

        # Sort by identity: connection objects are not orderable, so a plain
        # sort() raises TypeError on Python 3.
        conns = sorted([r1.connection, r2.connection], key=id)

        # But returning to the original state shares the object again.
        r2.select(db=9, host='localhost', port=6379)
        self.assertEqual(r1.connection, r2.connection)

        # The connection manager should still have just the same 2
        # connections.
        mgr_conns = sorted(pool.get_all_connections(), key=id)
        self.assertEqual(conns, mgr_conns)

    def test_threaded_workers(self):
        """Concurrent commands from multiple threads should not interfere."""
        r = redis.Redis(host='localhost', port=6379, db=9)
        r.set('a', 'foo')
        r.set('b', 'bar')

        def _info_worker():
            for _ in range(50):
                r.info()
                time.sleep(0.01)

        def _keys_worker():
            for _ in range(50):
                r.keys()
                time.sleep(0.01)

        workers = [
            threading.Thread(target=_info_worker),
            threading.Thread(target=_keys_worker),
        ]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()
|
delete_all_infection_entities.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script will delete all entitities of the infection kind
"""
from google.cloud import datastore
import itertools
from concurrent.futures import ThreadPoolExecutor
from queue import Queue
from threading import Thread
client = datastore.Client()
def producer(futures_queue):
    """Stream all 'infection' entity keys and delete them in batches of 500.

    Each batch delete is submitted to a thread pool; the resulting futures are
    pushed onto futures_queue so the consumer thread can tally progress.

    :param futures_queue: queue.Queue that receives one Future per submitted batch
    """
    query = client.query(kind="infection")
    all_infection_entity_keys = (entity.key for entity in query.fetch())
    with ThreadPoolExecutor(max_workers=8) as executor:
        batch = []
        for key in all_infection_entity_keys:
            batch.append(key)
            if len(batch) >= 500:
                # Bug fix: the original called delete_entities_batch(batch)
                # synchronously here AND submitted the same batch to the
                # executor, deleting every batch twice and double-counting
                # the totals reported by the consumer.
                future = executor.submit(delete_entities_batch, batch)
                futures_queue.put(future)
                batch = []
        # Flush the final partial batch; skip the submit when it is empty
        if batch:
            future = executor.submit(delete_entities_batch, batch)
            futures_queue.put(future)
def consumer(futures_queue):
    """Drain completed delete futures and print a running total of deletions.

    Cleanup: the original also created an unused datastore client and an
    unused key generator here; deletion happens entirely in the producer's
    worker pool, so that dead code was removed.

    :param futures_queue: queue.Queue of Futures, each resolving to a deleted-key count
    """
    total_deleted = 0
    # NOTE(review): this loop exits as soon as the queue is momentarily empty,
    # so it can terminate before the producer has submitted every batch —
    # a sentinel value pushed by the producer would be more robust; confirm.
    while not futures_queue.empty():
        future = futures_queue.get()
        result = future.result()
        total_deleted += result
        print(f"Deleted {result} keys. Total Deleted: {total_deleted}")
def delete_entities_batch(batch):
    """Delete a batch of datastore keys using the module-level client.

    :param batch: list of datastore keys to delete
    :return: the number of keys that were passed in (reported as deleted)
    """
    client.delete_multi(batch)
    return len(batch)
if __name__ == "__main__":
    # Create the shared queue and both worker threads
    q = Queue()
    t1 = Thread(target=consumer, args=(q,))
    t2 = Thread(target=producer, args=(q,))
    # Bug fix: the consumer was started before any work had been queued, so
    # its `while not queue.empty()` loop could see an empty queue and exit
    # immediately. Run the producer to completion first, then let the
    # consumer drain the queued futures; join both so the script waits.
    t2.start()
    t2.join()
    t1.start()
    t1.join()
|
postprocesssgns.py | import numpy as np
import os, sys
from multiprocessing import Queue, Process
from argparse import ArgumentParser
tpath = os.path.dirname(os.path.realpath(__file__))
VIZ_DIR=os.path.join(tpath, "web")
ROOT_DIR=tpath
tpath = os.path.abspath(os.path.join(tpath, "../"))
sys.path.append(tpath)
os.chdir(tpath)
from ioutils import load_pickle, write_pickle
def worker(proc_num, queue, dir, count_dir, min_count):
while True:
if queue.empty():
break
year = queue.get()
print "Loading data...", year
print "Path..", "sgns/" + count_dir + str(year) + "-counts.pkl"
# time.sleep(120 * random.random())
freqs = load_pickle("sgns/" + count_dir + str(year) + "-counts.pkl")
iw = []
with open("sgns/" + dir + str(year) + "/" + str(year) + ".sgns.words.txt") as fp: # seems to correspond to X.counts.words.vocab
info = fp.readline().split()
vocab_size = int(info[0])
dim = int(info[1])
w_mat = np.zeros((vocab_size, dim))
for i, line in enumerate(fp):
line = line.strip().split()
iw.append(line[0].decode("utf-8"))
if freqs[iw[-1]] >= 500:
w_mat[i,:] = np.array(map(float, line[1:]))
c_mat = np.zeros((vocab_size, dim))
with open("sgns/" + dir + str(year) + "/" + str(year) + ".sgns.contexts.txt") as fp: # seems to correspond to X.counts.contexts.vocab >> TEST IN "GROUP"
fp.readline()
for i, line in enumerate(fp):
line = line.strip().split()
if freqs[line[0]] >= min_count:
c_mat[i,:] = np.array(map(float, line[1:]))
np.save("sgns/" + dir + str(year) + "/" + str(year) + "-w.npy", w_mat)
np.save("sgns/" + dir + str(year) + "/" + str(year) + "-c.npy", c_mat)
write_pickle(iw, "sgns/" + dir + str(year) + "/" + str(year) + "-vocab.pkl")
if __name__ == "__main__":
    parser = ArgumentParser("Post-processes SGNS vectors to easier-to-use format. Removes infrequent words.")
    parser.add_argument("dir")
    parser.add_argument("count_dir", help="Directory with count data.")
    parser.add_argument("--workers", type=int, help="Number of processes to spawn", default=20)
    parser.add_argument("--start-year", type=int, default=1860)
    parser.add_argument("--end-year", type=int, default=2000)
    parser.add_argument("--year-inc", type=int, default=10)
    parser.add_argument("--min-count", type=int, default=300)
    args = parser.parse_args()
    years = range(args.start_year, args.end_year + 1, args.year_inc)
    # One queue entry per year; worker processes drain the queue until empty
    queue = Queue()
    for year in years:
        queue.put(year)
    # Spawn the worker pool and wait for every process to finish
    procs = [Process(target=worker, args=[i, queue, args.dir, args.count_dir, args.min_count]) for i in range(args.workers)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
|
API_checkusernames.py | from bs4 import BeautifulSoup
import requests
from threading import Thread
def thread_search(service, username, results, i):
    """Thread worker: query checkusernames.com for one service and record a hit.

    Writes `service` into results[i] when the site reports the username as
    taken (response payload starting with "2").

    NOTE(review): any exception from the request is swallowed and the request
    retried forever, so a persistently failing request never lets this thread
    terminate — confirm that unbounded retry is intended.
    """
    while True:
        try:
            req = requests.get('http://checkusernames.com/usercheckv2.php?target=' + service + '&username=' + username, headers={'X-Requested-With': 'XMLHttpRequest'})
            if (req.content.split('|')[0] == "2"):  # found
                results[i] = service
            break
        except Exception, e:
            # print e
            pass
class CheckUsernames(object):
    """Scrapes the list of supported services from checkusernames.com and
    searches a username across all of them, one thread per service."""

    def __init__(self):
        # Discover every supported service id from the site's landing page
        self.services = []
        req = requests.get('http://checkusernames.com/')
        soup = BeautifulSoup(req.content)
        services = soup.findAll('li', attrs={'class': 'socialdeets'})
        for service in services:
            self.services.append(service['id'].replace(' ', ''))

    def search(self, username):
        """Check `username` against every known service concurrently.

        :param username: the username to look up
        :return: list of service names where the username was found
        """
        threads = [None] * len(self.services)
        results = [None] * len(self.services)
        for i in range(len(self.services)):
            threads[i] = Thread(target=thread_search, args=(self.services[i], username, results, i))
            threads[i].start()
        # do some other stuff
        for i in range(len(threads)):
            threads[i].join()
        return [x for x in results if x is not None]
utils.py | # coding=utf-8
"""Shared utility functions"""
import argparse
import collections
import functools
import glob
import inspect
import itertools
import os
import re
import subprocess
import sys
import threading
import unicodedata
from enum import (
Enum,
)
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
TextIO,
Type,
TypeVar,
Union,
cast,
)
from . import (
constants,
)
from .argparse_custom import (
ChoicesProviderFunc,
CompleterFunc,
)
if TYPE_CHECKING: # pragma: no cover
import cmd2 # noqa: F401
PopenTextIO = subprocess.Popen[bytes]
else:
PopenTextIO = subprocess.Popen
_T = TypeVar('_T')
def is_quoted(arg: str) -> bool:
    """Report whether a string begins and ends with the same recognized quote character.

    :param arg: the string being checked for quotes
    :return: True if a string is quoted
    """
    if len(arg) <= 1:
        return False
    first, last = arg[0], arg[-1]
    return first == last and first in constants.QUOTES
def quote_string(arg: str) -> str:
    """Wrap a string in quotes, preferring double quotes unless the string contains one."""
    quote = "'" if '"' in arg else '"'
    return f"{quote}{arg}{quote}"
def quote_string_if_needed(arg: str) -> str:
    """Quote a string if it contains spaces and isn't already quoted"""
    needs_quoting = ' ' in arg and not is_quoted(arg)
    return quote_string(arg) if needs_quoting else arg
def strip_quotes(arg: str) -> str:
    """Strip matching outer quotes from a string.

    Applies to both single and double quotes.

    :param arg: string to strip outer quotes from
    :return: same string with potentially outer quotes stripped
    """
    return arg[1:-1] if is_quoted(arg) else arg
def str_to_bool(val: str) -> bool:
    """Convert the strings 'true'/'false' (any casing) to a boolean.

    :param val: string being converted
    :return: boolean value expressed in the string
    :raises: ValueError if the string does not contain a value corresponding to a boolean value
    """
    if isinstance(val, str):
        capitalized = val.capitalize()
        if capitalized == str(True):
            return True
        if capitalized == str(False):
            return False
    raise ValueError("must be True or False (case-insensitive)")
class Settable:
    """Used to configure an attribute to be settable via the set command in the CLI"""

    def __init__(
        self,
        name: str,
        val_type: Union[Type[Any], Callable[[Any], Any]],
        description: str,
        settable_object: object,
        *,
        settable_attrib_name: Optional[str] = None,
        onchange_cb: Optional[Callable[[str, _T, _T], Any]] = None,
        choices: Optional[Iterable[Any]] = None,
        choices_provider: Optional[ChoicesProviderFunc] = None,
        completer: Optional[CompleterFunc] = None,
    ) -> None:
        """
        Settable Initializer

        :param name: name of the instance attribute being made settable
        :param val_type: callable used to cast the string value from the command line into its proper type and
                         even validate its value. Setting this to bool provides tab completion for true/false and
                         validation using str_to_bool(). The val_type function should raise an exception if it fails.
                         This exception will be caught and printed by Cmd.do_set().
        :param description: string describing this setting
        :param settable_object: object to which the instance attribute belongs (e.g. self)
        :param settable_attrib_name: name which displays to the user in the output of the set command.
                                     Defaults to `name` if not specified.
        :param onchange_cb: optional function or method to call when the value of this settable is altered
                            by the set command. (e.g. onchange_cb=self.debug_changed)

                            Cmd.do_set() passes the following 3 arguments to onchange_cb:
                                param_name: str - name of the changed parameter
                                old_value: Any - the value before being changed
                                new_value: Any - the value after being changed

        The following optional settings provide tab completion for a parameter's values. They correspond to the
        same settings in argparse-based tab completion. A maximum of one of these should be provided.

        :param choices: iterable of accepted values
        :param choices_provider: function that provides choices for this argument
        :param completer: tab completion function that provides choices for this argument
        """
        if val_type == bool:
            # bool settables get automatic str->bool conversion plus true/false tab completion
            def get_bool_choices(_) -> List[str]:  # type: ignore[no-untyped-def]
                """Used to tab complete lowercase boolean values"""
                return ['true', 'false']

            val_type = str_to_bool
            choices_provider = cast(ChoicesProviderFunc, get_bool_choices)

        self.name = name
        self.val_type = val_type
        self.description = description
        self.settable_obj = settable_object
        self.settable_attrib_name = settable_attrib_name if settable_attrib_name is not None else name
        self.onchange_cb = onchange_cb
        self.choices = choices
        self.choices_provider = choices_provider
        self.completer = completer

    def get_value(self) -> Any:
        """
        Get the value of the settable attribute

        :return: the current value read from the owning object
        """
        return getattr(self.settable_obj, self.settable_attrib_name)

    def set_value(self, value: Any) -> Any:
        """
        Set the settable attribute on the specified destination object

        :param value: New value to set
        :return: New value that the attribute was set to
        :raises: ValueError if the converted value is not one of the allowed choices
        """
        # Run the value through its type function to handle any conversion or validation
        new_value = self.val_type(value)

        # Make sure new_value is a valid choice
        if self.choices is not None and new_value not in self.choices:
            choices_str = ', '.join(map(repr, self.choices))
            raise ValueError(f"invalid choice: {new_value!r} (choose from {choices_str})")

        # Try to update the settable's value
        orig_value = self.get_value()
        setattr(self.settable_obj, self.settable_attrib_name, new_value)

        # Check if we need to call an onchange callback
        if orig_value != new_value and self.onchange_cb:
            self.onchange_cb(self.name, orig_value, new_value)
        return new_value
def is_text_file(file_path: str) -> bool:
    """Returns if a file contains only ASCII or UTF-8 encoded text and isn't empty.

    :param file_path: path to the file being checked
    :return: True if the file is a non-empty text file, otherwise False
    :raises OSError: if file can't be read (propagates unchanged to the caller)
    """
    import codecs

    expanded_path = os.path.abspath(os.path.expanduser(file_path.strip()))

    # Only need to check for utf-8 compliance since that covers ASCII, too
    try:
        with codecs.open(expanded_path, encoding='utf-8', errors='strict') as f:
            # File must decode cleanly and contain at least one line to count as text.
            # (The original had a redundant `except OSError: raise` clause, removed here;
            # OSError still propagates to the caller exactly as before.)
            return sum(1 for _ in f) > 0
    except UnicodeDecodeError:
        # Not UTF-8
        return False
def remove_duplicates(list_to_prune: List[_T]) -> List[_T]:
    """Remove duplicates from a list while preserving the order of the items.

    :param list_to_prune: the list being pruned of duplicates
    :return: The pruned list
    """
    seen: collections.OrderedDict[_T, Any] = collections.OrderedDict()
    for element in list_to_prune:
        seen[element] = None
    return list(seen)
def norm_fold(astr: str) -> str:
    """Normalize and casefold Unicode strings for saner comparisons.

    :param astr: input unicode string
    :return: a normalized and case-folded version of the input string
    """
    normalized = unicodedata.normalize('NFC', astr)
    return normalized.casefold()
def alphabetical_sort(list_to_sort: Iterable[str]) -> List[str]:
    """Return a copy of the input sorted alphabetically, case-insensitively.

    For example: ['a1', 'A11', 'A2', 'a22', 'a3']

    To sort a list in place, don't call this method, which makes a copy. Instead, do this:
    my_list.sort(key=norm_fold)

    :param list_to_sort: the list being sorted
    :return: the sorted list
    """
    result = list(list_to_sort)
    result.sort(key=norm_fold)
    return result
def try_int_or_force_to_lower_case(input_str: str) -> Union[int, str]:
    """
    Tries to convert the passed-in string to an integer. If that fails, it converts it to lower case using norm_fold.

    :param input_str: string to convert
    :return: the string as an integer or a lower case version of the string
    """
    try:
        converted = int(input_str)
    except ValueError:
        return norm_fold(input_str)
    return converted
def natural_keys(input_str: str) -> List[Union[int, str]]:
    """
    Converts a string into a list of integers and strings to support natural sorting (see natural_sort).

    For example: natural_keys('abc123def') -> ['abc', '123', 'def']

    :param input_str: string to convert
    :return: list of strings and integers
    """
    pieces = re.split(r'(\d+)', input_str)
    return [try_int_or_force_to_lower_case(piece) for piece in pieces]
def natural_sort(list_to_sort: Iterable[str]) -> List[str]:
    """
    Sorts a list of strings case insensitively as well as numerically.

    For example: ['a1', 'A2', 'a3', 'A11', 'a22']

    To sort a list in place, don't call this method, which makes a copy. Instead, do this:
    my_list.sort(key=natural_keys)

    :param list_to_sort: the list being sorted
    :return: the list sorted naturally
    """
    result = list(list_to_sort)
    result.sort(key=natural_keys)
    return result
def quote_specific_tokens(tokens: List[str], tokens_to_quote: List[str]) -> None:
    """
    Quote specific tokens in a list, in place.

    :param tokens: token list being edited
    :param tokens_to_quote: the tokens, which if present in tokens, to quote
    """
    for idx, tok in enumerate(tokens):
        if tok in tokens_to_quote:
            tokens[idx] = quote_string(tok)
def unquote_specific_tokens(tokens: List[str], tokens_to_unquote: List[str]) -> None:
    """
    Unquote specific tokens in a list, in place.

    :param tokens: token list being edited
    :param tokens_to_unquote: the tokens, which if present in tokens, to unquote
    """
    for idx, tok in enumerate(tokens):
        bare = strip_quotes(tok)
        if bare in tokens_to_unquote:
            tokens[idx] = bare
def expand_user(token: str) -> str:
    """
    Wrap os.path.expanduser() to support expanding ~ inside quoted strings.

    :param token: the string to expand
    """
    if not token:
        return token

    quote_char = token[0] if is_quoted(token) else ''
    inner = strip_quotes(token) if quote_char else token
    expanded = os.path.expanduser(inner)

    # Restore the quotes even if not needed to preserve what the user typed
    if quote_char:
        return f"{quote_char}{expanded}{quote_char}"
    return expanded
def expand_user_in_tokens(tokens: List[str]) -> None:
    """
    Call expand_user() on every token in a list of strings, in place.

    :param tokens: tokens to expand
    """
    for idx in range(len(tokens)):
        tokens[idx] = expand_user(tokens[idx])
def find_editor() -> Optional[str]:
    """
    Used to set cmd2.Cmd.DEFAULT_EDITOR. If EDITOR env variable is set, that will be used.
    Otherwise the function will look for a known editor in directories specified by PATH env variable.

    :return: Default editor or None
    """
    editor = os.environ.get('EDITOR')
    if editor:
        return editor

    on_windows = sys.platform[:3] == 'win'
    if on_windows:
        candidates = ['code.cmd', 'notepad++.exe', 'notepad.exe']
    else:
        candidates = ['vim', 'vi', 'emacs', 'nano', 'pico', 'joe', 'code', 'subl', 'atom', 'gedit', 'geany', 'kate']

    # Get a list of every directory in the PATH environment variable and ignore symbolic links
    env_path = os.getenv('PATH')
    paths = [] if env_path is None else [p for p in env_path.split(os.path.pathsep) if not os.path.islink(p)]

    for candidate, directory in itertools.product(candidates, paths):
        candidate_path = os.path.join(directory, candidate)
        if os.path.isfile(candidate_path) and os.access(candidate_path, os.X_OK):
            # Remove extension from Windows file names
            return os.path.splitext(candidate)[0] if on_windows else candidate
    return None
def files_from_glob_pattern(pattern: str, access: int = os.F_OK) -> List[str]:
    """Return a list of file paths based on a glob pattern.

    Only files are returned, not directories, and optionally only files for which the user has a specified access to.

    :param pattern: file name or glob pattern
    :param access: file access type to verify (os.* where * is F_OK, R_OK, W_OK, or X_OK)
    :return: list of files matching the name or glob pattern
    """
    matches = glob.glob(pattern)
    return [path for path in matches if os.path.isfile(path) and os.access(path, access)]
def files_from_glob_patterns(patterns: List[str], access: int = os.F_OK) -> List[str]:
    """Return a list of file paths based on a list of glob patterns.

    Only files are returned, not directories, and optionally only files for which the user has a specified access to.

    :param patterns: list of file names and/or glob patterns
    :param access: file access type to verify (os.* where * is F_OK, R_OK, W_OK, or X_OK)
    :return: list of files matching the names and/or glob patterns
    """
    files: List[str] = []
    for pattern in patterns:
        files.extend(files_from_glob_pattern(pattern, access=access))
    return files
def get_exes_in_path(starts_with: str) -> List[str]:
    """Returns names of executables in a user's path

    :param starts_with: what the exes should start with. leave blank for all exes in path.
    :return: a list of matching exe names
    """
    # Purposely don't match any executable containing wildcards
    if any(wildcard in starts_with for wildcard in ('*', '?')):
        return []

    # Get a list of every directory in the PATH environment variable and ignore symbolic links
    env_path = os.getenv('PATH')
    if env_path is None:
        paths = []
    else:
        paths = [p for p in env_path.split(os.path.pathsep) if not os.path.islink(p)]

    # A set avoids duplicate exe names found in different directories
    exe_names = set()
    for directory in paths:
        pattern = os.path.join(directory, starts_with) + '*'
        for match in files_from_glob_pattern(pattern, access=os.X_OK):
            exe_names.add(os.path.basename(match))
    return list(exe_names)
class StdSim:
    """
    Class to simulate behavior of sys.stdout or sys.stderr.
    Stores contents in internal buffer and optionally echos to the inner stream it is simulating.
    """

    def __init__(
        self,
        inner_stream: Union[TextIO, 'StdSim'],
        *,
        echo: bool = False,
        encoding: str = 'utf-8',
        errors: str = 'replace',
    ) -> None:
        """
        StdSim Initializer

        :param inner_stream: the wrapped stream. Should be a TextIO or StdSim instance.
        :param echo: if True, then all input will be echoed to inner_stream
        :param encoding: codec for encoding/decoding strings (defaults to utf-8)
        :param errors: how to handle encoding/decoding errors (defaults to replace)
        """
        self.inner_stream = inner_stream
        self.echo = echo
        self.encoding = encoding
        self.errors = errors
        # When True, write() discards data instead of storing it in the buffer
        self.pause_storage = False
        # Binary companion buffer, mirroring the .buffer attribute of real text streams
        self.buffer = ByteBuf(self)

    def write(self, s: str) -> None:
        """
        Add str to internal bytes buffer and if echo is True, echo contents to inner stream

        :param s: String to write to the stream
        :raises: TypeError if s is not a str
        """
        if not isinstance(s, str):
            raise TypeError(f'write() argument must be str, not {type(s)}')

        if not self.pause_storage:
            self.buffer.byte_buf += s.encode(encoding=self.encoding, errors=self.errors)
        if self.echo:
            self.inner_stream.write(s)

    def getvalue(self) -> str:
        """Get the internal contents as a str"""
        return self.buffer.byte_buf.decode(encoding=self.encoding, errors=self.errors)

    def getbytes(self) -> bytes:
        """Get the internal contents as bytes"""
        return bytes(self.buffer.byte_buf)

    def read(self, size: Optional[int] = -1) -> str:
        """
        Read from the internal contents as a str and then clear them out

        :param size: Number of bytes to read from the stream; None or -1 reads everything
        """
        if size is None or size == -1:
            result = self.getvalue()
            self.clear()
        else:
            # Partial read: decode the first `size` bytes and leave the rest buffered.
            # NOTE(review): a byte-count slice can split a multibyte character; the
            # errors policy (default 'replace') masks that — confirm acceptable.
            result = self.buffer.byte_buf[:size].decode(encoding=self.encoding, errors=self.errors)
            self.buffer.byte_buf = self.buffer.byte_buf[size:]
        return result

    def readbytes(self) -> bytes:
        """Read from the internal contents as bytes and then clear them out"""
        result = self.getbytes()
        self.clear()
        return result

    def clear(self) -> None:
        """Clear the internal contents"""
        self.buffer.byte_buf.clear()

    def isatty(self) -> bool:
        """StdSim only considered an interactive stream if `echo` is True and `inner_stream` is a tty."""
        if self.echo:
            return self.inner_stream.isatty()
        else:
            return False

    @property
    def line_buffering(self) -> bool:
        """
        Handle when the inner stream doesn't have a line_buffering attribute which is the case
        when running unit tests because pytest sets stdout to a pytest EncodedFile object.
        """
        try:
            return bool(self.inner_stream.line_buffering)
        except AttributeError:
            return False

    def __getattr__(self, item: str) -> Any:
        # Delegate any attribute not defined on StdSim to the wrapped stream
        if item in self.__dict__:
            return self.__dict__[item]
        else:
            return getattr(self.inner_stream, item)
class ByteBuf:
    """
    Used by StdSim to write binary data and stores the actual bytes written
    """

    # Used to know when to flush the StdSim
    NEWLINES = [b'\n', b'\r']

    def __init__(self, std_sim_instance: StdSim) -> None:
        # Raw bytes accumulated by write()
        self.byte_buf = bytearray()
        # Owning StdSim; consulted for echo/pause settings and flushed on newlines
        self.std_sim_instance = std_sim_instance

    def write(self, b: bytes) -> None:
        """Add bytes to internal bytes buffer and if echo is True, echo contents to inner stream."""
        if not isinstance(b, bytes):
            raise TypeError(f'a bytes-like object is required, not {type(b)}')
        if not self.std_sim_instance.pause_storage:
            self.byte_buf += b
        if self.std_sim_instance.echo:
            self.std_sim_instance.inner_stream.buffer.write(b)

        # Since StdSim wraps TextIO streams, we will flush the stream if line buffering is on
        # and the bytes being written contain a new line character. This is helpful when StdSim
        # is being used to capture output of a shell command because it causes the output to print
        # to the screen more often than if we waited for the stream to flush its buffer.
        if self.std_sim_instance.line_buffering:
            if any(newline in b for newline in ByteBuf.NEWLINES):
                self.std_sim_instance.flush()
class ProcReader:
    """
    Used to capture stdout and stderr from a Popen process if any of those were set to subprocess.PIPE.
    If neither are pipes, then the process will run normally and no output will be captured.
    """

    def __init__(self, proc: PopenTextIO, stdout: Union[StdSim, TextIO], stderr: Union[StdSim, TextIO]) -> None:
        """
        ProcReader initializer

        :param proc: the Popen process being read from
        :param stdout: the stream to write captured stdout
        :param stderr: the stream to write captured stderr
        """
        self._proc = proc
        self._stdout = stdout
        self._stderr = stderr

        # One reader thread per stream; each only runs if its stream is a pipe
        self._out_thread = threading.Thread(name='out_thread', target=self._reader_thread_func, kwargs={'read_stdout': True})
        self._err_thread = threading.Thread(name='err_thread', target=self._reader_thread_func, kwargs={'read_stdout': False})

        # Start the reader threads for pipes only
        if self._proc.stdout is not None:
            self._out_thread.start()
        if self._proc.stderr is not None:
            self._err_thread.start()

    def send_sigint(self) -> None:
        """Send a SIGINT to the process similar to if <Ctrl>+C were pressed"""
        import signal

        if sys.platform.startswith('win'):
            # cmd2 started the Windows process in a new process group. Therefore we must send
            # a CTRL_BREAK_EVENT since CTRL_C_EVENT signals cannot be generated for process groups.
            self._proc.send_signal(signal.CTRL_BREAK_EVENT)
        else:
            # Since cmd2 uses shell=True in its Popen calls, we need to send the SIGINT to
            # the whole process group to make sure it propagates further than the shell
            try:
                group_id = os.getpgid(self._proc.pid)
                os.killpg(group_id, signal.SIGINT)
            except ProcessLookupError:
                # Process already exited; nothing to signal
                return

    def terminate(self) -> None:
        """Terminate the process"""
        self._proc.terminate()

    def wait(self) -> None:
        """Wait for the process to finish"""
        if self._out_thread.is_alive():
            self._out_thread.join()
        if self._err_thread.is_alive():
            self._err_thread.join()

        # Handle case where the process ended before the last read could be done.
        # This will return None for the streams that weren't pipes.
        out, err = self._proc.communicate()

        if out:
            self._write_bytes(self._stdout, out)
        if err:
            self._write_bytes(self._stderr, err)

    def _reader_thread_func(self, read_stdout: bool) -> None:
        """
        Thread function that reads a stream from the process

        :param read_stdout: if True, then this thread deals with stdout. Otherwise it deals with stderr.
        """
        if read_stdout:
            read_stream = self._proc.stdout
            write_stream = self._stdout
        else:
            read_stream = self._proc.stderr
            write_stream = self._stderr

        # The thread should have been started only if this stream was a pipe
        assert read_stream is not None

        # Run until process completes
        while self._proc.poll() is None:
            # peek() returns whatever is already buffered without blocking for more
            # noinspection PyUnresolvedReferences
            available = read_stream.peek()  # type: ignore[attr-defined]
            if available:
                read_stream.read(len(available))
                self._write_bytes(write_stream, available)

    @staticmethod
    def _write_bytes(stream: Union[StdSim, TextIO], to_write: bytes) -> None:
        """
        Write bytes to a stream

        :param stream: the stream being written to
        :param to_write: the bytes being written
        """
        try:
            stream.buffer.write(to_write)
        except BrokenPipeError:
            # This occurs if output is being piped to a process that closed
            pass
class ContextFlag:
    """A context manager that doubles as a boolean flag inside the default sigint handler.

    While entered (count > 0) it signals that cmd2's SIGINT handler must not raise
    KeyboardInterrupt because a critical code section is executing. Signal handling
    always happens on the main thread, so no thread-safety is needed here.
    """

    def __init__(self) -> None:
        # Number of nested context entries; the flag is "set" while this is
        # positive and must never drop below zero.
        self.__count = 0

    def __bool__(self) -> bool:
        return self.__count > 0

    def __enter__(self) -> None:
        self.__count += 1

    def __exit__(self, *args: Any) -> None:
        self.__count -= 1
        if self.__count >= 0:
            return
        raise ValueError("count has gone below 0")
class RedirectionSavedState:
    """Created by each command to store information required to restore state after redirection"""

    def __init__(
        self,
        self_stdout: Union[StdSim, TextIO],
        sys_stdout: Union[StdSim, TextIO],
        pipe_proc_reader: Optional[ProcReader],
        saved_redirecting: bool,
    ) -> None:
        """
        RedirectionSavedState initializer

        :param self_stdout: saved value of Cmd.stdout
        :param sys_stdout: saved value of sys.stdout
        :param pipe_proc_reader: saved value of Cmd._cur_pipe_proc_reader
        :param saved_redirecting: saved value of Cmd._redirecting
        """
        # Tells if command is redirecting
        self.redirecting = False

        # Used to restore values after redirection ends
        self.saved_self_stdout = self_stdout
        self.saved_sys_stdout = sys_stdout

        # Used to restore values after command ends regardless of whether the command redirected
        self.saved_pipe_proc_reader = pipe_proc_reader
        self.saved_redirecting = saved_redirecting
def _remove_overridden_styles(styles_to_parse: List[str]) -> List[str]:
    """
    Utility function for align_text() / truncate_line() which filters a style list down
    to only those which would still be in effect if all were processed in order.

    This is mainly used to reduce how many style strings are stored in memory when
    building large multiline strings with ANSI styles. We only need to carry over
    styles from previous lines that are still in effect.

    :param styles_to_parse: list of styles to evaluate.
    :return: list of styles that are still in effect.
    """
    from . import (
        ansi,
    )

    class StyleState:
        """Keeps track of what text styles are enabled"""

        def __init__(self) -> None:
            # Contains styles still in effect, keyed by their index in styles_to_parse
            self.style_dict: Dict[int, str] = dict()

            # Indexes into style_dict; each remembers where the latest style of
            # its category sits so the previous one can be evicted on override
            self.reset_all: Optional[int] = None
            self.fg: Optional[int] = None
            self.bg: Optional[int] = None
            self.intensity: Optional[int] = None
            self.italic: Optional[int] = None
            self.overline: Optional[int] = None
            self.strikethrough: Optional[int] = None
            self.underline: Optional[int] = None

    # Read the previous styles in order and keep track of their states
    style_state = StyleState()

    for index, style in enumerate(styles_to_parse):
        # For styles types that we recognize, only keep their latest value from styles_to_parse.
        # All unrecognized style types will be retained and their order preserved.
        if style in (str(ansi.TextStyle.RESET_ALL), str(ansi.TextStyle.ALT_RESET_ALL)):
            # A reset wipes every tracked style; start from a clean state
            style_state = StyleState()
            style_state.reset_all = index
        elif ansi.STD_FG_RE.match(style) or ansi.EIGHT_BIT_FG_RE.match(style) or ansi.RGB_FG_RE.match(style):
            if style_state.fg is not None:
                style_state.style_dict.pop(style_state.fg)
            style_state.fg = index
        elif ansi.STD_BG_RE.match(style) or ansi.EIGHT_BIT_BG_RE.match(style) or ansi.RGB_BG_RE.match(style):
            if style_state.bg is not None:
                style_state.style_dict.pop(style_state.bg)
            style_state.bg = index
        elif style in (
            str(ansi.TextStyle.INTENSITY_BOLD),
            str(ansi.TextStyle.INTENSITY_DIM),
            str(ansi.TextStyle.INTENSITY_NORMAL),
        ):
            if style_state.intensity is not None:
                style_state.style_dict.pop(style_state.intensity)
            style_state.intensity = index
        elif style in (str(ansi.TextStyle.ITALIC_ENABLE), str(ansi.TextStyle.ITALIC_DISABLE)):
            if style_state.italic is not None:
                style_state.style_dict.pop(style_state.italic)
            style_state.italic = index
        elif style in (str(ansi.TextStyle.OVERLINE_ENABLE), str(ansi.TextStyle.OVERLINE_DISABLE)):
            if style_state.overline is not None:
                style_state.style_dict.pop(style_state.overline)
            style_state.overline = index
        elif style in (str(ansi.TextStyle.STRIKETHROUGH_ENABLE), str(ansi.TextStyle.STRIKETHROUGH_DISABLE)):
            if style_state.strikethrough is not None:
                style_state.style_dict.pop(style_state.strikethrough)
            style_state.strikethrough = index
        elif style in (str(ansi.TextStyle.UNDERLINE_ENABLE), str(ansi.TextStyle.UNDERLINE_DISABLE)):
            if style_state.underline is not None:
                style_state.style_dict.pop(style_state.underline)
            style_state.underline = index

        # Store this style and its location in the dictionary
        style_state.style_dict[index] = style

    return list(style_state.style_dict.values())
class TextAlignment(Enum):
    """Horizontal text alignment options used by align_text() and its convenience wrappers"""

    LEFT = 1
    CENTER = 2
    RIGHT = 3
def align_text(
    text: str,
    alignment: TextAlignment,
    *,
    fill_char: str = ' ',
    width: Optional[int] = None,
    tab_width: int = 4,
    truncate: bool = False,
) -> str:
    """
    Align text for display within a given width. Supports characters with display widths greater than 1.
    ANSI style sequences do not count toward the display width. If text has line breaks, then each line is aligned
    independently.

    There are convenience wrappers around this function: align_left(), align_center(), and align_right()

    :param text: text to align (can contain multiple lines)
    :param alignment: how to align the text
    :param fill_char: character that fills the alignment gap. Defaults to space. (Cannot be a line breaking character)
    :param width: display width of the aligned text. Defaults to width of the terminal.
    :param tab_width: any tabs in the text will be replaced with this many spaces. if fill_char is a tab, then it will
                      be converted to one space.
    :param truncate: if True, then each line will be shortened to fit within the display width. The truncated
                     portions are replaced by a '…' character. Defaults to False.
    :return: aligned text
    :raises: TypeError if fill_char is more than one character (not including ANSI style sequences)
    :raises: ValueError if text or fill_char contains an unprintable character
    :raises: ValueError if width is less than 1
    """
    import io
    import shutil

    from . import (
        ansi,
    )

    if width is None:
        width = shutil.get_terminal_size().columns
    if width < 1:
        raise ValueError("width must be at least 1")

    # Convert tabs to spaces so width math below only sees fixed-width cells
    text = text.replace('\t', ' ' * tab_width)
    fill_char = fill_char.replace('\t', ' ')

    # Save fill_char with no styles for use later
    stripped_fill_char = ansi.strip_style(fill_char)
    if len(stripped_fill_char) != 1:
        raise TypeError("Fill character must be exactly one character long")

    fill_char_width = ansi.style_aware_wcswidth(fill_char)
    if fill_char_width == -1:
        raise (ValueError("Fill character is an unprintable character"))

    # Isolate the style chars before and after the fill character. We will use them when building sequences of
    # fill characters. Instead of repeating the style characters for each fill character, we'll wrap each sequence.
    fill_char_style_begin, fill_char_style_end = fill_char.split(stripped_fill_char)

    if text:
        lines = text.splitlines()
    else:
        # Even empty input produces one (empty) aligned line
        lines = ['']

    text_buf = io.StringIO()

    # ANSI style sequences that may affect subsequent lines will be cancelled by the fill_char's style.
    # To avoid this, we save styles which are still in effect so we can restore them when beginning the next line.
    # This also allows lines to be used independently and still have their style. TableCreator does this.
    previous_styles: List[str] = []

    for index, line in enumerate(lines):
        if index > 0:
            text_buf.write('\n')
        if truncate:
            line = truncate_line(line, width)

        line_width = ansi.style_aware_wcswidth(line)
        if line_width == -1:
            raise (ValueError("Text to align contains an unprintable character"))

        # Get list of styles in this line
        line_styles = list(get_styles_dict(line).values())

        # Calculate how wide each side of filling needs to be
        if line_width >= width:
            # Don't return here even though the line needs no fill chars.
            # There may be styles sequences to restore.
            total_fill_width = 0
        else:
            total_fill_width = width - line_width

        if alignment == TextAlignment.LEFT:
            left_fill_width = 0
            right_fill_width = total_fill_width
        elif alignment == TextAlignment.CENTER:
            left_fill_width = total_fill_width // 2
            right_fill_width = total_fill_width - left_fill_width
        else:
            # TextAlignment.RIGHT: all fill goes on the left
            left_fill_width = total_fill_width
            right_fill_width = 0

        # Determine how many fill characters are needed to cover the width
        left_fill = (left_fill_width // fill_char_width) * stripped_fill_char
        right_fill = (right_fill_width // fill_char_width) * stripped_fill_char

        # In cases where the fill character display width didn't divide evenly into
        # the gap being filled, pad the remainder with space.
        left_fill += ' ' * (left_fill_width - ansi.style_aware_wcswidth(left_fill))
        right_fill += ' ' * (right_fill_width - ansi.style_aware_wcswidth(right_fill))

        # Don't allow styles in fill characters and text to affect one another
        if fill_char_style_begin or fill_char_style_end or previous_styles or line_styles:
            if left_fill:
                left_fill = ansi.TextStyle.RESET_ALL + fill_char_style_begin + left_fill + fill_char_style_end
            left_fill += ansi.TextStyle.RESET_ALL

            if right_fill:
                right_fill = ansi.TextStyle.RESET_ALL + fill_char_style_begin + right_fill + fill_char_style_end
            right_fill += ansi.TextStyle.RESET_ALL

        # Write the line and restore styles from previous lines which are still in effect
        text_buf.write(left_fill + ''.join(previous_styles) + line + right_fill)

        # Update list of styles that are still in effect for the next line
        previous_styles.extend(line_styles)
        previous_styles = _remove_overridden_styles(previous_styles)

    return text_buf.getvalue()
def align_left(
    text: str, *, fill_char: str = ' ', width: Optional[int] = None, tab_width: int = 4, truncate: bool = False
) -> str:
    """
    Left align text for display within a given width.

    Convenience wrapper around align_text() with TextAlignment.LEFT. Supports characters
    with display widths greater than 1. ANSI style sequences do not count toward the display
    width. If text has line breaks, then each line is aligned independently.

    :param text: text to left align (can contain multiple lines)
    :param fill_char: character that fills the alignment gap. Defaults to space. (Cannot be a line breaking character)
    :param width: display width of the aligned text. Defaults to width of the terminal.
    :param tab_width: any tabs in the text will be replaced with this many spaces. if fill_char is a tab, then it will
                      be converted to one space.
    :param truncate: if True, then text will be shortened to fit within the display width. The truncated portion is
                     replaced by a '…' character. Defaults to False.
    :return: left-aligned text
    :raises: TypeError if fill_char is more than one character (not including ANSI style sequences)
    :raises: ValueError if text or fill_char contains an unprintable character
    :raises: ValueError if width is less than 1
    """
    return align_text(
        text,
        TextAlignment.LEFT,
        fill_char=fill_char,
        width=width,
        tab_width=tab_width,
        truncate=truncate,
    )
def align_center(
    text: str, *, fill_char: str = ' ', width: Optional[int] = None, tab_width: int = 4, truncate: bool = False
) -> str:
    """
    Center text for display within a given width.

    Convenience wrapper around align_text() with TextAlignment.CENTER. Supports characters
    with display widths greater than 1. ANSI style sequences do not count toward the display
    width. If text has line breaks, then each line is aligned independently.

    :param text: text to center (can contain multiple lines)
    :param fill_char: character that fills the alignment gap. Defaults to space. (Cannot be a line breaking character)
    :param width: display width of the aligned text. Defaults to width of the terminal.
    :param tab_width: any tabs in the text will be replaced with this many spaces. if fill_char is a tab, then it will
                      be converted to one space.
    :param truncate: if True, then text will be shortened to fit within the display width. The truncated portion is
                     replaced by a '…' character. Defaults to False.
    :return: centered text
    :raises: TypeError if fill_char is more than one character (not including ANSI style sequences)
    :raises: ValueError if text or fill_char contains an unprintable character
    :raises: ValueError if width is less than 1
    """
    return align_text(
        text,
        TextAlignment.CENTER,
        fill_char=fill_char,
        width=width,
        tab_width=tab_width,
        truncate=truncate,
    )
def align_right(
    text: str, *, fill_char: str = ' ', width: Optional[int] = None, tab_width: int = 4, truncate: bool = False
) -> str:
    """
    Right align text for display within a given width.

    Convenience wrapper around align_text() with TextAlignment.RIGHT. Supports characters
    with display widths greater than 1. ANSI style sequences do not count toward the display
    width. If text has line breaks, then each line is aligned independently.

    :param text: text to right align (can contain multiple lines)
    :param fill_char: character that fills the alignment gap. Defaults to space. (Cannot be a line breaking character)
    :param width: display width of the aligned text. Defaults to width of the terminal.
    :param tab_width: any tabs in the text will be replaced with this many spaces. if fill_char is a tab, then it will
                      be converted to one space.
    :param truncate: if True, then text will be shortened to fit within the display width. The truncated portion is
                     replaced by a '…' character. Defaults to False.
    :return: right-aligned text
    :raises: TypeError if fill_char is more than one character (not including ANSI style sequences)
    :raises: ValueError if text or fill_char contains an unprintable character
    :raises: ValueError if width is less than 1
    """
    return align_text(
        text,
        TextAlignment.RIGHT,
        fill_char=fill_char,
        width=width,
        tab_width=tab_width,
        truncate=truncate,
    )
def truncate_line(line: str, max_width: int, *, tab_width: int = 4) -> str:
    """
    Truncate a single line to fit within a given display width. Any portion of the string that is truncated
    is replaced by a '…' character. Supports characters with display widths greater than 1. ANSI style sequences
    do not count toward the display width.

    If there are ANSI style sequences in the string after where truncation occurs, this function will append them
    to the returned string.

    This is done to prevent issues caused in cases like: truncate_line(Fg.BLUE + hello + Fg.RESET, 3)
    In this case, "hello" would be truncated before Fg.RESET resets the color from blue. Appending the remaining style
    sequences makes sure the style is in the same state had the entire string been printed. align_text() relies on this
    behavior when preserving style over multiple lines.

    :param line: text to truncate
    :param max_width: the maximum display width the resulting string is allowed to have
    :param tab_width: any tabs in the text will be replaced with this many spaces
    :return: line that has a display width less than or equal to width
    :raises: ValueError if text contains an unprintable character like a newline
    :raises: ValueError if max_width is less than 1
    """
    import io

    from . import (
        ansi,
    )

    # Handle tabs
    line = line.replace('\t', ' ' * tab_width)

    if ansi.style_aware_wcswidth(line) == -1:
        raise (ValueError("text contains an unprintable character"))

    if max_width < 1:
        raise ValueError("max_width must be at least 1")

    # Nothing to do if the line already fits
    if ansi.style_aware_wcswidth(line) <= max_width:
        return line

    # Find all style sequences in the line
    styles_dict = get_styles_dict(line)

    # Add characters one by one and preserve all style sequences
    done = False
    index = 0
    total_width = 0
    truncated_buf = io.StringIO()

    while not done:
        # Check if a style sequence is at this index. These don't count toward display width.
        if index in styles_dict:
            truncated_buf.write(styles_dict[index])
            style_len = len(styles_dict[index])
            # Consumed sequences are removed so only the *remaining* ones are appended at the end
            styles_dict.pop(index)
            index += style_len
            continue

        char = line[index]
        char_width = ansi.style_aware_wcswidth(char)

        # This char will make the text too wide, add the ellipsis instead.
        # Comparison is >= (not >) to reserve room for the ellipsis itself:
        # we already know more characters follow, since the full line is wider than max_width.
        if char_width + total_width >= max_width:
            char = constants.HORIZONTAL_ELLIPSIS
            char_width = ansi.style_aware_wcswidth(char)
            done = True

        total_width += char_width
        truncated_buf.write(char)
        index += 1

    # Filter out overridden styles from the remaining ones
    remaining_styles = _remove_overridden_styles(list(styles_dict.values()))

    # Append the remaining styles to the truncated text
    truncated_buf.write(''.join(remaining_styles))

    return truncated_buf.getvalue()
def get_styles_dict(text: str) -> Dict[int, str]:
    """
    Return an OrderedDict containing all ANSI style sequences found in a string

    The structure of the dictionary is:
        key: index where sequence begins
        value: ANSI style sequence found at index in text

    Keys are in ascending order

    :param text: text to search for style sequences
    """
    from . import (
        ansi,
    )

    # finditer() scans left to right, so insertion order (and therefore key order)
    # is ascending. The previous implementation advanced its search offset with
    # `start += len(match.group())` instead of `start = match.end()`; whenever a
    # match began past the current offset this re-scanned the same region and
    # re-found the same match on the next iteration. Output was identical, but
    # the work was redundant — finditer avoids the problem entirely.
    return collections.OrderedDict(
        (match.start(), match.group()) for match in ansi.ANSI_STYLE_RE.finditer(text)
    )
def categorize(func: Union[Callable[..., Any], Iterable[Callable[..., Any]]], category: str) -> None:
    """Categorize a function.

    The help command output will group the passed function under the
    specified category heading

    :param func: function or list of functions to categorize
    :param category: category to put it in

    :Example:

    >>> import cmd2
    >>> class MyApp(cmd2.Cmd):
    >>>   def do_echo(self, arglist):
    >>>     self.poutput(' '.join(arglist)
    >>>
    >>>   cmd2.utils.categorize(do_echo, "Text Processing")

    For an alternative approach to categorizing commands using a decorator, see
    :func:`~cmd2.decorators.with_category`
    """
    if isinstance(func, Iterable):
        for member in func:
            setattr(member, constants.CMD_ATTR_HELP_CATEGORY, category)
        return

    # Bound methods can't take new attributes directly; tag the underlying function object instead.
    if inspect.ismethod(func):
        target = func.__func__  # type: ignore[attr-defined]
    else:
        target = func
    setattr(target, constants.CMD_ATTR_HELP_CATEGORY, category)
def get_defining_class(meth: Callable[..., Any]) -> Optional[Type[Any]]:
    """
    Attempt to resolve the class that defined a method.

    Inspired by implementation published here:
        https://stackoverflow.com/a/25959545/1956611

    :param meth: method to inspect
    :return: class type in which the supplied method was defined. None if it couldn't be resolved.
    """
    # functools.partial wraps another callable; inspect that one instead.
    if isinstance(meth, functools.partial):
        return get_defining_class(meth.func)

    bound_builtin = (
        inspect.isbuiltin(meth)
        and getattr(meth, '__self__') is not None
        and getattr(meth.__self__, '__class__')  # type: ignore[attr-defined]
    )
    if inspect.ismethod(meth) or bound_builtin:
        # Walk the MRO of the bound instance's class and return the first class
        # whose own namespace defines this method name.
        for klass in inspect.getmro(meth.__self__.__class__):  # type: ignore[attr-defined]
            if meth.__name__ in klass.__dict__:
                return klass
        meth = getattr(meth, '__func__', meth)  # fallback to __qualname__ parsing

    if inspect.isfunction(meth):
        # Derive the owning name from the qualified name (strip any <locals> scope
        # and the function's own name) and look it up on the defining module.
        owner_name = meth.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0]
        candidate = getattr(inspect.getmodule(meth), owner_name)
        if isinstance(candidate, type):
            return candidate

    return cast(type, getattr(meth, '__objclass__', None))  # handle special descriptor objects
class CompletionMode(Enum):
    """Enum for what type of tab completion to perform in cmd2.Cmd.read_input()"""

    # Tab completion will be disabled during read_input() call.
    # Use of custom up-arrow history supported.
    NONE = 1

    # read_input() will tab complete cmd2 commands and their arguments.
    # cmd2's command line history will be used for up arrow if history is not provided.
    # Otherwise use of custom up-arrow history supported.
    COMMANDS = 2

    # read_input() will tab complete based on one of its following parameters:
    # choices, choices_provider, completer, parser.
    # Use of custom up-arrow history supported.
    CUSTOM = 3
class CustomCompletionSettings:
    """Used by cmd2.Cmd.complete() to tab complete strings other than command arguments"""

    def __init__(self, parser: argparse.ArgumentParser, *, preserve_quotes: bool = False) -> None:
        """
        Initializer

        :param parser: arg parser defining format of string being tab completed
        :param preserve_quotes: if True, then quoted tokens will keep their quotes when processed by
                                ArgparseCompleter. This is helpful in cases when you're tab completing
                                flag-like tokens (e.g. -o, --option) and you don't want them to be
                                treated as argparse flags when quoted. Set this to True if you plan
                                on passing the string to argparse with the tokens still quoted.
        """
        # Whether quoted tokens keep their quotes during completion processing
        self.preserve_quotes = preserve_quotes
        # Parser describing the format of the string being completed
        self.parser = parser
def strip_doc_annotations(doc: str) -> str:
    """
    Strip annotations from a docstring leaving only the text description

    :param doc: documentation string
    """
    # Collect the stripped lines of the first contiguous description block.
    description_lines = []
    for raw_line in doc.splitlines():
        line = raw_line.strip()
        if line.startswith(':'):
            # Annotation lines (e.g. :param x:) end the description once it has started
            if description_lines:
                break
        elif line:
            description_lines.append(line)
        elif description_lines:
            # A blank line after the description has started terminates it
            break
    return "\n".join(description_lines)
|
paramikoe.py | #
# Paramiko Expect
#
# Written by Fotis Gimian
# http://github.com/fgimian
#
# This library works with a Paramiko SSH channel to provide native SSH
# expect-like handling for servers. The library may be used to interact
# with commands like 'configure' or Cisco IOS devices or with interactive
# Unix scripts or commands.
#
# You must have Paramiko installed in order to use this library.
#
import sys
import re
import socket
# Windows does not have termios
try:
import termios
import tty
has_termios = True
except ImportError:
import threading
has_termios = False
import select
class SSHClientInteraction:
    """This class allows an expect-like interface to Paramiko which allows
    coders to interact with applications and the shell of the connected
    device.

    NOTE(review): this module is Python-2-era code. Several places assume
    channel.recv() returns str (it returns bytes on Python 3) — those spots
    are flagged inline and would need decoding to run on Python 3.
    """

    def __init__(self, client, timeout=60, newline='\r', buffer_size=1024,
                 display=False):
        """The constructor for our SSHClientInteraction class.

        Arguments:
        client -- A Paramiko SSHClient object

        Keyword arguments:
        timeout -- The connection timeout in seconds
        newline -- The newline character to send after each command
        buffer_size -- The amount of data (in bytes) that will be read at a
                       time after a command is run
        display -- Whether or not the output should be displayed in real-time
                   as it is being performed (especially useful when debugging)
        """
        self.channel = client.invoke_shell()
        self.newline = newline
        self.buffer_size = buffer_size
        self.display = display
        self.timeout = timeout
        # Raw output of the last expect() call
        self.current_output = ''
        # Output of the last expect() call with the echoed command and prompt removed
        self.current_output_clean = ''
        # Last string passed to send(); used by expect() to clean the output
        self.current_send_string = ''
        # The regex string that matched in the last expect() call
        self.last_match = ''

    def __del__(self):
        """The destructor for our SSHClientInteraction class."""
        self.close()

    def close(self):
        """Attempts to close the channel for clean completion."""
        try:
            self.channel.close()
        except Exception:
            # Best-effort close: the channel may already be gone. Catching
            # Exception instead of using a bare except avoids swallowing
            # SystemExit/KeyboardInterrupt during interpreter shutdown.
            pass

    def expect(self, re_strings='', timeout=None):
        r"""This function takes in a regular expression (or regular expressions)
        that represent the last line of output from the server. The function
        waits for one or more of the terms to be matched. The regexes are
        matched using expression \n<regex>$ so you'll need to provide an
        easygoing regex such as '.*server.*' if you wish to have a fuzzy match.

        Keyword arguments:
        re_strings -- Either a regex string or list of regex strings that
                      we should expect. If this is not specified, then
                      EOF is expected (i.e. the shell is completely closed
                      after the exit command is issued)
        timeout -- (long) timeout in seconds; if this timeout passes, an
                   exception is raised

        Returns:
        - EOF: Returns -1
        - Regex String: When matched, returns 0
        - List of Regex Strings: Returns the index of the matched string as
          an integer

        Raises:
        exception on timeout
        """
        # Set the channel timeout
        if timeout is None:
            timeout = self.timeout
        self.channel.settimeout(timeout)

        # Create an empty output buffer
        self.current_output = ''

        # This function needs all regular expressions to be in the form of a
        # list, so if the user provided a string, let's convert it to a 1
        # item list.
        if len(re_strings) != 0 and isinstance(re_strings, str):
            re_strings = [re_strings]

        # Loop until one of the expressions is matched or loop forever if
        # nothing is expected (usually used for exit)
        while (
            len(re_strings) == 0 or
            not [re_string
                 for re_string in re_strings
                 if re.match('.*\n' + re_string + '$',
                             self.current_output, re.DOTALL)]
        ):
            # Read some of the output
            buffer = self.channel.recv(self.buffer_size)

            # If we have an empty buffer, then the SSH session has been closed
            if len(buffer) == 0:
                break

            # Strip all ugly \r (Ctrl-M making) characters from the current
            # read. NOTE(review): assumes recv() returned str (Python 2);
            # Python 3 would need buffer.decode() first.
            buffer = buffer.replace('\r', '')

            # Display the current buffer in realtime if requested to do so
            # (good for debugging purposes)
            if self.display:
                sys.stdout.write(buffer)
                sys.stdout.flush()

            # Add the currently read buffer to the output
            self.current_output += buffer

        # Grab the first pattern that was matched
        if len(re_strings) != 0:
            # NOTE(review): if the session closes (EOF break above) before any
            # pattern matches, found_pattern is empty and the indexing below
            # raises IndexError — confirm whether that is the intended failure
            # mode for an unexpected disconnect.
            found_pattern = [(re_index, re_string)
                             for re_index, re_string in enumerate(re_strings)
                             if re.match('.*\n' + re_string + '$',
                                         self.current_output, re.DOTALL)]

        self.current_output_clean = self.current_output

        # Clean the output up by removing the sent command
        if len(self.current_send_string) != 0:
            self.current_output_clean = (
                self.current_output_clean.replace(
                    self.current_send_string + '\n', ''))

        # Reset the current send string to ensure that multiple expect calls
        # don't result in bad output cleaning
        self.current_send_string = ''

        # Clean the output up by removing the expect output from the end if
        # requested and save the details of the matched pattern
        if len(re_strings) != 0:
            self.current_output_clean = (
                re.sub(found_pattern[0][1] + '$', '',
                       self.current_output_clean))
            self.last_match = found_pattern[0][1]
            return found_pattern[0][0]
        else:
            # We would socket timeout before getting here, but for good
            # measure, let's send back a -1
            return -1

    def send(self, send_string):
        """Saves and sends the send string provided"""
        self.current_send_string = send_string
        self.channel.send(send_string + self.newline)

    def tail(self, line_prefix=None):
        r"""This function takes control of an SSH channel and displays line
        by line of output as \n is received. This function is specifically
        made for tail-like commands.

        Keyword arguments:
        line_prefix -- Text to append to the left of each line of output.
                       This is especially useful if you are using my
                       MultiSSH class to run tail commands over multiple
                       servers.
        """
        # Set the channel timeout to the maximum integer the server allows,
        # setting this to None breaks the KeyboardInterrupt exception and
        # won't allow us to Ctrl+C out of the script.
        # Fix: sys.maxint was removed in Python 3; sys.maxsize is the portable
        # equivalent and exists on both Python 2.6+ and Python 3.
        self.channel.settimeout(sys.maxsize)

        # Create an empty line buffer and a line counter
        current_line = ''
        line_counter = 0

        # Loop forever, Ctrl+C (KeyboardInterrupt) is used to break the tail
        while True:
            # Read the output one byte at a time so we can detect \n correctly
            buffer = self.channel.recv(1)

            # If we have an empty buffer, then the SSH session has been closed
            if len(buffer) == 0:
                break

            # Strip all ugly \r (Ctrl-M making) characters from the current
            # read. NOTE(review): assumes recv() returned str (Python 2).
            buffer = buffer.replace('\r', '')

            # Add the currently read buffer to the current line output
            current_line += buffer

            # Display the last read line in realtime when we reach a \n
            # character. The first line (line_counter == 0) is the echoed
            # command itself and is deliberately not displayed.
            if current_line.endswith('\n'):
                if line_counter and line_prefix:
                    sys.stdout.write(line_prefix)
                if line_counter:
                    sys.stdout.write(current_line)
                    sys.stdout.flush()
                line_counter += 1
                current_line = ''

    def take_control(self):
        """This function is a better documented and touched up version of the
        posix_shell function found in the interactive.py demo script that
        ships with Paramiko"""
        if has_termios:
            # Get attributes of the shell you were in before going to the
            # new one
            original_tty = termios.tcgetattr(sys.stdin)
            try:
                tty.setraw(sys.stdin.fileno())
                tty.setcbreak(sys.stdin.fileno())

                # We must set the timeout to 0 so that we can bypass times when
                # there is no available text to receive
                self.channel.settimeout(0)

                # Loop forever until the user exits (i.e. read buffer is empty)
                while True:
                    select_read, select_write, select_exception = (
                        select.select([self.channel, sys.stdin], [], []))

                    # Read any output from the terminal and print it to the
                    # screen. With timeout set to 0, we just can ignore times
                    # when there's nothing to receive.
                    if self.channel in select_read:
                        try:
                            buffer = self.channel.recv(self.buffer_size)
                            if len(buffer) == 0:
                                break
                            sys.stdout.write(buffer)
                            sys.stdout.flush()
                        except socket.timeout:
                            pass

                    # Send any keyboard input to the terminal one byte at a
                    # time
                    if sys.stdin in select_read:
                        buffer = sys.stdin.read(1)
                        if len(buffer) == 0:
                            break
                        self.channel.send(buffer)
            finally:
                # Restore the attributes of the shell you were in
                termios.tcsetattr(sys.stdin, termios.TCSADRAIN, original_tty)
        else:
            # Windows fallback: no termios, so pump channel output on a
            # background thread while the main thread forwards keystrokes.
            def writeall(sock):
                while True:
                    buffer = sock.recv(self.buffer_size)
                    if len(buffer) == 0:
                        break
                    sys.stdout.write(buffer)
                    sys.stdout.flush()

            writer = threading.Thread(target=writeall, args=(self.channel,))
            writer.start()

            try:
                while True:
                    buffer = sys.stdin.read(1)
                    if len(buffer) == 0:
                        break
                    self.channel.send(buffer)
            # User has hit Ctrl+Z or F6
            except EOFError:
                pass
|
x10.py | # X10 is so slow...
# I ran a web server to let me control X10 lights through a CM11A gateway.
# there's nearly a second of latency per command! on top of that, it just
# wasn't very reliable (some commands would go, some wouldn't).
# I'm not using this code anymore, but left it here if it's helpful to you
import logging, requests, threading, queue
logger = logging.getLogger(__name__)
import os
# Read the X10 gateway address from the environment. When it is absent, the
# module still exposes register() below, but as a no-op stub.
if 'X10_HTTP_SERVER' in os.environ:
    X10_HTTP_SERVER = os.environ['X10_HTTP_SERVER']
else:
    logger.warning("X10_HTTP_SERVER wasn't specified in the environment... disabling!")
    X10_HTTP_SERVER = None

if X10_HTTP_SERVER is not None:
    # Queue of (house/unit code, command) tuples consumed by the worker thread
    x10threadqueue = queue.Queue()

    def x10thread():
        # We run all the X10 web requests on a separate thread because they are
        # so slow we don't want to block the music playing
        logger.debug("x10 thread started")
        while True:
            code,cmd = x10threadqueue.get()
            # NOTE(review): the gateway URL is hard-coded here and the
            # X10_HTTP_SERVER value read above is never used — confirm whether
            # the request should target X10_HTTP_SERVER instead of 10.0.0.21.
            r = requests.get('http://10.0.0.21:8080/'+code+'/'+cmd)
            logger.debug("x10 response = "+r.text)
            x10threadqueue.task_done()

    # Daemon thread so it never blocks interpreter shutdown
    thread = threading.Thread(target=x10thread)
    thread.daemon = True
    thread.start()

    X10_DELAY_MS = 850
    # X10 (especially X10 via http request) is extremely inconsistent
    # on timeliness... but this is about how much latency.

    def raw_x10(code,cmd):
        # Enqueue the command; the worker thread performs the slow HTTP request
        x10threadqueue.put((code,cmd))

    def x10_macro(time,code,cmd):
        """ this macro is to deal with the delay """
        # Schedule the raw command earlier by X10_DELAY_MS to compensate for gateway latency
        return [(time-X10_DELAY_MS,"raw_x10 %s %s"%(code,cmd))]

    def register(commands,macros,commits):
        # Hook point called by the host application to install this module's commands
        macros['x10'] = x10_macro
        commands['raw_x10'] = raw_x10
else:
    def register(commands,macros,commits):
        # X10 support disabled: register nothing
        pass
|
conftest.py | import logging
from itertools import chain
import os
from random import randrange
import tempfile
import textwrap
from time import sleep
import yaml
import pytest
from botocore.exceptions import ClientError
import threading
from datetime import datetime
import random
from math import floor
from ocs_ci.utility.utils import (
TimeoutSampler, get_rook_repo, get_ocp_version, ceph_health_check
)
from ocs_ci.ocs.exceptions import TimeoutExpiredError, CephHealthException
from ocs_ci.utility.spreadsheet.spreadsheet_api import GoogleSpreadSheetAPI
from ocs_ci.utility import aws
from ocs_ci.framework import config
from ocs_ci.framework.pytest_customization.marks import (
deployment, ignore_leftovers, tier_marks
)
from ocs_ci.ocs.version import get_ocs_version, report_ocs_version
from ocs_ci.utility.environment_check import (
get_status_before_execution, get_status_after_execution
)
from ocs_ci.utility.utils import (
get_openshift_client, ocsci_log_path, get_testrun_name,
ceph_health_check_base, skipif_ocs_version
)
from ocs_ci.deployment import factory as dep_factory
from tests import helpers
from tests.manage.mcg.helpers import get_rgw_restart_count
from ocs_ci.ocs import constants, ocp, defaults, node, platform_nodes
from ocs_ci.ocs.resources.mcg import MCG
from ocs_ci.ocs.resources.mcg_bucket import S3Bucket, OCBucket, CLIBucket
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs.resources.pod import get_rgw_pod
from ocs_ci.ocs.resources.pvc import PVC
from ocs_ci.ocs.ocp import OCP
from ocs_ci.utility import deployment_openshift_logging as ocp_logging_obj
from ocs_ci.utility.uninstall_openshift_logging import uninstall_cluster_logging
from ocs_ci.utility import templating
log = logging.getLogger(__name__)
class OCSLogFormatter(logging.Formatter):
    """Log formatter that produces the standard ocs-ci log line layout."""

    def __init__(self):
        # Timestamp - level - logger.function.line - message
        super().__init__(
            "%(asctime)s - %(levelname)s - %(name)s.%(funcName)s.%(lineno)d "
            "- %(message)s"
        )
def pytest_logger_config(logger_config):
    """
    pytest-logger plugin hook: configure per-test log capture.

    Enables the root logger at info level on stdout, splits log files by
    test outcome, and installs the ocs-ci log line formatter.

    Args:
        logger_config: pytest-logger configuration object provided by the plugin
    """
    logger_config.add_loggers([''], stdout_level='info')
    logger_config.set_log_option_default('')
    logger_config.split_by_outcome()
    logger_config.set_formatter_class(OCSLogFormatter)
def pytest_collection_modifyitems(session, config, items):
    """
    A pytest hook to filter out skipped tests satisfying
    skipif_ocs_version

    Args:
        session: pytest session
        config: pytest config object
        items: list of collected tests
    """
    to_remove = []
    for test_item in items:
        marker = test_item.get_closest_marker("skipif_ocs_version")
        if not marker:
            continue
        # marker.args is a tuple; the skip condition is its first element
        if skipif_ocs_version(marker.args[0]):
            log.info(
                f'Test: {test_item} will be skipped due to {marker.args}'
            )
            to_remove.append(test_item)
    for test_item in to_remove:
        items.remove(test_item)
@pytest.fixture()
def supported_configuration():
    """
    Check that cluster nodes have enough CPU and Memory as described in:
    https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.2/html-single/planning_your_deployment/index#infrastructure-requirements_rhocs

    This fixture is intended as a prerequisite for tests or fixtures that
    run flaky on configurations that don't meet minimal requirements.

    Minimum requirements for each starting node (OSD+MON):
      * 16 CPUs
      * 64 GB memory
    Last documentation check: 2020-02-21
    """
    min_cpu = 16
    min_memory = 64 * 10**9

    # Kubernetes reports memory capacity with binary-unit suffixes;
    # map each suffix to its byte multiplier for normalization below.
    suffix_multipliers = {'Ki': 2**10, 'Mi': 2**20, 'Gi': 2**30, 'Ti': 2**40}

    node_obj = ocp.OCP(kind=constants.NODE)
    log.info('Checking if system meets minimal requirements')
    nodes = node_obj.get(selector=constants.WORKER_LABEL).get('items')
    log.info(
        f"Checking following nodes with worker selector (assuming that "
        f"this is ran in CI and there are no worker nodes without OCS):\n"
        f"{[item.get('metadata').get('name') for item in nodes]}"
    )
    for node_info in nodes:
        real_cpu = int(node_info['status']['capacity']['cpu'])
        raw_memory = node_info['status']['capacity']['memory']
        for suffix, multiplier in suffix_multipliers.items():
            if raw_memory.endswith(suffix):
                real_memory = int(raw_memory[0:-2]) * multiplier
                break
        else:
            # No recognized suffix: the capacity is already plain bytes
            real_memory = int(raw_memory)

        if real_cpu < min_cpu or real_memory < min_memory:
            error_msg = (
                f"Node {node_info.get('metadata').get('name')} doesn't have "
                f"minimum of required reasources for running the test:\n"
                f"{min_cpu} CPU and {min_memory} Memory\nIt has:\n{real_cpu} "
                f"CPU and {real_memory} Memory"
            )
            log.error(error_msg)
            pytest.xfail(error_msg)
@pytest.fixture(scope='class')
def secret_factory_class(request):
    """Class-scoped wrapper around secret_factory_fixture."""
    return secret_factory_fixture(request)


@pytest.fixture(scope='session')
def secret_factory_session(request):
    """Session-scoped wrapper around secret_factory_fixture."""
    return secret_factory_fixture(request)


@pytest.fixture(scope='function')
def secret_factory(request):
    """Function-scoped wrapper around secret_factory_fixture."""
    return secret_factory_fixture(request)
def secret_factory_fixture(request):
    """
    Secret factory. Calling this fixture creates a new secret.
    RBD based is default.

    ** This method should not be used anymore **
    ** This method is for internal testing only **
    """
    created_secrets = []

    def factory(interface=constants.CEPHBLOCKPOOL):
        """
        Args:
            interface (str): CephBlockPool or CephFileSystem. This decides
                whether a RBD based or CephFS resource is created.
                RBD is default.
        """
        secret_obj = helpers.create_secret(interface_type=interface)
        assert secret_obj, "Failed to create a secret"
        created_secrets.append(secret_obj)
        return secret_obj

    def finalizer():
        """
        Delete the RBD secrets
        """
        for secret_obj in created_secrets:
            secret_obj.delete()
            secret_obj.ocp.wait_for_delete(secret_obj.name)

    request.addfinalizer(finalizer)
    return factory
@pytest.fixture(scope="session", autouse=True)
def log_ocs_version(cluster):
    """
    Fixture handling version reporting for OCS.

    This fixture handles alignment of the version reporting, so that we:

     * report version for each test run (no matter if just deployment, just
       test or both deployment and tests are executed)
     * prevent conflict of version reporting with deployment/teardown (eg. we
       should not run the version logging before actual deployment, or after
       a teardown)

    Version is reported in:

     * log entries of INFO log level during test setup phase
     * ocs_version file in cluster path directory (for copy pasting into bug
       reports)
    """
    cli_params = config.RUN['cli_params']
    # A teardown-only run has no cluster left to interrogate
    if cli_params.get('teardown') and not cli_params.get('deploy'):
        log.info("Skipping version reporting for teardown.")
        return

    cluster_version, image_dict = get_ocs_version()
    # Timestamped file name so repeated runs don't overwrite earlier reports
    file_name = os.path.join(
        config.ENV_DATA['cluster_path'],
        "ocs_version." + datetime.now().isoformat())
    with open(file_name, "w") as file_obj:
        report_ocs_version(cluster_version, image_dict, file_obj)
    log.info("human readable ocs version info written into %s", file_name)
@pytest.fixture(scope='class')
def ceph_pool_factory_class(request):
    """Class-scoped wrapper around ceph_pool_factory_fixture."""
    return ceph_pool_factory_fixture(request)


@pytest.fixture(scope='session')
def ceph_pool_factory_session(request):
    """Session-scoped wrapper around ceph_pool_factory_fixture."""
    return ceph_pool_factory_fixture(request)


@pytest.fixture(scope='function')
def ceph_pool_factory(request):
    """Function-scoped wrapper around ceph_pool_factory_fixture."""
    return ceph_pool_factory_fixture(request)
def ceph_pool_factory_fixture(request):
    """
    Create a Ceph pool factory.
    Calling this fixture creates new Ceph pool instance.

    ** This method should not be used anymore **
    ** This method is for internal testing only **
    """
    created_pools = []

    def factory(interface=constants.CEPHBLOCKPOOL):
        if interface == constants.CEPHBLOCKPOOL:
            pool_obj = helpers.create_ceph_block_pool()
        elif interface == constants.CEPHFILESYSTEM:
            # CephFS pools aren't created here; reuse the existing filesystem resource
            cfs = ocp.OCP(
                kind=constants.CEPHFILESYSTEM,
                namespace=defaults.ROOK_CLUSTER_NAMESPACE
            ).get(defaults.CEPHFILESYSTEM_NAME)
            pool_obj = OCS(**cfs)
        assert pool_obj, f"Failed to create {interface} pool"
        # Only pools we actually created get torn down by the finalizer
        if interface != constants.CEPHFILESYSTEM:
            created_pools.append(pool_obj)
        return pool_obj

    def finalizer():
        """
        Delete the Ceph block pool
        """
        for pool_obj in created_pools:
            pool_obj.delete()
            pool_obj.ocp.wait_for_delete(pool_obj.name)

    request.addfinalizer(finalizer)
    return factory
@pytest.fixture(scope='class')
def storageclass_factory_class(
    request,
    ceph_pool_factory_class,
    secret_factory_class
):
    """Class-scoped variant of the storage class factory."""
    return storageclass_factory_fixture(
        request,
        ceph_pool_factory_class,
        secret_factory_class
    )
@pytest.fixture(scope='session')
def storageclass_factory_session(
    request,
    ceph_pool_factory_session,
    secret_factory_session
):
    """Session-scoped variant of the storage class factory."""
    return storageclass_factory_fixture(
        request,
        ceph_pool_factory_session,
        secret_factory_session
    )
@pytest.fixture(scope='function')
def storageclass_factory(
    request,
    ceph_pool_factory,
    secret_factory
):
    """Function-scoped variant of the storage class factory."""
    return storageclass_factory_fixture(
        request,
        ceph_pool_factory,
        secret_factory
    )
def storageclass_factory_fixture(
    request,
    ceph_pool_factory,
    secret_factory,
):
    """
    Create a storage class factory. Default is RBD based.
    Calling this fixture creates new storage class instance.
    ** This method should not be used anymore **
    ** This method is for internal testing only **
    """
    instances = []

    def factory(
        interface=constants.CEPHBLOCKPOOL,
        secret=None,
        custom_data=None,
        sc_name=None,
        reclaim_policy=constants.RECLAIM_POLICY_DELETE
    ):
        """
        Args:
            interface (str): CephBlockPool or CephFileSystem. This decides
                whether a RBD based or CephFS resource is created.
                RBD is default.
            secret (object): An OCS instance for the secret.
            custom_data (dict): If provided then storageclass object is created
                by using these data. Parameters `block_pool` and `secret`
                are not used but references are set if provided.
            sc_name (str): Name of the storage class
            reclaim_policy (str): Reclaim policy for the storage class,
                e.g. Delete or Retain. Delete is default.
        Returns:
            object: helpers.create_storage_class instance with links to
                block_pool and secret.
        """
        if custom_data:
            sc_obj = helpers.create_resource(**custom_data)
        else:
            secret = secret or secret_factory(interface=interface)
            if interface == constants.CEPHBLOCKPOOL:
                interface_name = constants.DEFAULT_BLOCKPOOL
            elif interface == constants.CEPHFILESYSTEM:
                interface_name = helpers.get_cephfs_data_pool_name()
            else:
                # Fail fast with a clear message instead of the NameError
                # that an unknown interface previously caused below.
                raise ValueError(
                    f"Unsupported interface: {interface}"
                )
            sc_obj = helpers.create_storage_class(
                interface_type=interface,
                interface_name=interface_name,
                secret_name=secret.name,
                sc_name=sc_name,
                reclaim_policy=reclaim_policy
            )
            assert sc_obj, f"Failed to create {interface} storage class"
            sc_obj.secret = secret
        instances.append(sc_obj)
        return sc_obj

    def finalizer():
        """
        Delete the storageclass
        """
        for instance in instances:
            instance.delete()
            instance.ocp.wait_for_delete(
                instance.name
            )

    request.addfinalizer(finalizer)
    return factory
@pytest.fixture(scope='class')
def project_factory_class(request):
    """Class-scoped variant of the project factory."""
    return project_factory_fixture(request)
@pytest.fixture(scope='session')
def project_factory_session(request):
    """Session-scoped variant of the project factory."""
    return project_factory_fixture(request)
@pytest.fixture()
def project_factory(request):
    """Function-scoped variant of the project factory."""
    return project_factory_fixture(request)
@pytest.fixture()
def project(project_factory):
    """
    This fixture creates a single project instance.

    Returns:
        object: project instance created by the project factory.
    """
    project_obj = project_factory()
    return project_obj
def project_factory_fixture(request):
    """
    Create a new project factory.
    Calling this fixture creates new project.
    """
    created_projects = []

    def factory():
        """
        Returns:
            object: ocs_ci.ocs.resources.ocs instance of 'Project' kind.
        """
        new_project = helpers.create_project()
        created_projects.append(new_project)
        return new_project

    def finalizer():
        """Delete every project created through the factory."""
        for project_obj in created_projects:
            # Switch to the default project before removing this one
            ocp.switch_to_default_rook_cluster_project()
            project_obj.delete(resource_name=project_obj.namespace)
            project_obj.wait_for_delete(project_obj.namespace, timeout=300)

    request.addfinalizer(finalizer)
    return factory
@pytest.fixture(scope='class')
def pvc_factory_class(
    request,
    project_factory_class
):
    """Class-scoped variant of the PVC factory."""
    return pvc_factory_fixture(
        request,
        project_factory_class
    )
@pytest.fixture(scope='session')
def pvc_factory_session(
    request,
    project_factory_session
):
    """Session-scoped variant of the PVC factory."""
    return pvc_factory_fixture(
        request,
        project_factory_session
    )
@pytest.fixture(scope='function')
def pvc_factory(
    request,
    project_factory
):
    """Function-scoped variant of the PVC factory."""
    return pvc_factory_fixture(
        request,
        project_factory,
    )
def pvc_factory_fixture(
    request,
    project_factory
):
    """
    Create a persistent Volume Claim factory. Calling this fixture creates new
    PVC. For custom PVC provide 'storageclass' parameter.
    """
    instances = []
    # Cached across factory calls so repeated invocations reuse the same
    # project by default instead of creating a new one each time.
    active_project = None
    # NOTE(review): the two storageclass caches below are written but never
    # read back (storageclass is re-resolved on every call) - they appear
    # to be dead state.
    active_rbd_storageclass = None
    active_cephfs_storageclass = None
    def factory(
        interface=constants.CEPHBLOCKPOOL,
        project=None,
        storageclass=None,
        size=None,
        access_mode=constants.ACCESS_MODE_RWO,
        custom_data=None,
        status=constants.STATUS_BOUND,
        volume_mode=None
    ):
        """
        Args:
            interface (str): CephBlockPool or CephFileSystem. This decides
                whether a RBD based or CephFS resource is created.
                RBD is default.
            project (object): ocs_ci.ocs.resources.ocs.OCS instance
                of 'Project' kind.
            storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
                of 'StorageClass' kind.
            size (int): The requested size for the PVC
            access_mode (str): ReadWriteOnce, ReadOnlyMany or ReadWriteMany.
                This decides the access mode to be used for the PVC.
                ReadWriteOnce is default.
            custom_data (dict): If provided then PVC object is created
                by using these data. Parameters `project` and `storageclass`
                are not used but reference is set if provided.
            status (str): If provided then factory waits for object to reach
                desired state.
            volume_mode (str): Volume mode for PVC.
                eg: volume_mode='Block' to create rbd `block` type volume
        Returns:
            object: helpers.create_pvc instance.
        """
        if custom_data:
            pvc_obj = PVC(**custom_data)
            pvc_obj.create(do_reload=False)
        else:
            nonlocal active_project
            nonlocal active_rbd_storageclass
            nonlocal active_cephfs_storageclass
            # Reuse the cached project when the caller did not pass one.
            project = project or active_project or project_factory()
            active_project = project
            if interface == constants.CEPHBLOCKPOOL:
                storageclass = storageclass or helpers.default_storage_class(
                    interface_type=interface
                )
                active_rbd_storageclass = storageclass
            elif interface == constants.CEPHFILESYSTEM:
                storageclass = storageclass or helpers.default_storage_class(
                    interface_type=interface
                )
                active_cephfs_storageclass = storageclass
            pvc_size = f"{size}Gi" if size else None
            pvc_obj = helpers.create_pvc(
                sc_name=storageclass.name,
                namespace=project.namespace,
                size=pvc_size,
                do_reload=False,
                access_mode=access_mode,
                volume_mode=volume_mode
            )
            assert pvc_obj, "Failed to create PVC"
        if status:
            helpers.wait_for_resource_state(pvc_obj, status)
        pvc_obj.storageclass = storageclass
        pvc_obj.project = project
        pvc_obj.access_mode = access_mode
        instances.append(pvc_obj)
        return pvc_obj
    def finalizer():
        """
        Delete the PVC
        """
        pv_objs = []
        # Get PV from PVC instances and delete PVCs
        for instance in instances:
            if not instance.is_deleted:
                # Capture the backing PV before deleting the PVC, since it
                # cannot be looked up afterwards.
                pv_objs.append(instance.backed_pv_obj)
                instance.delete()
                instance.ocp.wait_for_delete(
                    instance.name
                )
        # Wait for PVs to delete
        # If they have ReclaimPolicy set to Retain then delete them manually
        for pv_obj in pv_objs:
            if pv_obj.data.get('spec').get(
                'persistentVolumeReclaimPolicy'
            ) == constants.RECLAIM_POLICY_RETAIN:
                helpers.wait_for_resource_state(
                    pv_obj,
                    constants.STATUS_RELEASED
                )
                pv_obj.delete()
                pv_obj.ocp.wait_for_delete(pv_obj.name)
            else:
                pv_obj.ocp.wait_for_delete(
                    resource_name=pv_obj.name, timeout=180
                )
    request.addfinalizer(finalizer)
    return factory
@pytest.fixture(scope='class')
def pod_factory_class(request, pvc_factory_class):
    """Class-scoped variant of the Pod factory."""
    return pod_factory_fixture(request, pvc_factory_class)
@pytest.fixture(scope='session')
def pod_factory_session(request, pvc_factory_session):
    """Session-scoped variant of the Pod factory."""
    return pod_factory_fixture(request, pvc_factory_session)
@pytest.fixture(scope='function')
def pod_factory(request, pvc_factory):
    """Function-scoped variant of the Pod factory."""
    return pod_factory_fixture(request, pvc_factory)
def pod_factory_fixture(request, pvc_factory):
    """
    Create a Pod factory. Calling this fixture creates new Pod.
    For custom Pods provide 'pvc' parameter.
    """
    instances = []

    def factory(
        interface=constants.CEPHBLOCKPOOL,
        pvc=None,
        custom_data=None,
        status=constants.STATUS_RUNNING,
        pod_dict_path=None,
        raw_block_pv=False
    ):
        """
        Args:
            interface (str): CephBlockPool or CephFileSystem. This decides
                whether a RBD based or CephFS resource is created.
                RBD is default.
            pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
            custom_data (dict): If provided then Pod object is created
                by using these data. Parameter `pvc` is not used but reference
                is set if provided.
            status (str): If provided then factory waits for object to reach
                desired state.
            pod_dict_path (str): YAML path for the pod.
            raw_block_pv (bool): True for creating raw block pv based pod,
                False otherwise.
        Returns:
            object: helpers.create_pod instance.
        """
        if custom_data:
            pod_obj = helpers.create_resource(**custom_data)
        else:
            pvc = pvc or pvc_factory(interface=interface)
            pod_obj = helpers.create_pod(
                pvc_name=pvc.name,
                namespace=pvc.namespace,
                interface_type=interface,
                pod_dict_path=pod_dict_path,
                raw_block_pv=raw_block_pv
            )
            # BUGFIX: the failure message previously said "Failed to create
            # PVC" although this assertion guards Pod creation.
            assert pod_obj, "Failed to create Pod"
        instances.append(pod_obj)
        if status:
            helpers.wait_for_resource_state(pod_obj, status)
            pod_obj.reload()
        pod_obj.pvc = pvc
        return pod_obj

    def finalizer():
        """
        Delete the Pod
        """
        for instance in instances:
            instance.delete()
            instance.ocp.wait_for_delete(
                instance.name
            )

    request.addfinalizer(finalizer)
    return factory
@pytest.fixture(scope='class')
def teardown_factory_class(request):
    """Class-scoped variant of the teardown factory."""
    return teardown_factory_fixture(request)
@pytest.fixture(scope='session')
def teardown_factory_session(request):
    """Session-scoped variant of the teardown factory."""
    return teardown_factory_fixture(request)
@pytest.fixture(scope='function')
def teardown_factory(request):
    """Function-scoped variant of the teardown factory."""
    return teardown_factory_fixture(request)
def teardown_factory_fixture(request):
    """
    Tearing down a resource that was created during the test
    To use this factory, you'll need to pass 'teardown_factory' to your test
    function and call it in your test when a new resource was created and you
    want it to be removed in teardown phase:
    def test_example(self, teardown_factory):
        pvc_obj = create_pvc()
        teardown_factory(pvc_obj)
    """
    instances = []
    def factory(resource_obj):
        """
        Args:
            resource_obj (OCS object or list of OCS objects) : Object to teardown after the test
        """
        if isinstance(resource_obj, list):
            instances.extend(resource_obj)
        else:
            instances.append(resource_obj)
    def finalizer():
        """
        Delete the resources created in the test
        """
        # Delete in reverse registration order so dependents (e.g. pods)
        # go before the resources they use (e.g. PVCs).
        for instance in instances[::-1]:
            if not instance.is_deleted:
                # For PVCs also remember the reclaim policy so the backing
                # PV deletion can be validated below.
                reclaim_policy = instance.reclaim_policy if instance.kind == constants.PVC else None
                instance.delete()
                instance.ocp.wait_for_delete(
                    instance.name
                )
                if reclaim_policy == constants.RECLAIM_POLICY_DELETE:
                    helpers.validate_pv_delete(instance.backed_pv)
    request.addfinalizer(finalizer)
    return factory
@pytest.fixture()
def service_account_factory(request):
    """
    Create a service account
    """
    instances = []
    # Cache of the last service account so repeated calls without an
    # explicit name reuse it instead of creating a new one.
    active_service_account_obj = None
    def factory(
        project=None, service_account=None
    ):
        """
        Args:
            project (object): ocs_ci.ocs.resources.ocs.OCS instance
                of 'Project' kind.
                NOTE(review): both non-cached branches below access
                project.namespace, so a project is effectively required
                there - confirm callers always pass one.
            service_account (str): service_account_name
        Returns:
            object: serviceaccount instance.
        """
        nonlocal active_service_account_obj
        if active_service_account_obj and not service_account:
            return active_service_account_obj
        elif service_account:
            # Reuse an existing service account by name, ensuring the SCC
            # policy is applied to it.
            sa_obj = helpers.get_serviceaccount_obj(sa_name=service_account, namespace=project.namespace)
            if not helpers.validate_scc_policy(sa_name=service_account, namespace=project.namespace):
                helpers.add_scc_policy(sa_name=service_account, namespace=project.namespace)
            sa_obj.project = project
            active_service_account_obj = sa_obj
            instances.append(sa_obj)
            return sa_obj
        else:
            # Create a brand new service account in the project.
            sa_obj = helpers.create_serviceaccount(
                namespace=project.namespace,
            )
            sa_obj.project = project
            active_service_account_obj = sa_obj
            helpers.add_scc_policy(sa_name=sa_obj.name, namespace=project.namespace)
            assert sa_obj, "Failed to create serviceaccount"
            instances.append(sa_obj)
            return sa_obj
    def finalizer():
        """
        Delete the service account
        """
        for instance in instances:
            # Remove the SCC policy before deleting the account itself.
            helpers.remove_scc_policy(
                sa_name=instance.name,
                namespace=instance.namespace
            )
            instance.delete()
            instance.ocp.wait_for_delete(resource_name=instance.name)
    request.addfinalizer(finalizer)
    return factory
@pytest.fixture()
def dc_pod_factory(
    request,
    pvc_factory,
    service_account_factory
):
    """
    Create deploymentconfig pods
    """
    instances = []
    def factory(
        interface=constants.CEPHBLOCKPOOL,
        pvc=None,
        service_account=None,
        size=None,
        custom_data=None,
        node_name=None,
        node_selector=None,
        replica_count=1,
    ):
        """
        Args:
            interface (str): CephBlockPool or CephFileSystem. This decides
                whether a RBD based or CephFS resource is created.
                RBD is default.
            pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
            service_account (str): service account name for dc_pods
            size (int): The requested size for the PVC
            custom_data (dict): If provided then Pod object is created
                by using these data. Parameter `pvc` is not used but reference
                is set if provided.
            node_name (str): The name of specific node to schedule the pod
            node_selector (dict): dict of key-value pair to be used for nodeSelector field
                eg: {'nodetype': 'app-pod'}
            replica_count (int): Replica count for deployment config
        Returns:
            object: helpers.create_pod instance (deployment config pod).
        """
        if custom_data:
            dc_pod_obj = helpers.create_resource(**custom_data)
        else:
            # Fall back to a fresh PVC / service account when not supplied.
            pvc = pvc or pvc_factory(interface=interface, size=size)
            sa_obj = service_account_factory(project=pvc.project, service_account=service_account)
            dc_pod_obj = helpers.create_pod(
                interface_type=interface, pvc_name=pvc.name, do_reload=False,
                namespace=pvc.namespace, sa_name=sa_obj.name, dc_deployment=True,
                replica_count=replica_count, node_name=node_name,
                node_selector=node_selector
            )
        instances.append(dc_pod_obj)
        log.info(dc_pod_obj.name)
        helpers.wait_for_resource_state(
            dc_pod_obj, constants.STATUS_RUNNING, timeout=180
        )
        dc_pod_obj.pvc = pvc
        return dc_pod_obj
    def finalizer():
        """
        Delete dc pods
        """
        for instance in instances:
            helpers.delete_deploymentconfig_pods(instance)
    request.addfinalizer(finalizer)
    return factory
@pytest.fixture(scope="session", autouse=True)
def polarion_testsuite_properties(record_testsuite_property, pytestconfig):
    """
    Configures polarion testsuite properties for junit xml
    """
    record_testsuite_property(
        'polarion-project-id', config.REPORTING['polarion']['project_id']
    )
    jenkins_build_url = config.RUN.get('jenkins_build_url')
    # The build URL is optional; only record it when Jenkins provided one.
    if jenkins_build_url:
        record_testsuite_property(
            'polarion-custom-description', jenkins_build_url
        )
    record_testsuite_property('polarion-testrun-id', get_testrun_name())
    record_testsuite_property('polarion-testrun-status-id', 'inprogress')
    record_testsuite_property('polarion-custom-isautomated', "True")
@pytest.fixture(scope='session')
def tier_marks_name():
    """
    Gets the tier mark names

    Returns:
        list: list of tier mark names
    """
    names = []
    for tier_mark in tier_marks:
        try:
            names.append(tier_mark.name)
        except AttributeError:
            # Parametrized markers expose the underlying mark only after
            # being called; take its first argument's name.
            names.append(tier_mark().args[0].name)
    return names
@pytest.fixture(scope='function', autouse=True)
def health_checker(request, tier_marks_name):
    """
    Check Ceph health before (for tier-marked tests) and after every test.
    A failing setup check skips the test; a failing teardown check retries
    the health check and re-raises.
    """
    def finalizer():
        try:
            teardown = config.RUN['cli_params']['teardown']
            skip_ocs_deployment = config.ENV_DATA['skip_ocs_deployment']
            # No cluster to check when tearing down or when OCS was never
            # deployed.
            if not (teardown or skip_ocs_deployment):
                ceph_health_check_base()
                log.info("Ceph health check passed at teardown")
        except CephHealthException:
            log.info("Ceph health check failed at teardown")
            # Retrying to increase the chance the cluster health will be OK
            # for next test
            ceph_health_check()
            raise
    node = request.node
    request.addfinalizer(finalizer)
    # Only gate setup on health for tests carrying a tier mark.
    for mark in node.iter_markers():
        if mark.name in tier_marks_name:
            log.info("Checking for Ceph Health OK ")
            try:
                status = ceph_health_check_base()
                if status:
                    log.info("Ceph health check passed at setup")
                    return
            except CephHealthException:
                # skip because ceph is not in good health
                pytest.skip("Ceph health check failed at setup")
@pytest.fixture(scope="session", autouse=True)
def cluster(request, log_cli_level):
    """
    This fixture initiates deployment for both OCP and OCS clusters.
    Specific platform deployment classes will handle the fine details
    of action
    """
    log.info(f"All logs located at {ocsci_log_path()}")
    teardown = config.RUN['cli_params']['teardown']
    deploy = config.RUN['cli_params']['deploy']
    factory = dep_factory.DeploymentFactory()
    deployer = factory.get_deployment()
    # Add a finalizer to teardown the cluster after test execution is finished
    if teardown:
        def cluster_teardown_finalizer():
            deployer.destroy_cluster(log_cli_level)
        request.addfinalizer(cluster_teardown_finalizer)
        log.info("Will teardown cluster because --teardown was provided")
    # Download client
    force_download = (
        config.RUN['cli_params'].get('deploy')
        and config.DEPLOYMENT['force_download_client']
    )
    get_openshift_client(force_download=force_download)
    if deploy:
        # Deploy cluster
        deployer.deploy_cluster(log_cli_level)
@pytest.fixture(scope='class')
def environment_checker(request):
    """
    Snapshot the environment state before the tests and compare it after,
    unless the test is marked to be ignored by the leftover checker.
    """
    # List of marks for which we will ignore the leftover checker
    ignored_marks = [m.mark for m in (deployment, ignore_leftovers)]
    if any(mark in ignored_marks for mark in request.node.iter_markers()):
        return
    request.addfinalizer(get_status_after_execution)
    get_status_before_execution()
@pytest.fixture(scope="session")
def log_cli_level(pytestconfig):
    """
    Retrieves the log_cli_level set in pytest.ini

    Returns:
        str: log_cli_level set in pytest.ini or DEBUG if not set
    """
    configured_level = pytestconfig.getini('log_cli_level')
    return configured_level if configured_level else 'DEBUG'
@pytest.fixture(scope="session")
def run_io_in_background(request):
    """
    Run IO during the test execution
    """
    # Entire setup is skipped unless --io-in-bg was requested.
    if config.RUN['cli_params'].get('io_in_bg'):
        log.info(f"Tests will be running while IO is in the background")
        g_sheet = None
        if config.RUN['google_api_secret']:
            g_sheet = GoogleSpreadSheetAPI("IO BG results", 0)
        else:
            log.warning(
                "Google API secret was not found. IO won't be reported to "
                "a Google spreadsheet"
            )
        results = list()
        # The temp file acts as a simple cross-thread status channel
        # ('running' -> 'finished' -> 'terminated').
        temp_file = tempfile.NamedTemporaryFile(
            mode='w+', prefix='test_status', delete=False
        )
        def get_test_status():
            with open(temp_file.name, 'r') as t_file:
                return t_file.readline()
        def set_test_status(status):
            with open(temp_file.name, 'w') as t_file:
                t_file.writelines(status)
        set_test_status('running')
        def finalizer():
            """
            Delete the resources created during setup, used for
            running IO in the test background
            """
            # NOTE(review): this closure references thread/pod_obj/pvc_obj/
            # sc_obj/cbp_obj/secret_obj which are assigned AFTER
            # addfinalizer below; if setup fails midway the finalizer will
            # raise NameError on the missing names.
            set_test_status('finished')
            try:
                # Wait for the IO thread to acknowledge termination.
                for status in TimeoutSampler(90, 3, get_test_status):
                    if status == 'terminated':
                        break
            except TimeoutExpiredError:
                log.warning(
                    "Background IO was still in progress before IO "
                    "thread termination"
                )
            if thread:
                thread.join()
            log.info(f"Background IO has stopped")
            for result in results:
                log.info(f"IOPs after FIO for pod {pod_obj.name}:")
                log.info(f"Read: {result[0]}")
                log.info(f"Write: {result[1]}")
            if pod_obj:
                pod_obj.delete()
                pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)
            if pvc_obj:
                pvc_obj.delete()
                pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)
            if sc_obj:
                sc_obj.delete()
            if cbp_obj:
                cbp_obj.delete()
            if secret_obj:
                secret_obj.delete()
        request.addfinalizer(finalizer)
        # Build the dedicated secret -> pool -> storage class -> PVC -> pod
        # chain used exclusively for background IO.
        secret_obj = helpers.create_secret(
            interface_type=constants.CEPHBLOCKPOOL
        )
        cbp_obj = helpers.create_ceph_block_pool()
        sc_obj = helpers.create_storage_class(
            interface_type=constants.CEPHBLOCKPOOL,
            interface_name=cbp_obj.name,
            secret_name=secret_obj.name
        )
        pvc_obj = helpers.create_pvc(sc_name=sc_obj.name, size='2Gi')
        helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
        pvc_obj.reload()
        pod_obj = helpers.create_pod(
            interface_type=constants.CEPHBLOCKPOOL, pvc_name=pvc_obj.name
        )
        helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
        pod_obj.reload()
        def run_io_in_bg():
            """
            Run IO by executing FIO and deleting the file created for FIO on
            the pod, in a while true loop. Will be running as long as
            the test is running.
            """
            while get_test_status() == 'running':
                pod_obj.run_io('fs', '1G')
                result = pod_obj.get_fio_results()
                reads = result.get('jobs')[0].get('read').get('iops')
                writes = result.get('jobs')[0].get('write').get('iops')
                if g_sheet:
                    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                    g_sheet.insert_row([now, reads, writes])
                results.append((reads, writes))
                # Remove the FIO file so the next iteration starts clean.
                file_path = os.path.join(
                    pod_obj.get_storage_path(storage_type='fs'),
                    pod_obj.io_params['filename']
                )
                pod_obj.exec_cmd_on_pod(f'rm -rf {file_path}')
            set_test_status('terminated')
        log.info(f"Start running IO in the test background")
        thread = threading.Thread(target=run_io_in_bg)
        thread.start()
@pytest.fixture(
    params=[
        pytest.param({'interface': constants.CEPHBLOCKPOOL}),
        pytest.param({'interface': constants.CEPHFILESYSTEM})
    ],
    ids=["RBD", "CephFS"]
)
def interface_iterate(request):
    """
    Iterate over interfaces - CephBlockPool and CephFileSystem

    Returns:
        str: the interface constant for the current parametrization.
    """
    return request.param['interface']
@pytest.fixture(scope='class')
def multi_pvc_factory_class(
    project_factory_class,
    pvc_factory_class
):
    """Class-scoped variant of the multi PVC factory."""
    return multi_pvc_factory_fixture(
        project_factory_class,
        pvc_factory_class
    )
@pytest.fixture(scope='session')
def multi_pvc_factory_session(
    project_factory_session,
    pvc_factory_session
):
    """Session-scoped variant of the multi PVC factory."""
    return multi_pvc_factory_fixture(
        project_factory_session,
        pvc_factory_session
    )
@pytest.fixture(scope='function')
def multi_pvc_factory(project_factory, pvc_factory):
    """Function-scoped variant of the multi PVC factory."""
    return multi_pvc_factory_fixture(
        project_factory,
        pvc_factory
    )
def multi_pvc_factory_fixture(
    project_factory,
    pvc_factory
):
    """
    Create a Persistent Volume Claims factory. Calling this fixture creates a
    set of new PVCs. Options for PVC creation based on provided assess modes:
    1. For each PVC, choose random value from the list of access modes
    2. Create PVCs based on the specified distribution number of access modes.
       Create sets of PVCs based on the order of access modes.
    3. Create PVCs based on the specified distribution number of access modes.
       The order of PVC creation is independent of access mode.
    """
    def factory(
        interface=constants.CEPHBLOCKPOOL,
        project=None,
        storageclass=None,
        size=None,
        access_modes=None,
        access_modes_selection='distribute_sequential',
        access_mode_dist_ratio=None,
        status=constants.STATUS_BOUND,
        num_of_pvc=1,
        wait_each=False,
        timeout=60
    ):
        """
        Args:
            interface (str): CephBlockPool or CephFileSystem. This decides
                whether a RBD based or CephFS resource is created.
                RBD is default.
            project (object): ocs_ci.ocs.resources.ocs.OCS instance
                of 'Project' kind.
            storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
                of 'StorageClass' kind.
            size (int): The requested size for the PVC
            access_modes (list): List of access modes. One of the access modes
                will be chosen for creating each PVC. If not specified,
                ReadWriteOnce will be selected for all PVCs. To specify
                volume mode, append volume mode in the access mode name
                separated by '-'.
                eg: ['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany',
                'ReadWriteMany-Block']
            access_modes_selection (str): Decides how to select accessMode for
                each PVC from the options given in 'access_modes' list.
                Values are 'select_random', 'distribute_random' and
                'distribute_sequential'.
                'select_random' : While creating each PVC, one access mode will
                be selected from the 'access_modes' list.
                'distribute_random' : The access modes in the list
                'access_modes' will be distributed based on the values in
                'distribute_ratio' and the order in which PVCs are created
                will not be based on the access modes. For example, 1st and
                6th PVC might have same access mode.
                'distribute_sequential' :The access modes in the list
                'access_modes' will be distributed based on the values in
                'distribute_ratio' and the order in which PVCs are created
                will be as sets of PVCs of same assess mode. For example,
                first set of 10 will be having same access mode followed by
                next set of 13 with a different access mode.
            access_mode_dist_ratio (list): Contains the number of PVCs to be
                created for each access mode. If not specified, the given list
                of access modes will be equally distributed among the PVCs.
                eg: [10,12] for num_of_pvc=22 and
                access_modes=['ReadWriteOnce', 'ReadWriteMany']
            status (str): If provided then factory waits for object to reach
                desired state.
            num_of_pvc(int): Number of PVCs to be created
            wait_each(bool): True to wait for each PVC to be in status 'status'
                before creating next PVC, False otherwise
            timeout(int): Time in seconds to wait
        Returns:
            list: objects of PVC class.
        """
        pvc_list = []
        # Empty status disables per-PVC waiting inside pvc_factory; the
        # batch is then waited on at the end.
        if wait_each:
            status_tmp = status
        else:
            status_tmp = ""
        project = project or project_factory()
        storageclass = storageclass or helpers.default_storage_class(
            interface_type=interface
        )
        access_modes = access_modes or [constants.ACCESS_MODE_RWO]
        access_modes_list = []
        if access_modes_selection == 'select_random':
            for _ in range(num_of_pvc):
                mode = random.choice(access_modes)
                access_modes_list.append(mode)
        else:
            if not access_mode_dist_ratio:
                # Spread PVCs evenly over the modes; the last mode absorbs
                # the division remainder.
                num_of_modes = len(access_modes)
                dist_val = floor(num_of_pvc / num_of_modes)
                access_mode_dist_ratio = [dist_val] * num_of_modes
                access_mode_dist_ratio[-1] = (
                    dist_val + (num_of_pvc % num_of_modes)
                )
            zipped_share = list(zip(access_modes, access_mode_dist_ratio))
            for mode, share in zipped_share:
                access_modes_list.extend([mode] * share)
        if access_modes_selection == 'distribute_random':
            random.shuffle(access_modes_list)
        for access_mode in access_modes_list:
            # 'Mode-VolumeMode' entries carry an explicit volume mode.
            if '-' in access_mode:
                access_mode, volume_mode = access_mode.split('-')
            else:
                volume_mode = ''
            pvc_obj = pvc_factory(
                interface=interface,
                project=project,
                storageclass=storageclass,
                size=size,
                access_mode=access_mode,
                status=status_tmp,
                volume_mode=volume_mode
            )
            pvc_list.append(pvc_obj)
            pvc_obj.project = project
        if status and not wait_each:
            for pvc_obj in pvc_list:
                helpers.wait_for_resource_state(pvc_obj, status, timeout=timeout)
        return pvc_list
    return factory
@pytest.fixture(scope="session", autouse=True)
def rook_repo(request):
    """Clone/update the rook repo once per session at the configured ref."""
    get_rook_repo(
        config.RUN['rook_branch'], config.RUN.get('rook_to_checkout')
    )
@pytest.fixture(scope="function")
def memory_leak_function(request):
    """
    Function to start Memory leak thread which will be executed parallel with test run
    Memory leak data will be captured in all worker nodes for ceph-osd process
    Data will be appended in /tmp/(worker)-top-output.txt file for each worker
    During teardown created tmp files will be deleted
    Usage:
        test_case(.., memory_leak_function):
            .....
            median_dict = helpers.get_memory_leak_median_value()
            .....
            TC execution part, memory_leak_fun will capture data
            ....
            helpers.memory_leak_analysis(median_dict)
            ....
    """
    def finalizer():
        """
        Finalizer to stop memory leak data capture thread and cleanup the files
        """
        # Signal the capture thread to stop via the shared flag file.
        set_flag_status('terminated')
        try:
            for status in TimeoutSampler(90, 3, get_flag_status):
                if status == 'terminated':
                    break
        except TimeoutExpiredError:
            log.warning(
                "Background test execution still in progress before"
                "memory leak thread terminated"
            )
        if thread:
            thread.join()
        # Remove the per-worker capture files created by the thread.
        for worker in helpers.get_worker_nodes():
            if os.path.exists(f"/tmp/{worker}-top-output.txt"):
                os.remove(f"/tmp/{worker}-top-output.txt")
        log.info(f"Memory leak capture has stopped")
    request.addfinalizer(finalizer)
    # Temp file used as a cross-thread status flag ('running'/'terminated').
    temp_file = tempfile.NamedTemporaryFile(
        mode='w+', prefix='test_status', delete=False
    )
    def get_flag_status():
        with open(temp_file.name, 'r') as t_file:
            return t_file.readline()
    def set_flag_status(value):
        with open(temp_file.name, 'w') as t_file:
            t_file.writelines(value)
    set_flag_status('running')
    def run_memory_leak_in_bg():
        """
        Function to run memory leak in background thread
        Memory leak data is written in below format
        date time PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
        """
        oc = ocp.OCP(
            namespace=config.ENV_DATA['cluster_namespace']
        )
        while get_flag_status() == 'running':
            for worker in helpers.get_worker_nodes():
                filename = f"/tmp/{worker}-top-output.txt"
                # NOTE(review): 'top -n 2 b' passes 'b' as a bare argument;
                # batch mode is usually spelled '-b' - confirm this works
                # on the debug image's top.
                top_cmd = f"debug nodes/{worker} -- chroot /host top -n 2 b"
                with open("/tmp/file.txt", "w+") as temp:
                    temp.write(str(oc.exec_oc_cmd(
                        command=top_cmd, out_yaml_format=False
                    )))
                    temp.seek(0)
                    # Keep only the ceph-osd lines, timestamped, per worker.
                    for line in temp:
                        if line.__contains__("ceph-osd"):
                            with open(filename, "a+") as f:
                                f.write(str(datetime.now()))
                                f.write(' ')
                                f.write(line)
    log.info(f"Start memory leak data capture in the test background")
    thread = threading.Thread(target=run_memory_leak_in_bg)
    thread.start()
@pytest.fixture()
def aws_obj():
    """
    Initialize AWS instance

    Returns:
        AWS: An instance of AWS class
    """
    return aws.AWS()
@pytest.fixture()
def ec2_instances(request, aws_obj):
    """
    Get cluster instances

    Returns:
        dict: The ID keys and the name values of the instances
    """
    # Get all cluster nodes objects
    nodes = node.get_node_objs()
    # Get the cluster nodes ec2 instances
    ec2_instances = aws.get_instances_ids_and_names(nodes)
    assert ec2_instances, f"Failed to get ec2 instances for node {[n.name for n in nodes]}"

    def finalizer():
        """
        Make sure all instances are running
        """
        # Getting the instances that are in status 'stopping' (if there are any), to wait for them to
        # get to status 'stopped' so it will be possible to start them
        stopping_instances = {
            key: val for key, val in ec2_instances.items() if (
                aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPING
            )
        }
        # Waiting for the instances that are in status 'stopping'
        # (if there are any) to reach 'stopped'
        if stopping_instances:
            # BUGFIX: iterating a dict yields its keys (the instance IDs);
            # the previous code called `.key()` on the ID string, which
            # raised AttributeError.
            for instance_id in stopping_instances:
                instance = aws_obj.get_ec2_instance(instance_id)
                instance.wait_until_stopped()
        stopped_instances = {
            key: val for key, val in ec2_instances.items() if (
                aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPED
            )
        }
        # Start the instances
        if stopped_instances:
            aws_obj.start_ec2_instances(instances=stopped_instances, wait=True)

    request.addfinalizer(finalizer)
    return ec2_instances
@pytest.fixture()
def mcg_obj(request):
    """Function-scoped variant of the MCG object fixture."""
    return mcg_obj_fixture(request)
@pytest.fixture(scope='session')
def mcg_obj_session(request):
    """Session-scoped variant of the MCG object fixture."""
    return mcg_obj_fixture(request)
def mcg_obj_fixture(request):
    """
    Returns an MCG resource that's connected to the S3 endpoint

    Returns:
        MCG: An MCG resource
    """
    mcg = MCG()

    def remove_cred_req():
        mcg.cred_req_obj.delete()

    # Only AWS deployments create a credentials request needing cleanup.
    if config.ENV_DATA['platform'].lower() == 'aws':
        request.addfinalizer(remove_cred_req)
    return mcg
@pytest.fixture()
def created_pods(request):
    """Function-scoped variant of the created pods tracker."""
    return created_pods_fixture(request)
@pytest.fixture(scope='session')
def created_pods_session(request):
    """Session-scoped variant of the created pods tracker."""
    return created_pods_fixture(request)
def created_pods_fixture(request):
    """
    Deletes all pods that were created as part of the test

    Returns:
        list: An empty list of pods
    """
    tracked_pods = []

    def pod_cleanup():
        """Delete every pod registered in the tracker."""
        for pod in tracked_pods:
            log.info(f'Deleting pod {pod.name}')
            pod.delete()

    request.addfinalizer(pod_cleanup)
    return tracked_pods
@pytest.fixture()
def awscli_pod(mcg_obj, created_pods):
    """Function-scoped variant of the AWSCLI pod fixture."""
    return awscli_pod_fixture(mcg_obj, created_pods)
@pytest.fixture(scope='session')
def awscli_pod_session(mcg_obj_session, created_pods_session):
    """Session-scoped variant of the AWSCLI pod fixture."""
    return awscli_pod_fixture(mcg_obj_session, created_pods_session)
def awscli_pod_fixture(mcg_obj, created_pods):
    """
    Creates a new AWSCLI pod for relaying commands

    Args:
        created_pods (Fixture/list): A fixture used to keep track of created
            pods and clean them up in the teardown

    Returns:
        pod: A pod running the AWS CLI
    """
    cli_pod = helpers.create_pod(
        namespace=mcg_obj.namespace,
        pod_dict_path=constants.AWSCLI_POD_YAML
    )
    helpers.wait_for_resource_state(cli_pod, constants.STATUS_RUNNING)
    # Register for teardown via the shared created-pods tracker.
    created_pods.append(cli_pod)
    return cli_pod
@pytest.fixture()
def nodes():
    """
    Return an instance of the relevant platform nodes class
    (e.g. AWSNodes, VMWareNodes) to be later used in the test
    for nodes related operations, like nodes restart,
    detach/attach volume, etc.
    """
    return platform_nodes.PlatformNodesFactory().get_nodes_platform()
@pytest.fixture()
def uploaded_objects(request, mcg_obj, awscli_pod, verify_rgw_restart_count):
    """Function-scoped variant of the uploaded objects tracker."""
    return uploaded_objects_fixture(
        request,
        mcg_obj,
        awscli_pod,
        verify_rgw_restart_count
    )
@pytest.fixture(scope='session')
def uploaded_objects_session(
    request,
    mcg_obj_session,
    awscli_pod_session,
    verify_rgw_restart_count_session
):
    """Session-scoped variant of the uploaded objects tracker."""
    return uploaded_objects_fixture(
        request,
        mcg_obj_session,
        awscli_pod_session,
        verify_rgw_restart_count_session
    )
def uploaded_objects_fixture(
    request,
    mcg_obj,
    awscli_pod,
    verify_rgw_restart_count
):
    """
    Deletes all objects that were created as part of the test

    Args:
        mcg_obj (MCG): An MCG object containing the MCG S3 connection
            credentials
        awscli_pod (Pod): A pod running the AWSCLI tools

    Returns:
        list: An empty list of objects
    """
    uploaded_objects_paths = []
    def object_cleanup():
        """Delete every S3 object path registered during the test."""
        for uploaded_filename in uploaded_objects_paths:
            log.info(f'Deleting object {uploaded_filename}')
            awscli_pod.exec_cmd_on_pod(
                command=helpers.craft_s3_command(
                    mcg_obj, "rm " + uploaded_filename
                ),
                # Keep the credentials out of the logged command output.
                secrets=[
                    mcg_obj.access_key_id,
                    mcg_obj.access_key,
                    mcg_obj.s3_endpoint
                ]
            )
    request.addfinalizer(object_cleanup)
    return uploaded_objects_paths
@pytest.fixture()
def verify_rgw_restart_count(request):
    """Function-scoped RGW restart-count guard (see fixture fn)."""
    return verify_rgw_restart_count_fixture(request)
@pytest.fixture(scope='session')
def verify_rgw_restart_count_session(request):
    """Session-scoped RGW restart-count guard (see fixture fn)."""
    return verify_rgw_restart_count_fixture(request)
def verify_rgw_restart_count_fixture(request):
    """
    Verifies the RGW restart count at start and end of a test
    """
    # Only vSphere deployments run the RGW pod that is tracked here
    if config.ENV_DATA['platform'].lower() == 'vsphere':
        log.info("Getting RGW pod restart count before executing the test")
        initial_count = get_rgw_restart_count()

        def finalizer():
            # Compare the live restart count against the pre-test baseline
            rgw_pod = get_rgw_pod()
            rgw_pod.reload()
            log.info("Verifying whether RGW pod changed after executing the test")
            assert rgw_pod.restart_count == initial_count, 'RGW pod restarted'

        request.addfinalizer(finalizer)
@pytest.fixture()
def bucket_factory(request, mcg_obj):
    """Function-scoped bucket factory (see bucket_factory_fixture)."""
    return bucket_factory_fixture(request, mcg_obj)
@pytest.fixture(scope='session')
def bucket_factory_session(request, mcg_obj_session):
    """Session-scoped bucket factory (see bucket_factory_fixture)."""
    return bucket_factory_fixture(request, mcg_obj_session)
def bucket_factory_fixture(request, mcg_obj):
    """
    Create a bucket factory. Calling this fixture creates a new bucket(s).
    For a custom amount, provide the 'amount' parameter.

    Args:
        request (FixtureRequest): pytest request, used to register teardown
        mcg_obj (MCG): An MCG object containing the MCG S3 connection
            credentials
    """
    created_buckets = []
    # Maps a lowercased interface name to its bucket implementation class.
    # (Renamed from camelCase `bucketMap` for PEP 8 compliance.)
    bucket_map = {
        's3': S3Bucket,
        'oc': OCBucket,
        'cli': CLIBucket
    }

    def _create_buckets(amount=1, interface='S3', *args, **kwargs):
        """
        Creates the requested amount of buckets through the given interface.
        (Deletion happens in this fixture's teardown, not here.)

        Args:
            amount (int): The amount of buckets to create
            interface (str): The interface to use for creation of buckets.
                S3 | OC | CLI

        Raises:
            RuntimeError: If an invalid interface name is given

        Returns:
            list: A list of s3.Bucket objects, containing all the created
                buckets

        """
        # Lowercase once instead of on every use
        interface_key = interface.lower()
        if interface_key not in bucket_map:
            raise RuntimeError(
                f'Invalid interface type received: {interface}. '
                f'available types: {", ".join(bucket_map.keys())}'
            )
        for _ in range(amount):
            bucket_name = helpers.create_unique_resource_name(
                resource_description='bucket', resource_type=interface_key
            )
            created_buckets.append(
                bucket_map[interface_key](
                    mcg_obj,
                    bucket_name,
                    *args,
                    **kwargs
                )
            )
        return created_buckets

    def bucket_cleanup():
        # Delete only buckets that still exist; a test may have already
        # deleted its own buckets.
        all_existing_buckets = mcg_obj.s3_get_all_bucket_names()
        for bucket in created_buckets:
            if bucket.name in all_existing_buckets:
                log.info(f'Cleaning up bucket {bucket.name}')
                bucket.delete()
                log.info(
                    f"Verifying whether bucket: {bucket.name} exists after"
                    f" deletion"
                )
                assert not mcg_obj.s3_verify_bucket_exists(bucket.name)
            else:
                log.info(f'Bucket {bucket.name} not found.')

    request.addfinalizer(bucket_cleanup)
    return _create_buckets
@pytest.fixture()
def multiregion_resources(request, mcg_obj):
    """Function-scoped multiregion resource-tracking lists (see fixture fn)."""
    return multiregion_resources_fixture(request, mcg_obj)
@pytest.fixture(scope='session')
def multiregion_resources_session(request, mcg_obj_session):
    """Session-scoped multiregion resource-tracking lists (see fixture fn)."""
    return multiregion_resources_fixture(request, mcg_obj_session)
def multiregion_resources_fixture(request, mcg_obj):
    """
    Track multiregion test resources and tear them all down after the test.

    Returns:
        tuple: four empty lists, to be populated by the test -
            (aws_buckets, bs_secrets, bs_objs, bucketclasses)
    """
    bs_objs, bs_secrets, bucketclasses, aws_buckets = (
        [] for _ in range(4)
    )

    # Cleans up all resources that were created for the test
    def resource_cleanup():
        for resource in chain(bs_secrets, bucketclasses):
            resource.delete()

        for backingstore in bs_objs:
            backingstore.delete()
            # The backing store's underlying NooBaa pool must be removed too
            mcg_obj.send_rpc_query(
                'pool_api',
                'delete_pool',
                {'name': backingstore.name}
            )

        for aws_bucket_name in aws_buckets:
            # Ensure the bucket is writable again so its objects can be purged
            mcg_obj.toggle_aws_bucket_readwrite(aws_bucket_name, block=False)
            # AWS deletion is eventually consistent - retry up to 10 times
            for _ in range(10):
                try:
                    mcg_obj.aws_s3_resource.Bucket(
                        aws_bucket_name
                    ).objects.all().delete()
                    mcg_obj.aws_s3_resource.Bucket(aws_bucket_name).delete()
                    break
                except ClientError:
                    log.info(
                        f'Deletion of bucket {aws_bucket_name} failed. Retrying...'
                    )
                    sleep(3)

    request.addfinalizer(resource_cleanup)
    return aws_buckets, bs_secrets, bs_objs, bucketclasses
@pytest.fixture()
def multiregion_mirror_setup(mcg_obj, multiregion_resources, bucket_factory):
    """Function-scoped mirrored-bucket setup (see fixture fn)."""
    return multiregion_mirror_setup_fixture(
        mcg_obj,
        multiregion_resources,
        bucket_factory
    )
@pytest.fixture(scope='session')
def multiregion_mirror_setup_session(
    mcg_obj_session,
    multiregion_resources_session,
    bucket_factory_session
):
    """Session-scoped mirrored-bucket setup (see fixture fn)."""
    return multiregion_mirror_setup_fixture(
        mcg_obj_session,
        multiregion_resources_session,
        bucket_factory_session
    )
def multiregion_mirror_setup_fixture(
    mcg_obj,
    multiregion_resources,
    bucket_factory
):
    """
    Set up a mirrored NooBaa bucket backed by two AWS regions.

    Creates two AWS target buckets (one in us-west-1/2, one in us-east-2),
    wraps them in NooBaa backing stores that share one secret, binds them
    with a 'Mirror' bucketclass, and creates an OC bucket on top of it.

    Args:
        mcg_obj (MCG): An MCG object with the S3 connection credentials
        multiregion_resources (tuple): resource-tracking lists that are
            emptied/torn down after the test
        bucket_factory (callable): factory creating the NooBucket

    Returns:
        tuple: (bucket, backingstore1, backingstore2)
    """
    # Setup
    # Todo:
    # add region and amount parametrization - note that `us-east-1`
    # will cause an error as it is the default region. If usage of `us-east-1`
    # needs to be tested, keep the 'region' field out.
    (
        aws_buckets,
        backingstore_secrets,
        backingstore_objects,
        bucketclasses
    ) = multiregion_resources
    # Define backing stores
    backingstore1 = {
        'name': helpers.create_unique_resource_name(
            resource_description='testbs',
            resource_type='s3bucket'
        ),
        'region': f'us-west-{randrange(1, 3)}'
    }
    backingstore2 = {
        'name': helpers.create_unique_resource_name(
            resource_description='testbs',
            resource_type='s3bucket'
        ),
        # Fixed: was a pointless f-string with no placeholders (F541)
        'region': 'us-east-2'
    }
    # Create target buckets for them
    mcg_obj.create_new_backingstore_aws_bucket(backingstore1)
    mcg_obj.create_new_backingstore_aws_bucket(backingstore2)
    aws_buckets.extend((backingstore1['name'], backingstore2['name']))
    # Create a backing store secret (shared by both backing stores)
    backingstore_secret = mcg_obj.create_aws_backingstore_secret(
        backingstore1['name'] + 'secret'
    )
    backingstore_secrets.append(backingstore_secret)
    # Create AWS-backed backing stores on NooBaa
    backingstore_obj_1 = mcg_obj.oc_create_aws_backingstore(
        backingstore1['name'],
        backingstore1['name'],
        backingstore_secret.name,
        backingstore1['region']
    )
    backingstore_obj_2 = mcg_obj.oc_create_aws_backingstore(
        backingstore2['name'],
        backingstore2['name'],
        backingstore_secret.name,
        backingstore2['region']
    )
    backingstore_objects.extend((backingstore_obj_1, backingstore_obj_2))
    # Create a new mirror bucketclass that'll use all the backing stores we
    # created
    bucketclass = mcg_obj.oc_create_bucketclass(
        helpers.create_unique_resource_name(
            resource_description='testbc',
            resource_type='bucketclass'
        ),
        [backingstore.name for backingstore in backingstore_objects], 'Mirror'
    )
    bucketclasses.append(bucketclass)
    # Create a NooBucket that'll use the bucket class in order to test
    # the mirroring policy
    bucket = bucket_factory(1, 'OC', bucketclass=bucketclass.name)[0]
    return bucket, backingstore1, backingstore2
@pytest.fixture(scope='session')
def default_storageclasses(request, teardown_factory_session):
    """
    Returns dictionary with storageclasses. Keys represent reclaim policy of
    storageclass. There are two storageclasses for each key. First is RBD based
    and the second one is CephFS based. Storageclasses with Retain Reclaim
    Policy are created from default storageclasses.
    """
    scs = {
        constants.RECLAIM_POLICY_DELETE: [],
        constants.RECLAIM_POLICY_RETAIN: []
    }

    # TODO(fbalak): Use proper constants after
    # https://github.com/red-hat-storage/ocs-ci/issues/1056
    # is resolved
    for sc_name in (
        'ocs-storagecluster-ceph-rbd',
        'ocs-storagecluster-cephfs'
    ):
        sc = OCS(
            kind=constants.STORAGECLASS,
            metadata={'name': sc_name}
        )
        sc.reload()
        scs[constants.RECLAIM_POLICY_DELETE].append(sc)
        # Derive a '-retain' storageclass by mutating the fetched resource
        # data and re-creating it under the new name.
        # NOTE(review): the very same OCS object is appended to both lists,
        # so after this mutation the DELETE entry aliases the retain SC -
        # confirm this aliasing is intended.
        sc.data['reclaimPolicy'] = constants.RECLAIM_POLICY_RETAIN
        sc.data['metadata']['name'] += '-retain'
        sc._name = sc.data['metadata']['name']
        sc.create()
        teardown_factory_session(sc)
        scs[constants.RECLAIM_POLICY_RETAIN].append(sc)
    return scs
@pytest.fixture(scope='class')
def install_logging(request):
    """
    Setup and teardown
    * The setup will deploy openshift-logging in the cluster
    * The teardown will uninstall cluster-logging from the cluster

    The elasticsearch and cluster-logging operator subscriptions are pinned
    to the channel matching the detected OCP minor version.
    """
    def finalizer():
        uninstall_cluster_logging()
    request.addfinalizer(finalizer)

    # Checks OCP version
    ocp_version = get_ocp_version()

    # Creates namespace opensift-operators-redhat
    ocp_logging_obj.create_namespace(yaml_file=constants.EO_NAMESPACE_YAML)

    # Creates an operator-group for elasticsearch
    assert ocp_logging_obj.create_elasticsearch_operator_group(
        yaml_file=constants.EO_OG_YAML,
        resource_name='openshift-operators-redhat'
    )

    # Set RBAC policy on the project
    assert ocp_logging_obj.set_rbac(
        yaml_file=constants.EO_RBAC_YAML, resource_name='prometheus-k8s'
    )

    # Creates subscription for elastic-search operator
    subscription_yaml = templating.load_yaml(constants.EO_SUB_YAML)
    subscription_yaml['spec']['channel'] = ocp_version
    helpers.create_resource(**subscription_yaml)
    assert ocp_logging_obj.get_elasticsearch_subscription()

    # Creates a namespace openshift-logging
    ocp_logging_obj.create_namespace(yaml_file=constants.CL_NAMESPACE_YAML)

    # Creates an operator-group for cluster-logging
    assert ocp_logging_obj.create_clusterlogging_operator_group(
        yaml_file=constants.CL_OG_YAML
    )

    # Creates subscription for cluster-logging
    cl_subscription = templating.load_yaml(constants.CL_SUB_YAML)
    cl_subscription['spec']['channel'] = ocp_version
    helpers.create_resource(**cl_subscription)
    assert ocp_logging_obj.get_clusterlogging_subscription()

    # Creates instance in namespace openshift-logging
    cluster_logging_operator = OCP(
        kind=constants.POD, namespace=constants.OPENSHIFT_LOGGING_NAMESPACE
    )
    log.info(f"The cluster-logging-operator {cluster_logging_operator.get()}")
    ocp_logging_obj.create_instance()
@pytest.fixture
def fio_pvc_dict():
    """Function-scoped PVC template dict for fio workloads."""
    return fio_pvc_dict_fixture()
@pytest.fixture(scope='session')
def fio_pvc_dict_session():
    """Session-scoped PVC template dict for fio workloads."""
    return fio_pvc_dict_fixture()
def fio_pvc_dict_fixture():
    """
    PVC template for fio workloads.
    Note that all 'None' values needs to be defined before usage.
    (YAML parses `None` as the literal string 'None', i.e. a placeholder.)
    """
    # TODO(fbalak): load dictionary fixtures from one place
    template = textwrap.dedent("""
        kind: PersistentVolumeClaim
        apiVersion: v1
        metadata:
          name: fio-target
        spec:
          storageClassName: None
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: None
        """)
    pvc_dict = yaml.safe_load(template)
    return pvc_dict
@pytest.fixture
def fio_configmap_dict():
    """Function-scoped ConfigMap template dict for fio workloads."""
    return fio_configmap_dict_fixture()
@pytest.fixture(scope='session')
def fio_configmap_dict_session():
    """Session-scoped ConfigMap template dict for fio workloads."""
    return fio_configmap_dict_fixture()
def fio_configmap_dict_fixture():
    """
    ConfigMap template for fio workloads.
    Note that you need to add actual configuration to workload.fio file.
    """
    # TODO(fbalak): load dictionary fixtures from one place
    template = textwrap.dedent("""
        kind: ConfigMap
        apiVersion: v1
        metadata:
          name: fio-config
        data:
          workload.fio: |
            # here comes workload configuration
        """)
    cm_dict = yaml.safe_load(template)
    return cm_dict
@pytest.fixture
def fio_job_dict():
    """Function-scoped Job template dict for fio workloads."""
    return fio_job_dict_fixture()
@pytest.fixture(scope='session')
def fio_job_dict_session():
    """Session-scoped Job template dict for fio workloads."""
    return fio_job_dict_fixture()
def fio_job_dict_fixture():
    """
    Job template for fio workloads.

    The Job mounts the fio-target PVC at /mnt/target and the fio-config
    ConfigMap at /etc/fio, then runs fio with JSON output.
    """
    # TODO(fbalak): load dictionary fixtures from one place
    template = textwrap.dedent("""
        apiVersion: batch/v1
        kind: Job
        metadata:
          name: fio
        spec:
          backoffLimit: 1
          template:
            metadata:
              name: fio
            spec:
              containers:
                - name: fio
                  image: quay.io/johnstrunk/fs-performance:latest
                  command:
                    - "/usr/bin/fio"
                    - "--output-format=json"
                    - "/etc/fio/workload.fio"
                  volumeMounts:
                    - name: fio-target
                      mountPath: /mnt/target
                    - name: fio-config-volume
                      mountPath: /etc/fio
              restartPolicy: Never
              volumes:
                - name: fio-target
                  persistentVolumeClaim:
                    claimName: fio-target
                - name: fio-config-volume
                  configMap:
                    name: fio-config
        """)
    job_dict = yaml.safe_load(template)
    return job_dict
|
test-preview-inference-overlay-iter-vam_ref.py | # Copyright (c) 2019, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of The Linux Foundation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import sys
import socket
import time
import threading
import subprocess
from iotccsdk.camera import CameraClient
def getWlanIp():
    """Return this machine's primary IPv4 address.

    A UDP socket is "connected" to a broadcast address (no packet is
    actually sent) only so the OS selects the outgoing interface; the
    local address is then read back. Addresses in 172.x.x.x are assumed
    to belong to a docker bridge and are replaced with the loopback
    address.

    Returns:
        str: The detected IPv4 address, or '127.0.0.1' on failure.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # doesn't even have to be reachable - UDP connect() only picks a route
        s.connect(('10.255.255.255', 1))
        IP = s.getsockname()[0]
        if IP.split('.')[0] == '172':
            print("Ip address detected is :: " + IP)
            IP = '127.0.0.1'
            # Fixed: a space was missing before 'to avoid' in the message
            print("Ip address changed to :: " + IP + " to avoid docker interface")
        print("Ip address detected is :: " + IP)
    except OSError:
        # Fixed: was a bare `except:` which also swallowed KeyboardInterrupt;
        # socket failures all raise OSError subclasses.
        IP = '127.0.0.1'
    finally:
        s.close()
    return IP
def main(protocol=None):
    """Exercise camera preview + inference overlay for N iterations.

    Parses CLI args (--ip/--username/--password/--iteration/--runtime),
    connects to the camera and, per iteration: turns analytics and the
    inference overlay on, prints inferences on a worker thread for
    `runtime` seconds, then turns everything off and rotates the VAM
    engine config files so the next iteration uses the other model config.

    Args:
        protocol: Unused; kept for interface compatibility with callers.
    """
    #global camera_client
    print("\nPython %s\n" % sys.version)
    parser = argparse.ArgumentParser()
    parser.add_argument('--ip', help='ip address of the camera', default='127.0.0.1')
    parser.add_argument('--username', help='username of the camera', default='admin')
    parser.add_argument('--password', help='password of the camera', default='admin')
    parser.add_argument('--iteration', help='Number of iterations', type=int, default=1)
    parser.add_argument('--runtime', help='runtime for each iteration', type=int, default=60)
    args = parser.parse_args()
    ip_addr = args.ip
    username = args.username
    password = args.password
    iter = args.iteration  # NOTE(review): shadows the builtin `iter`
    runtime = args.runtime
    with CameraClient.connect(ip_address=ip_addr, username=username, password=password) as camera_client:
        print(camera_client.configure_preview(resolution="1080P", display_out=1))
        camera_client.set_preview_state("on")
        print(camera_client.preview_url)
        for x in range(0, iter):
            print("-------------------- Iteration {} - Start ---------------------".format(x+1))
            camera_client.set_analytics_state("on")
            print(camera_client.vam_url)
            print(" ANALYTICS STATE IS ON--")
            camera_client.configure_overlay("inference")
            print(" Overlay is configured--")
            camera_client.set_overlay_state("on")
            print(" OVERLAY STATE IS ON--")
            try:
                # Stream inferences on a worker thread for `runtime` seconds
                with camera_client.get_inferences() as results:
                    t = threading.Thread(target=print_inferences, args=(results,))
                    print(" BEFORE THREAD--")
                    t.start()
                    print(" AFTER START THREAD--sleep {}", format(runtime))
                    time.sleep(runtime)
            except:
                # NOTE(review): bare except hides real failures, and if the
                # `with` block fails before `t` is bound, the t.join() below
                # raises NameError - confirm whether this is acceptable here.
                print("Stopping")
            print(" STOPPING OVERLAY--")
            camera_client.set_overlay_state("off")
            print(" STOPPING ANALYTICS--")
            camera_client.set_analytics_state("off")
            # Rotate the two VAM engine configs through a temp copy so the
            # next iteration runs with the alternate model configuration
            subprocess.run(["cp", "vam_model_folder/va-snpe-engine-library_config.json", "vam_model_folder/va-snpe-engine-library_config_temp.json"])
            subprocess.run(["cp", "vam_model_folder/va-snpe-engine-library_config_1.json", "vam_model_folder/va-snpe-engine-library_config.json"])
            subprocess.run(["cp", "vam_model_folder/va-snpe-engine-library_config_temp.json", "vam_model_folder/va-snpe-engine-library_config_1.json"])
            print(" OVERLAY AND ANALYTICS STATE IS OFF--")
            t.join()
            print("-------------------- Iteration {} - End --------------------".format(x+1))
        camera_client.set_preview_state("off")
def print_inferences(results=None):
    """Print every inference result from the camera's result iterator.

    Args:
        results (iterable): Iterable of inference results; each result is
            expected to expose `timestamp` and `objects`, where every
            detected object carries `id`, `label`, `confidence` and a
            `position` with x/y/width/height attributes. None is treated
            as "no results" (the original crashed with TypeError on the
            default argument).
    """
    print("Starting prints")
    for result in results or []:
        if result is not None and result.objects is not None and len(result.objects):
            timestamp = result.timestamp
            if timestamp:
                print("timestamp={}".format(timestamp))
            else:
                print("timestamp= " + "None")
            # Renamed loop variable: `object` (and `id`) shadowed builtins
            for detected in result.objects:
                print("id={}".format(detected.id))
                print("label={}".format(detected.label))
                print("confidence={}".format(detected.confidence))
                x = detected.position.x
                y = detected.position.y
                w = detected.position.width
                h = detected.position.height
                print("Position(x,y,w,h)=({},{},{},{})".format(x, y, w, h))
                print("")
        else:
            print("No results")
    # Fixed typo in the final message ("Stoping")
    print("Stopping prints")
if __name__ == '__main__':
    # Script entry point: run the preview/inference overlay iteration test
    main()
|
inference_standard_yolov3.py | """
Inference script for the yolov3.yolov3_aleatoric class.
Produces detection files for each input image conforming to the ECP .json format.
The output of this script can be directly used by the ECP evaluation code.
"""
import json
import logging
import os
import threading
import time
import numpy as np
import tensorflow as tf
from lib_yolo import dataset_utils, yolov3
class Inference:
    """Runs a trained YOLOv3 model over a test dataset and writes one
    ECP-format .json detection file per input image."""

    def __init__(self, yolo, config):
        """Build the inference graph and resolve the checkpoint to load.

        Args:
            yolo: yolov3 model factory (lib_yolo.yolov3).
            config (dict): run configuration (checkpoint path/step, batch
                size, full image size, output path, ...).
        """
        self.batch_size = config['batch_size']

        dataset = dataset_utils.TestingDataset(config)
        self.img_tensor, self.filename_tensor = dataset.iterator.get_next()

        checkpoints = os.path.join(config['checkpoint_path'], config['run_id'])
        if config['step'] == 'last':
            self.checkpoint = tf.train.latest_checkpoint(checkpoints)
        else:
            # Find the checkpoint whose global step matches config['step']
            self.checkpoint = None
            for cp in os.listdir(checkpoints):
                if cp.endswith('-{}.meta'.format(config['step'])):
                    self.checkpoint = os.path.join(checkpoints, os.path.splitext(cp)[0])
                    break
            assert self.checkpoint is not None

        step = self.checkpoint.split('-')[-1]

        self.img_size = config['full_img_size']

        assert not config['crop']

        # Fails if the directory exists, preventing accidental overwrite of
        # a previous run's detections
        self.out_path = '{}_{}'.format(config['out_path'], step)
        os.makedirs(self.out_path)

        self.config = config
        # Background thread writing the previous batch's results to disk
        self.worker_thread = None

        self.model = yolo.init_model(inputs=self.img_tensor, training=False).get_model()

        bbox = concat_bbox([self.model.det_layers[0].bbox,
                            self.model.det_layers[1].bbox,
                            self.model.det_layers[2].bbox])
        self.nms = nms(bbox, self.model)

    def run(self):
        """Restore the checkpoint and process batches until the dataset
        iterator is exhausted. Returns self."""
        with tf.Session(config=tf.ConfigProto(device_count={'GPU': 1})) as sess:
            tf.train.Saver().restore(sess, self.checkpoint)

            step = 0
            while True:
                try:
                    step += 1
                    processed = self.process_batch(sess)
                    logging.info('Processed {} images.'.format((step - 1) * self.batch_size + processed))
                except tf.errors.OutOfRangeError:
                    # Dataset iterator exhausted - normal end of inference
                    break

            if self.worker_thread:
                self.worker_thread.join()

        return self

    def process_batch(self, sess):
        """Run NMS on one batch and hand the results to a writer thread.

        Returns:
            int: number of images in the processed batch.
        """
        boxes, files = sess.run([self.nms, self.filename_tensor])
        # Only one writer at a time: wait for the previous batch's writes
        if self.worker_thread:
            self.worker_thread.join()
        self.worker_thread = threading.Thread(target=self.write_to_disc, args=(boxes, files))
        self.worker_thread.start()
        return len(files)

    def write_to_disc(self, all_boxes, files):
        """Write one .json detection file per image of the batch."""
        for batch, filename in enumerate(files):
            filename = filename[0].decode('utf-8')
            boxes = all_boxes[batch]
            self.write_ecp_json(boxes, filename)

    def write_ecp_json(self, boxes, img_name):
        """Serialize one image's detections in ECP .json format."""
        out_name = '{}.json'.format(os.path.splitext(os.path.basename(img_name))[0])
        out_file = os.path.join(self.out_path, out_name)
        with open(out_file, 'w') as f:
            json.dump({
                'children': [bbox_to_ecp_format(bbox, self.img_size, self.model, self.config) for bbox in boxes],
            }, f, default=lambda x: x.tolist())
# -----------------------------------------------------------------#
# helpers #
# -----------------------------------------------------------------#
def nms(all_boxes, model):
    """Apply per-image non-max suppression over a batch of box tensors.

    Args:
        all_boxes: tensor of candidate detections, batched along axis 0;
            columns 0-3 are box coordinates, `model.obj_idx` is objectness.
        model: model wrapper providing the objectness column index.

    Returns:
        Tensor of NMS-filtered boxes, batched along axis 0.
    """
    def nms_op(boxes):
        # nms ignoring classes
        nms_indices = tf.image.non_max_suppression(boxes[:, :4], boxes[:, model.obj_idx], 1000)
        all_boxes = tf.gather(boxes, nms_indices, axis=0)
        all_boxes = tf.expand_dims(all_boxes, axis=0)

        # # nms for each class individually, works only for data with 2 classes (e.g. ECP dataset)
        # # this was used to produce the results for the paper
        # nms_boxes = None
        # for cls in ['ped', 'rider']:
        #     if cls == 'ped':
        #         tmp = tf.greater(b[:, model.cls_start_idx], b[:, model.cls_start_idx + 1])
        #     elif cls == 'rider':
        #         tmp = tf.greater(b[:, model.cls_start_idx + 1], b[:, model.cls_start_idx])
        #     else:
        #         raise ValueError('invalid class: {}'.format(cls))
        #
        #     cls_indices = tf.cast(tf.reshape(tf.where(tmp), [-1]), tf.int32)
        #
        #     cls_boxes = tf.gather(b, cls_indices)
        #     ind = tf.image.non_max_suppression(cls_boxes[:, :4], cls_boxes[:, model.obj_idx], 1000)
        #     cls_boxes = tf.gather(cls_boxes, ind, axis=0)
        #
        #     if nms_boxes is None:
        #         nms_boxes = cls_boxes
        #     else:
        #         nms_boxes = tf.concat([nms_boxes, cls_boxes], axis=0)
        #
        #     return nms_boxes
        return all_boxes

    # Emulate a do-while over the batch dimension with tf.while_loop:
    # process image 0 eagerly, then loop from index 1 concatenating results.
    body = lambda i, r: [i + 1, tf.concat([r, nms_op(all_boxes[i, ...])], axis=0)]
    r0 = nms_op(all_boxes[0, ...])  # do while
    i0 = tf.constant(1)  # start with 1!!!
    cond = lambda i, m: i < tf.shape(all_boxes)[0]
    ilast, result = tf.while_loop(cond, body, loop_vars=[i0, r0],
                                  shape_invariants=[i0.get_shape(), tf.TensorShape([None, None, all_boxes.shape[2]])])
    return result
def bbox_to_ecp_format(bbox, img_size, model, config):
    """Convert one detection vector into an ECP-style .json record.

    The first four entries of `bbox` are normalized [y0, x0, y1, x1] and
    are scaled back to pixel coordinates; the detection score is the
    objectness score times the winning class score.
    """
    height, width = img_size[0], img_size[1]
    # Label mapping for the ECP dataset; labels start at 0 if there is no
    # implicit background class.
    class_names = {
        1: 'pedestrian',
        2: 'rider',
    }

    start = model.cls_start_idx
    cls_scores = bbox[start:start + model.cls_cnt]
    cls_idx = np.argmax(cls_scores)
    label = cls_idx + 1 if config['implicit_background_class'] else cls_idx

    detection = {
        'y0': float(bbox[0] * height),
        'x0': float(bbox[1] * width),
        'y1': float(bbox[2] * height),
        'x1': float(bbox[3] * width),
        'score': float(bbox[model.obj_idx]) * float(bbox[start + cls_idx]),
        'cls_scores': cls_scores,
        'identity': class_names.get(label, label),
    }
    return detection
def concat_bbox(net_out):
    """Flatten and join the prior boxes of all detection layers.

    Each prior tensor of shape (batch, lw, lh, det_size) is reshaped to
    (batch, lw*lh, det_size); all reshaped tensors are concatenated along
    axis 1. Returns None for empty input.
    """
    bbox = None
    priors = (prior for det_layer in net_out for prior in det_layer)
    for prior in priors:
        _, lw, lh, det_size = prior.shape.as_list()
        flat = tf.reshape(prior, shape=[-1, lw * lh, det_size])
        bbox = flat if bbox is None else tf.concat([bbox, flat], axis=1)
    return bbox
# -----------------------------------------------------------------#
# main #
# -----------------------------------------------------------------#
def inference(config):
    """Run detection inference for `config` and log the elapsed wall time."""
    assert not config['crop']
    logging.info(json.dumps(config, indent=4, default=lambda x: str(x)))
    logging.info('----- START -----')

    start = time.time()
    Inference(yolov3.yolov3(config), config).run()
    elapsed = int(time.time() - start)

    hours, remainder = divmod(elapsed, 3600)
    minutes, seconds = divmod(remainder, 60)
    logging.info('----- FINISHED in {:02d}:{:02d}:{:02d} -----'.format(hours, minutes, seconds))
def main():
    """Build the inference run configuration and launch it.

    Values marked '# edit' are expected to be adapted per machine/run.
    """
    config = {
        'checkpoint_path': './checkpoints',  # edit
        'run_id': 'yolo',  # edit
        # 'step': 500000,  # edit
        'step': 'last',
        'full_img_size': [1024, 1920, 3],
        'cls_cnt': 2,  # edit
        'batch_size': 11,  # edit
        'cpu_thread_cnt': 24,  # edit
        'crop': False,  # inference always runs on full images
        'training': False,
        'aleatoric_loss': True,
        'priors': yolov3.ECP_9_PRIORS,  # edit
        'implicit_background_class': True,  # class labels then start at 1
        'data': {
            'path': '$HOME/data/ecp/tfrecords',  # edit
            'file_pattern': 'ecp-day-val-*-of-*',  # edit
        }
    }

    # Expand environment variables and join path + pattern into one glob
    config['data']['file_pattern'] = os.path.join(os.path.expandvars(config['data']['path']),
                                                  config['data']['file_pattern'])

    config['out_path'] = os.path.join('./inference', config['run_id'])  # edit

    inference(config)
if __name__ == '__main__':
    # Compact float printing for any logged arrays
    np.set_printoptions(suppress=True, formatter={'float_kind': '{:5.3}'.format})
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s, pid: %(process)d, %(levelname)-8s %(message)s',
                        datefmt='%a, %d %b %Y %H:%M:%S',
                        )
    main()
|
queue_consumer.py | from threading import Thread
import pika
class ConsumerQueue(object):
    """Consume messages from a RabbitMQ queue on a background thread.

    Every received delivery is buffered in `_received_values` and sets the
    supplied event so a test can wait for message arrival.
    """

    def __init__(self, name, event, host, port):
        """
        Args:
            name (str): Queue name to declare and consume from.
            event (threading.Event): Set whenever a message arrives.
            host (str): RabbitMQ host.
            port (int): RabbitMQ port.
        """
        super(ConsumerQueue, self).__init__()
        self._name = name
        self._event = event
        self._received_values = []
        self._connection = pika.BlockingConnection(
            pika.ConnectionParameters(host, port))
        self._channel = self._connection.channel()
        self._channel.queue_declare(queue=self._name)
        self._consumer_tag = self._channel.basic_consume(
            self._queue_callback, queue=self._name, no_ack=True)
        # Use the bound method directly; the original wrapped it in a
        # lambda that took `self` via args, which was redundant.
        self._thread = Thread(target=self._channel.start_consuming)

    def __del__(self):
        # getattr: __del__ may run even when __init__ failed before
        # `_connection` was assigned (e.g. connection refused).
        if getattr(self, '_connection', None):
            self._connection.close()

    def _queue_callback(self, ch, method, properties, body):
        # Buffer the complete delivery context and wake any waiter.
        self._received_values.append({'ch': ch, 'method': method, 'properties': properties, 'body': body})
        self._event.set()

    def get_last_value(self):
        """Remove and return the most recently received message (LIFO)."""
        return self._received_values.pop()

    def start(self):
        """Start consuming on the background thread."""
        self._thread.start()

    def stop(self):
        """Cancel the consumer and wait for the thread to exit."""
        self._channel.stop_consuming(self._consumer_tag)
        self._thread.join()
|
chunk.py | import logging
import threading
import warnings
import requests
logger = logging.getLogger(__name__)
class Chunk(object):
    """One byte-range piece of a download, fetched on its own thread.

    State machine: INIT -> DOWNLOADING -> (PAUSED -> DOWNLOADING)* ->
    FINISHED, with STOPPED reachable from any active state.
    """

    INIT = 0
    DOWNLOADING = 1
    PAUSED = 2
    FINISHED = 3
    STOPPED = 4

    def __init__(self, downloader, url, file, start_byte=-1, end_byte=-1, number=-1,
                 high_speed=False, headers=None, params=None):
        """
        Args:
            downloader: Owning downloader (kept as a back-reference).
            url (str): Source URL.
            file: Writable binary file-like object receiving the data.
            start_byte (int): First byte of the range; -1 for the whole file.
            end_byte (int): Last byte of the range; -1 for the whole file.
            number (int): Chunk index within the overall download.
            high_speed (bool): Use a larger read size per iteration.
            headers (dict): Extra HTTP headers.
            params (dict): Extra query parameters.
        """
        self.url = url
        self.start_byte = int(start_byte)
        self.end_byte = int(end_byte)
        self.file = file
        self.number = number
        self.downloader = downloader
        self.high_speed = high_speed
        if headers is None:
            headers = {}
        self.headers = headers
        if params is None:
            params = {}
        self.params = params
        self.__state = Chunk.INIT
        self.progress = 0
        self.total_length = 0
        if self.high_speed:
            self.download_iter_size = 1024*512  # Half a megabyte
        else:
            self.download_iter_size = 1024  # a kilobyte

    def start(self):
        """Begin downloading on a new thread."""
        self.thread = threading.Thread(target=self.run)
        self.thread.start()

    def stop(self):
        """Request the running download to abort."""
        self.__state = Chunk.STOPPED

    def pause(self):
        """Request a running download to pause; warns if not downloading."""
        if self.__state == Chunk.DOWNLOADING:
            self.__state = Chunk.PAUSED
        else:
            warnings.warn("Cannot pause at this stage")

    def resume(self):
        """Resume a paused download on a fresh thread; warns otherwise."""
        if self.__state == Chunk.PAUSED:
            logger.debug(self.__paused_request)
            self.thread = threading.Thread(target=self.run, kwargs={'r': self.__paused_request})
            self.thread.start()
            logger.debug("chunk thread started")
        else:
            # Consistency fix: pause() already warned on misuse, but
            # resume() used to fail silently.
            warnings.warn("Cannot resume at this stage")

    def run(self, r=None):
        """Stream the (range of the) URL into `self.file`.

        Args:
            r: An already-open streaming response to continue from (used
                by resume()); a new request is issued when None.
        """
        self.__state = Chunk.DOWNLOADING
        if r is None:
            if self.start_byte == -1 and self.end_byte == -1:
                # No range requested: fetch the entire resource
                r = requests.get(self.url, stream=True, headers=self.headers, params=self.params)
            else:
                self.headers['Range'] = "bytes=" + str(self.start_byte) + "-" + str(self.end_byte)
                # Drop a conflicting lowercase duplicate if the caller set one
                if 'range' in self.headers:
                    del self.headers['range']
                r = requests.get(self.url, stream=True, headers=self.headers, params=self.params)
        # Bug fix: Content-Length may be absent (e.g. chunked transfer
        # encoding); int(None) used to raise TypeError. 0 means "unknown".
        self.total_length = int(r.headers.get("content-length") or 0)
        break_flag = False
        for part in r.iter_content(chunk_size=self.download_iter_size):
            self.progress += len(part)
            if part and self.__state != Chunk.STOPPED:  # filter out keep-alive new chunks
                self.file.write(part)
            if self.__state == Chunk.PAUSED:
                # Keep the live response so resume() can continue from it
                self.__paused_request = r
                break_flag = True
                break
            elif self.__state == Chunk.STOPPED:
                break_flag = True
                break
        if not break_flag:
            self.__state = Chunk.FINISHED

    def is_finished(self):
        """Return True once the chunk completed without stop/pause."""
        return self.__state == Chunk.FINISHED
|
test_tcp.py | import asyncio
import asyncio.sslproto
import gc
import os
import select
import socket
import unittest.mock
import ssl
import sys
import threading
import time
import weakref
from OpenSSL import SSL as openssl_ssl
from uvloop import _testbase as tb
SSL_HANDSHAKE_TIMEOUT = 15.0
class MyBaseProto(asyncio.Protocol):
    """Test protocol that records connection lifecycle transitions.

    `state` walks INITIAL -> CONNECTED [-> EOF] -> CLOSED while `nbytes`
    accumulates the total received payload size. When constructed with a
    loop, the `connected`/`done` futures resolve on connect and close.
    """

    connected = None
    done = None

    def __init__(self, loop=None):
        self.transport = None
        self.state = 'INITIAL'
        self.nbytes = 0
        if loop is None:
            return
        self.connected = asyncio.Future(loop=loop)
        self.done = asyncio.Future(loop=loop)

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'CONNECTED'
        fut = self.connected
        if fut:
            fut.set_result(None)

    def data_received(self, data):
        assert self.state == 'CONNECTED', self.state
        self.nbytes = self.nbytes + len(data)

    def eof_received(self):
        assert self.state == 'CONNECTED', self.state
        self.state = 'EOF'

    def connection_lost(self, exc):
        assert self.state in ('CONNECTED', 'EOF'), self.state
        self.state = 'CLOSED'
        fut = self.done
        if fut:
            fut.set_result(None)
class _TestTCP:
    def test_create_server_1(self):
        """End-to-end echo exchange: many concurrent clients against servers
        started both from a host/port pair and from a pre-bound socket."""
        CNT = 0           # number of clients that were successful
        TOTAL_CNT = 25    # total number of clients that test will create
        TIMEOUT = 5.0     # timeout for this test

        A_DATA = b'A' * 1024 * 1024
        B_DATA = b'B' * 1024 * 1024

        async def handle_client(reader, writer):
            nonlocal CNT
            data = await reader.readexactly(len(A_DATA))
            self.assertEqual(data, A_DATA)
            writer.write(b'OK')
            data = await reader.readexactly(len(B_DATA))
            self.assertEqual(data, B_DATA)
            # Exercise all writable buffer types accepted by the transport
            writer.writelines([b'S', b'P'])
            writer.write(bytearray(b'A'))
            writer.write(memoryview(b'M'))

            if self.implementation == 'uvloop':
                # uvloop is expected to enable TCP_NODELAY on its transports
                tr = writer.transport
                sock = tr.get_extra_info('socket')
                self.assertTrue(
                    sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY))

            await writer.drain()
            writer.close()
            CNT += 1

        async def test_client(addr):
            sock = socket.socket()
            with sock:
                sock.setblocking(False)
                await self.loop.sock_connect(sock, addr)
                await self.loop.sock_sendall(sock, A_DATA)

                buf = b''
                while len(buf) != 2:
                    buf += await self.loop.sock_recv(sock, 1)
                self.assertEqual(buf, b'OK')

                await self.loop.sock_sendall(sock, B_DATA)

                buf = b''
                while len(buf) != 4:
                    buf += await self.loop.sock_recv(sock, 1)
                self.assertEqual(buf, b'SPAM')

            # The `with` block must have fully closed the client socket
            self.assertEqual(sock.fileno(), -1)
            self.assertEqual(sock._io_refs, 0)
            self.assertTrue(sock._closed)

        async def start_server():
            nonlocal CNT
            CNT = 0

            srv = await asyncio.start_server(
                handle_client,
                ('127.0.0.1', 'localhost'), 0,
                family=socket.AF_INET)

            srv_socks = srv.sockets
            self.assertTrue(srv_socks)
            self.assertTrue(srv.is_serving())

            addr = srv_socks[0].getsockname()

            tasks = []
            for _ in range(TOTAL_CNT):
                tasks.append(test_client(addr))

            await asyncio.wait_for(asyncio.gather(*tasks), TIMEOUT)

            self.loop.call_soon(srv.close)
            await srv.wait_closed()

            # Check that the server cleaned-up proxy-sockets
            for srv_sock in srv_socks:
                self.assertEqual(srv_sock.fileno(), -1)

            self.assertFalse(srv.is_serving())

        async def start_server_sock():
            nonlocal CNT
            CNT = 0

            sock = socket.socket()
            sock.bind(('127.0.0.1', 0))
            addr = sock.getsockname()

            srv = await asyncio.start_server(
                handle_client,
                None, None,
                family=socket.AF_INET,
                sock=sock)

            self.assertIs(srv.get_loop(), self.loop)

            srv_socks = srv.sockets
            self.assertTrue(srv_socks)
            self.assertTrue(srv.is_serving())

            tasks = []
            for _ in range(TOTAL_CNT):
                tasks.append(test_client(addr))

            await asyncio.wait_for(asyncio.gather(*tasks), TIMEOUT)

            srv.close()
            await srv.wait_closed()

            # Check that the server cleaned-up proxy-sockets
            for srv_sock in srv_socks:
                self.assertEqual(srv_sock.fileno(), -1)

            self.assertFalse(srv.is_serving())

        self.loop.run_until_complete(start_server())
        self.assertEqual(CNT, TOTAL_CNT)

        self.loop.run_until_complete(start_server_sock())
        self.assertEqual(CNT, TOTAL_CNT)
    def test_create_server_2(self):
        """create_server with neither host nor sock must raise ValueError."""
        with self.assertRaisesRegex(ValueError, 'nor sock were specified'):
            self.loop.run_until_complete(self.loop.create_server(object))
    def test_create_server_3(self):
        ''' check ephemeral port can be used '''
        async def start_server_ephemeral_ports():
            # Both 0 and None must request an OS-assigned ephemeral port
            for port_sentinel in [0, None]:
                srv = await self.loop.create_server(
                    asyncio.Protocol,
                    '127.0.0.1', port_sentinel,
                    family=socket.AF_INET)

                srv_socks = srv.sockets
                self.assertTrue(srv_socks)
                self.assertTrue(srv.is_serving())

                host, port = srv_socks[0].getsockname()
                self.assertNotEqual(0, port)

                self.loop.call_soon(srv.close)
                await srv.wait_closed()

                # Check that the server cleaned-up proxy-sockets
                for srv_sock in srv_socks:
                    self.assertEqual(srv_sock.fileno(), -1)

                self.assertFalse(srv.is_serving())

        self.loop.run_until_complete(start_server_ephemeral_ports())
    def test_create_server_4(self):
        """Binding to an already-bound address must raise 'address in use'."""
        sock = socket.socket()
        sock.bind(('127.0.0.1', 0))

        with sock:
            addr = sock.getsockname()

            with self.assertRaisesRegex(OSError,
                                        r"error while attempting.*\('127.*: "
                                        r"address already in use"):
                self.loop.run_until_complete(
                    self.loop.create_server(object, *addr))
    def test_create_server_5(self):
        # Test that create_server sets the TCP_IPV6ONLY flag,
        # so it can bind to ipv4 and ipv6 addresses
        # simultaneously.
        # NOTE(review): the socket option referred to here is IPV6_V6ONLY.
        port = tb.find_free_port()

        async def runner():
            srv = await self.loop.create_server(
                asyncio.Protocol,
                None, port)
            srv.close()
            await srv.wait_closed()

        self.loop.run_until_complete(runner())
    def test_create_server_6(self):
        """Two servers can share one port when reuse_port is enabled."""
        if not hasattr(socket, 'SO_REUSEPORT'):
            raise unittest.SkipTest(
                'The system does not support SO_REUSEPORT')

        port = tb.find_free_port()

        async def runner():
            srv1 = await self.loop.create_server(
                asyncio.Protocol,
                None, port,
                reuse_port=True)

            srv2 = await self.loop.create_server(
                asyncio.Protocol,
                None, port,
                reuse_port=True)

            srv1.close()
            srv2.close()

            await srv1.wait_closed()
            await srv2.wait_closed()

        self.loop.run_until_complete(runner())
def test_create_server_7(self):
    # Test that create_server() stores a hard ref to the server object
    # somewhere in the loop. In asyncio it so happens that
    # loop.sock_accept() has a reference to the server object so it
    # never gets GCed.

    class Proto(asyncio.Protocol):
        def connection_made(self, tr):
            self.tr = tr
            self.tr.write(b'hello')

    async def test():
        port = tb.find_free_port()
        srv = await self.loop.create_server(Proto, '127.0.0.1', port)
        # Drop our only strong reference; the loop must keep the
        # server alive while it is serving.
        wsrv = weakref.ref(srv)
        del srv

        gc.collect()
        gc.collect()
        gc.collect()

        s = socket.socket(socket.AF_INET)
        with s:
            s.setblocking(False)
            await self.loop.sock_connect(s, ('127.0.0.1', port))
            d = await self.loop.sock_recv(s, 100)
            self.assertEqual(d, b'hello')

        srv = wsrv()
        srv.close()
        await srv.wait_closed()
        del srv

        # Let all transports shutdown.
        await asyncio.sleep(0.1)

        gc.collect()
        gc.collect()
        gc.collect()

        # After close the loop must drop its reference.
        self.assertIsNone(wsrv())

    self.loop.run_until_complete(test())
def test_create_server_8(self):
    """ssl_handshake_timeout without ssl must be rejected by create_server."""
    coro = self.loop.create_server(
        lambda: None, host='::', port=0, ssl_handshake_timeout=10)
    with self.assertRaisesRegex(
            ValueError, 'ssl_handshake_timeout is only meaningful'):
        self.loop.run_until_complete(coro)
def test_create_server_9(self):
    """start_serving() may be called again on an already-serving server."""

    async def handle_client(reader, writer):
        pass

    async def start_server():
        srv = await asyncio.start_server(
            handle_client,
            '127.0.0.1', 0,
            family=socket.AF_INET,
            start_serving=False)
        await srv.start_serving()
        self.assertTrue(srv.is_serving())
        # call start_serving again
        await srv.start_serving()
        self.assertTrue(srv.is_serving())
        srv.close()
        await srv.wait_closed()
        self.assertFalse(srv.is_serving())

    self.loop.run_until_complete(start_server())
def test_create_server_10(self):
    """serve_forever() begins serving and can be cancelled cleanly."""

    async def handle_client(reader, writer):
        pass

    async def start_server():
        srv = await asyncio.start_server(
            handle_client,
            '127.0.0.1', 0,
            family=socket.AF_INET,
            start_serving=False)
        async with srv:
            fut = asyncio.ensure_future(srv.serve_forever())
            # One loop iteration is enough for serve_forever to start.
            await asyncio.sleep(0)
            self.assertTrue(srv.is_serving())
            fut.cancel()
            with self.assertRaises(asyncio.CancelledError):
                await fut
            self.assertFalse(srv.is_serving())

    self.loop.run_until_complete(start_server())
def test_create_connection_open_con_addr(self):
    """open_connection() by (host, port): echo round-trip + write type check."""

    async def client(addr):
        reader, writer = await asyncio.open_connection(*addr)

        writer.write(b'AAAA')
        self.assertEqual(await reader.readexactly(2), b'OK')

        # Writing a str must raise TypeError; the message differs
        # between asyncio ("a bytes-like object") and uvloop
        # ("must be byte-ish").
        re = r'(a bytes-like object)|(must be byte-ish)'
        with self.assertRaisesRegex(TypeError, re):
            writer.write('AAAA')

        writer.write(b'BBBB')
        self.assertEqual(await reader.readexactly(4), b'SPAM')

        if self.implementation == 'uvloop':
            tr = writer.transport
            sock = tr.get_extra_info('socket')
            # uvloop enables TCP_NODELAY on stream transports.
            self.assertTrue(
                sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY))

        writer.close()
        await self.wait_closed(writer)

    self._test_create_connection_1(client)
def test_create_connection_open_con_sock(self):
    """open_connection() with a pre-connected sock= argument."""

    async def client(addr):
        sock = socket.socket()
        sock.connect(addr)
        reader, writer = await asyncio.open_connection(sock=sock)

        writer.write(b'AAAA')
        self.assertEqual(await reader.readexactly(2), b'OK')

        writer.write(b'BBBB')
        self.assertEqual(await reader.readexactly(4), b'SPAM')

        if self.implementation == 'uvloop':
            tr = writer.transport
            # Rebinds `sock` to the transport's socket on purpose.
            sock = tr.get_extra_info('socket')
            self.assertTrue(
                sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY))

        writer.close()
        await self.wait_closed(writer)

    self._test_create_connection_1(client)
def _test_create_connection_1(self, client):
    """Drive `client` against a threaded echo server, TOTAL_CNT times."""
    CNT = 0
    TOTAL_CNT = 100

    def server(sock):
        # Request/response script mirrored by the client coroutine.
        data = sock.recv_all(4)
        self.assertEqual(data, b'AAAA')
        sock.send(b'OK')

        data = sock.recv_all(4)
        self.assertEqual(data, b'BBBB')
        sock.send(b'SPAM')

    async def client_wrapper(addr):
        await client(addr)
        nonlocal CNT
        CNT += 1

    def run(coro):
        nonlocal CNT
        CNT = 0

        with self.tcp_server(server,
                             max_clients=TOTAL_CNT,
                             backlog=TOTAL_CNT) as srv:
            tasks = []
            for _ in range(TOTAL_CNT):
                tasks.append(coro(srv.addr))
            self.loop.run_until_complete(asyncio.gather(*tasks))
        # Every client coroutine must have completed successfully.
        self.assertEqual(CNT, TOTAL_CNT)

    run(client_wrapper)
def test_create_connection_2(self):
    """Connecting to an address nobody listens on must be refused."""
    tmp = socket.socket()
    with tmp:
        tmp.bind(('127.0.0.1', 0))
        target = tmp.getsockname()
    # The socket is closed here, so nothing listens on `target`.

    async def attempt():
        reader, writer = await asyncio.open_connection(*target)
        writer.close()
        await self.wait_closed(writer)

    async def main():
        with self.assertRaises(ConnectionRefusedError):
            await attempt()

    self.loop.run_until_complete(main())
def test_create_connection_3(self):
    """Server closing mid-stream surfaces as IncompleteReadError."""
    CNT = 0
    TOTAL_CNT = 100

    def server(sock):
        data = sock.recv_all(4)
        self.assertEqual(data, b'AAAA')
        # Close without replying; the client's readexactly must fail.
        sock.close()

    async def client(addr):
        reader, writer = await asyncio.open_connection(*addr)

        writer.write(b'AAAA')

        with self.assertRaises(asyncio.IncompleteReadError):
            await reader.readexactly(10)

        writer.close()
        await self.wait_closed(writer)

        nonlocal CNT
        CNT += 1

    def run(coro):
        nonlocal CNT
        CNT = 0

        with self.tcp_server(server,
                             max_clients=TOTAL_CNT,
                             backlog=TOTAL_CNT) as srv:
            tasks = []
            for _ in range(TOTAL_CNT):
                tasks.append(coro(srv.addr))
            self.loop.run_until_complete(asyncio.gather(*tasks))
        self.assertEqual(CNT, TOTAL_CNT)

    run(client)
def test_create_connection_4(self):
    """Passing an already-closed socket raises OSError ('Bad file ...')."""
    dead_sock = socket.socket()
    dead_sock.close()

    async def connect():
        reader, writer = await asyncio.open_connection(sock=dead_sock)
        writer.close()
        await self.wait_closed(writer)

    async def main():
        with self.assertRaisesRegex(OSError, 'Bad file'):
            await connect()

    self.loop.run_until_complete(main())
def test_create_connection_5(self):
    """Cancelling create_connection() before it completes raises CancelledError."""

    def server(sock):
        try:
            data = sock.recv_all(4)
        except ConnectionError:
            # Expected when the client cancels before sending.
            return
        self.assertEqual(data, b'AAAA')
        sock.send(b'OK')

    async def client(addr):
        fut = asyncio.ensure_future(
            self.loop.create_connection(asyncio.Protocol, *addr))
        # Yield once so the connection attempt is in flight, then cancel.
        await asyncio.sleep(0)
        fut.cancel()
        with self.assertRaises(asyncio.CancelledError):
            await fut

    with self.tcp_server(server,
                         max_clients=1,
                         backlog=1) as srv:
        self.loop.run_until_complete(client(srv.addr))
def test_create_connection_6(self):
    """ssl_handshake_timeout without ssl must be rejected by create_connection."""
    coro = self.loop.create_connection(
        lambda: None, host='::', port=0, ssl_handshake_timeout=10)
    with self.assertRaisesRegex(
            ValueError, 'ssl_handshake_timeout is only meaningful'):
        self.loop.run_until_complete(coro)
def test_transport_shutdown(self):
    """write_eof() may be called twice without error during shutdown."""
    CNT = 0           # number of clients that were successful
    TOTAL_CNT = 100   # total number of clients that test will create
    TIMEOUT = 5.0     # timeout for this test

    async def handle_client(reader, writer):
        nonlocal CNT

        data = await reader.readexactly(4)
        self.assertEqual(data, b'AAAA')

        writer.write(b'OK')
        writer.write_eof()
        # Deliberate second call: must be tolerated.
        writer.write_eof()

        await writer.drain()
        writer.close()

        CNT += 1

    async def test_client(addr):
        reader, writer = await asyncio.open_connection(*addr)

        writer.write(b'AAAA')
        data = await reader.readexactly(2)
        self.assertEqual(data, b'OK')

        writer.close()
        await self.wait_closed(writer)

    async def start_server():
        nonlocal CNT
        CNT = 0

        srv = await asyncio.start_server(
            handle_client,
            '127.0.0.1', 0,
            family=socket.AF_INET)

        srv_socks = srv.sockets
        self.assertTrue(srv_socks)

        addr = srv_socks[0].getsockname()

        tasks = []
        for _ in range(TOTAL_CNT):
            tasks.append(test_client(addr))

        await asyncio.wait_for(asyncio.gather(*tasks), TIMEOUT)

        srv.close()
        await srv.wait_closed()

    self.loop.run_until_complete(start_server())
    self.assertEqual(CNT, TOTAL_CNT)
def test_tcp_handle_exception_in_connection_made(self):
    # Test that if connection_made raises an exception,
    # 'create_connection' still returns.

    # Silence error logging
    self.loop.set_exception_handler(lambda *args: None)

    fut = asyncio.Future()
    connection_lost_called = asyncio.Future()

    async def server(reader, writer):
        try:
            await reader.read()
        finally:
            writer.close()

    class Proto(asyncio.Protocol):
        def connection_made(self, tr):
            # Deliberately blow up inside connection_made.
            1 / 0

        def connection_lost(self, exc):
            connection_lost_called.set_result(exc)

    srv = self.loop.run_until_complete(asyncio.start_server(
        server,
        '127.0.0.1', 0,
        family=socket.AF_INET))

    async def runner():
        tr, pr = await asyncio.wait_for(
            self.loop.create_connection(
                Proto, *srv.sockets[0].getsockname()),
            timeout=1.0)
        fut.set_result(None)
        tr.close()

    self.loop.run_until_complete(runner())
    srv.close()
    self.loop.run_until_complete(srv.wait_closed())
    self.loop.run_until_complete(fut)

    # connection_lost must be called with exc=None (normal close).
    self.assertIsNone(
        self.loop.run_until_complete(connection_lost_called))
class Test_UV_TCP(_TestTCP, tb.UVTestCase):
    """TCP tests bound to the uvloop implementation (plus uvloop-only cases)."""

    def test_create_server_buffered_1(self):
        # BufferedProtocol happy path: the server echoes b'hello' once it
        # has received SIZE bytes via get_buffer()/buffer_updated().
        SIZE = 123123
        eof = False
        fut = asyncio.Future()

        class Proto(asyncio.BaseProtocol):
            def connection_made(self, tr):
                self.tr = tr
                self.recvd = b''
                self.data = bytearray(50)
                self.buf = memoryview(self.data)

            def get_buffer(self, sizehint):
                return self.buf

            def buffer_updated(self, nbytes):
                self.recvd += self.buf[:nbytes]
                if self.recvd == b'a' * SIZE:
                    self.tr.write(b'hello')

            def eof_received(self):
                nonlocal eof
                eof = True

            def connection_lost(self, exc):
                fut.set_result(exc)

        async def test():
            port = tb.find_free_port()
            srv = await self.loop.create_server(Proto, '127.0.0.1', port)

            s = socket.socket(socket.AF_INET)
            with s:
                s.setblocking(False)
                await self.loop.sock_connect(s, ('127.0.0.1', port))
                await self.loop.sock_sendall(s, b'a' * SIZE)
                d = await self.loop.sock_recv(s, 100)
                self.assertEqual(d, b'hello')

            srv.close()
            await srv.wait_closed()

        self.loop.run_until_complete(test())
        self.loop.run_until_complete(fut)
        self.assertTrue(eof)
        self.assertIsNone(fut.result())

    def test_create_server_buffered_2(self):
        # BufferedProtocol error paths: get_buffer() raising, zero-sized
        # buffers, and buffer_updated() raising.  In each case the server
        # must abort the connection and report via connection_lost().

        class ProtoExc(asyncio.BaseProtocol):
            def __init__(self):
                self._lost_exc = None

            def get_buffer(self, sizehint):
                # Deliberate error inside get_buffer().
                1 / 0

            def buffer_updated(self, nbytes):
                pass

            def connection_lost(self, exc):
                self._lost_exc = exc

            def eof_received(self):
                pass

        class ProtoZeroBuf1(asyncio.BaseProtocol):
            def __init__(self):
                self._lost_exc = None

            def get_buffer(self, sizehint):
                # Zero-sized bytearray: invalid buffer.
                return bytearray(0)

            def buffer_updated(self, nbytes):
                pass

            def connection_lost(self, exc):
                self._lost_exc = exc

            def eof_received(self):
                pass

        class ProtoZeroBuf2(asyncio.BaseProtocol):
            def __init__(self):
                self._lost_exc = None

            def get_buffer(self, sizehint):
                # Zero-sized memoryview: invalid buffer.
                return memoryview(bytearray(0))

            def buffer_updated(self, nbytes):
                pass

            def connection_lost(self, exc):
                self._lost_exc = exc

            def eof_received(self):
                pass

        class ProtoUpdatedError(asyncio.BaseProtocol):
            def __init__(self):
                self._lost_exc = None

            def get_buffer(self, sizehint):
                return memoryview(bytearray(100))

            def buffer_updated(self, nbytes):
                # Deliberate error inside buffer_updated().
                raise RuntimeError('oups')

            def connection_lost(self, exc):
                self._lost_exc = exc

            def eof_received(self):
                pass

        async def test(proto_factory, exc_type, exc_re):
            port = tb.find_free_port()
            proto = proto_factory()
            srv = await self.loop.create_server(
                lambda: proto, '127.0.0.1', port)

            try:
                s = socket.socket(socket.AF_INET)
                with s:
                    s.setblocking(False)
                    await self.loop.sock_connect(s, ('127.0.0.1', port))
                    await self.loop.sock_sendall(s, b'a')
                    d = await self.loop.sock_recv(s, 100)
                    if not d:
                        # Empty read == peer aborted; normalize to the
                        # exception the except clause expects.
                        raise ConnectionResetError
            except ConnectionResetError:
                pass
            else:
                self.fail("server didn't abort the connection")
                return
            finally:
                srv.close()
                await srv.wait_closed()

            if proto._lost_exc is None:
                self.fail("connection_lost() was not called")
                return

            with self.assertRaisesRegex(exc_type, exc_re):
                raise proto._lost_exc

        self.loop.set_exception_handler(lambda loop, ctx: None)

        self.loop.run_until_complete(
            test(ProtoExc, RuntimeError, 'unhandled error .* get_buffer'))
        self.loop.run_until_complete(
            test(ProtoZeroBuf1, RuntimeError, 'unhandled error .* get_buffer'))
        self.loop.run_until_complete(
            test(ProtoZeroBuf2, RuntimeError, 'unhandled error .* get_buffer'))
        self.loop.run_until_complete(
            test(ProtoUpdatedError, RuntimeError, r'^oups$'))

    def test_transport_get_extra_info(self):
        # This tests is only for uvloop.  asyncio should pass it
        # too in Python 3.6.

        fut = asyncio.Future()

        async def handle_client(reader, writer):
            with self.assertRaises(asyncio.IncompleteReadError):
                await reader.readexactly(4)
            writer.close()

            # Previously, when we used socket.fromfd to create a socket
            # for UVTransports (to make get_extra_info() work), a duplicate
            # of the socket was created, preventing UVTransport from being
            # properly closed.
            # This test ensures that server handle will receive an EOF
            # and finish the request.
            fut.set_result(None)

        async def test_client(addr):
            t, p = await self.loop.create_connection(
                lambda: asyncio.Protocol(), *addr)

            if hasattr(t, 'get_protocol'):
                p2 = asyncio.Protocol()
                self.assertIs(t.get_protocol(), p)
                t.set_protocol(p2)
                self.assertIs(t.get_protocol(), p2)
                t.set_protocol(p)

            self.assertFalse(t._paused)
            self.assertTrue(t.is_reading())
            t.pause_reading()
            t.pause_reading()  # Check that it's OK to call it 2nd time.
            self.assertTrue(t._paused)
            self.assertFalse(t.is_reading())
            t.resume_reading()
            t.resume_reading()  # Check that it's OK to call it 2nd time.
            self.assertFalse(t._paused)
            self.assertTrue(t.is_reading())

            sock = t.get_extra_info('socket')
            # Repeated calls must return the same object.
            self.assertIs(sock, t.get_extra_info('socket'))

            sockname = sock.getsockname()
            peername = sock.getpeername()

            # The transport's FD must be off-limits to add/remove
            # reader/writer while the transport owns it.
            with self.assertRaisesRegex(RuntimeError, 'is used by transport'):
                self.loop.add_writer(sock.fileno(), lambda: None)
            with self.assertRaisesRegex(RuntimeError, 'is used by transport'):
                self.loop.remove_writer(sock.fileno())
            with self.assertRaisesRegex(RuntimeError, 'is used by transport'):
                self.loop.add_reader(sock.fileno(), lambda: None)
            with self.assertRaisesRegex(RuntimeError, 'is used by transport'):
                self.loop.remove_reader(sock.fileno())

            self.assertEqual(t.get_extra_info('sockname'),
                             sockname)
            self.assertEqual(t.get_extra_info('peername'),
                             peername)

            t.write(b'OK')  # We want server to fail.

            self.assertFalse(t._closing)
            t.abort()
            self.assertTrue(t._closing)
            self.assertFalse(t.is_reading())

            # Check that pause_reading and resume_reading don't raise
            # errors if called after the transport is closed.
            t.pause_reading()
            t.resume_reading()

            await fut

            # Test that peername and sockname are available after
            # the transport is closed.
            self.assertEqual(t.get_extra_info('peername'),
                             peername)
            self.assertEqual(t.get_extra_info('sockname'),
                             sockname)

        async def start_server():
            srv = await asyncio.start_server(
                handle_client,
                '127.0.0.1', 0,
                family=socket.AF_INET)

            addr = srv.sockets[0].getsockname()
            await test_client(addr)

            srv.close()
            await srv.wait_closed()

        self.loop.run_until_complete(start_server())

    def test_create_server_float_backlog(self):
        # asyncio spits out a warning we cannot suppress

        async def runner(bl):
            await self.loop.create_server(
                asyncio.Protocol,
                None, 0, backlog=bl)

        # Non-integer backlog values must be rejected with TypeError.
        for bl in (1.1, '1'):
            with self.subTest(backlog=bl):
                with self.assertRaisesRegex(TypeError, 'integer'):
                    self.loop.run_until_complete(runner(bl))

    def test_many_small_writes(self):
        N = 10000
        TOTAL = 0

        fut = self.loop.create_future()

        async def server(reader, writer):
            nonlocal TOTAL
            while True:
                d = await reader.read(10000)
                if not d:
                    break
                TOTAL += len(d)
            fut.set_result(True)
            writer.close()

        async def run():
            srv = await asyncio.start_server(
                server,
                '127.0.0.1', 0,
                family=socket.AF_INET)

            addr = srv.sockets[0].getsockname()
            r, w = await asyncio.open_connection(*addr)

            DATA = b'x' * 102400

            # Test _StreamWriteContext with short sequences of writes
            w.write(DATA)
            await w.drain()

            for _ in range(3):
                w.write(DATA)
            await w.drain()

            for _ in range(10):
                w.write(DATA)
            await w.drain()

            for _ in range(N):
                w.write(DATA)

            # A str write must fail but not corrupt the pending buffer.
            try:
                w.write('a')
            except TypeError:
                pass

            await w.drain()

            for _ in range(N):
                w.write(DATA)
            await w.drain()

            w.close()
            await fut

            await self.wait_closed(w)

            srv.close()
            await srv.wait_closed()

            # Writes above: 1 + 3 + 10 + N + N == 2*N + 14 chunks of DATA.
            self.assertEqual(TOTAL, N * 2 * len(DATA) + 14 * len(DATA))

        self.loop.run_until_complete(run())

    @unittest.skipIf(sys.version_info[:3] >= (3, 8, 0),
                     "3.8 has a different method of GCing unclosed streams")
    def test_tcp_handle_unclosed_gc(self):
        fut = self.loop.create_future()

        async def server(reader, writer):
            writer.transport.abort()
            fut.set_result(True)

        async def run():
            addr = srv.sockets[0].getsockname()
            # The (reader, writer) pair is deliberately dropped unclosed
            # to provoke the ResourceWarning below.
            await asyncio.open_connection(*addr)
            await fut
            srv.close()
            await srv.wait_closed()

        srv = self.loop.run_until_complete(asyncio.start_server(
            server,
            '127.0.0.1', 0,
            family=socket.AF_INET))

        if self.loop.get_debug():
            rx = r'unclosed resource <TCP.*; ' \
                 r'object created at(.|\n)*test_tcp_handle_unclosed_gc'
        else:
            rx = r'unclosed resource <TCP.*'

        with self.assertWarnsRegex(ResourceWarning, rx):
            self.loop.create_task(run())
            self.loop.run_until_complete(srv.wait_closed())
            self.loop.run_until_complete(asyncio.sleep(0.1))
            srv = None
            gc.collect()
            gc.collect()
            gc.collect()
            self.loop.run_until_complete(asyncio.sleep(0.1))

        # Since one TCPTransport handle wasn't closed correctly,
        # we need to disable this check:
        self.skip_unclosed_handles_check()

    def test_tcp_handle_abort_in_connection_made(self):
        async def server(reader, writer):
            try:
                await reader.read()
            finally:
                writer.close()

        class Proto(asyncio.Protocol):
            def connection_made(self, tr):
                tr.abort()

        srv = self.loop.run_until_complete(asyncio.start_server(
            server,
            '127.0.0.1', 0,
            family=socket.AF_INET))

        async def runner():
            tr, pr = await asyncio.wait_for(
                self.loop.create_connection(
                    Proto, *srv.sockets[0].getsockname()),
                timeout=1.0)

            # Asyncio would return a closed socket, which we
            # can't do: the transport was aborted, hence there
            # is no FD to attach a socket to (to make
            # get_extra_info() work).
            self.assertIsNone(tr.get_extra_info('socket'))
            tr.close()

        self.loop.run_until_complete(runner())
        srv.close()
        self.loop.run_until_complete(srv.wait_closed())

    def test_connect_accepted_socket_ssl_args(self):
        # ssl_handshake_timeout without ssl must be rejected.
        with self.assertRaisesRegex(
                ValueError, 'ssl_handshake_timeout is only meaningful'):
            with socket.socket() as s:
                self.loop.run_until_complete(
                    self.loop.connect_accepted_socket(
                        (lambda: None),
                        s,
                        ssl_handshake_timeout=SSL_HANDSHAKE_TIMEOUT
                    )
                )

    def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
        # Also reused by _TestSSL.test_ssl_connect_accepted_socket,
        # which passes non-None ssl contexts.
        loop = self.loop

        class MyProto(MyBaseProto):

            def connection_lost(self, exc):
                super().connection_lost(exc)
                loop.call_soon(loop.stop)

            def data_received(self, data):
                super().data_received(data)
                self.transport.write(expected_response)

        lsock = socket.socket(socket.AF_INET)
        lsock.bind(('127.0.0.1', 0))
        lsock.listen(1)
        addr = lsock.getsockname()

        message = b'test data'
        response = None
        expected_response = b'roger'

        def client():
            nonlocal response
            try:
                csock = socket.socket(socket.AF_INET)
                if client_ssl is not None:
                    csock = client_ssl.wrap_socket(csock)
                csock.connect(addr)
                csock.sendall(message)
                response = csock.recv(99)
                csock.close()
            except Exception as exc:
                print(
                    "Failure in client thread in test_connect_accepted_socket",
                    exc)

        thread = threading.Thread(target=client, daemon=True)
        thread.start()

        conn, _ = lsock.accept()
        proto = MyProto(loop=loop)
        proto.loop = loop

        extras = {}
        if server_ssl:
            extras = dict(ssl_handshake_timeout=SSL_HANDSHAKE_TIMEOUT)

        f = loop.create_task(
            loop.connect_accepted_socket(
                (lambda: proto), conn, ssl=server_ssl,
                **extras))
        # MyProto.connection_lost stops the loop.
        loop.run_forever()
        conn.close()
        lsock.close()

        thread.join(1)
        self.assertFalse(thread.is_alive())
        self.assertEqual(proto.state, 'CLOSED')
        self.assertEqual(proto.nbytes, len(message))
        self.assertEqual(response, expected_response)
        tr, _ = f.result()

        if server_ssl:
            self.assertIn('SSL', tr.__class__.__name__)

        tr.close()
        # let it close
        self.loop.run_until_complete(asyncio.sleep(0.1))

    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'no Unix sockets')
    def test_create_connection_wrong_sock(self):
        # A datagram socket must be rejected by create_connection.
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        with sock:
            coro = self.loop.create_connection(MyBaseProto, sock=sock)
            with self.assertRaisesRegex(ValueError,
                                        'A Stream Socket was expected'):
                self.loop.run_until_complete(coro)

    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'no Unix sockets')
    def test_create_server_wrong_sock(self):
        # A datagram socket must be rejected by create_server.
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        with sock:
            coro = self.loop.create_server(MyBaseProto, sock=sock)
            with self.assertRaisesRegex(ValueError,
                                        'A Stream Socket was expected'):
                self.loop.run_until_complete(coro)

    @unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
                         'no socket.SOCK_NONBLOCK (linux only)')
    def test_create_server_stream_bittype(self):
        # SOCK_STREAM combined with extra bit flags must still be accepted.
        sock = socket.socket(
            socket.AF_INET, socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
        with sock:
            coro = self.loop.create_server(lambda: None, sock=sock)
            srv = self.loop.run_until_complete(coro)
            srv.close()
            self.loop.run_until_complete(srv.wait_closed())

    def test_flowcontrol_mixin_set_write_limits(self):
        async def client(addr):
            paused = False

            class Protocol(asyncio.Protocol):
                def pause_writing(self):
                    nonlocal paused
                    paused = True

                def resume_writing(self):
                    nonlocal paused
                    paused = False

            t, p = await self.loop.create_connection(Protocol, *addr)

            t.write(b'q' * 512)
            self.assertEqual(t.get_write_buffer_size(), 512)

            t.set_write_buffer_limits(low=16385)
            self.assertFalse(paused)
            self.assertEqual(t.get_write_buffer_limits(), (16385, 65540))

            with self.assertRaisesRegex(ValueError, 'high.*must be >= low'):
                t.set_write_buffer_limits(high=0, low=1)

            t.set_write_buffer_limits(high=1024, low=128)
            self.assertFalse(paused)
            self.assertEqual(t.get_write_buffer_limits(), (128, 1024))

            # 512 buffered bytes now exceed high=256 -> must pause.
            t.set_write_buffer_limits(high=256, low=128)
            self.assertTrue(paused)
            self.assertEqual(t.get_write_buffer_limits(), (128, 256))

            t.close()

        with self.tcp_server(lambda sock: sock.recv_all(1),
                             max_clients=1,
                             backlog=1) as srv:
            self.loop.run_until_complete(client(srv.addr))
class Test_AIO_TCP(_TestTCP, tb.AIOTestCase):
    """Run the shared _TestTCP tests against the stdlib asyncio loop."""
    pass
class _TestSSL(tb.SSLTestCase):
# Paths to the test certificate/key shipped next to this test file.
ONLYCERT = tb._cert_fullname(__file__, 'ssl_cert.pem')
ONLYKEY = tb._cert_fullname(__file__, 'ssl_key.pem')

# Size in bytes of the payload used by the start_tls tests.
PAYLOAD_SIZE = 1024 * 100
# Socket timeout (seconds) for the threaded test servers.
TIMEOUT = 60
def test_create_server_ssl_1(self):
    """SSL server: TOTAL_CNT threaded clients each do a two-phase echo."""
    CNT = 0           # number of clients that were successful
    TOTAL_CNT = 25    # total number of clients that test will create
    TIMEOUT = 10.0    # timeout for this test

    A_DATA = b'A' * 1024 * 1024
    B_DATA = b'B' * 1024 * 1024

    sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
    client_sslctx = self._create_client_ssl_context()

    clients = []

    async def handle_client(reader, writer):
        nonlocal CNT

        data = await reader.readexactly(len(A_DATA))
        self.assertEqual(data, A_DATA)
        writer.write(b'OK')

        data = await reader.readexactly(len(B_DATA))
        self.assertEqual(data, B_DATA)
        # Exercise writelines() with mixed bytes-like types.
        writer.writelines([b'SP', bytearray(b'A'), memoryview(b'M')])

        await writer.drain()
        writer.close()

        CNT += 1

    async def test_client(addr):
        fut = asyncio.Future()

        def prog(sock):
            # Runs in a helper thread; report the outcome back to the
            # event loop through `fut`.
            try:
                sock.starttls(client_sslctx)
                sock.connect(addr)
                sock.send(A_DATA)

                data = sock.recv_all(2)
                self.assertEqual(data, b'OK')

                sock.send(B_DATA)
                data = sock.recv_all(4)
                self.assertEqual(data, b'SPAM')

                sock.close()
            except Exception as ex:
                self.loop.call_soon_threadsafe(fut.set_exception, ex)
            else:
                self.loop.call_soon_threadsafe(fut.set_result, None)

        client = self.tcp_client(prog)
        client.start()
        clients.append(client)

        await fut

    async def start_server():
        extras = dict(ssl_handshake_timeout=SSL_HANDSHAKE_TIMEOUT)

        srv = await asyncio.start_server(
            handle_client,
            '127.0.0.1', 0,
            family=socket.AF_INET,
            ssl=sslctx,
            **extras)

        try:
            srv_socks = srv.sockets
            self.assertTrue(srv_socks)

            addr = srv_socks[0].getsockname()

            tasks = []
            for _ in range(TOTAL_CNT):
                tasks.append(test_client(addr))

            await asyncio.wait_for(asyncio.gather(*tasks), TIMEOUT)

        finally:
            self.loop.call_soon(srv.close)
            await srv.wait_closed()

    with self._silence_eof_received_warning():
        self.loop.run_until_complete(start_server())

    self.assertEqual(CNT, TOTAL_CNT)

    for client in clients:
        client.stop()
def test_create_connection_ssl_1(self):
    """SSL client via open_connection(): by address and by pre-connected sock."""
    if self.implementation == 'asyncio':
        # Don't crash on asyncio errors
        self.loop.set_exception_handler(None)

    CNT = 0
    TOTAL_CNT = 25

    A_DATA = b'A' * 1024 * 1024
    B_DATA = b'B' * 1024 * 1024

    sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
    client_sslctx = self._create_client_ssl_context()

    def server(sock):
        sock.starttls(
            sslctx,
            server_side=True)

        data = sock.recv_all(len(A_DATA))
        self.assertEqual(data, A_DATA)
        sock.send(b'OK')

        data = sock.recv_all(len(B_DATA))
        self.assertEqual(data, B_DATA)
        sock.send(b'SPAM')

        sock.close()

    async def client(addr):
        extras = dict(ssl_handshake_timeout=SSL_HANDSHAKE_TIMEOUT)

        reader, writer = await asyncio.open_connection(
            *addr,
            ssl=client_sslctx,
            server_hostname='',
            **extras)

        writer.write(A_DATA)
        self.assertEqual(await reader.readexactly(2), b'OK')

        writer.write(B_DATA)
        self.assertEqual(await reader.readexactly(4), b'SPAM')

        nonlocal CNT
        CNT += 1

        writer.close()
        await self.wait_closed(writer)

    async def client_sock(addr):
        # Same exchange, but hand an already-connected socket to
        # open_connection via sock=.
        sock = socket.socket()
        sock.connect(addr)
        reader, writer = await asyncio.open_connection(
            sock=sock,
            ssl=client_sslctx,
            server_hostname='')

        writer.write(A_DATA)
        self.assertEqual(await reader.readexactly(2), b'OK')

        writer.write(B_DATA)
        self.assertEqual(await reader.readexactly(4), b'SPAM')

        nonlocal CNT
        CNT += 1

        writer.close()
        await self.wait_closed(writer)
        sock.close()

    def run(coro):
        nonlocal CNT
        CNT = 0

        with self.tcp_server(server,
                             max_clients=TOTAL_CNT,
                             backlog=TOTAL_CNT) as srv:
            tasks = []
            for _ in range(TOTAL_CNT):
                tasks.append(coro(srv.addr))

            self.loop.run_until_complete(asyncio.gather(*tasks))
        self.assertEqual(CNT, TOTAL_CNT)

    with self._silence_eof_received_warning():
        run(client)

    with self._silence_eof_received_warning():
        run(client_sock)
def test_create_connection_ssl_slow_handshake(self):
    """A server that never handshakes must trip the client's handshake timeout."""
    if self.implementation == 'asyncio':
        raise unittest.SkipTest()

    client_sslctx = self._create_client_ssl_context()

    # silence error logger
    self.loop.set_exception_handler(lambda *args: None)

    def server(sock):
        # Never answers the TLS handshake; just swallow bytes.
        try:
            sock.recv_all(1024 * 1024)
        except ConnectionAbortedError:
            pass
        finally:
            sock.close()

    async def client(addr):
        reader, writer = await asyncio.open_connection(
            *addr,
            ssl=client_sslctx,
            server_hostname='',
            ssl_handshake_timeout=1.0)
        writer.close()
        await self.wait_closed(writer)

    with self.tcp_server(server,
                         max_clients=1,
                         backlog=1) as srv:
        with self.assertRaisesRegex(
                ConnectionAbortedError,
                r'SSL handshake.*is taking longer'):

            self.loop.run_until_complete(client(srv.addr))
def test_create_connection_ssl_failed_certificate(self):
    """With verification enabled, a self-signed cert must fail verification."""
    if self.implementation == 'asyncio':
        raise unittest.SkipTest()

    # silence error logger
    self.loop.set_exception_handler(lambda *args: None)

    sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
    # disable_verify=False: the client WILL verify the (self-signed) cert.
    client_sslctx = self._create_client_ssl_context(disable_verify=False)

    def server(sock):
        try:
            sock.starttls(
                sslctx,
                server_side=True)
            # NOTE(review): connect() with no address on the harness
            # socket looks unusual -- presumably a tb-wrapper call that
            # is expected to fail once the client rejects the cert;
            # confirm against the tb socket API.
            sock.connect()
        except (ssl.SSLError, OSError):
            pass
        finally:
            sock.close()

    async def client(addr):
        reader, writer = await asyncio.open_connection(
            *addr,
            ssl=client_sslctx,
            server_hostname='',
            ssl_handshake_timeout=1.0)
        writer.close()
        await self.wait_closed(writer)

    with self.tcp_server(server,
                         max_clients=1,
                         backlog=1) as srv:
        with self.assertRaises(ssl.SSLCertVerificationError):
            self.loop.run_until_complete(client(srv.addr))
def test_start_tls_wrong_args(self):
    """start_tls() must validate its sslcontext and transport arguments."""
    if self.implementation == 'asyncio':
        raise unittest.SkipTest()

    async def check_args():
        # Not an SSLContext at all:
        with self.assertRaisesRegex(TypeError, 'SSLContext, got'):
            await self.loop.start_tls(None, None, None)

        # Valid context, but an unsupported transport (None):
        server_ctx = self._create_server_ssl_context(
            self.ONLYCERT, self.ONLYKEY)
        with self.assertRaisesRegex(TypeError, 'is not supported'):
            await self.loop.start_tls(None, None, server_ctx)

    self.loop.run_until_complete(check_args())
def test_ssl_handshake_timeout(self):
    if self.implementation == 'asyncio':
        raise unittest.SkipTest()

    # bpo-29970: Check that a connection is aborted if handshake is not
    # completed in timeout period, instead of remaining open indefinitely
    client_sslctx = self._create_client_ssl_context()

    # silence error logger
    messages = []
    self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))

    server_side_aborted = False

    def server(sock):
        nonlocal server_side_aborted
        try:
            # Never completes the handshake; just absorb bytes until
            # the client aborts.
            sock.recv_all(1024 * 1024)
        except ConnectionAbortedError:
            server_side_aborted = True
        finally:
            sock.close()

    async def client(addr):
        # Outer wait_for (0.5s) fires before the SSL handshake finishes.
        await asyncio.wait_for(
            self.loop.create_connection(
                asyncio.Protocol,
                *addr,
                ssl=client_sslctx,
                server_hostname='',
                ssl_handshake_timeout=SSL_HANDSHAKE_TIMEOUT
            ),
            0.5
        )

    with self.tcp_server(server,
                         max_clients=1,
                         backlog=1) as srv:
        with self.assertRaises(asyncio.TimeoutError):
            self.loop.run_until_complete(client(srv.addr))

    self.assertTrue(server_side_aborted)

    # Python issue #23197: cancelling a handshake must not raise an
    # exception or log an error, even if the handshake failed
    self.assertEqual(messages, [])
def test_ssl_handshake_connection_lost(self):
    # #246: make sure that no connection_lost() is called before
    # connection_made() is called first

    client_sslctx = self._create_client_ssl_context()

    # silence error logger
    self.loop.set_exception_handler(lambda loop, ctx: None)

    connection_made_called = False
    connection_lost_called = False

    def server(sock):
        sock.recv(1024)
        # break the connection during handshake
        sock.close()

    class ClientProto(asyncio.Protocol):
        def connection_made(self, transport):
            nonlocal connection_made_called
            connection_made_called = True

        def connection_lost(self, exc):
            nonlocal connection_lost_called
            connection_lost_called = True

    async def client(addr):
        # Fix: removed a stray trailing comma that wrapped this awaited
        # call in a discarded 1-tuple.
        await self.loop.create_connection(
            ClientProto,
            *addr,
            ssl=client_sslctx,
            server_hostname='')

    with self.tcp_server(server,
                         max_clients=1,
                         backlog=1) as srv:
        with self.assertRaises(ConnectionResetError):
            self.loop.run_until_complete(client(srv.addr))

    # Since the handshake never completed, neither protocol callback
    # may have fired.
    if connection_lost_called:
        if connection_made_called:
            self.fail("unexpected call to connection_lost()")
        else:
            # Fix: the implicit string concatenation was missing a
            # space ("without" + "calling" -> "withoutcalling").
            self.fail("unexpected call to connection_lost() without "
                      "calling connection_made()")
    elif connection_made_called:
        self.fail("unexpected call to connection_made()")
def test_ssl_connect_accepted_socket(self):
    """Run the connect_accepted_socket test with SSL on both sides."""
    # PROTOCOL_TLS replaced PROTOCOL_SSLv23 in newer ssl modules.
    if hasattr(ssl, 'PROTOCOL_TLS'):
        proto = ssl.PROTOCOL_TLS
    else:
        proto = ssl.PROTOCOL_SSLv23
    server_context = ssl.SSLContext(proto)
    server_context.load_cert_chain(self.ONLYCERT, self.ONLYKEY)
    if hasattr(server_context, 'check_hostname'):
        server_context.check_hostname = False
    server_context.verify_mode = ssl.CERT_NONE

    client_context = ssl.SSLContext(proto)
    # NOTE(review): this hasattr probes server_context but configures
    # client_context -- harmless since both are the same class, but
    # presumably a copy-paste slip; confirm.
    if hasattr(server_context, 'check_hostname'):
        client_context.check_hostname = False
    client_context.verify_mode = ssl.CERT_NONE

    Test_UV_TCP.test_connect_accepted_socket(
        self, server_context, client_context)
def test_start_tls_client_corrupted_ssl(self):
    """Injecting plaintext into an SSL stream must raise SSLError on the client."""
    if self.implementation == 'asyncio':
        raise unittest.SkipTest()

    self.loop.set_exception_handler(lambda loop, ctx: None)

    sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
    client_sslctx = self._create_client_ssl_context()

    def server(sock):
        # Keep a duplicate of the raw (pre-TLS) socket so we can write
        # unencrypted garbage into the encrypted stream.
        orig_sock = sock.dup()
        try:
            sock.starttls(
                sslctx,
                server_side=True)
            sock.sendall(b'A\n')
            sock.recv_all(1)
            orig_sock.send(b'please corrupt the SSL connection')
        except ssl.SSLError:
            pass
        finally:
            sock.close()
            orig_sock.close()

    async def client(addr):
        reader, writer = await asyncio.open_connection(
            *addr,
            ssl=client_sslctx,
            server_hostname='')

        self.assertEqual(await reader.readline(), b'A\n')
        writer.write(b'B')
        with self.assertRaises(ssl.SSLError):
            await reader.readline()

        writer.close()
        try:
            await self.wait_closed(writer)
        except ssl.SSLError:
            pass
        return 'OK'

    with self.tcp_server(server,
                         max_clients=1,
                         backlog=1) as srv:
        res = self.loop.run_until_complete(client(srv.addr))

    self.assertEqual(res, 'OK')
def test_start_tls_client_reg_proto_1(self):
    """Upgrade a plaintext connection with loop.start_tls() mid-stream."""
    if self.implementation == 'asyncio':
        raise unittest.SkipTest()

    HELLO_MSG = b'1' * self.PAYLOAD_SIZE

    server_context = self._create_server_ssl_context(
        self.ONLYCERT, self.ONLYKEY)
    client_context = self._create_client_ssl_context()

    def serve(sock):
        sock.settimeout(self.TIMEOUT)

        # First payload arrives in plaintext...
        data = sock.recv_all(len(HELLO_MSG))
        self.assertEqual(len(data), len(HELLO_MSG))

        # ...then the connection is upgraded to TLS.
        sock.starttls(server_context, server_side=True)

        sock.sendall(b'O')
        data = sock.recv_all(len(HELLO_MSG))
        self.assertEqual(len(data), len(HELLO_MSG))

        sock.unwrap()
        sock.close()

    class ClientProto(asyncio.Protocol):
        def __init__(self, on_data, on_eof):
            self.on_data = on_data
            self.on_eof = on_eof
            self.con_made_cnt = 0

        # `proto` here is the protocol instance (self of the method);
        # `self` in the body refers to the enclosing test case.
        def connection_made(proto, tr):
            proto.con_made_cnt += 1
            # Ensure connection_made gets called only once.
            self.assertEqual(proto.con_made_cnt, 1)

        def data_received(self, data):
            self.on_data.set_result(data)

        def eof_received(self):
            self.on_eof.set_result(True)

    async def client(addr):
        await asyncio.sleep(0.5)

        on_data = self.loop.create_future()
        on_eof = self.loop.create_future()

        tr, proto = await self.loop.create_connection(
            lambda: ClientProto(on_data, on_eof), *addr)

        tr.write(HELLO_MSG)
        new_tr = await self.loop.start_tls(tr, proto, client_context)

        self.assertEqual(await on_data, b'O')
        new_tr.write(HELLO_MSG)
        await on_eof

        new_tr.close()

    with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
        self.loop.run_until_complete(
            asyncio.wait_for(client(srv.addr), timeout=10))
    def test_create_connection_memory_leak(self):
        """After the SSL connection closes, nothing may keep the client
        SSLContext alive (verified through a weakref)."""
        if self.implementation == 'asyncio':
            raise unittest.SkipTest()
        HELLO_MSG = b'1' * self.PAYLOAD_SIZE
        server_context = self._create_server_ssl_context(
            self.ONLYCERT, self.ONLYKEY)
        client_context = self._create_client_ssl_context()
        def serve(sock):
            sock.settimeout(self.TIMEOUT)
            sock.starttls(server_context, server_side=True)
            sock.sendall(b'O')
            data = sock.recv_all(len(HELLO_MSG))
            self.assertEqual(len(data), len(HELLO_MSG))
            sock.unwrap()
            sock.close()
        class ClientProto(asyncio.Protocol):
            def __init__(self, on_data, on_eof):
                self.on_data = on_data
                self.on_eof = on_eof
                self.con_made_cnt = 0
            def connection_made(proto, tr):
                # XXX: We assume user stores the transport in protocol
                proto.tr = tr
                proto.con_made_cnt += 1
                # Ensure connection_made gets called only once.
                self.assertEqual(proto.con_made_cnt, 1)
            def data_received(self, data):
                self.on_data.set_result(data)
            def eof_received(self):
                self.on_eof.set_result(True)
        async def client(addr):
            await asyncio.sleep(0.5)
            on_data = self.loop.create_future()
            on_eof = self.loop.create_future()
            tr, proto = await self.loop.create_connection(
                lambda: ClientProto(on_data, on_eof), *addr,
                ssl=client_context)
            self.assertEqual(await on_data, b'O')
            tr.write(HELLO_MSG)
            await on_eof
            tr.close()
        with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
            self.loop.run_until_complete(
                asyncio.wait_for(client(srv.addr), timeout=10))
        # No garbage is left for SSL client from loop.create_connection, even
        # if user stores the SSLTransport in corresponding protocol instance
        client_context = weakref.ref(client_context)
        self.assertIsNone(client_context())
    def test_start_tls_client_buf_proto_1(self):
        """start_tls() with a buffered protocol first, then swap to a
        streaming protocol on the upgraded TLS transport."""
        if self.implementation == 'asyncio':
            raise unittest.SkipTest()
        HELLO_MSG = b'1' * self.PAYLOAD_SIZE
        server_context = self._create_server_ssl_context(
            self.ONLYCERT, self.ONLYKEY)
        client_context = self._create_client_ssl_context()
        client_con_made_calls = 0
        def serve(sock):
            sock.settimeout(self.TIMEOUT)
            data = sock.recv_all(len(HELLO_MSG))
            self.assertEqual(len(data), len(HELLO_MSG))
            sock.starttls(server_context, server_side=True)
            sock.sendall(b'O')
            data = sock.recv_all(len(HELLO_MSG))
            self.assertEqual(len(data), len(HELLO_MSG))
            sock.sendall(b'2')
            data = sock.recv_all(len(HELLO_MSG))
            self.assertEqual(len(data), len(HELLO_MSG))
            sock.unwrap()
            sock.close()
        # NOTE(review): this implements the get_buffer/buffer_updated API
        # while subclassing asyncio.BaseProtocol (not BufferedProtocol);
        # presumably the loop detects the buffered API structurally --
        # confirm against the event-loop implementation.
        class ClientProtoFirst(asyncio.BaseProtocol):
            def __init__(self, on_data):
                self.on_data = on_data
                self.buf = bytearray(1)
            def connection_made(self, tr):
                nonlocal client_con_made_calls
                client_con_made_calls += 1
            def get_buffer(self, sizehint):
                # One-byte buffer: forces byte-at-a-time delivery.
                return self.buf
            def buffer_updated(self, nsize):
                assert nsize == 1
                self.on_data.set_result(bytes(self.buf[:nsize]))
            def eof_received(self):
                pass
        class ClientProtoSecond(asyncio.Protocol):
            def __init__(self, on_data, on_eof):
                self.on_data = on_data
                self.on_eof = on_eof
                self.con_made_cnt = 0
            def connection_made(self, tr):
                nonlocal client_con_made_calls
                client_con_made_calls += 1
            def data_received(self, data):
                self.on_data.set_result(data)
            def eof_received(self):
                self.on_eof.set_result(True)
        async def client(addr):
            await asyncio.sleep(0.5)
            on_data1 = self.loop.create_future()
            on_data2 = self.loop.create_future()
            on_eof = self.loop.create_future()
            tr, proto = await self.loop.create_connection(
                lambda: ClientProtoFirst(on_data1), *addr)
            tr.write(HELLO_MSG)
            new_tr = await self.loop.start_tls(tr, proto, client_context)
            self.assertEqual(await on_data1, b'O')
            new_tr.write(HELLO_MSG)
            # Swap protocols on the live TLS transport.
            new_tr.set_protocol(ClientProtoSecond(on_data2, on_eof))
            self.assertEqual(await on_data2, b'2')
            new_tr.write(HELLO_MSG)
            await on_eof
            new_tr.close()
            # connection_made() should be called only once -- when
            # we establish connection for the first time. Start TLS
            # doesn't call connection_made() on application protocols.
            self.assertEqual(client_con_made_calls, 1)
        with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
            self.loop.run_until_complete(
                asyncio.wait_for(client(srv.addr),
                                 timeout=self.TIMEOUT))
    def test_start_tls_slow_client_cancel(self):
        """Cancelling start_tls() via wait_for while the server never
        answers the handshake must raise TimeoutError cleanly."""
        if self.implementation == 'asyncio':
            raise unittest.SkipTest()
        HELLO_MSG = b'1' * self.PAYLOAD_SIZE
        client_context = self._create_client_ssl_context()
        server_waits_on_handshake = self.loop.create_future()
        def serve(sock):
            sock.settimeout(self.TIMEOUT)
            data = sock.recv_all(len(HELLO_MSG))
            self.assertEqual(len(data), len(HELLO_MSG))
            try:
                # Signal the client, then stall: the handshake is never
                # answered, so the client's start_tls() cannot complete.
                self.loop.call_soon_threadsafe(
                    server_waits_on_handshake.set_result, None)
                data = sock.recv_all(1024 * 1024)
            except ConnectionAbortedError:
                pass
            finally:
                sock.close()
        class ClientProto(asyncio.Protocol):
            def __init__(self, on_data, on_eof):
                self.on_data = on_data
                self.on_eof = on_eof
                self.con_made_cnt = 0
            def connection_made(proto, tr):
                proto.con_made_cnt += 1
                # Ensure connection_made gets called only once.
                self.assertEqual(proto.con_made_cnt, 1)
            def data_received(self, data):
                self.on_data.set_result(data)
            def eof_received(self):
                self.on_eof.set_result(True)
        async def client(addr):
            await asyncio.sleep(0.5)
            on_data = self.loop.create_future()
            on_eof = self.loop.create_future()
            tr, proto = await self.loop.create_connection(
                lambda: ClientProto(on_data, on_eof), *addr)
            tr.write(HELLO_MSG)
            await server_waits_on_handshake
            with self.assertRaises(asyncio.TimeoutError):
                await asyncio.wait_for(
                    self.loop.start_tls(tr, proto, client_context),
                    0.5)
        with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
            self.loop.run_until_complete(
                asyncio.wait_for(client(srv.addr), timeout=10))
    def test_start_tls_server_1(self):
        """Server-side start_tls(): upgrade an accepted plaintext
        connection to TLS and receive the client's encrypted payload."""
        if self.implementation == 'asyncio':
            raise unittest.SkipTest()
        HELLO_MSG = b'1' * self.PAYLOAD_SIZE
        server_context = self._create_server_ssl_context(
            self.ONLYCERT, self.ONLYKEY)
        client_context = self._create_client_ssl_context()
        def client(sock, addr):
            sock.settimeout(self.TIMEOUT)
            sock.connect(addr)
            # Plaintext phase: wait for the server's greeting first.
            data = sock.recv_all(len(HELLO_MSG))
            self.assertEqual(len(data), len(HELLO_MSG))
            sock.starttls(client_context)
            sock.sendall(HELLO_MSG)
            sock.unwrap()
            sock.close()
        class ServerProto(asyncio.Protocol):
            def __init__(self, on_con, on_eof, on_con_lost):
                self.on_con = on_con
                self.on_eof = on_eof
                self.on_con_lost = on_con_lost
                self.data = b''
            def connection_made(self, tr):
                self.on_con.set_result(tr)
            def data_received(self, data):
                self.data += data
            def eof_received(self):
                self.on_eof.set_result(1)
            def connection_lost(self, exc):
                if exc is None:
                    self.on_con_lost.set_result(None)
                else:
                    self.on_con_lost.set_exception(exc)
        async def main(proto, on_con, on_eof, on_con_lost):
            tr = await on_con
            tr.write(HELLO_MSG)
            # Nothing must have been received before the upgrade.
            self.assertEqual(proto.data, b'')
            new_tr = await self.loop.start_tls(
                tr, proto, server_context,
                server_side=True,
                ssl_handshake_timeout=self.TIMEOUT)
            await on_eof
            await on_con_lost
            # The client payload arrived decrypted through the new transport.
            self.assertEqual(proto.data, HELLO_MSG)
            new_tr.close()
        async def run_main():
            on_con = self.loop.create_future()
            on_eof = self.loop.create_future()
            on_con_lost = self.loop.create_future()
            proto = ServerProto(on_con, on_eof, on_con_lost)
            server = await self.loop.create_server(
                lambda: proto, '127.0.0.1', 0)
            addr = server.sockets[0].getsockname()
            with self.tcp_client(lambda sock: client(sock, addr),
                                 timeout=self.TIMEOUT):
                await asyncio.wait_for(
                    main(proto, on_con, on_eof, on_con_lost),
                    timeout=self.TIMEOUT)
            server.close()
            await server.wait_closed()
        self.loop.run_until_complete(run_main())
    def test_create_server_ssl_over_ssl(self):
        """TLS over TLS: the listening server terminates one SSL layer and
        its protocol immediately start_tls()-es a second, nested layer."""
        if self.implementation == 'asyncio':
            raise unittest.SkipTest('asyncio does not support SSL over SSL')
        CNT = 0           # number of clients that were successful
        TOTAL_CNT = 25    # total number of clients that test will create
        TIMEOUT = 20.0    # timeout for this test
        A_DATA = b'A' * 1024 * 1024
        B_DATA = b'B' * 1024 * 1024
        sslctx_1 = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
        client_sslctx_1 = self._create_client_ssl_context()
        sslctx_2 = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
        client_sslctx_2 = self._create_client_ssl_context()
        clients = []
        async def handle_client(reader, writer):
            # Runs over the *inner* (second) TLS layer.
            nonlocal CNT
            data = await reader.readexactly(len(A_DATA))
            self.assertEqual(data, A_DATA)
            writer.write(b'OK')
            data = await reader.readexactly(len(B_DATA))
            self.assertEqual(data, B_DATA)
            writer.writelines([b'SP', bytearray(b'A'), memoryview(b'M')])
            await writer.drain()
            writer.close()
            CNT += 1
        class ServerProtocol(asyncio.StreamReaderProtocol):
            def connection_made(self, transport):
                super_ = super()
                # Pause until the inner TLS layer is in place, then hand
                # the upgraded transport to the stream protocol.
                transport.pause_reading()
                fut = self._loop.create_task(self._loop.start_tls(
                    transport, self, sslctx_2, server_side=True))
                def cb(_):
                    try:
                        tr = fut.result()
                    except Exception as ex:
                        super_.connection_lost(ex)
                    else:
                        super_.connection_made(tr)
                fut.add_done_callback(cb)
        def server_protocol_factory():
            reader = asyncio.StreamReader()
            protocol = ServerProtocol(reader, handle_client)
            return protocol
        async def test_client(addr):
            fut = asyncio.Future()
            def prog(sock):
                try:
                    sock.connect(addr)
                    sock.starttls(client_sslctx_1)
                    # because wrap_socket() doesn't work correctly on
                    # SSLSocket, we have to do the 2nd level SSL manually
                    incoming = ssl.MemoryBIO()
                    outgoing = ssl.MemoryBIO()
                    sslobj = client_sslctx_2.wrap_bio(incoming, outgoing)
                    def do(func, *args):
                        # Drive the BIO pair until *func* completes: flush
                        # pending output, feed more input on WantRead.
                        while True:
                            try:
                                rv = func(*args)
                                break
                            except ssl.SSLWantReadError:
                                if outgoing.pending:
                                    sock.send(outgoing.read())
                                incoming.write(sock.recv(65536))
                        if outgoing.pending:
                            sock.send(outgoing.read())
                        return rv
                    do(sslobj.do_handshake)
                    do(sslobj.write, A_DATA)
                    data = do(sslobj.read, 2)
                    self.assertEqual(data, b'OK')
                    do(sslobj.write, B_DATA)
                    data = b''
                    while True:
                        chunk = do(sslobj.read, 4)
                        if not chunk:
                            break
                        data += chunk
                    self.assertEqual(data, b'SPAM')
                    do(sslobj.unwrap)
                    sock.close()
                except Exception as ex:
                    self.loop.call_soon_threadsafe(fut.set_exception, ex)
                    sock.close()
                else:
                    self.loop.call_soon_threadsafe(fut.set_result, None)
            client = self.tcp_client(prog)
            client.start()
            clients.append(client)
            await fut
        async def start_server():
            extras = dict(ssl_handshake_timeout=SSL_HANDSHAKE_TIMEOUT)
            srv = await self.loop.create_server(
                server_protocol_factory,
                '127.0.0.1', 0,
                family=socket.AF_INET,
                ssl=sslctx_1,
                **extras)
            try:
                srv_socks = srv.sockets
                self.assertTrue(srv_socks)
                addr = srv_socks[0].getsockname()
                tasks = []
                for _ in range(TOTAL_CNT):
                    tasks.append(test_client(addr))
                await asyncio.wait_for(asyncio.gather(*tasks), TIMEOUT)
            finally:
                self.loop.call_soon(srv.close)
                await srv.wait_closed()
        with self._silence_eof_received_warning():
            self.loop.run_until_complete(start_server())
        self.assertEqual(CNT, TOTAL_CNT)
        for client in clients:
            client.stop()
    def test_renegotiation(self):
        """TLS renegotiation triggered by a pyOpenSSL server (TLS <= 1.2)
        must be handled transparently by the client transport."""
        if self.implementation == 'asyncio':
            raise unittest.SkipTest('asyncio does not support renegotiation')
        CNT = 0
        TOTAL_CNT = 25
        A_DATA = b'A' * 1024 * 1024
        B_DATA = b'B' * 1024 * 1024
        # pyOpenSSL server context: the stdlib ssl module cannot initiate a
        # renegotiation itself.
        sslctx = openssl_ssl.Context(openssl_ssl.TLSv1_2_METHOD)
        if hasattr(openssl_ssl, 'OP_NO_SSLV2'):
            sslctx.set_options(openssl_ssl.OP_NO_SSLV2)
        sslctx.use_privatekey_file(self.ONLYKEY)
        sslctx.use_certificate_chain_file(self.ONLYCERT)
        client_sslctx = self._create_client_ssl_context()
        if hasattr(ssl, 'OP_NO_TLSv1_3'):
            # Renegotiation does not exist in TLS 1.3.
            client_sslctx.options |= ssl.OP_NO_TLSv1_3
        def server(sock):
            conn = openssl_ssl.Connection(sslctx, sock)
            conn.set_accept_state()
            data = b''
            while len(data) < len(A_DATA):
                try:
                    chunk = conn.recv(len(A_DATA) - len(data))
                    if not chunk:
                        break
                    data += chunk
                except openssl_ssl.WantReadError:
                    pass
            self.assertEqual(data, A_DATA)
            # Request a renegotiation and report whether it is now pending.
            conn.renegotiate()
            if conn.renegotiate_pending():
                conn.send(b'OK')
            else:
                conn.send(b'ER')
            data = b''
            while len(data) < len(B_DATA):
                try:
                    chunk = conn.recv(len(B_DATA) - len(data))
                    if not chunk:
                        break
                    data += chunk
                except openssl_ssl.WantReadError:
                    pass
            self.assertEqual(data, B_DATA)
            # The renegotiation must have completed by now.
            if conn.renegotiate_pending():
                conn.send(b'ERRO')
            else:
                conn.send(b'SPAM')
            conn.shutdown()
        async def client(addr):
            extras = dict(ssl_handshake_timeout=SSL_HANDSHAKE_TIMEOUT)
            reader, writer = await asyncio.open_connection(
                *addr,
                ssl=client_sslctx,
                server_hostname='',
                **extras)
            writer.write(A_DATA)
            self.assertEqual(await reader.readexactly(2), b'OK')
            writer.write(B_DATA)
            self.assertEqual(await reader.readexactly(4), b'SPAM')
            nonlocal CNT
            CNT += 1
            writer.close()
            await self.wait_closed(writer)
        async def client_sock(addr):
            # Same flow, but over a pre-connected plain socket.
            sock = socket.socket()
            sock.connect(addr)
            reader, writer = await asyncio.open_connection(
                sock=sock,
                ssl=client_sslctx,
                server_hostname='')
            writer.write(A_DATA)
            self.assertEqual(await reader.readexactly(2), b'OK')
            writer.write(B_DATA)
            self.assertEqual(await reader.readexactly(4), b'SPAM')
            nonlocal CNT
            CNT += 1
            writer.close()
            await self.wait_closed(writer)
            sock.close()
        def run(coro):
            nonlocal CNT
            CNT = 0
            with self.tcp_server(server,
                                 max_clients=TOTAL_CNT,
                                 backlog=TOTAL_CNT) as srv:
                tasks = []
                for _ in range(TOTAL_CNT):
                    tasks.append(coro(srv.addr))
                self.loop.run_until_complete(
                    asyncio.gather(*tasks))
            self.assertEqual(CNT, TOTAL_CNT)
        with self._silence_eof_received_warning():
            run(client)
        with self._silence_eof_received_warning():
            run(client_sock)
    def test_shutdown_timeout(self):
        """A client that never answers close_notify must make the server's
        read fail with 'SSL shutdown timed out' after ssl_shutdown_timeout."""
        if self.implementation == 'asyncio':
            raise unittest.SkipTest()
        CNT = 0           # number of clients that were successful
        TOTAL_CNT = 25    # total number of clients that test will create
        TIMEOUT = 10.0    # timeout for this test
        A_DATA = b'A' * 1024 * 1024
        sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
        client_sslctx = self._create_client_ssl_context()
        clients = []
        async def handle_client(reader, writer):
            nonlocal CNT
            data = await reader.readexactly(len(A_DATA))
            self.assertEqual(data, A_DATA)
            writer.write(b'OK')
            await writer.drain()
            writer.close()
            with self.assertRaisesRegex(asyncio.TimeoutError,
                                        'SSL shutdown timed out'):
                await reader.read()
            CNT += 1
        async def test_client(addr):
            fut = asyncio.Future()
            def prog(sock):
                try:
                    sock.starttls(client_sslctx)
                    sock.connect(addr)
                    sock.send(A_DATA)
                    data = sock.recv_all(2)
                    self.assertEqual(data, b'OK')
                    data = sock.recv(1024)
                    self.assertEqual(data, b'')
                    # Keep the raw fd open without answering close_notify,
                    # forcing the server's TLS shutdown to time out.
                    fd = sock.detach()
                    try:
                        select.select([fd], [], [], 3)
                    finally:
                        os.close(fd)
                except Exception as ex:
                    self.loop.call_soon_threadsafe(fut.set_exception, ex)
                else:
                    self.loop.call_soon_threadsafe(fut.set_result, None)
            client = self.tcp_client(prog)
            client.start()
            clients.append(client)
            await fut
        async def start_server():
            extras = {'ssl_handshake_timeout': SSL_HANDSHAKE_TIMEOUT}
            if self.implementation != 'asyncio':  # or self.PY38
                extras['ssl_shutdown_timeout'] = 0.5
            srv = await asyncio.start_server(
                handle_client,
                '127.0.0.1', 0,
                family=socket.AF_INET,
                ssl=sslctx,
                **extras)
            try:
                srv_socks = srv.sockets
                self.assertTrue(srv_socks)
                addr = srv_socks[0].getsockname()
                tasks = []
                for _ in range(TOTAL_CNT):
                    tasks.append(test_client(addr))
                await asyncio.wait_for(
                    asyncio.gather(*tasks),
                    TIMEOUT)
            finally:
                self.loop.call_soon(srv.close)
                await srv.wait_closed()
        with self._silence_eof_received_warning():
            self.loop.run_until_complete(start_server())
        self.assertEqual(CNT, TOTAL_CNT)
        for client in clients:
            client.stop()
def test_shutdown_cleanly(self):
if self.implementation == 'asyncio':
raise unittest.SkipTest()
CNT = 0
TOTAL_CNT = 25
A_DATA = b'A' * 1024 * 1024
sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
client_sslctx = self._create_client_ssl_context()
def server(sock):
sock.starttls(
sslctx,
server_side=True)
data = sock.recv_all(len(A_DATA))
self.assertEqual(data, A_DATA)
sock.send(b'OK')
sock.unwrap()
sock.close()
async def client(addr):
extras = dict(ssl_handshake_timeout=SSL_HANDSHAKE_TIMEOUT)
reader, writer = await asyncio.open_connection(
*addr,
ssl=client_sslctx,
server_hostname='',
**extras)
writer.write(A_DATA)
self.assertEqual(await reader.readexactly(2), b'OK')
self.assertEqual(await reader.read(), b'')
nonlocal CNT
CNT += 1
writer.close()
await self.wait_closed(writer)
def run(coro):
nonlocal CNT
CNT = 0
with self.tcp_server(server,
max_clients=TOTAL_CNT,
backlog=TOTAL_CNT) as srv:
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(coro(srv.addr))
self.loop.run_until_complete(
asyncio.gather(*tasks))
self.assertEqual(CNT, TOTAL_CNT)
with self._silence_eof_received_warning():
run(client)
    def test_write_to_closed_transport(self):
        """Writes after the peer closed/aborted the TLS connection must be
        silently dropped (write buffer stays empty), not raise."""
        if self.implementation == 'asyncio':
            raise unittest.SkipTest()
        sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
        client_sslctx = self._create_client_ssl_context()
        future = None
        def server(sock):
            # Variant 1: abort without sending close_notify.
            sock.starttls(sslctx, server_side=True)
            sock.shutdown(socket.SHUT_RDWR)
            sock.close()
        def unwrap_server(sock):
            # Variant 2: proper TLS shutdown via unwrap().
            sock.starttls(sslctx, server_side=True)
            while True:
                try:
                    sock.unwrap()
                    break
                except ssl.SSLError as ex:
                    # Since OpenSSL 1.1.1, it raises "application data after
                    # close notify"
                    # Python < 3.8:
                    if ex.reason == 'KRB5_S_INIT':
                        break
                    # Python >= 3.8:
                    if ex.reason == 'APPLICATION_DATA_AFTER_CLOSE_NOTIFY':
                        break
                    raise ex
                except OSError as ex:
                    # OpenSSL < 1.1.1
                    if ex.errno != 0:
                        raise
            sock.close()
        async def client(addr):
            nonlocal future
            future = self.loop.create_future()
            reader, writer = await asyncio.open_connection(
                *addr,
                ssl=client_sslctx,
                server_hostname='')
            writer.write(b'I AM WRITING NOWHERE1' * 100)
            try:
                data = await reader.read()
                self.assertEqual(data, b'')
            except (ConnectionResetError, BrokenPipeError):
                pass
            # The connection is dead: these writes must be discarded and
            # must never accumulate in the transport's write buffer.
            for i in range(25):
                writer.write(b'I AM WRITING NOWHERE2' * 100)
                self.assertEqual(
                    writer.transport.get_write_buffer_size(), 0)
            await future
            writer.close()
            await self.wait_closed(writer)
        def run(meth):
            # Propagate exceptions from the server thread into the loop.
            def wrapper(sock):
                try:
                    meth(sock)
                except Exception as ex:
                    self.loop.call_soon_threadsafe(future.set_exception, ex)
                else:
                    self.loop.call_soon_threadsafe(future.set_result, None)
            return wrapper
        with self._silence_eof_received_warning():
            with self.tcp_server(run(server)) as srv:
                self.loop.run_until_complete(client(srv.addr))
            with self.tcp_server(run(unwrap_server)) as srv:
                self.loop.run_until_complete(client(srv.addr))
    def test_flush_before_shutdown(self):
        """Data queued while the SSL protocol had writing paused must still
        be flushed to the peer when the transport is closed."""
        if self.implementation == 'asyncio':
            raise unittest.SkipTest()
        CHUNK = 1024 * 128
        SIZE = 32
        sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
        # NOTE(review): sslctx_openssl is configured but never referenced in
        # this test body -- looks like leftover setup; confirm before removal.
        sslctx_openssl = openssl_ssl.Context(openssl_ssl.TLSv1_2_METHOD)
        if hasattr(openssl_ssl, 'OP_NO_SSLV2'):
            sslctx_openssl.set_options(openssl_ssl.OP_NO_SSLV2)
        sslctx_openssl.use_privatekey_file(self.ONLYKEY)
        sslctx_openssl.use_certificate_chain_file(self.ONLYCERT)
        client_sslctx = self._create_client_ssl_context()
        if hasattr(ssl, 'OP_NO_TLSv1_3'):
            client_sslctx.options |= ssl.OP_NO_TLSv1_3
        future = None
        def server(sock):
            sock.starttls(sslctx, server_side=True)
            self.assertEqual(sock.recv_all(4), b'ping')
            sock.send(b'pong')
            time.sleep(0.5)  # hopefully stuck the TCP buffer
            data = sock.recv_all(CHUNK * SIZE)
            self.assertEqual(len(data), CHUNK * SIZE)
            sock.close()
        def run(meth):
            # Propagate exceptions from the server thread into the loop.
            def wrapper(sock):
                try:
                    meth(sock)
                except Exception as ex:
                    self.loop.call_soon_threadsafe(future.set_exception, ex)
                else:
                    self.loop.call_soon_threadsafe(future.set_result, None)
            return wrapper
        async def client(addr):
            nonlocal future
            future = self.loop.create_future()
            reader, writer = await asyncio.open_connection(
                *addr,
                ssl=client_sslctx,
                server_hostname='')
            sslprotocol = writer.get_extra_info('uvloop.sslproto')
            writer.write(b'ping')
            data = await reader.readexactly(4)
            self.assertEqual(data, b'pong')
            # Queue CHUNK*SIZE bytes while writing is paused, then close:
            # everything must still reach the server before TLS shutdown.
            sslprotocol.pause_writing()
            for _ in range(SIZE):
                writer.write(b'x' * CHUNK)
            writer.close()
            sslprotocol.resume_writing()
            await self.wait_closed(writer)
            try:
                data = await reader.read()
                self.assertEqual(data, b'')
            except ConnectionResetError:
                pass
            await future
        with self.tcp_server(run(server)) as srv:
            self.loop.run_until_complete(client(srv.addr))
    def test_remote_shutdown_receives_trailing_data(self):
        """After the peer sends close_notify (or a raw EOF), data still in
        the client's write backlog must be delivered to the server."""
        if self.implementation == 'asyncio':
            raise unittest.SkipTest()
        CHUNK = 1024 * 128
        SIZE = 32
        sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
        client_sslctx = self._create_client_ssl_context()
        future = None
        def server(sock):
            # Manual TLS over MemoryBIOs so the half-close sequencing can
            # be controlled precisely.
            incoming = ssl.MemoryBIO()
            outgoing = ssl.MemoryBIO()
            sslobj = sslctx.wrap_bio(incoming, outgoing, server_side=True)
            while True:
                try:
                    sslobj.do_handshake()
                except ssl.SSLWantReadError:
                    if outgoing.pending:
                        sock.send(outgoing.read())
                    incoming.write(sock.recv(16384))
                else:
                    if outgoing.pending:
                        sock.send(outgoing.read())
                    break
            while True:
                try:
                    data = sslobj.read(4)
                except ssl.SSLWantReadError:
                    incoming.write(sock.recv(16384))
                else:
                    break
            self.assertEqual(data, b'ping')
            sslobj.write(b'pong')
            sock.send(outgoing.read())
            time.sleep(0.2)  # wait for the peer to fill its backlog
            # send close_notify but don't wait for response
            with self.assertRaises(ssl.SSLWantReadError):
                sslobj.unwrap()
            sock.send(outgoing.read())
            # should receive all data
            data_len = 0
            while True:
                try:
                    chunk = len(sslobj.read(16384))
                    data_len += chunk
                except ssl.SSLWantReadError:
                    incoming.write(sock.recv(16384))
                except ssl.SSLZeroReturnError:
                    break
            self.assertEqual(data_len, CHUNK * SIZE)
            # verify that close_notify is received
            sslobj.unwrap()
            sock.close()
        def eof_server(sock):
            # Variant: raw TCP half-close (SHUT_WR) instead of close_notify.
            sock.starttls(sslctx, server_side=True)
            self.assertEqual(sock.recv_all(4), b'ping')
            sock.send(b'pong')
            time.sleep(0.2)  # wait for the peer to fill its backlog
            # send EOF
            sock.shutdown(socket.SHUT_WR)
            # should receive all data
            data = sock.recv_all(CHUNK * SIZE)
            self.assertEqual(len(data), CHUNK * SIZE)
            sock.close()
        async def client(addr):
            nonlocal future
            future = self.loop.create_future()
            reader, writer = await asyncio.open_connection(
                *addr,
                ssl=client_sslctx,
                server_hostname='')
            writer.write(b'ping')
            data = await reader.readexactly(4)
            self.assertEqual(data, b'pong')
            # fill write backlog in a hacky way - renegotiation won't help
            for _ in range(SIZE):
                writer.transport._test__append_write_backlog(b'x' * CHUNK)
            try:
                data = await reader.read()
                self.assertEqual(data, b'')
            except (BrokenPipeError, ConnectionResetError):
                pass
            await future
            writer.close()
            await self.wait_closed(writer)
        def run(meth):
            # Propagate exceptions from the server thread into the loop.
            def wrapper(sock):
                try:
                    meth(sock)
                except Exception as ex:
                    self.loop.call_soon_threadsafe(future.set_exception, ex)
                else:
                    self.loop.call_soon_threadsafe(future.set_result, None)
            return wrapper
        with self.tcp_server(run(server)) as srv:
            self.loop.run_until_complete(client(srv.addr))
        with self.tcp_server(run(eof_server)) as srv:
            self.loop.run_until_complete(client(srv.addr))
    def test_connect_timeout_warning(self):
        """A create_connection() with ssl=True that times out mid-connect
        must not emit a ResourceWarning (no leaked transport/socket)."""
        s = socket.socket(socket.AF_INET)
        # Bound but never listen()-ed: the connect hangs or is refused.
        s.bind(('127.0.0.1', 0))
        addr = s.getsockname()
        async def test():
            try:
                await asyncio.wait_for(
                    self.loop.create_connection(asyncio.Protocol,
                                                *addr, ssl=True),
                    0.1)
            except (ConnectionRefusedError, asyncio.TimeoutError):
                pass
            else:
                self.fail('TimeoutError is not raised')
        with s:
            try:
                # Expect assertWarns itself to fail: no warning may appear
                # even after several GC passes.
                with self.assertWarns(ResourceWarning) as cm:
                    self.loop.run_until_complete(test())
                    gc.collect()
                    gc.collect()
                    gc.collect()
            except AssertionError as e:
                self.assertEqual(str(e), 'ResourceWarning not triggered')
            else:
                self.fail('Unexpected ResourceWarning: {}'.format(cm.warning))
    def test_handshake_timeout_handler_leak(self):
        """A timed-out TLS handshake must not leave references that keep
        the SSLContext (and thus the SSLProtocol) alive."""
        if self.implementation == 'asyncio':
            # Okay this turns out to be an issue for asyncio.sslproto too
            raise unittest.SkipTest()
        s = socket.socket(socket.AF_INET)
        s.bind(('127.0.0.1', 0))
        # Listening but never accepting: the handshake cannot complete.
        s.listen(1)
        addr = s.getsockname()
        async def test(ctx):
            try:
                await asyncio.wait_for(
                    self.loop.create_connection(asyncio.Protocol, *addr,
                                                ssl=ctx),
                    0.1)
            except (ConnectionRefusedError, asyncio.TimeoutError):
                pass
            else:
                self.fail('TimeoutError is not raised')
        with s:
            ctx = ssl.create_default_context()
            self.loop.run_until_complete(test(ctx))
            ctx = weakref.ref(ctx)
            # SSLProtocol should be DECREF to 0
            self.assertIsNone(ctx())
    def test_shutdown_timeout_handler_leak(self):
        """Closing a TLS connection must not leak the client SSLContext via
        the shutdown-timeout handler."""
        loop = self.loop
        def server(sock):
            sslctx = self._create_server_ssl_context(self.ONLYCERT,
                                                     self.ONLYKEY)
            sock = sslctx.wrap_socket(sock, server_side=True)
            sock.recv(32)
            sock.close()
        class Protocol(asyncio.Protocol):
            def __init__(self):
                self.fut = asyncio.Future(loop=loop)
            def connection_lost(self, exc):
                self.fut.set_result(None)
        async def client(addr, ctx):
            tr, pr = await loop.create_connection(Protocol, *addr, ssl=ctx)
            # Close immediately; wait for connection_lost before checking.
            tr.close()
            await pr.fut
        with self.tcp_server(server) as srv:
            ctx = self._create_client_ssl_context()
            loop.run_until_complete(client(srv.addr, ctx))
            ctx = weakref.ref(ctx)
        if self.implementation == 'asyncio':
            # asyncio has no shutdown timeout, but it ends up with a circular
            # reference loop - not ideal (introduces gc glitches), but at least
            # not leaking
            gc.collect()
            gc.collect()
            gc.collect()
        # SSLProtocol should be DECREF to 0
        self.assertIsNone(ctx())
    def test_shutdown_timeout_handler_not_set(self):
        """Data left inside the SSL object while reading was paused must be
        delivered after resume, even when the peer already sent EOF."""
        loop = self.loop
        eof = asyncio.Event()
        extra = None
        def server(sock):
            sslctx = self._create_server_ssl_context(self.ONLYCERT,
                                                     self.ONLYKEY)
            sock = sslctx.wrap_socket(sock, server_side=True)
            sock.send(b'hello')
            assert sock.recv(1024) == b'world'
            sock.send(b'extra bytes')
            # sending EOF here
            sock.shutdown(socket.SHUT_WR)
            loop.call_soon_threadsafe(eof.set)
            # make sure we have enough time to reproduce the issue
            assert sock.recv(1024) == b''
            sock.close()
        class Protocol(asyncio.Protocol):
            def __init__(self):
                self.fut = asyncio.Future(loop=loop)
                self.transport = None
            def connection_made(self, transport):
                self.transport = transport
            def data_received(self, data):
                if data == b'hello':
                    self.transport.write(b'world')
                    # pause reading would make incoming data stay in the sslobj
                    self.transport.pause_reading()
                else:
                    nonlocal extra
                    extra = data
            def connection_lost(self, exc):
                if exc is None:
                    self.fut.set_result(None)
                else:
                    self.fut.set_exception(exc)
        async def client(addr):
            ctx = self._create_client_ssl_context()
            tr, pr = await loop.create_connection(Protocol, *addr, ssl=ctx)
            await eof.wait()
            # Resume after the peer's EOF: the buffered 'extra bytes' must
            # still be handed to the protocol.
            tr.resume_reading()
            await pr.fut
            tr.close()
            assert extra == b'extra bytes'
        with self.tcp_server(server) as srv:
            loop.run_until_complete(client(srv.addr))
class Test_UV_TCPSSL(_TestSSL, tb.UVTestCase):
    """Run the shared _TestSSL suite on the uvloop event loop."""
    pass
class Test_AIO_TCPSSL(_TestSSL, tb.AIOTestCase):
    """Run the shared _TestSSL suite on the stdlib asyncio event loop."""
    pass
|
opencvhelper.py | #!/usr/bin/env python
"""
Copyright 2018, Zixin Luo, HKUST.
OpenCV helper.
"""
from __future__ import print_function
from threading import Thread
from queue import Queue
import numpy as np
import cv2
class SiftWrapper(object):
""""OpenCV SIFT wrapper."""
def __init__(self, nfeatures=0, n_octave_layers=3,
peak_thld=0.0067, edge_thld=10, sigma=1.6,
n_sample=8192, patch_size=32):
self.sift = None
self.nfeatures = nfeatures
self.n_octave_layers = n_octave_layers
self.peak_thld = peak_thld
self.edge_thld = edge_thld
self.sigma = sigma
self.n_sample = n_sample
self.down_octave = True
self.sift_init_sigma = 0.5
self.sift_descr_scl_fctr = 3.
self.sift_descr_width = 4
self.first_octave = None
self.max_octave = None
self.pyr = None
self.patch_size = patch_size
self.output_gird = None
def create(self):
"""Create OpenCV SIFT detector."""
self.sift = cv2.xfeatures2d.SIFT_create(
self.nfeatures, self.n_octave_layers, self.peak_thld, self.edge_thld, self.sigma)
def detect(self, gray_img):
"""Detect keypoints in the gray-scale image.
Args:
gray_img: The input gray-scale image.
Returns:
npy_kpts: (n_kpts, 6) Keypoints represented as NumPy array.
cv_kpts: A list of keypoints represented as cv2.KeyPoint.
"""
cv_kpts = self.sift.detect(gray_img, None)
all_octaves = [np.int8(i.octave & 0xFF) for i in cv_kpts]
self.first_octave = int(np.min(all_octaves))
self.max_octave = int(np.max(all_octaves))
npy_kpts, cv_kpts = sample_by_octave(cv_kpts, self.n_sample, self.down_octave)
return npy_kpts, cv_kpts
def compute(self, img, cv_kpts):
"""Compute SIFT descriptions on given keypoints.
Args:
img: The input image, can be either color or gray-scale.
cv_kpts: A list of cv2.KeyPoint.
Returns:
sift_desc: (n_kpts, 128) SIFT descriptions.
"""
_, sift_desc = self.sift.compute(img, cv_kpts)
return sift_desc
def build_pyramid(self, gray_img):
"""Build pyramid. It would be more efficient to use the pyramid
constructed in the detection step.
Args:
gray_img: Input gray-scale image.
Returns:
pyr: A list of gaussian blurred images (gaussian scale space).
"""
gray_img = gray_img.astype(np.float32)
n_octaves = self.max_octave - self.first_octave + 1
# create initial image.
if self.first_octave < 0:
sig_diff = np.sqrt(np.maximum(
np.square(self.sigma) - np.square(self.sift_init_sigma) * 4, 0.01))
base = cv2.resize(gray_img, (gray_img.shape[1] * 2, gray_img.shape[0] * 2),
interpolation=cv2.INTER_LINEAR)
base = cv2.GaussianBlur(base, None, sig_diff)
else:
sig_diff = np.sqrt(np.maximum(np.square(self.sigma) -
np.square(self.sift_init_sigma), 0.01))
base = cv2.GaussianBlur(gray_img, None, sig_diff)
# compute gaussian kernels.
sig = np.zeros((self.n_octave_layers + 3,))
self.pyr = [None] * (n_octaves * (self.n_octave_layers + 3))
sig[0] = self.sigma
k = np.power(2, 1. / self.n_octave_layers)
for i in range(1, self.n_octave_layers + 3):
sig_prev = np.power(k, i - 1) * self.sigma
sig_total = sig_prev * k
sig[i] = np.sqrt(sig_total * sig_total - sig_prev * sig_prev)
# construct gaussian scale space.
for o in range(0, n_octaves):
for i in range(0, self.n_octave_layers + 3):
if o == 0 and i == 0:
dst = base
elif i == 0:
src = self.pyr[(o - 1) * (self.n_octave_layers + 3) + self.n_octave_layers]
dst = cv2.resize(
src, (int(src.shape[1] / 2), int(src.shape[0] / 2)), interpolation=cv2.INTER_NEAREST)
else:
src = self.pyr[o * (self.n_octave_layers + 3) + i - 1]
dst = cv2.GaussianBlur(src, None, sig[i])
self.pyr[o * (self.n_octave_layers + 3) + i] = dst
def unpack_octave(self, kpt):
"""Get scale coefficients of a keypoints.
Args:
kpt: A keypoint object represented as cv2.KeyPoint.
Returns:
octave: The octave index.
layer: The level index.
scale: The sampling step.
"""
octave = kpt.octave & 255
layer = (kpt.octave >> 8) & 255
octave = octave if octave < 128 else (-128 | octave)
scale = 1. / (1 << octave) if octave >= 0 else float(1 << -octave)
return octave, layer, scale
def get_interest_region(self, kpt_queue, all_patches, standardize=True):
"""Get the interest region around a keypoint.
Args:
kpt_queue: A queue to produce keypoint.
all_patches: A list of cropped patches.
standardize: (True by default) Whether to standardize patches as network inputs.
Returns:
Nothing.
"""
while True:
idx, cv_kpt = kpt_queue.get()
# preprocess
octave, layer, scale = self.unpack_octave(cv_kpt)
size = cv_kpt.size * scale * 0.5
ptf = (cv_kpt.pt[0] * scale, cv_kpt.pt[1] * scale)
scale_img = self.pyr[(int(octave) - self.first_octave) *
(self.n_octave_layers + 3) + int(layer)]
ori = (360. - cv_kpt.angle) * (np.pi / 180.)
radius = np.round(self.sift_descr_scl_fctr * size * np.sqrt(2)
* (self.sift_descr_width + 1) * 0.5)
radius = np.minimum(radius, np.sqrt(np.sum(np.square(scale_img.shape))))
# construct affine transformation matrix.
affine_mat = np.zeros((3, 2), dtype=np.float32)
m_cos = np.cos(ori) * radius
m_sin = np.sin(ori) * radius
affine_mat[0, 0] = m_cos
affine_mat[1, 0] = m_sin
affine_mat[2, 0] = ptf[0]
affine_mat[0, 1] = -m_sin
affine_mat[1, 1] = m_cos
affine_mat[2, 1] = ptf[1]
# get input grid.
input_grid = np.matmul(self.output_grid, affine_mat)
# sample image pixels.
patch = cv2.remap(scale_img.astype(np.float32), np.reshape(input_grid, (-1, 1, 2)),
None, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
patch = np.reshape(patch, (self.patch_size, self.patch_size))
# standardize patches.
if standardize:
patch = (patch - np.mean(patch)) / (np.std(patch) + 1e-8)
all_patches[idx] = patch
kpt_queue.task_done()
def get_patches(self, cv_kpts):
    """Get all patches around given keypoints.

    Args:
        cv_kpts: A list of keypoints represented as cv2.KeyPoint.
    Returns:
        all_patches: (n_kpts, 32, 32) Cropped patches.
    """
    # generate sampling grids: homogeneous (x, y, 1) coordinates in [-1, 1).
    n_pixel = np.square(self.patch_size)
    self.output_grid = np.zeros((n_pixel, 3), dtype=np.float32)
    for i in range(n_pixel):
        # Column (x) coordinate of this grid pixel.
        self.output_grid[i, 0] = (i % self.patch_size) * 1. / self.patch_size * 2 - 1
        # Row (y) coordinate. BUGFIX: use floor division for the row index;
        # under Python 3 the original true division turned the y-axis into a
        # ramp over all pixels instead of per-row values. '//' is identical
        # to the old '/' under Python 2.
        self.output_grid[i, 1] = (i // self.patch_size) * 1. / self.patch_size * 2 - 1
        self.output_grid[i, 2] = 1
    all_patches = [None] * len(cv_kpts)
    # parallel patch cropping with 4 daemon worker threads.
    kpt_queue = Queue()
    for i in range(4):
        worker_thread = Thread(target=self.get_interest_region, args=(kpt_queue, all_patches))
        worker_thread.daemon = True
        worker_thread.start()
    for idx, val in enumerate(cv_kpts):
        kpt_queue.put((idx, val))
    # Block until every queued keypoint has been processed.
    kpt_queue.join()
    all_patches = np.stack(all_patches)
    return all_patches
class MatcherWrapper(object):
    """OpenCV brute-force matcher wrapper (L2 norm)."""

    def __init__(self):
        self.matcher = cv2.BFMatcher(cv2.NORM_L2)

    def get_matches(self, feat1, feat2, cv_kpts1, cv_kpts2, ratio=None, cross_check=True, info=''):
        """Compute putative and inlier matches.

        Args:
            feat1, feat2: (n_kpts, 128) Local features of the two images.
            cv_kpts1, cv_kpts2: Lists of keypoints represented as cv2.KeyPoint.
            ratio: The threshold for Lowe's ratio test (None disables it).
            cross_check: (True by default) Whether to apply cross check.
            info: Info to print out.
        Returns:
            good_matches: Putative matches.
            mask: The mask to distinguish inliers/outliers on putative matches,
                or None when the fundamental matrix could not be estimated.
        """
        init_matches1 = self.matcher.knnMatch(feat1, feat2, k=2)
        init_matches2 = self.matcher.knnMatch(feat2, feat1, k=2)
        good_matches = []
        for i in range(len(init_matches1)):
            # cross check: the best reverse-direction match must point back.
            if cross_check and init_matches2[init_matches1[i][0].trainIdx][0].trainIdx == i:
                # ratio test: best distance clearly better than second best.
                if ratio is not None and init_matches1[i][0].distance <= ratio * init_matches1[i][1].distance:
                    good_matches.append(init_matches1[i][0])
                elif ratio is None:
                    good_matches.append(init_matches1[i][0])
            elif not cross_check:
                good_matches.append(init_matches1[i][0])
        good_kpts1 = np.array([cv_kpts1[m.queryIdx].pt for m in good_matches])
        good_kpts2 = np.array([cv_kpts2[m.trainIdx].pt for m in good_matches])
        # BUGFIX: RANSAC needs enough correspondences, and findFundamentalMat
        # can return a None mask; np.count_nonzero(None) would raise.
        if len(good_matches) < 8:
            print(info, 'n_putative', len(good_matches), 'n_inlier', 0)
            return good_matches, None
        _, mask = cv2.findFundamentalMat(points1=good_kpts1, points2=good_kpts2,
                                         method=cv2.RANSAC, param1=4.0, param2=0.999)
        n_inlier = 0 if mask is None else np.count_nonzero(mask)
        print(info, 'n_putative', len(good_matches), 'n_inlier', n_inlier)
        return good_matches, mask

    def draw_matches(self, img1, cv_kpts1, img2, cv_kpts2, good_matches, mask,
                     match_color=(0, 255, 0), pt_color=(0, 0, 255)):
        """Draw putative matches, distinguishing inliers via ``mask``."""
        display = cv2.drawMatches(img1, cv_kpts1, img2, cv_kpts2, good_matches,
                                  None,
                                  matchColor=match_color,
                                  singlePointColor=pt_color,
                                  matchesMask=mask.ravel().tolist(), flags=4)
        return display
def sample_by_octave(cv_kpts, n_sample, down_octave=True):
    """Sample keypoints by octave.

    Args:
        cv_kpts: The list of keypoints represented as cv2.KeyPoint.
        n_sample: The sampling number of keypoints. Leave as -1 if no
            sampling is needed.
        down_octave: (True by default) Perform sampling downside of octave.
    Returns:
        npy_kpts: (n_kpts, 5) Keypoints in NumPy format, represented as
            (x, y, size, orientation, octave).
        cv_kpts: A list of sampled cv2.KeyPoint.
    """
    n_kpts = len(cv_kpts)
    npy_kpts = np.zeros((n_kpts, 5))
    for row, kpt in enumerate(cv_kpts):
        npy_kpts[row] = (kpt.pt[0],
                         kpt.pt[1],
                         kpt.size,
                         kpt.angle * np.pi / 180.,
                         np.int8(kpt.octave & 0xFF))
    # Order by keypoint size: descending when sampling down the octaves.
    if down_octave:
        order = np.argsort(-npy_kpts[:, 2])
    else:
        order = np.argsort(npy_kpts[:, 2])
    npy_kpts = npy_kpts[order]
    cv_kpts = [cv_kpts[i] for i in order]
    if n_sample > -1 and n_kpts > n_sample:
        # get the keypoint number in each octave.
        _, unique_counts = np.unique(npy_kpts[:, 4], return_counts=True)
        if down_octave:
            unique_counts = list(reversed(unique_counts))
        # Keep whole octaves until the sample budget is reached.
        n_keep = 0
        for count in unique_counts:
            if n_keep >= n_sample:
                break
            n_keep += count
        print('Sampled', n_keep, 'from', n_kpts)
        npy_kpts = npy_kpts[:n_keep]
        cv_kpts = cv_kpts[:n_keep]
    return npy_kpts, cv_kpts
|
MK14ProgramedBy7Bot.py | # Program to program a Science of Cambridge MK14 with a 7Bot
# An MK14 is a very old micro-computer from what became Sinclair Research
# A 7Bot is a 7 degrees of freedom robot arm which orignated here:
# More information on this project is here: http://robdobson.com/2016/10/mk14-meets-7bot/
from __future__ import print_function
import serial
import numpy as np
import time
import threading
import sys
import tkinter
# Name of the program to "send" to the MK14 (entered physically by robot arm)
programToRun = "Duck Shoot"

# Setup of the 7Bot position and speed in relation to the MK14.
# Seven values per list: one per servo.
normalSpeeds = [80, 80, 80, 150, 100, 100, 100]
homePositionAngles = [10, 150, 75, 83, 121, 89.64, 56]
readyPositionAngles = [7, 115, 65, 83, 121, 89.64, 56]
keyboardClearAngles = [7, 107, 90, 90, 118, 89.64, 56]

# Distance to move between hovering over a key and pressing it
# the three values are azimuth (0 means no change), shoulder position (+ve value) and
# elbow position (-ve value)
keyPunchDownAngleDeltas = [0, 5, -5]

# Enter a sequence here to override sending the MK14 a program and instead just press keys
testKeySequence = []
#testKeySequence = ["0","1","2","3","4","5","6","7","8","9","A","B","C","D","E","F","TRM","MEM","ABT","GO"]

# Positions of keys on the MK14 keypad: seven servo angles per key.
# The +/- terms are hand-tuned calibration tweaks, kept visible on purpose.
keyPositions = {
    "0": [12, 103-2, 102+2, 90, 115.56, 89.64, 56],
    "1": [7.92, 94+2, 103.52-2, 84.06, 116.1, 89.46, 56],
    "2": [12.6, 95, 101.52, 83.52, 116.64, 89.46, 56],
    "3": [18.2, 95, 102, 90, 114.3, 89.46, 56],
    "4": [7.2, 89, 101-0.5, 81.0, 117.9, 89.46, 56],
    "5": [11.7, 87+1.5, 102-1.5, 79.92, 118.8, 89.46, 56],
    "6": [15.5, 86+1, 102-1, 79.92, 120.42, 89.46, 56],
    "7": [9, 84, 101, 90, 114, 90, 56],
    "8": [13, 81, 100, 90, 121.86, 89.46, 56],
    "9": [16.8, 80+.5, 100-.5, 90, 122.22, 89.64, 57],
    "A": [24, 77-1, 92+5.5, 82.98, 124.2, 89.64, 56],
    "B": [27.72, 77-2.5, 95+1.5, 86.94, 127.44, 89.64, 56],
    "C": [30+1.5, 75-4, 95+1, 87.48, 126.18, 89.46, 56],
    "D": [25.56, 86-3.5, 98+2.5, 87.48, 122.04, 89.28, 56],
    "E": [28+1.5, 84-4, 98+2.5, 87.48, 121.5, 89.64, 56],
    "F": [33, 81.0-3.5, 98+1.5, 87.84, 121.14, 89.64, 56],
    "ABT": [27, 88, 103.34, 90, 117.9, 89.64, 56],
    "TRM": [30.5, 86-1, 103, 90, 117.9, 89.64, 56],
    "MEM": [28.62, 94, 105-1, 90.0, 117.72, 89.46, 56],
    "GO": [33, 92, 105.14, 90, 115.74, 89.46, 56],
}

# Programs to be sent to the MK14, as Intel-HEX style records:
# ':' byte-count address record-type data... checksum (see sendProgram).
programs = {
    "Duck Shoot":
    {
        "execAddr": "0F12",
        "hexLines": [
            ":180F1200C40D35C40031C401C8F4C410C8F1C400C8EEC40801C0E71EB2",
            ":180F2A00C8E49404C4619002C400C9808F01C0D89C0EC180E4FF980811",
            ":160F4200C8CEC0CAE480C8C64003FC0194D6B8BF98C8C40790CEDD"
        ]
    },
    "Moon Landing":
    {
        "execAddr": "0F52",
        "hexLines": [
            ":180F14000850009980009998000258003EC8E3C40135C8DFC40B31C877",
            ":180F2C00DBC0D702D40F01C180CF01C4008F04C0C91C1C1C1C010603EA",
            ":180F440094EDC400CF01C0BB35C0B93190CEC40F35C41431C40F36C4EA",
            ":180F5C002032C40CCAE4C10BCDFFBAE49CF8C40C37C4FF33C401CAE473",
            ":180F7400C5069404C5049032C402CAE302C5FFE902C900BAE39CF6C19A",
            ":180F8C00029402C499EDFFC900BAE494E3C50CAAE303C5FFF9FEC900A9",
            ":180FA40008BAE394F50694029004C400C9FFC1FF03EC94C9FDC499ECF9",
            ":180FBC0000C9FCC1003EC1F9940AC49903F9FA03EC009002C1FA3EC173",
            ":180FD400F73EC7FFC5F63EC40ACAE4C7FF940AE4DF9A31BAE49CF492E3",
            ":0A0FEC0049C109980333C90992496D"
        ]
    }
}

# Servo info
NUM_SERVOS = 7
# Set True by the serial-read thread once all servos report convergence.
isAllConverge = False
measuredForces = [0] * NUM_SERVOS
measuredRotationDegs = [0] * NUM_SERVOS

# Abort flag, raised by the UI ABORT button and polled throughout.
globalAbortFlag = False
# Read data from 7bot - this is done in a separate thread and global variables are altered as the means
# of communication between the thread and the main program - ugly !
def botPortRead(ser):
    """Serial-read loop for the 7Bot, run on a background thread.

    Parses the 7Bot status protocol and publishes results via module
    globals (isAllConverge / measuredForces / measuredRotationDegs) --
    the admittedly ugly channel between this thread and the main program.

    Frame format as handled here: 0xFE begin flag, then an instruction
    byte (value minus 240 gives the instruction number):
      * instruction 6: one byte of force status.
      * instruction 9: 2 bytes per servo (two 7-bit halves, high first)
        plus a trailing convergence byte.
    Bytes received outside a frame are echoed as debug text.
    """
    global isAllConverge, measuredRotationDegs, measuredForces, NUM_SERVOS
    global serialIsClosing
    rxBuf = [0] * (NUM_SERVOS * 2 + 1)
    beginFlag = False
    instruction = 0
    cnt = 0
    while True:
        # Handle closing down
        if serialIsClosing or not ser.isOpen():
            break
        # Get a char if there is one
        val = ser.read(1)
        if len(val) == 0:
            continue
        # print("Rx", "%02X " % ord(val))
        if not beginFlag:
            beginFlag = (ord(val) == 0xFE)
            if not beginFlag:
                # Outside a frame: hex-dump unprintable bytes, echo the rest.
                if (ord(val) < 0x20 or ord(val) > 0x7e) and ord(val) != 0x0d and ord(val) != 0x0a:
                    print("<%02X>" % ord(val))
                    sys.stdout.flush()
                else:
                    print(val.decode("utf-8"), end="")
                    sys.stdout.flush()
            instruction = 0
            cnt = 0
        elif instruction == 0:
            instruction = ord(val) - 240
        elif instruction == 6:
            forceStatus = ord(val)
            print("<== ForceStatus", forceStatus)
            beginFlag = False
            instruction = 0
            cnt = 0
        elif instruction == 9:
            rxBuf[cnt] = ord(val)
            cnt += 1
            if cnt >= NUM_SERVOS * 2 + 1:
                # Complete feedback frame received; decode it.
                beginFlag = False
                instruction = 0
                cnt = 0
                for i in range(NUM_SERVOS):
                    posCode = rxBuf[i*2] * 128 + rxBuf[i*2+1]
                    # Low 14 bits: force magnitude in 1/1024 units;
                    # bit 14 is the sign.
                    measuredForces[i] = posCode % 16384 / 1024
                    # BUGFIX: integer division; the original true division
                    # ('/' on Python 3) made this test true for any nonzero
                    # posCode, negating the force almost always.
                    if posCode // 16384 > 0:
                        measuredForces[i] = -measuredForces[i]
                    # Convert 0-1000 code to 0-180 deg
                    measuredRotationDegs[i] = (posCode % 1024) * 9 / 50
                isAllConverge = (rxBuf[(NUM_SERVOS-1)*2+2] == 1)
                # print("Forces:", measuredForces, ",Angles:", measuredRotationDegs, isAllConverge)
        else:
            beginFlag = False
# Utility functions
def appendTwoByteVal(buf, val):
    """Append *val* to *buf* encoded as two 7-bit bytes, high septet first."""
    v = int(val)
    buf.append((v // 128) & 0x7F)
    buf.append(v & 0x7F)
def appendVecToSend(buf, vec):
    """Append every component of *vec* to *buf* as two 7-bit bytes.

    Negative components are encoded by adding a 1024 sign flag to the
    integer magnitude before the two-byte split.
    """
    for component in vec:
        encoded = int(abs(component)) + (0 if component >= 0 else 1024)
        # Two-byte split, high septet first (inlined appendTwoByteVal).
        buf.append((encoded // 128) & 0x7F)
        buf.append(encoded & 0x7F)
# Called while waiting for the robot arm to reach its destination
# Also allows the TKINTER UI to have some time to operate
def waitAndFlush(timeInSecs):
    """Sleep for *timeInSecs* in 1 ms slices while keeping stdout flushed
    and letting the tkinter UI process its event queue."""
    ticks = int(timeInSecs * 1000)
    for _ in range(ticks):
        sys.stdout.flush()
        masterTk.update_idletasks()
        masterTk.update()
        time.sleep(0.001)
# Limit value between two thresholds
def constrain(val, valMin, valMax):
    """Clamp *val* into the inclusive range [valMin, valMax]."""
    return valMin if val < valMin else (valMax if val > valMax else val)
# Set Servo angles
def setServoAngles(servoAngles):
    """Command the 7Bot servos to *servoAngles* (degrees).

    Clears the global convergence flag; the serial-read thread raises it
    again once the arm reports the motion has finished.
    """
    global isAllConverge
    isAllConverge = False
    # Frame: 0xFE begin flag, 0xF9 instruction, then two 7-bit bytes per
    # servo carrying the angle converted to the 0-1000 device code.
    frame = bytearray([0xFE, 0xF9])
    for angle in servoAngles:
        appendTwoByteVal(frame, angle * 50 // 9)
    botPort.write(frame)
# set motor force status: 0-forceless, 1-normal servo, 2-protection
def setForceStatus(status):
    """Set motor force mode: 0-forceless, 1-normal servo, 2-protection."""
    botPort.write(bytearray([0xFE, 0xF5, status]))
# get servo angles
def getForceStatus():
    """Request the current force status from the 7Bot; the reply is
    handled asynchronously by the serial-read thread."""
    botPort.write(bytearray([0xFE, 0xF6, 0x00]))
# set motion fluency & speeds (0~250 ---> 0~25)
def setSpeed(fluentEnables, speeds):
    """Set per-servo motion speed and fluency.

    Frame layout: 0xFE begin flag, 0xF7 instruction, then one byte per
    servo where bit 6 enables fluency and bits 5..0 hold speed / 10
    (range 0-25; 10 means 100 degrees per second).
    """
    frame = bytearray([0xFE, 0xF7])
    for idx, speed in enumerate(speeds):
        code = constrain(speed, 0, 250) // 10
        if fluentEnables[idx]:
            code += 64
        frame.append(code)
    botPort.write(frame)
# IK6(6 angles)
# j6:mm(-500~500), vec:(-1.0~1.0)--->(-500~500), theta:Degrees
def setIK6(j6, vec56, vec67, theta6):
    """Send an inverse-kinematics (6-angle form) command to the 7Bot.

    j6: joint-6 target in mm, each axis clamped to -500..500.
    vec56, vec67: direction vectors, normalised then scaled to 500.
    theta6: degrees, converted to the device's 0-1000 code.
    """
    global isAllConverge
    isAllConverge = False
    clamped = np.array([constrain(j6[0], -500, 500),
                        constrain(j6[1], -500, 500),
                        constrain(j6[2], -500, 500)])
    unit56 = np.copy(vec56)
    unit56 /= np.linalg.norm(unit56)
    unit56 *= 500
    unit67 = np.copy(vec67)
    unit67 /= np.linalg.norm(unit67)
    unit67 *= 500
    frame = bytearray([0xFE, 0xFA])
    appendVecToSend(frame, clamped)
    appendVecToSend(frame, unit56)
    appendVecToSend(frame, unit67)
    appendTwoByteVal(frame, int(theta6 * 50 / 9))
    # for dat in frame:
    #     print("%02X " % dat, end="")
    botPort.write(frame)
# Move to a specific azimuth - the 7Bot standard firmware doesn't seem to do this in one go even though it
# can tell that the arm has not reached the desired position - not sure why this is but this is a fix which
# iterates towards the correct point by requesting positions greater or lesser than actually required until
# the arm gets near enough to the desired point
def moveToAzimuth(anglesDown):
    """Rotate the arm base towards the azimuth of *anglesDown*.

    The stock 7Bot firmware tends to stop short of the requested azimuth,
    so this iteratively nudges the commanded angle up or down until the
    measured azimuth is within one degree (at most 5 attempts).

    Returns the commanded azimuth value that achieved the position.
    """
    target = keyboardClearAngles[:]
    target[0] = anglesDown[0]
    commanded = target[:]
    for attempt in range(5):
        setServoAngles(commanded)
        while not isAllConverge and not globalAbortFlag:
            waitAndFlush(0.1)
        waitAndFlush(0.1)
        if globalAbortFlag:
            break
        angleErrs = calcAngleError(target)
        print("Azimuth attempt", attempt)
        dispAngleError(target)
        if abs(angleErrs[0]) < 1:
            break
        commanded[0] += 1 if (angleErrs[0] > 0) else -1
    return commanded[0]
# Send the robot arm to predefined locations
def goToHome():
    """Move the arm to the parked 'home' pose and wait for convergence."""
    setServoAngles(homePositionAngles)
    while not isAllConverge and not globalAbortFlag:
        waitAndFlush(0.1)
    waitAndFlush(0.1)
def goToReady():
    """Move the arm to the 'ready' pose above the keyboard and wait for
    convergence."""
    setServoAngles(readyPositionAngles)
    while not isAllConverge and not globalAbortFlag:
        waitAndFlush(0.1)
    waitAndFlush(0.1)
def punchDownOnKey(armAngles, tryAdjustments):
    """Press the key located at *armAngles*: hover above it, punch down,
    then pull back up, aborting between phases if requested.

    Args:
        armAngles: Servo angles with the fingertip ON the key.
        tryAdjustments: Diagnostic flag, only echoed in the log output.
    """
    # Check for abort
    if globalAbortFlag:
        return
    print(" hovering ", formatAngles(armAngles), "TryAdjust", tryAdjustments)
    # Hover position = key position offset by the punch-down deltas
    # (azimuth unchanged, shoulder up, elbow back).
    # NOTE: removed the unused local 'acceptedErrorDegs' from the original.
    hoverAngles = armAngles[:]
    for i in range(len(keyPunchDownAngleDeltas)):
        hoverAngles[i] += keyPunchDownAngleDeltas[i]
    setServoAngles(hoverAngles)
    while not isAllConverge and not globalAbortFlag:
        waitAndFlush(0.1)
    waitAndFlush(0.1)
    if globalAbortFlag:
        return
    print(" punchDown ")
    setServoAngles(armAngles)
    while not isAllConverge and not globalAbortFlag:
        waitAndFlush(0.1)
    waitAndFlush(0.1)
    if globalAbortFlag:
        return
    dispAngleError(armAngles)
    print(" pullUp ")
    setServoAngles(hoverAngles)
    while not isAllConverge and not globalAbortFlag:
        waitAndFlush(0.1)
    waitAndFlush(0.1)
    if globalAbortFlag:
        return
# Press a single key
def pressKey(key):
    """Press a single MK14 key by name (e.g. "A", "GO", "MEM")."""
    print("pressKey", key, "...................")
    # Look up the calibrated pose for this key, settle the base azimuth
    # first, then punch with the azimuth actually achieved.
    anglesDown = keyPositions[key]
    okAzimuth = moveToAzimuth(anglesDown)
    keyAngles = list(anglesDown)
    keyAngles[0] = okAzimuth
    punchDownOnKey(keyAngles, False)
# Send a sequence of keys (in a list)
def sendKeySequence(keys):
    """Press each key in *keys* in order, stopping early on abort."""
    for key in keys:
        if globalAbortFlag:
            return
        pressKey(key)
# Set address on MK14
def setAddress(addr, forExec=False):
    """Enter an address on the MK14: ABT then GO (execute mode) or MEM
    (memory-edit mode), followed by the address digits."""
    print("Setting address to", addr)
    keys = ["ABT", "GO"] if forExec else ["ABT", "MEM"]
    keys.extend(str(ch) for ch in addr)
    sendKeySequence(keys)
# Send hex codes to the MK14
def sendHexCodes(hexcodes):
    """Type a string of hex digits into the MK14, one byte (two digits)
    at a time: TRM, digit, digit, TRM, MEM for each pair."""
    for start in range(0, len(hexcodes), 2):
        if globalAbortFlag:
            return
        pair = hexcodes[start:start + 2]
        keys = ["TRM"]
        keys.extend(str(ch) for ch in pair)
        keys.append("TRM")
        keys.append("MEM")
        # print(pair)
        sendKeySequence(keys)
# Convert a 2 digit hex number to decimal
def hexVal(inStr, pos, length):
    """Parse ``length`` hex characters of *inStr* starting at *pos* into
    an int.

    Note: the third parameter was renamed from ``len``, which shadowed
    the builtin; all in-file callers pass it positionally.
    """
    return int(inStr[pos:pos + length], 16)
# Sand an entire program to the MK14
def sendProgram(programName):
    """Key a complete program into the MK14, then set its exec address.

    Each hex line is an Intel-HEX style record: ':' LL AAAA TT data...
    (byte count, load address, record type, data bytes).
    """
    if programName not in programs:
        return
    programDef = programs[programName]
    for hexLine in programDef["hexLines"]:
        byteCount = hexVal(hexLine, 1, 2)
        if byteCount > 0:
            setAddress(hexLine[3:7])
            sendHexCodes(hexLine[9:(9 + byteCount * 2)])
    # Finally point the MK14 at the program's entry address, ready for GO.
    setAddress(programDef["execAddr"], True)
# Debugging code
def formatAngles(angs):
    """Format a sequence of angles as fixed-width, one-decimal fields."""
    return "".join("%6.1f" % round(angle, 1) for angle in angs)
def dispAngleError(wantedAngles):
    """Print wanted vs. measured servo angles and their differences."""
    angleErrs = calcAngleError(wantedAngles)
    print(" Wanted ", formatAngles(wantedAngles))
    print(" Actual ", formatAngles(measuredRotationDegs))
    print(" Errs ", formatAngles(angleErrs))
def calcAngleError(wantedAngles):
    """Return rounded wanted-minus-measured errors for the first five
    servos (the wrist/gripper servos are not checked)."""
    snapshot = measuredRotationDegs[:]
    return [round(wantedAngles[i] - snapshot[i], 1) for i in range(5)]
# Function to handle UI abort key
def abortFn():
    """Tk ABORT button handler: raise the global abort flag."""
    global globalAbortFlag
    print("Aborted")
    globalAbortFlag = True
# Main program
print("Programming MK14 with 7Bot ...")

# Minimal UI: a single full-window ABORT button wired to abortFn.
masterTk = tkinter.Tk()
masterTk.title("Programming MK14 with 7Bot")
masterTk.geometry("400x100")
cancelButton = tkinter.Button(masterTk, text="ABORT", command=abortFn)
cancelButton.pack(expand=tkinter.YES, fill=tkinter.BOTH)

# Serial connection to 7Bot
serialIsClosing = False
botPort = serial.Serial(port="COM12", baudrate=115200, timeout=1)
#botPort = serial.Serial(port="/dev/ttyAMA0", baudrate=115200, timeout=1)

# Thread for reading from port
thread = threading.Thread(target=botPortRead, args=(botPort,))
thread.start()
waitAndFlush(1)

# Set angles and wait for motion converge
print("Setting normal servo mode")
setForceStatus(1)
goToReady()

# Reboot 7Bot if previous status is not normal servo
# To make motion much more stable, highly recommend you use fluency all the time
print("Setting speed and fluency")
fluentEnables = [True, True, True, True, True, True, True]
setSpeed(fluentEnables, normalSpeeds)

# Key in either the test sequence (if set) or the selected program.
print("Programming ...")
if len(testKeySequence) > 0:
    sendKeySequence(testKeySequence)
else:
    sendProgram(programToRun)

print("Pulling clear of the keyboard")
goToReady()
print("Going home")
goToHome()

# Power the servos down and let the serial-read thread exit cleanly
# (it polls serialIsClosing) before closing the port.
setForceStatus(0)
serialIsClosing = True
waitAndFlush(1)
botPort.close()
|
dijkstra_cpu.py | from time import time
import numpy as np
from queue import PriorityQueue
from multiprocessing import Process, cpu_count, Manager
from multiprocessing.sharedctypes import RawArray
from classes.result import Result
from utils.debugger import Logger
from utils.settings import INF
from method.sssp.dijkstra_cpu import dijkstra as dij_sssp
logger = Logger(__name__)
def dijkstra(para):
    """
    function:
        use dijkstra algorithm in CPU to solve the APSP.

    parameters:
        para: class, Parameter object. (see the 'sparry/classes/parameter.py/Parameter').

    return:
        class, Result object. (see the 'sparry/classes/result.py/Result').
    """
    logger.debug("turning to func dijkstra-cpu-apsp")
    # Idiom fix: test the flag's truthiness instead of comparing '== True'.
    if para.useMultiPro:
        return dijkstra_multi(para)
    return dijkstra_single(para)
def dijkstra_single(para):
    """
    function:
        use dijkstra algorithm in A SINGLE CPU core to solve the APSP
        (one SSSP per source, rows stacked into a matrix).

    parameters:
        para: class, Parameter object. (see the 'sparry/classes/parameter.py/Parameter').

    return:
        class, Result object. (see the 'sparry/classes/result.py/Result').
    """
    logger.debug("turning to func dijkstra-cpu-apsp single-process")
    t1 = time()
    CSR, n, pathRecordBool = para.graph.graph, para.graph.n, para.pathRecordBool
    # Solve one SSSP per source vertex and collect the distance rows.
    rows = []
    for source in range(n):
        para.srclist = source
        rows.append(dij_sssp(para).dist)
    para.srclist = None
    dist = np.array(rows)
    timeCost = time() - t1
    # result
    result = Result(dist = dist, timeCost = timeCost, graph = para.graph)
    if pathRecordBool:
        result.calcPath()
    return result
def dijkstra_multi_sssp(V, E, W, n, sources, distQ, id0):
    """
    function:
        worker process body: repeatedly pull a source vertex from the
        shared queue and solve its SSSP with a heap-based dijkstra.

    parameters:
        V, array, the CSR[0] (row offsets).
        E, array, the CSR[1] (column indices).
        W, array, the CSR[2] (edge weights).
        n, int, the number of vertices.
        sources, Queue, shared queue of source vertices still to solve.
        distQ, Queue, shared queue collecting (source, dist) results.
        id0, int, worker id (debugging only).

    return:
        None; results are delivered through distQ.
    """
    pq = PriorityQueue()
    # job scheduling: keep pulling sources until the shared queue drains.
    while not sources.empty():
        src = sources.get()
        dist = np.full((n,), INF).astype(np.int32)
        dist[src] = 0
        # settled-vertex flags
        done = np.full((n, ), 0).astype(np.int32)
        pq.put((0, src))
        while not pq.empty():
            u = pq.get()[1]
            if done[u] == 1:
                # stale heap entry for an already-settled vertex
                continue
            done[u] = 1
            # relax every outgoing edge of u
            for e in range(V[u], V[u + 1]):
                v = E[e]
                if dist[v] > dist[u] + W[e]:
                    dist[v] = dist[u] + W[e]
                    pq.put((dist[v], v))
        distQ.put((src, dist))
    # print(f"id = {id0} is finished....., source empty? {sources.empty()}")
def dijkstra_multi(para):
    """
    function:
        use dijkstra algorithm in ALL CPU cores to solve the APSP PARALLEL.

    parameters:
        class, Parameter object. (see the 'sparry/classes/parameter.py/Parameter').

    return:
        class, Result object. (see the 'sparry/classes/result.py/Result').
    """
    logger.debug("turning to func dijkstra-cpu-apsp multi-process")
    t1 = time()
    # q = Queue()
    # A Manager queue is used instead of a plain multiprocessing.Queue:
    # joining a child that still holds items in a plain Queue can hang
    # forever ("running forever of the son thread and can't exit").
    manager = Manager()
    q = manager.Queue()
    CSR, n, pathRecordBool = para.graph.graph, para.graph.n, para.pathRecordBool
    # Share the three CSR arrays with the workers without per-process copies.
    shared_V = RawArray('i', CSR[0])
    shared_E = RawArray('i', CSR[1])
    shared_W = RawArray('i', CSR[2])
    del CSR
    # the queue of sources
    sources = manager.Queue()
    for i in range(n):
        sources.put(i)
    # create as many processes as there are cores, and let them schedule
    # themselves by pulling sources from the shared queue.
    cores = cpu_count()
    myProcesses = [Process(target = dijkstra_multi_sssp, args = (shared_V, shared_E, shared_W, n, sources, q, _)) for _ in range(cores)]
    for myProcess in myProcesses:
        myProcess.start()
    for myProcess in myProcesses:
        if myProcess.is_alive():
            myProcess.join()
    # Reassemble the distance matrix in source order.
    dist = [None for i in range(n)]
    while q.empty() == False:
        temp = q.get()
        dist[temp[0]] = temp[1]
    dist = np.array(dist)
    timeCost = time() - t1
    # result
    result = Result(dist = dist, timeCost = timeCost, graph = para.graph)
    if pathRecordBool:
        result.calcPath()
    return result
|
McScrpGui.py | #بسم الله الرحمان الرحيم
# -*- coding: utf-8 -*-
# 110 DESKTOP
# 159 SCRAPING
# 8 DJANGO
from threading import Thread
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QThread
from PyQt5.QtWidgets import QApplication, QWidget, QTableWidgetItem, QFileDialog, QStyleFactory, QDialog, QMessageBox, QPushButton
import sys
import requests
import McScrp
from openpyxl import Workbook
class Ui_Form(object):
    """Qt-Designer-style UI builder for the scraper window.

    Creates the scrape/save buttons, the Text/Tags combo box, the URL
    field, the progress bar, the result table and the About button, and
    applies a push-button stylesheet.
    """

    def setupUi(self, Form):
        # NOTE(review): widgets are parented to a mix of Form and self;
        # this works because MainWindow passes itself as Form.
        Form.setObjectName("Form")
        Form.resize(573, 430)
        icn = QtGui.QIcon('McScrp.ico')
        Form.setWindowIcon(icn)
        Form.setStyleSheet("""QPushButton {
background-color: #4CAF50; /* Green */
border: none;
color: white;
text-align: center;
text-decoration: none;
display: inline-block;
font-size: 16px;
-webkit-transition-duration: 0.4s; /* Safari */
transition-duration: 0.4s;
cursor: pointer;
}
QPushButton {
background-color: white;
color: black;
border: 2px solid #555555;
}
QPushButton:pressed {
background-color: #555555;
color: white;
}
""")
        self.scrp = QtWidgets.QPushButton(Form)
        self.scrp.setGeometry(QtCore.QRect(640, 20, 110, 31))
        self.choice = QtWidgets.QComboBox(Form)
        self.choice.setGeometry(QtCore.QRect(530, 20, 101, 31))
        self.choice.addItems(["Text", "Tags"])
        self.save = QtWidgets.QPushButton(Form)
        self.save.setGeometry(QtCore.QRect(1200, 20, 121, 31))
        self.pbar = QtWidgets.QProgressBar(self)
        self.pbar.setGeometry(QtCore.QRect(790, 20, 350, 31))
        font = QtGui.QFont()
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(50)
        self.scrp.setFont(font)
        self.choice.setFont(font)
        self.scrp.setObjectName("pushButton")
        font = QtGui.QFont()
        font.setPointSize(14)
        self.lineEdit_2 = QtWidgets.QLineEdit(Form)
        self.lineEdit_2.setGeometry(QtCore.QRect(20, 20, 500, 31))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.lineEdit_2.setFont(font)
        self.lineEdit_2.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.lineEdit_2.setAlignment(QtCore.Qt.AlignCenter)
        self.lineEdit_2.setObjectName("lineEdit_2")
        self.tbl = QtWidgets.QTableWidget(Form)
        self.tbl.setGeometry(QtCore.QRect(5, 71, 1355, 551))
        self.abt = QtWidgets.QPushButton(Form)
        self.abt.setGeometry(QtCore.QRect(640, 650, 91, 31))
        self.abt.setText('About Me')
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        # Set all user-visible strings and force the Fusion widget style.
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "GuiMcScrp"))
        self.scrp.setText(_translate("Form", "Scraping data"))
        self.save.setText(_translate("Form", "Save data"))
        self.lineEdit_2.setPlaceholderText(
            _translate("Form", "URL Or Path Html"))
        QApplication.setStyle(QStyleFactory.create('Fusion'))
class MainWindow(QWidget, Ui_Form):
    """Main window: scrape a URL or local HTML file into the table and
    optionally export it to an .xlsx workbook."""

    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        QWidget.__init__(self)
        self.setupUi(self)
        self.scrp.clicked.connect(self.thr)
        self.save.clicked.connect(self.Save)
        self.abt.clicked.connect(self.abtme)

    def progress(self, datavalue):
        """Fill the table with scraped text/tags from raw HTML *datavalue*
        and advance the progress bar, one column per discovered tag."""
        scr = McScrp.mcscrp()
        self.lis = scr.get_tags(datavalue)
        # Drop empty tag names before using them as column headers.
        cc = self.lis.count("")
        if cc > 0:
            for i in range(cc):
                self.lis.remove("")
        self.tbl.setColumnCount(len(self.lis))
        self.tbl.setHorizontalHeaderLabels(self.lis)
        row = 1
        self.tbl.setRowCount(row)
        # NOTE(review): r starts at 1 for the first column but is reset to 0
        # for later columns -- looks unintended; kept as-is to preserve the
        # existing output layout.
        r = 1
        q = 0
        p = 0
        ch = {'Text': 'txt', 'Tags': 'tag'}
        for i in self.lis:
            resultat = scr.scrp(datavalue, i)[ch[self.choice.currentText()]]
            if len(resultat) > row:
                row = len(resultat)
                self.tbl.setRowCount(row)
            for u in resultat:
                if len(u) != 0:
                    self.tbl.setItem(r, q, QTableWidgetItem(str(u)))
                r += 1
                p += 1
                self.pbar.setValue(p)
            q += 1
            r = 0

    def abtme(self):
        """Show the 'About Me' dialog."""
        QMessageBox(self).about(self, "About Me", '<h2 style="font-size: 20px; color: green;" >Contact me:</h2> <a style="font-size:20px;" href="https://www.facebook.com/ZakatKnowledge/">Page Facebook</a> <a style="font-size: 20px;" href="https://www.facebook.com/M97Chahboun">Account Facebook</a> <a style="font-size: 20px; color: black;" href="https://www.github.com/ZakatKnowledge">Github</a> <a style="font-size: 20px; color: deeppink;" href="https://www.instagram.com/zakat_of_knowledge/">Instagrame</a> <a style="font-size: 20px; color: red;" href="https://youtube.com/channel/UCCiBkOPPs1iTCOyEeL7zWQg">Channel Youtube</a> <h2 style="font-size: 20px; color: green;" >Developped By CHAHBOUN Mohammed</h2> ')

    def thr(self):
        """Run the scrape on a worker thread.

        BUGFIX: the original joined the thread immediately after starting
        it, which blocked the GUI event loop and made the thread pointless.
        """
        Thread(target=self.scraping).start()

    def scraping(self):
        """Fetch HTML from the URL/path in the input field and render it."""
        self.tbl.clear()
        urlPath = self.lineEdit_2.text()
        data = None  # stays None when fetching fails
        if 'http' in urlPath:
            try:
                data = requests.get(urlPath).text
            except requests.RequestException:
                self.error()
        elif 'html' in urlPath:
            try:
                # Close the file promptly instead of leaking the handle.
                with open(urlPath, 'r') as fh:
                    data = fh.read()
            except OSError:
                self.error()
        else:
            self.error()
        if data is not None:
            try:
                self.progress(data)
            except Exception:
                # Parsing problems must not kill the worker thread.
                pass

    def error(self):
        """Clear the input field and show a hint about valid input."""
        self.lineEdit_2.setText('')
        self.lineEdit_2.setPlaceholderText(
            "bad File Html or Url without http/s")

    def Save(self):
        """Export the current table to an .xlsx file chosen by the user."""
        file = QFileDialog.getSaveFileName(self, 'Enregistrer Fichier', 'data',
                                           ("Excel file (*.xlsx)"))
        if not file[0]:
            # BUGFIX: dialog cancelled -- the original crashed on wb.save('').
            return
        wb = Workbook()
        ws = wb.active
        for u in range(len(self.lis)):
            ws.cell(column=u + 1, row=1, value=self.lis[u])
        for w in range(self.tbl.rowCount()):
            for i in range(len(self.lis)):
                item = self.tbl.item(w, i)
                if item is not None:
                    ws.cell(column=i + 1, row=w + 2, value=item.text())
        wb.save(file[0])
def main():
    """Create the QApplication and show the maximised main window."""
    app = QApplication(sys.argv)
    win = MainWindow()
    win.showMaximized()
    app.exec_()


if __name__ == '__main__':
    main()
|
test_slow_close.py | # Copyright 2013, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# pylint: disable=protected-access,missing-docstring,too-few-public-methods,invalid-name,too-many-public-methods
import socket
import threading
import unittest
import httpplus
_RESPONSE = """HTTP/1.1 200 OK
Server: wat
Content-Length: 2
hi""".replace("\n", "\r\n")
class SlowCloseServer(object):
    """Minimal blocking HTTP server for the test below.

    Serves one canned response per connection and deliberately waits for a
    *second* request on the same connection before closing it ("slow
    close"). NOTE(review): this is Python 2 code -- it passes str directly
    to con.send/recv concatenation; under Python 3 it would need explicit
    bytes handling.
    """

    def __init__(self):
        # Bind to an ephemeral localhost port and record it so the test
        # knows where to connect.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('localhost', 0))
        s.listen(1)
        unused_addr, self.port = s.getsockname()
        self._socket = s

    def loop(self):
        """Accept and answer connections until a '/quit' request arrives."""
        reqdata = ''
        while '/quit' not in reqdata:
            con, unused_addr = self._socket.accept()
            reqdata = ''
            # Read one byte at a time until the end of the request headers.
            while '\r\n\r\n' not in reqdata:
                reqdata += con.recv(1)
            con.send(_RESPONSE)
            if '/quit' in reqdata:
                break
            # Keep the connection open until the client sends another
            # request, then close it mid-conversation.
            reqdata2 = ''
            while '\r\n\r\n' not in reqdata2:
                reqdata2 += con.recv(1)
            con.close()
class SlowCloseTest(unittest.TestCase):
    """Exercise the httpplus client against the slow-closing server."""

    def setUp(self):
        # Run the server's accept loop on a background thread; it exits on
        # its own when the '/quit' request below is processed.
        self.server = SlowCloseServer()
        t = threading.Thread(target=self.server.loop)
        t.start()

    def test_request_response_request_close(self):
        # The timeout in this client is 15 seconds. In practice, this
        # test should complete in well under a tenth of a second, so
        # timeouts should indicate a bug in the test code.
        con = httpplus.HTTPConnection(
            'localhost:%d' % self.server.port, timeout=15)
        # Verify we can send a bunch of responses and they all work
        # with the same client.
        con.request('GET', '/')
        resp = con.getresponse()
        self.assertEqual(resp.read(), "hi")
        con.request('GET', '/ohai')
        resp = con.getresponse()
        self.assertEqual(resp.read(), "hi")
        con.request('GET', '/wat')
        resp = con.getresponse()
        self.assertEqual(resp.read(), "hi")
        # '/quit' also tells the server loop to shut down after replying.
        con.request('GET', '/quit')
        resp = con.getresponse()
        self.assertEqual(resp.read(), "hi")
|
ClientWindow.py | from PyQt5.QtWidgets import QApplication, QTableWidgetItem,QLineEdit, QTableWidget, QTextEdit,QWidget,QVBoxLayout,QHBoxLayout, QLabel,QPushButton
from PyQt5.QtGui import QFont, QIcon, QTextCursor,QCloseEvent
from PyQt5.QtCore import QRect, QSize, Qt
import threading
import os
import socket as sk
import time
import rsa
import pickle
from cryptography.fernet import Fernet
import hashlib
from tools.utls import get_ip_address, Logger,AuthenticationError,MsgParserThreadWorker
from config.config import client_port, server_port, icon_client_path, MAXSIZE,encoding
class ClientWindow(QWidget):
'''
design the main frame window
'''
def __init__(self,window_name, logger_path = None, log_flag = False, parent=None):
super().__init__(parent)
# the window title
self.setWindowTitle(window_name)
# basic componet
self.ip_address = QLineEdit(self)
self.port = QLineEdit(self)
self.name = QLineEdit(self)
self.log_display = QTextEdit(self)
self.log_display.setReadOnly(True)
self.user_list_table = QTableWidget(self)
self.message_content = QTextEdit(self)
self.state_info = QLabel(self)
# get the resulotion of the screen
self.screen_resolution = QApplication.desktop().screenGeometry()
self.width = self.screen_resolution.width()
self.height = self.screen_resolution.height()
# get the size of the window
self.window_width = self.width*0.5
self.window_height = self.height*0.5
# get the start position of the window
self.window_start_x = self.width/2 - self.window_width/2
self.window_start_y = self.height/2 - self.window_height/2
# set the size of the window
self.window_rect = QRect(self.window_start_x,self.window_start_y,self.window_width,self.window_height)
self.window_size = QSize(self.window_width,self.window_height)
# set the icon path
self.icon_path = icon_client_path
# set the threading event
# 监听线程控制事件
self.thread_event = threading.Event()
# 监听服务器通知报文线程控制事件
self.listen_serverinfo_thread_event = threading.Event()
# init the ui of main frame window
self.init_ui()
# set the font
self.font = QFont()
self.font.setPointSize(12)
self.font.setFamily("Consolas")
# for test
self.ip_address.setText(get_ip_address(sk.gethostname()))
self.port.setText(str(server_port))
# for host config
self.host_ip = get_ip_address(sk.gethostname())
self.host_port = client_port
self.__encoding = encoding
self.user_dict = {}
self.task_list = []
self.is_logined = threading.Event()
self.server_addr = None
# set the logger
self.log_flag = log_flag
if self.log_flag:
assert(logger_path != None)
if not os.path.exists(logger_path):
os.makedirs(logger_path)
self.logger = Logger(logger_path+"/client.log")
# create the client device
self.device = self._create_socket(5)
self.log_display_append("[+]Client: Successfully create socket. Host ip: %s, Host port: %s"%(self.host_ip,self.host_port))
# set state
self.set_state_info("bind the socket and listen the port %d"%self.host_port)
# # rsa decryption
self.rsa_key = rsa.newkeys(2048)
self.public_key = self.rsa_key[0] # public key
self.private_key = self.rsa_key[1] # private key
self.sym_key = None
# setlf wake time
self.wake_time = time.time()
# set the threading worker
self.thread_event.set()
self.listen_serverinfo_thread_event.set()
self.recv_thread = threading.Thread(target=self.listen_thread_for_client, args=(self.thread_event,))
self.recv_thread.start()
def init_ui(self):
    """Build the main window: fixed geometry and icon, plus a three-row layout.

    Top row: server address/name fields with Login/Logout buttons.
    Middle row: log display (left); user table and message editor (right).
    Bottom row: a running-status line.
    """
    # Fix the window to the precomputed rect/size so it cannot be resized.
    self.setGeometry(self.window_rect)
    self.setFixedSize(self.window_size)
    # set icon of this window
    self.setWindowIcon(QIcon(self.icon_path))
    # Layout skeleton (nested box layouts).
    total_layout = QVBoxLayout()
    top_layout = QHBoxLayout()
    middle_layout = QHBoxLayout()
    middle_layout_left = QVBoxLayout()
    middle_layout_right = QVBoxLayout()
    middle_layout_right_top = QVBoxLayout()
    middle_layout_right_bottom = QVBoxLayout()
    bottom_layout = QHBoxLayout()
    # Top row: connection fields plus login/logout buttons.
    top_layout.addWidget(QLabel("Server IP:"))
    top_layout.addWidget(self.ip_address)
    top_layout.addWidget(QLabel("Server Port:"))
    top_layout.addWidget(self.port)
    top_layout.addWidget(QLabel("Your Name:"))
    top_layout.addWidget(self.name)
    login_button = QPushButton("Login",self)
    login_button.clicked.connect(self.login_button_clicked)
    top_layout.addWidget(login_button)
    logout_button = QPushButton("Logout",self)
    logout_button.clicked.connect(self.logout_button_clicked)
    top_layout.addWidget(logout_button)
    # Middle-left: the scrolling log display.
    middle_layout_left.addWidget(QLabel("Log Display:"))
    middle_layout_left.addWidget(self.log_display)
    # Middle-right top: 3-column user table (name / ip / port).
    middle_layout_right_top.addWidget(QLabel("User List (Click the user to send message to him/her):"))
    middle_layout_right_top.addWidget(self.user_list_table)
    self.user_list_table.setColumnCount(3)
    self.user_list_table.setHorizontalHeaderLabels([" User Name "," IP Address "," Port "])
    self.user_list_table.setSortingEnabled (True)
    self.user_list_table.setAlternatingRowColors(True)
    # Middle-right bottom: message editor and the two send buttons.
    middle_layout_right_bottom.addWidget(QLabel("Message Content:"))
    middle_layout_right_bottom.addWidget(self.message_content)
    send_button = QPushButton("Send",self)
    send_button.clicked.connect(self.send_button_clicked)
    send_to_all_button = QPushButton("Send to All",self)
    send_to_all_button.clicked.connect(self.send_to_all_button_clicked)
    middle_layout_right_bottom.addWidget(send_button)
    middle_layout_right_bottom.addWidget(send_to_all_button)
    middle_layout_right.addLayout(middle_layout_right_top)
    middle_layout_right.addLayout(middle_layout_right_bottom)
    middle_layout.addLayout(middle_layout_left)
    middle_layout.addLayout(middle_layout_right)
    # Bottom row: running-status line.
    state_info_hint = QLabel("Running Status:",self)
    bottom_layout.addWidget(state_info_hint)
    bottom_layout.addWidget(self.state_info)
    # Assemble the rows into the window and show it.
    total_layout.addLayout(top_layout)
    total_layout.addLayout(middle_layout)
    total_layout.addLayout(bottom_layout)
    self.setLayout(total_layout)
    self.show()
def _create_socket(self, timeout):
    """Create a UDP socket bound to (self.host_ip, self.host_port).

    If the bind fails (typically the port is already in use), shift
    ``self.host_port`` by a random amount and retry until a bind succeeds.
    On success ``self.host_port`` holds the port actually bound.

    :param timeout: receive timeout in seconds applied to the socket.
    :return: the bound ``socket.socket``.
    """
    import random  # local import: only needed on the retry path
    while True:
        sock = sk.socket(sk.AF_INET, sk.SOCK_DGRAM)
        try:
            sock.bind((self.host_ip, self.host_port))
            sock.settimeout(timeout)
            return sock
        except OSError:
            # Bind failed: close the half-created socket (the original code
            # leaked one file descriptor per failed attempt), pick a nearby
            # port, and try again.
            sock.close()
            self.host_port += random.randint(1, 100)
def handle_sym_key(self, msg, lock):
    """Verify and install the server-sent symmetric session key.

    ``msg[1]`` carries a repr of (rsa-encrypted key, sha256 hexdigest).
    The digest is checked, the key is RSA-decrypted with our private key,
    and ``self.is_logined`` is set on success.

    :raises AuthenticationError: when the digest does not match.
    """
    # NOTE(security): eval() on data received from the network is dangerous;
    # flagged for review — ast.literal_eval or explicit framing would be safer.
    sym_key_encrypted, sym_key_encrypted_sha256 = eval(msg[1])
    if hashlib.sha256(sym_key_encrypted).hexdigest() != sym_key_encrypted_sha256:
        with lock:
            self.log_display_append("[-]Client: The sym key is not correct.")
        # (The original had an unreachable `return` after this raise.)
        raise AuthenticationError('[-] Authentication failed.')
    with lock:
        self.sym_key = rsa.decrypt(sym_key_encrypted, self.private_key)
    self.is_logined.set()
# Client listener thread.
def listen_thread_for_client(self,thread_event):
    '''
    Listen on the client's UDP socket and dispatch incoming packets.

    Runs until *thread_event* is cleared (see closeEvent). Each datagram is
    routed by its text prefix: SYMKEY (session-key delivery), USERLIST
    (roster update), KEEPALIVE (server heartbeat), OK/FAILED (logout
    responses), SERVERINFO, or — with no known prefix — an encrypted
    peer-to-peer chat message.

    NOTE(review): `MAXSIZE` and `encoding` are module-level globals defined
    outside this excerpt — confirm against the file header.
    NOTE(security): eval() on network data below is dangerous; flagged.
    '''
    while True:
        if thread_event.is_set():
            ts = time.time() # Get the timestamp of the package.
            timestamp = time.strftime('%Y-%m-%d %H:%M:%S', (time.localtime(ts)))
            try:
                data, addr = self.device.recvfrom(MAXSIZE) # Get the data and the address of the package.
                if data.decode(encoding).startswith('SYMKEY'):
                    # Session-key delivery: verify/install on a daemon thread.
                    msg = data.decode(encoding).split('##')
                    lock = threading.Lock()
                    sym_key_thread = threading.Thread(target=self.handle_sym_key,args=(msg,lock))
                    sym_key_thread.setDaemon(True)
                    sym_key_thread.start()
                elif data.decode(encoding).startswith('USERLIST'): # update the user directory
                    assert(self.sym_key != None)
                    f = Fernet(self.sym_key) # Create a Fernet object with the key.
                    recv_user_info = pickle.loads(f.decrypt(eval(data.decode(encoding).split('##')[1])))
                    lock = threading.Lock()
                    lock.acquire()
                    # Add users that appeared since the last roster update...
                    for key in recv_user_info.keys():
                        if key not in self.user_dict.keys():
                            self.user_dict[key] = recv_user_info[key]
                            self.log_display_append("[+] Client: User %s (%s,%s) is online."%(self.user_dict[key][1],self.user_dict[key][2],self.user_dict[key][3]))
                    # ...and drop users that disappeared.
                    del_list= []
                    for key in self.user_dict.keys():
                        if key not in recv_user_info.keys():
                            self.log_display_append("[+] Client: User %s (%s,%s) is offline."%(self.user_dict[key][1],self.user_dict[key][2],self.user_dict[key][3]))
                            del_list.append(key)
                    for key in del_list:
                        self.user_dict.pop(key)
                    lock.release()
                    # Table rows are the (name, ip, port) slice of each record.
                    user_list = [user[1:4] for user in self.user_dict.values()]
                    self.set_user_list_table(user_list)
                elif data.decode(encoding).startswith('KEEPALIVE'):
                    # Server heartbeat: remember when we last heard from it
                    # (keep_thread watches this timestamp).
                    self.wake_time = time.time()
                elif data.decode(encoding).startswith('OK'):
                    self.is_logined.clear()
                    self.log_display_append("[+]Client << Server(%s): Logout success response."%str(addr))
                elif data.decode(encoding).startswith('FAILED'):
                    self.log_display_append("[-]Client << Server(%s): Logout failed response."%str(addr))
                elif data.decode(encoding).startswith('SERVERINFO'):
                    if self.listen_serverinfo_thread_event.is_set():
                        self.log_display_append("[-]Client << Server(%s): Server info response."%str(addr))
                    else:
                        continue # ignore the server info here
                else: # msg from client
                    # Peer chat message; only meaningful once logged in.
                    if not self.is_logined.is_set():
                        continue
                    assert(self.sym_key is not None)
                    f = Fernet(self.sym_key) # Create a Fernet object with the key.
                    decrypt_msg = f.decrypt(eval(data.decode(encoding))) # Decrypt the message.
                    self.log_display_append("[+]Client << (%s): %s (decrypted msg: %s) "%(str(addr),eval(data.decode(encoding)),decrypt_msg.decode(encoding)))
            except Exception as e:
                # recvfrom() raises on the socket timeout every few seconds,
                # which also lands here; only log when file logging is on.
                if self.log_flag:
                    print("%s [-] Client Recv MSG Error: %s"%(timestamp,e))
        else:
            # Event cleared (closeEvent): terminate the listener thread.
            return
def login_button_clicked(self):
    """Handler for the 'Login' button.

    Validates the input fields, runs the login handshake (login_opt),
    then starts the keep-alive thread and disables the connection inputs.
    """
    if self.is_logined.is_set():
        self.set_state_info("You have already logined.")
        return
    if self.ip_address.text() == "" or self.port.text() == "" or self.name.text() == "":
        self.state_info.setText("Please fill in the blank. (server ip, server port, your name)")
        return
    self.wake_time = time.time()
    self.server_addr=(self.ip_address.text(), int(self.port.text()))
    try:
        self.login_opt(username=self.name.text())
    except Exception as e:
        self.set_state_info("Login failed, please ensure the server is running.")
        self.log_display_append("[-] Client: Login failed, please ensure the server is running.")
        return
    self.set_state_info("Login successfully.")
    # Stop listening for server broadcast packets while logged in.
    self.listen_serverinfo_thread_event.clear()
    # Liveness-check (keep-alive) thread.
    self.keep_active_thread = threading.Thread(target=self.keep_thread, args=(self.is_logined,))
    self.keep_active_thread.start()
    self.log_display_append("[+] Client: Login successfully, Server IP: %s, Server Port: %d, Local IP: %s, Local Port: %d"%(self.ip_address.text(),int(self.port.text()),self.host_ip,self.host_port))
    # Freeze the connection fields until logout.
    self.ip_address.setDisabled(True)
    self.port.setDisabled(True)
    self.name.setDisabled(True)
    # Send an immediate KEEP so the server registers us right away.
    username = self.name.text()
    msg = b'KEEP##'+username.encode(self.__encoding)
    self.device.sendto(msg, self.server_addr)
def keep_thread(self,event):
    """Keep-alive loop.

    While *event* (self.is_logined) is set: send a KEEP packet every 5s and,
    if no server KEEPALIVE has refreshed self.wake_time for more than 15s,
    declare the server unresponsive and reset the UI to the logged-out state.
    Returns (ending the thread) once the event is cleared.
    """
    while True:
        if event.is_set():
            username = self.name.text()
            msg = b'KEEP##'+username.encode(self.__encoding)
            self.device.sendto(msg, self.server_addr)
            time.sleep(5)
            # self.wake_time is refreshed by the listener thread on KEEPALIVE.
            if time.time() - self.wake_time > 15:
                self.log_display_append("[-] Client: Server is not responding.")
                self.set_state_info("Server is not responding.")
                # Drop back to the logged-out UI state.
                self.is_logined.clear()
                self.user_dict.clear()
                self.ip_address.setEnabled(True)
                self.port.setEnabled(True)
                self.name.setEnabled(True)
                self.user_list_table.clearContents()
        else:
            return
def login_opt(self, username):
    """Send a LOGIN request carrying our RSA public key, then wait for the
    listener thread to install the session key and set ``is_logined``.

    :param username: display name to register with the server.
    :raises AuthenticationError: if login does not complete within ~5s.
    """
    assert(self.server_addr is not None)
    sendkey = pickle.dumps(self.public_key)
    sendkeySha256 = hashlib.sha256(sendkey).hexdigest()
    msg = b'LOGIN##'+username.encode(self.__encoding)+b'##'+repr((sendkey,sendkeySha256)).encode(self.__encoding)
    self.device.sendto(msg, self.server_addr)
    # Poll for up to 50 * 0.1s = 5 seconds. The original `while(count>0)`
    # loop never decremented `count`, so a failed login spun forever.
    for _ in range(50):
        if self.is_logined.is_set():
            return
        time.sleep(0.1)
    raise AuthenticationError('[-] Login failed.')
def logout_opt(self):
    """Send a LOGOUT request and wait for the listener thread to clear
    ``is_logined`` (it does so on the server's OK response).

    :raises AuthenticationError: if logout does not complete within ~5s.
    """
    if not self.is_logined.is_set():
        return
    username = self.name.text()
    self.device.sendto(b'LOGOUT##'+username.encode(self.__encoding), self.server_addr)
    # Poll for up to 50 * 0.1s = 5 seconds. The original loop never
    # decremented `count`, so a failed logout spun forever.
    for _ in range(50):
        if not self.is_logined.is_set():
            return
        time.sleep(0.1)
    raise AuthenticationError('[-] Logout failed.')
def logout_button_clicked(self):
    """Handler for the 'Logout' button.

    Runs the logout handshake, then re-enables the connection inputs and
    clears the user table on success.
    """
    if self.device is None:  # fixed: identity check instead of `== None`
        self.set_state_info(" You have not login yet!")
        return
    try:
        self.logout_opt()
    except Exception:
        self.set_state_info("Logout failed!, Please try again.")
        self.log_display_append("[-] Client: Logout failed.")
        return
    self.set_state_info("Logout successfully.")
    # Resume listening for server broadcast packets while logged out.
    self.listen_serverinfo_thread_event.set()
    self.log_display_append("[+] Client: Logout successfully, Server IP: %s, Server Port: %d, Local IP: %s, Local Port: %d"%(self.ip_address.text(),int(self.port.text()),self.host_ip,self.host_port))
    # Reset the UI to the logged-out state.
    self.user_dict.clear()
    self.ip_address.setEnabled(True)
    self.port.setEnabled(True)
    self.name.setEnabled(True)
    self.user_list_table.clearContents()
def send(self, host, port, user_name, msg):
    """Encrypt *msg* with the recipient's Fernet key and send it via UDP.

    :return: False when the recipient is unknown; None on success
        (unchanged from the original contract).
    """
    # user_dict keys are ip + port + name. The original membership test used
    # `host + user_name` (no port) and so disagreed with the lookup below,
    # allowing a KeyError to slip past the guard.
    key = host + str(port) + user_name
    if key not in self.user_dict:
        # `debugging` is never set in the visible __init__ — guard the access.
        if getattr(self, 'debugging', False):
            print('[-] User not in user dict', key)
        return False
    encrypt_key = self.user_dict[key][0]
    f = Fernet(encrypt_key)
    sendmsg = repr(f.encrypt(msg.encode(self.__encoding))).encode(self.__encoding)
    self.device.sendto(sendmsg, (host, port))
def send_to_all(self, msg):
    """Broadcast *msg*, individually Fernet-encrypted, to every known user
    except this client itself."""
    own_addr = (self.host_ip, self.host_port)
    payload = msg.encode(self.__encoding)
    for user in self.user_dict.values():
        # user = (fernet_key, name, ip, port); skip our own entry.
        if (user[2], user[3]) == own_addr:
            continue
        sendmsg = repr(Fernet(user[0]).encrypt(payload)).encode(self.__encoding)
        self.log_display_append("[+] Client>> %s: %s(encrypted msg:%s)"%(str((user[2],user[3])),msg,sendmsg))
        self.device.sendto(sendmsg, (user[2], int(user[3])))
def send_to_all_button_clicked(self):
    """Handler for 'Send to All': validate state, broadcast, log, clear."""
    if not self.is_logined.is_set():
        self.set_state_info(" You have not login yet!")
        return
    text = self.message_content.toPlainText()
    if text == "":
        self.set_state_info("Please input the message you want to send.")
        return
    self.send_to_all(text)
    self.log_display_append("[+] Client: Send to all: %s"%text)
    self.message_content.clear()
def send_to_selected(self, msg, user_list):
    """Send *msg* (per-recipient Fernet-encrypted) to each entry of
    *user_list*, where an entry is [fernet_key, ip, port]; skips ourselves."""
    own_addr = (self.host_ip, self.host_port)
    raw = msg.encode(self.__encoding)
    for key, ip, port in user_list:
        if (ip, port) == own_addr:
            continue  # never message ourselves
        sendmsg = repr(Fernet(key).encrypt(raw)).encode(self.__encoding)
        self.device.sendto(sendmsg, (ip, port))
        self.log_display_append("[+] Client>> %s: %s(encrypted msg:%s)"%(str((ip,port)),msg,sendmsg))
def send_button_clicked(self):
    """Handler for the 'Send' button: encrypt and send the message to every
    checked row of the user table."""
    if not self.is_logined.is_set():
        self.set_state_info(" You have not login yet!")
        return
    msg = self.message_content.toPlainText()  # read once instead of twice
    if msg == '':
        self.set_state_info(" Please input message!")
        return
    sender_list = []
    for i in range(self.user_list_table.rowCount()):
        if self.user_list_table.item(i,0).checkState() == Qt.Checked:
            # user_dict key is ip + port + name — assumed to match the keys
            # built by the server roster; confirm against the USERLIST path.
            key = self.user_list_table.item(i,1).text()+self.user_list_table.item(i,2).text()+self.user_list_table.item(i,0).text()
            sender_list.append([self.user_dict[key][0],self.user_list_table.item(i,1).text(),int(self.user_list_table.item(i,2).text())])
    if len(sender_list) == 0:
        self.set_state_info(" Please select users!")
        return
    self.send_to_selected(msg, sender_list)
    self.message_content.clear()
    # (removed a dead trailing `pass` from the original)
def set_user_list_table(self, user_list):
    """Refresh the user table from *user_list*.

    Each entry is (name, ip, port); column 0 gets a checkable item so rows
    can be selected as message recipients.
    """
    # (removed a leftover debug `print(user_list)`)
    self.user_list_table.setRowCount(len(user_list))
    for row, user in enumerate(user_list):
        check = QTableWidgetItem()
        check.setCheckState(Qt.Unchecked)
        check.setText(user[0])
        self.user_list_table.setItem(row, 0, check)
        for col in (1, 2):
            self.user_list_table.setItem(row, col, QTableWidgetItem(str(user[col])))
def set_log_display(self,log_text):
    """Replace the entire contents of the log view with *log_text*."""
    self.log_display.setText(log_text)
def log_display_append(self, log_text):
    """Append a timestamped line to the log view, keep the view scrolled to
    the end, and mirror the line to the file logger when enabled."""
    stamp = time.strftime('%Y-%m-%d %H:%M:%S', (time.localtime(time.time())))
    line = "$%s>> %s"%(stamp, log_text)
    self.log_display.append(line)
    self.log_display.moveCursor(QTextCursor.End)
    if self.log_flag:
        self.logger.log(line)
def set_state_info(self,state_info):
    """Show *state_info* in the bottom status line."""
    self.state_info.setText(state_info)
def closeEvent(self, a0: QCloseEvent) -> None:
    """Qt close hook: tear down login state, the UDP socket and the worker
    thread events before the window closes."""
    if self.is_logined.is_set():
        self.is_logined.clear()
    if self.device is not None:
        # Actually close the socket instead of only dropping the reference
        # (the original leaked the file descriptor until GC). The listener
        # thread's broad except handles the resulting recvfrom error.
        self.device.close()
        self.device = None
    if self.thread_event.is_set():
        # Signals the listener thread to exit its loop.
        self.thread_event.clear()
    self.user_dict.clear()
    self.user_list_table.clearContents()
    if self.listen_serverinfo_thread_event.is_set():
        self.listen_serverinfo_thread_event.clear()
    return super().closeEvent(a0)
if __name__ == "__main__":
    '''
    Test the function.
    '''
    # Standalone smoke test: start the Qt event loop with one client window.
    import sys
    app = QApplication(sys.argv)
    window = ClientWindow('Client')
    sys.exit(app.exec_())
|
static_object_detector_node.py | #!/usr/bin/env python
import cv2
import numpy as np
import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import Float32
from cv_bridge import CvBridge, CvBridgeError
from duckietown_msgs.msg import ObstacleImageDetection, ObstacleImageDetectionList, ObstacleType, Rect, BoolStamped
import sys
import threading
#from rgb_led import *
class Matcher:
    """HSV colour-threshold detector for traffic cones (duck detection is
    currently disabled in contour_match)."""

    # Default HSV (low, high) bounds; overwritten from ROS params in __init__.
    CONE = [np.array(x, np.uint8) for x in [[0, 80, 80], [22, 255, 255]]]
    DUCK = [np.array(x, np.uint8) for x in [[25, 100, 150], [35, 255, 255]]]
    # Maps an obstacle-type id to its display label.
    terms = {ObstacleType.CONE: "cone", ObstacleType.DUCKIE: "duck"}

    def __init__(self):
        rospy.loginfo("[static_object_detector_node] Matcher __init__.")
        self.cone_color_low = self.setupParam("~cone_low", [0, 80, 80])
        self.cone_color_high = self.setupParam("~cone_high", [22, 255, 255])
        self.duckie_color_low = self.setupParam("~duckie_low", [25, 100, 150])
        self.duckie_color_high = self.setupParam("~duckie_high", [35, 255, 255])
        self.CONE = [np.array(x, np.uint8) for x in [self.cone_color_low, self.cone_color_high]]
        self.DUCK = [np.array(x, np.uint8) for x in [self.duckie_color_low, self.duckie_color_high]]

    def setupParam(self, param_name, default_value):
        """Read a ROS parameter (falling back to *default_value*) and write it
        back to the parameter server for transparency."""
        value = rospy.get_param(param_name, default_value)
        rospy.set_param(param_name, value)
        rospy.loginfo("[%s] %s = %s " % ('static_object_detector_node', param_name, value))
        return value

    def get_filtered_contours(self, img, contour_type):
        """Threshold *img* in HSV for *contour_type* ("CONE", "DUCK_COLOR" or
        "DUCK_CANNY"; anything else returns None) and return a list of
        (contour, bounding_box, distance_score, aspect_ratio, mean_color)
        tuples for contours passing the size/position/shape filters."""
        rospy.loginfo("[static_object_detector_node] [4.1.1].")
        hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        rospy.loginfo("[static_object_detector_node] [4.1.2].")
        if contour_type == "CONE":
            frame_threshed = cv2.inRange(hsv_img, self.CONE[0], self.CONE[1])
            ret, thresh = cv2.threshold(frame_threshed, 22, 255, 0)
        elif contour_type == "DUCK_COLOR":
            frame_threshed = cv2.inRange(hsv_img, self.DUCK[0], self.DUCK[1])
            ret, thresh = cv2.threshold(frame_threshed, 30, 255, 0)
        elif contour_type == "DUCK_CANNY":
            frame_threshed = cv2.inRange(hsv_img, self.DUCK[0], self.DUCK[1])
            frame_threshed = cv2.adaptiveThreshold(frame_threshed, 255,
                cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 5, 2)
            thresh = cv2.Canny(frame_threshed, 100, 200)
        else:
            return None  # unknown type (was an implicit bare return)
        filtered_contours = []
        # NOTE(review): 2-value unpacking matches OpenCV 2.x/4.x; OpenCV 3.x
        # returns (image, contours, hierarchy) — confirm the target version.
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
        # Largest contours first.
        contour_area = [(cv2.contourArea(c), (c)) for c in contours]
        contour_area = sorted(contour_area, reverse=True, key=lambda x: x[0])
        height, width = img.shape[:2]
        for (area, (cnt)) in contour_area:
            x, y, w, h = cv2.boundingRect(cnt)
            box = (x, y, w, h)
            # Weighted squared distance from the bottom-centre of the image.
            d = 0.5*(x-width/2)**2 + (y-height)**2
            if not(h > 15 and w > 10 and h < 200 and w < 200 and d < 120000):
                continue
            if contour_type == "DUCK_CANNY":
                continue  # NOTE(review): discards every Canny contour — confirm intended
            if contour_type == "DUCK_COLOR":  # extra filtering to remove lines
                if not(h > 25 and w > 25):
                    continue
                if d > 90000:
                    if not(h > 35 and w > 35):
                        continue
                if cv2.contourArea(cnt) == 0:
                    continue
                # Perimeter^2 / area rejects long thin blobs (lane lines).
                val = cv2.arcLength(cnt, True)**2 / cv2.contourArea(cnt)
                if val > 35: continue
                rect = cv2.minAreaRect(cnt)
                ctr, sides, deg = rect
                # Perimeter relative to the bounding-box diagonal.
                val = 0.5*cv2.arcLength(cnt, True) / (w**2+h**2)**0.5
                if val < 1.12: continue
            # Mean BGR colour inside the contour, used later for drawing.
            mask = np.zeros(thresh.shape, np.uint8)
            cv2.drawContours(mask, [cnt], 0, 255, -1)
            mean_val = cv2.mean(img, mask=mask)
            aspect_ratio = float(w)/h
            filtered_contours.append((cnt, box, d, aspect_ratio, mean_val))
        return filtered_contours

    def contour_match(self, img):
        '''
        Returns 1. Image with bounding boxes added
                2. an ObstacleImageDetectionList
        '''
        object_list = ObstacleImageDetectionList()
        object_list.list = []
        height, width = img.shape[:2]
        object_list.imwidth = width
        object_list.imheight = height
        # Fixed: previously `cone_contours` was left unbound when
        # get_filtered_contours raised, causing a NameError below.
        cone_contours = []
        try:
            cone_contours = self.get_filtered_contours(img, "CONE")
        except Exception as e:
            rospy.loginfo("[static_object_detector_node] [4.1.5] get_filtered_contours ERROR. %s" % (e))
        # Duck detection disabled; the empty list keeps index 0 reserved so
        # the enumerate index still lines up with the obstacle-type ids.
        # duck_contours = self.get_filtered_contours(img, "DUCK_COLOR")
        all_contours = [[], cone_contours]
        for i, contours in enumerate(all_contours):
            for (cnt, box, ds, aspect_ratio, mean_color) in contours:
                # Draw label and bounding box on the image.
                x, y, w, h = box
                font = cv2.FONT_HERSHEY_SIMPLEX
                # NOTE(review): assumes ObstacleType.DUCKIE == 0 and
                # ObstacleType.CONE == 1 so that terms[i] resolves — confirm.
                cv2.putText(img, self.terms[i], (x, y), font, 0.5, mean_color, 4)
                cv2.rectangle(img, (x, y), (x+w, y+h), mean_color, 2)
                r = Rect()
                r.x = x
                r.y = y
                r.w = w
                r.h = h
                t = ObstacleType()
                t.type = i
                d = ObstacleImageDetection()
                d.bounding_box = r
                d.type = t
                object_list.list.append(d)
        return img, object_list
class StaticObjectDetectorNode:
    """ROS node: subscribes to camera images, runs the Matcher, and publishes
    both the detection list and an annotated debug image."""

    def __init__(self):
        self.name = 'static_object_detector_node'
        self.tm = Matcher()
        self.active = True
        # Ensures only one frame is processed at a time; extra frames are dropped.
        self.thread_lock = threading.Lock()
        self.sub_image = rospy.Subscriber("~image_raw", Image, self.cbImage, queue_size=1)
        self.sub_switch = rospy.Subscriber("~switch", BoolStamped, self.cbSwitch, queue_size=1)
        self.pub_image = rospy.Publisher("~cone_detection_image", Image, queue_size=1)
        self.pub_detections_list = rospy.Publisher("~detection_list", ObstacleImageDetectionList, queue_size=1)
        self.bridge = CvBridge()
        rospy.loginfo("[%s] Initialized." % (self.name))

    def cbSwitch(self, switch_msg):
        """Enable/disable processing from the ~switch topic."""
        self.active = switch_msg.data

    def cbImage(self, image_msg):
        """Image callback: hand off to a daemon worker thread so the
        subscriber queue never blocks."""
        if not self.active:
            return
        thread = threading.Thread(target=self.processImage, args=(image_msg,))
        thread.setDaemon(True)
        thread.start()

    def processImage(self, image_msg):
        """Convert the ROS image, run detection, publish results.

        A non-blocking lock acquire drops frames that arrive while a
        previous frame is still being processed.
        """
        if not self.thread_lock.acquire(False):
            return
        try:
            try:
                image_cv = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")
            except CvBridgeError as e:
                # Fixed: the original caught a misspelled `CvBridgeErrer`
                # (a NameError at runtime) and then fell through to use the
                # undefined `image_cv`; bail out instead.
                print(e)
                return
            img, detections = self.tm.contour_match(image_cv)
            # Propagate the source frame's timing/frame metadata.
            detections.header.stamp = image_msg.header.stamp
            detections.header.frame_id = image_msg.header.frame_id
            self.pub_detections_list.publish(detections)
            try:
                self.pub_image.publish(self.bridge.cv2_to_imgmsg(img, "bgr8"))
            except CvBridgeError as e:
                print(e)
        finally:
            # Always release, even on early return or unexpected error
            # (the original leaked the lock on the conversion-error path).
            self.thread_lock.release()
if __name__=="__main__":
    # Node entry point: register with the ROS master and spin forever;
    # all work happens in the subscriber callbacks.
    rospy.init_node('static_object_detector_node')
    node = StaticObjectDetectorNode()
    rospy.spin()
|
agent.py | from keras import backend as K
from keras import models
from keras import layers
from keras import optimizers
import tensorflow as tf
import numpy as np
from threading import Thread
from threading import Lock
import queue
import pickle
import copy
import os
# TF1-style session setup: let GPU memory grow on demand instead of grabbing
# the whole device, then install the session into the Keras backend.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
K.tensorflow_backend.set_session(session)
class Connect6:
    """Connect6 agent: a Keras policy network combined with a threaded
    threat-space search (VCDT) for forced win/defense sequences.

    NOTE(review): relies on TF1-style sessions/graphs (tf.Session,
    _make_predict_function) — tied to Keras 2.x / TensorFlow 1.x.
    """

    def __init__(self):
        self.board_size = (19, 19)  # (rows, cols)
        self.model = None
        self.session = None
        self.graph = None

    def init(self):
        """Build the network and capture the TF session/graph so predict()
        can be called from worker threads."""
        self.model = self.build_model()
        self.model._make_predict_function()
        self.session = K.get_session()
        self.graph = tf.get_default_graph()

    def build_model(self):
        """Residual CNN: 19 bottleneck blocks (256->64->64->256) over a
        (19, 19, 5) input, ending in a softmax policy over all cells."""
        inputs = layers.Input(shape=(self.board_size[0], self.board_size[1], 5))
        layer = layers.Conv2D(256, (1, 1), use_bias=False, padding='same', kernel_initializer='he_normal')(inputs)
        for _ in range(19):
            conv_layer = layers.BatchNormalization()(layer)
            conv_layer = layers.PReLU()(conv_layer)
            conv_layer = layers.Conv2D(64, (1, 1), use_bias=False, padding='same', kernel_initializer='he_normal')(conv_layer)
            conv_layer = layers.BatchNormalization()(conv_layer)
            conv_layer = layers.PReLU()(conv_layer)
            conv_layer = layers.Conv2D(64, (3, 3), use_bias=False, padding='same', kernel_initializer='he_normal')(conv_layer)
            conv_layer = layers.BatchNormalization()(conv_layer)
            conv_layer = layers.PReLU()(conv_layer)
            conv_layer = layers.Conv2D(256, (1, 1), use_bias=False, padding='same', kernel_initializer='he_normal')(conv_layer)
            layer = layers.Add()([layer, conv_layer])
        layer = layers.BatchNormalization()(layer)
        layer = layers.PReLU()(layer)
        layer = layers.Conv2D(1, (1, 1), padding='same', kernel_initializer='he_normal')(layer)
        layer = layers.Flatten()(layer)
        outputs = layers.Activation('softmax')(layer)
        model = models.Model(inputs=inputs, outputs=outputs)
        model.compile(optimizer=optimizers.Adam(lr=3e-4), loss='categorical_crossentropy')
        return model

    def train(self, batch_data):
        """One policy-gradient-style update: the target is zero everywhere
        except the taken action, which is set to the observed reward."""
        if len(batch_data) == 0:
            return
        state = np.array([data['state'] for data in batch_data])
        action = np.array([data['action'] for data in batch_data])
        reward = np.array([data['reward'] for data in batch_data])
        target = np.zeros((len(batch_data), self.board_size[0] * self.board_size[1]))
        for idx in range(len(batch_data)):
            target[idx][action[idx][0] * self.board_size[1] + action[idx][1]] = reward[idx]
        self.model.fit(state, target, epochs=1, verbose=0)

    def save(self, path, weight_only=False):
        """Save the model (or only its weights) under ``path/model``."""
        model_path = os.path.join(path, 'model')
        if not os.path.exists(path):
            os.mkdir(path)
        if weight_only:
            self.model.save_weights(model_path)
        else:
            self.model.save(model_path)

    def load(self, path, weight_only=False):
        """Load the model (or only weights) from ``path/model`` and refresh
        the captured session/graph for threaded prediction."""
        model_path = os.path.join(path, 'model')
        if weight_only:
            self.model.load_weights(model_path)
        else:
            self.model = models.load_model(model_path)
        self.model._make_predict_function()
        self.session = K.get_session()
        self.graph = tf.get_default_graph()

    def get_action(self, environment, best):
        """Choose a move: the centre on turn 1, else a VCDT forced move if
        one exists, else the network's best (or sampled) move."""
        action = None
        if environment.turn == 1:
            action = (self.board_size[0] // 2, self.board_size[1] // 2)
        if action is None:
            action = self.search_vcdt_action(environment)
        if action is None:
            action = self.search_action(environment, 1, best)[0]
        return action

    def search_action(self, environment, count, best):
        """Return up to *count* legal actions ranked by the policy network,
        restricted by the attack/defense/basic mask.

        best=True takes the highest-probability moves; otherwise moves are
        sampled (without replacement) from the policy distribution.
        """
        player = environment.get_player()
        opponent = environment.get_opponent()
        player_threat = environment.get_threat_count(player)
        opponent_threat = environment.get_threat_count(opponent)
        mask = None
        # Priority: winning moves, then forced defenses, then any bounded cell.
        if mask is None and player_threat > 0:
            mask = self.get_attack_mask(environment)
        if mask is None and opponent_threat > 0:
            mask = self.get_defense_mask(environment)
        if mask is None:
            mask = self.get_basic_mask(environment)
        state = environment.get_state()
        state = environment.pre_processing(state, player, environment.get_stone())
        state = np.array([state])
        # Predict inside the captured session/graph so this works off-thread.
        with self.session.as_default():
            with self.graph.as_default():
                policy = self.model.predict(state)[0]
        action_list = []
        if best:
            choice_list = np.argsort(policy)
            for choice in reversed(choice_list):
                action = (choice // self.board_size[1], choice % self.board_size[1])
                if mask[action[0]][action[1]] > 0:
                    action_list.append(action)
                    if len(action_list) >= count:
                        return action_list
        else:
            # Sample the nonzero-probability moves first, then fall back to a
            # random permutation of the zero-probability ones.
            nonzero = np.argwhere(policy > 1e-15).flatten()
            zero = np.argwhere(policy <= 1e-15).flatten()
            nonzero_policy = policy[nonzero]
            choice_list = np.random.choice(nonzero, len(nonzero), p=nonzero_policy, replace=False)
            for choice in choice_list:
                action = (choice // self.board_size[1], choice % self.board_size[1])
                if mask[action[0]][action[1]] > 0:
                    action_list.append(action)
                    if len(action_list) >= count:
                        return action_list
            choice_list = np.random.permutation(zero)
            for choice in choice_list:
                action = (choice // self.board_size[1], choice % self.board_size[1])
                if mask[action[0]][action[1]] > 0:
                    action_list.append(action)
                    if len(action_list) >= count:
                        return action_list
        return action_list

    def search_vcdt_action(self, environment):
        """Breadth-first threat-space search over alternating attack/defense
        expansions. Returns the first move of a winning line, or None."""
        player = environment.get_player()
        search_list = queue.Queue()
        next_search_list = queue.Queue()
        cache = set()
        lock = Lock()
        thread_list = []
        thread_limit = 8
        search_list.put((environment, []))
        while not search_list.empty():
            while not search_list.empty():
                current_environment, current_path = search_list.get()
                current_player = current_environment.get_player()
                if current_environment.check_win():
                    # Forced win found: play the first move of the line.
                    return (current_path[0][0], current_path[0][1])
                args = (current_environment, current_path, next_search_list, cache, lock)
                if current_player == player:
                    thread = Thread(target=self.search_vcdt_attack, args=args)
                else:
                    thread = Thread(target=self.search_vcdt_defense, args=args)
                thread.start()
                thread_list.append(thread)
                # Throttle: wait until a worker slot frees up.
                # Fixed: camelCase Thread.isAlive() was removed in Python 3.9.
                while len(thread_list) >= thread_limit:
                    thread_list = [t for t in thread_list if t.is_alive()]
            for thread in thread_list:
                thread.join()
            while not next_search_list.empty():
                search_list.put(next_search_list.get())
        del(cache)
        return None

    def search_vcdt_attack(self, environment, path, search_list, cache, lock):
        """Expand attacking candidate moves; keep only lines that win or
        keep forcing (double threats when only one stone remains)."""
        player = environment.get_player()
        stone = environment.get_stone()
        action_list = self.search_action(environment, 3, True)
        with lock:
            for action in action_list:
                next_environment = copy.deepcopy(environment)
                next_environment.step(action)
                next_threat = next_environment.get_threat_count(player)
                next_path = copy.deepcopy(path)
                next_path.append(action + (player,))
                # Canonical fingerprint of the (unordered) move set, for dedup;
                # computed once instead of twice as in the original.
                fingerprint = pickle.dumps(sorted(next_path))
                if fingerprint in cache:
                    continue
                if not next_environment.check_win() and next_environment.check_done():
                    continue
                if next_environment.check_win() or stone == 2 or (stone == 1 and next_threat >= 2):
                    search_list.put((next_environment, next_path))
                cache.add(fingerprint)

    def search_vcdt_defense(self, environment, path, search_list, cache, lock):
        """Expand the single best defensive reply at an opponent-to-move node;
        stop the line when the defense kills all threats."""
        player = environment.get_player()
        opponent = environment.get_opponent()
        stone = environment.get_stone()
        action = self.search_action(environment, 1, True)[0]
        with lock:
            next_environment = copy.deepcopy(environment)
            next_environment.step(action)
            next_threat = next_environment.get_threat_count(opponent)
            next_path = copy.deepcopy(path)
            next_path.append(action + (player,))
            fingerprint = pickle.dumps(sorted(next_path))
            if fingerprint in cache:
                return
            if not (next_environment.check_done() or (stone == 2 and next_threat <= 0)):
                # NOTE(review): the attack branch enqueues `next_path`, while
                # this enqueues the pre-move `path` (so defense moves never
                # appear in the returned line) — confirm this asymmetry is
                # intentional before changing it.
                search_list.put((next_environment, path))
            cache.add(fingerprint)

    def get_attack_mask(self, environment):
        """Mask selecting one winning (or win-forcing, via recursion on the
        second stone) move for the current player, or None if none exists."""
        player = environment.get_player()
        state = environment.get_state()
        mask = np.zeros(environment.board_size)
        threat_map = environment.get_threat_map(player)
        for y in range(environment.board_size[0]):
            for x in range(environment.board_size[1]):
                if threat_map[y][x] > 0 and state[y][x] == environment.empty:
                    action = (y, x)
                    next_environment = copy.deepcopy(environment)
                    next_environment.step(action)
                    if next_environment.check_win():
                        mask[action[0]][action[1]] = 1
                        return mask
                    if next_environment.check_done():
                        continue
                    if environment.get_stone() == 1:
                        continue
                    # Two stones available: check if the second stone can win.
                    if self.get_attack_mask(next_environment) is not None:
                        mask[action[0]][action[1]] = 1
                        return mask
        return None

    def get_defense_mask(self, environment):
        """Mask of moves that reduce the opponent's threat count (or end the
        game); an all-zero mask if no such move exists."""
        opponent = environment.get_opponent()
        state = environment.get_state()
        mask = np.zeros(environment.board_size)
        threat = environment.get_threat_count(opponent)
        threat_map = environment.get_threat_map(opponent)
        for y in range(environment.board_size[0]):
            for x in range(environment.board_size[1]):
                if threat_map[y][x] > 0 and state[y][x] == environment.empty:
                    action = (y, x)
                    next_environment = copy.deepcopy(environment)
                    next_environment.step(action)
                    next_threat = next_environment.get_threat_count(opponent)
                    if next_environment.check_done() or next_threat - threat < 0:
                        mask[action[0]][action[1]] = 1
        return mask

    def get_basic_mask(self, environment):
        """Mask of all empty cells inside the bound map (cells near stones)."""
        state = environment.get_state()
        mask = np.zeros(environment.board_size)
        bound_map = environment.get_bound_map()
        for y in range(environment.board_size[0]):
            for x in range(environment.board_size[1]):
                if bound_map[y][x] > 0 and state[y][x] == environment.empty:
                    action = (y, x)
                    mask[action[0]][action[1]] = 1
        return mask
|
test_tracer.py | import time
import mock
import opentracing
from opentracing import Format
from opentracing import InvalidCarrierException
from opentracing import SpanContextCorruptedException
from opentracing import UnsupportedFormatException
from opentracing import child_of
import pytest
import ddtrace
from ddtrace import Tracer as DDTracer
from ddtrace.constants import AUTO_KEEP
from ddtrace.opentracer import Tracer
from ddtrace.opentracer import set_global_tracer
from ddtrace.opentracer.span_context import SpanContext
from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID
from ddtrace.settings import ConfigException
class TestTracerConfig(object):
    """Configuration handling of the opentracer Tracer."""

    def test_config(self):
        """Test the configuration of the tracer"""
        config = {"enabled": True}
        tracer = Tracer(service_name="myservice", config=config)
        assert tracer._service_name == "myservice"
        assert tracer._dd_tracer.enabled is True

    def test_no_service_name(self):
        """A service_name should be generated if one is not provided."""
        tracer = Tracer()
        # Falls back to the entry-point name (pytest when run under pytest).
        assert tracer._service_name == "pytest"

    def test_multiple_tracer_configs(self):
        """Ensure that a tracer config is a copy of the passed config."""
        config = {"enabled": True}
        tracer1 = Tracer(service_name="serv1", config=config)
        assert tracer1._service_name == "serv1"
        config["enabled"] = False
        tracer2 = Tracer(service_name="serv2", config=config)
        # Ensure tracer1's config was not mutated
        assert tracer1._service_name == "serv1"
        assert tracer2._service_name == "serv2"

    def test_invalid_config_key(self):
        """A config with an invalid key should raise a ConfigException."""
        config = {"enabeld": False}
        # No debug flag should not raise an error
        tracer = Tracer(service_name="mysvc", config=config)
        # With debug flag should raise an error
        config["debug"] = True
        with pytest.raises(ConfigException) as ce_info:
            tracer = Tracer(config=config)
        assert "enabeld" in str(ce_info)
        assert tracer is not None
        # Test with multiple incorrect keys
        config["setttings"] = {}
        with pytest.raises(ConfigException) as ce_info:
            tracer = Tracer(service_name="mysvc", config=config)
        # Fixed: `["enabeld", "setttings"] in str(...)` raises TypeError
        # (a list is not a valid left operand of `in` for a str); check
        # each bad key individually instead.
        assert all(key in str(ce_info) for key in ("enabeld", "setttings"))
        assert tracer is not None

    def test_ddtrace_fallback_config(self, monkeypatch):
        """Ensure datadog configuration is used by default."""
        monkeypatch.setenv("DD_TRACE_ENABLED", "false")
        tracer = Tracer(dd_tracer=DDTracer())
        assert tracer._dd_tracer.enabled is False

    def test_global_tags(self):
        """Global tags should be passed from the opentracer to the tracer."""
        config = {
            "global_tags": {
                "tag1": "value1",
                "tag2": 2,
            },
        }
        tracer = Tracer(service_name="mysvc", config=config)
        with tracer.start_span("myop") as span:
            # global tags should be attached to generated all datadog spans
            assert span._dd_span.get_tag("tag1") == "value1"
            assert span._dd_span.get_metric("tag2") == 2
            with tracer.start_span("myop2") as span2:
                assert span2._dd_span.get_tag("tag1") == "value1"
                assert span2._dd_span.get_metric("tag2") == 2
class TestTracer(object):
    """Span creation/finishing tests for the opentracer.

    NOTE: span/trace/parent ids are compared with ``==`` rather than ``is``.
    The original used identity comparison on ints, which only passes while
    both sides happen to alias the very same int object — an implementation
    detail, not a guarantee.
    """

    def test_start_span(self, ot_tracer, test_spans):
        """Start and finish a span."""
        with ot_tracer.start_span("myop") as span:
            pass
        # span should be finished when the context manager exits
        assert span.finished
        spans = test_spans.get_spans()
        assert len(spans) == 1

    def test_start_span_references(self, ot_tracer, test_spans):
        """Start a span using references."""
        with ot_tracer.start_span("one", references=[child_of()]):
            pass
        spans = test_spans.pop()
        assert spans[0].parent_id is None

        root = ot_tracer.start_active_span("root")
        # create a child using a parent reference that is not the context parent
        with ot_tracer.start_active_span("one"):
            with ot_tracer.start_active_span("two", references=[child_of(root.span)]):
                pass
        root.close()
        spans = test_spans.pop()
        # both spans must be parented to the referenced root
        assert spans[1].parent_id == spans[0].span_id
        assert spans[2].parent_id == spans[0].span_id

    def test_start_span_custom_start_time(self, ot_tracer):
        """Start a span with a custom start time."""
        t = 100
        # named so the mock handle does not shadow the ``time`` module
        with mock.patch("ddtrace.span.time_ns") as time_ns_mock:
            time_ns_mock.return_value = 102 * 1e9
            with ot_tracer.start_span("myop", start_time=t) as span:
                pass
        assert span._dd_span.start == t
        # started at t=100s, "now" is mocked to 102s -> 2s duration
        assert span._dd_span.duration == 2

    def test_start_span_with_spancontext(self, ot_tracer, test_spans):
        """Start and finish a span using a span context as the child_of
        reference.
        """
        with ot_tracer.start_span("myop") as span:
            with ot_tracer.start_span("myop", child_of=span.context) as span2:
                pass
        # span should be finished when the context manager exits
        assert span.finished
        assert span2.finished
        spans = test_spans.pop()
        assert len(spans) == 2
        # ensure proper parenting
        assert spans[1].parent_id == spans[0].span_id

    def test_start_span_with_tags(self, ot_tracer):
        """Create a span with initial tags."""
        tags = {"key": "value", "key2": "value2"}
        with ot_tracer.start_span("myop", tags=tags) as span:
            pass
        assert span._dd_span.get_tag("key") == "value"
        assert span._dd_span.get_tag("key2") == "value2"

    def test_start_span_with_resource_name_tag(self, ot_tracer):
        """Create a span with the tag to set the resource name"""
        tags = {"resource.name": "value", "key2": "value2"}
        with ot_tracer.start_span("myop", tags=tags) as span:
            pass
        # Span resource name should be set to tag value, and should not get set as
        # a tag on the underlying span.
        assert span._dd_span.resource == "value"
        assert span._dd_span.get_tag("resource.name") is None
        # Other tags are set as normal
        assert span._dd_span.get_tag("key2") == "value2"

    def test_start_active_span_multi_child(self, ot_tracer, test_spans):
        """Start and finish multiple child spans.

        This should ensure that child spans can be created 2 levels deep.
        """
        with ot_tracer.start_active_span("myfirstop") as scope1:
            time.sleep(0.009)
            with ot_tracer.start_active_span("mysecondop") as scope2:
                time.sleep(0.007)
                with ot_tracer.start_active_span("mythirdop") as scope3:
                    time.sleep(0.005)
        # spans should be finished when the context manager exits
        assert scope1.span.finished
        assert scope2.span.finished
        assert scope3.span.finished

        spans = test_spans.pop()
        # check spans are captured in the trace (object identity is intended)
        assert scope1.span._dd_span is spans[0]
        assert scope2.span._dd_span is spans[1]
        assert scope3.span._dd_span is spans[2]
        # ensure proper parenting
        assert spans[1].parent_id == spans[0].span_id
        assert spans[2].parent_id == spans[1].span_id
        # sanity check a lower bound on the durations
        assert spans[0].duration >= 0.009 + 0.007 + 0.005
        assert spans[1].duration >= 0.007 + 0.005
        assert spans[2].duration >= 0.005

    def test_start_active_span_multi_child_siblings(self, ot_tracer, test_spans):
        """Start and finish multiple span at the same level.

        This should test to ensure a parent can have multiple child spans at the
        same level.
        """
        with ot_tracer.start_active_span("myfirstop") as scope1:
            time.sleep(0.009)
            with ot_tracer.start_active_span("mysecondop") as scope2:
                time.sleep(0.007)
            with ot_tracer.start_active_span("mythirdop") as scope3:
                time.sleep(0.005)
        # spans should be finished when the context manager exits
        assert scope1.span.finished
        assert scope2.span.finished
        assert scope3.span.finished

        spans = test_spans.pop()
        # check spans are captured in the trace
        assert scope1.span._dd_span is spans[0]
        assert scope2.span._dd_span is spans[1]
        assert scope3.span._dd_span is spans[2]
        # ensure proper parenting: both children hang off the first span
        assert spans[1].parent_id == spans[0].span_id
        assert spans[2].parent_id == spans[0].span_id
        # sanity check a lower bound on the durations
        assert spans[0].duration >= 0.009 + 0.007 + 0.005
        assert spans[1].duration >= 0.007
        assert spans[2].duration >= 0.005

    def test_start_span_manual_child_of(self, ot_tracer, test_spans):
        """Start spans with explicit ``child_of`` references.

        Every child references the root directly, so all three must be
        siblings under it regardless of their lexical nesting.
        """
        root = ot_tracer.start_span("zero")
        with ot_tracer.start_span("one", child_of=root):
            with ot_tracer.start_span("two", child_of=root):
                with ot_tracer.start_span("three", child_of=root):
                    pass
        root.finish()

        spans = test_spans.pop()
        assert spans[0].parent_id is None
        # ensure each child span is a child of root
        assert spans[1].parent_id == root._dd_span.span_id
        assert spans[2].parent_id == root._dd_span.span_id
        assert spans[3].parent_id == root._dd_span.span_id
        assert spans[0].trace_id == spans[1].trace_id and spans[1].trace_id == spans[2].trace_id

    def test_start_span_no_active_span(self, ot_tracer, test_spans):
        """Start spans without using a scope manager.

        Spans should be created without parents since there will be no call
        for the active span.
        """
        with ot_tracer.start_span("one", ignore_active_span=True):
            with ot_tracer.start_span("two", ignore_active_span=True):
                pass
            with ot_tracer.start_span("three", ignore_active_span=True):
                pass

        spans = test_spans.pop()
        # ensure each span does not have a parent
        assert spans[0].parent_id is None
        assert spans[1].parent_id is None
        assert spans[2].parent_id is None
        # and that each span is a new trace
        assert (
            spans[0].trace_id != spans[1].trace_id
            and spans[1].trace_id != spans[2].trace_id
            and spans[0].trace_id != spans[2].trace_id
        )

    def test_start_active_span_child_finish_after_parent(self, ot_tracer, test_spans):
        """Start a child span and finish it after its parent."""
        span1 = ot_tracer.start_active_span("one").span
        span2 = ot_tracer.start_active_span("two").span
        span1.finish()
        time.sleep(0.005)
        span2.finish()

        spans = test_spans.pop()
        assert len(spans) == 2
        assert spans[0].parent_id is None
        assert spans[1].parent_id == span1._dd_span.span_id
        # the child outlived its parent by at least the sleep above
        assert spans[1].duration > spans[0].duration

    def test_start_span_multi_intertwined(self, ot_tracer, test_spans):
        """Start multiple spans at the top level intertwined.

        Alternate calling between two traces.
        """
        import threading

        # synchronize threads with a threading event object
        event = threading.Event()

        def trace_one():
            _id = 11
            with ot_tracer.start_active_span(str(_id)):
                _id += 1
                with ot_tracer.start_active_span(str(_id)):
                    _id += 1
                    with ot_tracer.start_active_span(str(_id)):
                        pass
            event.set()

        def trace_two():
            _id = 21
            # block until trace_one has finished all of its spans
            event.wait()
            with ot_tracer.start_active_span(str(_id)):
                _id += 1
                with ot_tracer.start_active_span(str(_id)):
                    _id += 1
                with ot_tracer.start_active_span(str(_id)):
                    pass

        # the ordering should be
        # t1.span1/t2.span1, t2.span2, t1.span2, t1.span3, t2.span3
        t1 = threading.Thread(target=trace_one)
        t2 = threading.Thread(target=trace_two)
        t1.start()
        t2.start()
        # wait for threads to finish
        t1.join()
        t2.join()

        spans = test_spans.pop()
        # trace_one will finish before trace_two so its spans should be written
        # before the spans from trace_two, let's confirm this
        assert spans[0].name == "11"
        assert spans[1].name == "12"
        assert spans[2].name == "13"
        assert spans[3].name == "21"
        assert spans[4].name == "22"
        assert spans[5].name == "23"

        # next let's ensure that each span has the correct parent:
        # trace_one
        assert spans[0].parent_id is None
        assert spans[1].parent_id == spans[0].span_id
        assert spans[2].parent_id == spans[1].span_id
        # trace_two
        assert spans[3].parent_id is None
        assert spans[4].parent_id == spans[3].span_id
        assert spans[5].parent_id == spans[3].span_id

        # finally we should ensure that the trace_ids are reasonable
        # trace_one
        assert spans[0].trace_id == spans[1].trace_id and spans[1].trace_id == spans[2].trace_id
        # traces should be independent
        assert spans[2].trace_id != spans[3].trace_id
        # trace_two
        assert spans[3].trace_id == spans[4].trace_id and spans[4].trace_id == spans[5].trace_id

    def test_start_active_span(self, ot_tracer, test_spans):
        """The context manager must start and finish the scope's span."""
        with ot_tracer.start_active_span("one") as scope:
            pass

        assert scope.span._dd_span.name == "one"
        assert scope.span.finished
        spans = test_spans.pop()
        assert spans

    def test_start_active_span_finish_on_close(self, ot_tracer, test_spans):
        """With ``finish_on_close=False`` the span must outlive its scope."""
        with ot_tracer.start_active_span("one", finish_on_close=False) as scope:
            pass

        assert scope.span._dd_span.name == "one"
        assert not scope.span.finished
        spans = test_spans.pop()
        assert not spans
        # finish manually so the span does not leak into other tests
        scope.span.finish()

    def test_start_active_span_nested(self, ot_tracer):
        """Test the active span of multiple nested calls of start_active_span."""
        with ot_tracer.start_active_span("one") as outer_scope:
            assert ot_tracer.active_span == outer_scope.span
            with ot_tracer.start_active_span("two") as inner_scope:
                assert ot_tracer.active_span == inner_scope.span
                with ot_tracer.start_active_span("three") as innermost_scope:
                    assert ot_tracer.active_span == innermost_scope.span
            with ot_tracer.start_active_span("two") as inner_scope:
                assert ot_tracer.active_span == inner_scope.span
            assert ot_tracer.active_span == outer_scope.span
        assert ot_tracer.active_span is None

    def test_start_active_span_trace(self, ot_tracer, test_spans):
        """Test the parenting produced by nested calls of start_active_span."""
        with ot_tracer.start_active_span("one") as outer_scope:
            outer_scope.span.set_tag("outer", 2)
            with ot_tracer.start_active_span("two") as inner_scope:
                inner_scope.span.set_tag("inner", 3)
            with ot_tracer.start_active_span("two") as inner_scope:
                inner_scope.span.set_tag("inner", 3)
                with ot_tracer.start_active_span("three") as innermost_scope:
                    innermost_scope.span.set_tag("innerest", 4)

        spans = test_spans.pop()
        assert spans[0].parent_id is None
        assert spans[1].parent_id == spans[0].span_id
        assert spans[2].parent_id == spans[0].span_id
        assert spans[3].parent_id == spans[2].span_id

    def test_interleave(self, dd_tracer, ot_tracer, test_spans):
        """Opentracer and datadog spans should interleave into one trace."""
        with ot_tracer.start_active_span("ot_root_1", ignore_active_span=True):
            with dd_tracer.trace("dd_child"):
                with ot_tracer.start_active_span("ot_child_1"):
                    pass
            with ot_tracer.start_active_span("ot_child_2"):
                pass

        spans = test_spans.pop()
        assert len(spans) == 4
        assert spans[0].name == "ot_root_1" and spans[0].parent_id is None
        assert spans[1].name == "dd_child" and spans[1].parent_id == spans[0].span_id
        assert spans[2].name == "ot_child_1" and spans[2].parent_id == spans[1].span_id
        assert spans[3].name == "ot_child_2" and spans[3].parent_id == spans[0].span_id

    def test_active_span(self, ot_tracer, test_spans):
        """The opentracer should expose the underlying datadog active span."""
        with ot_tracer._dd_tracer.trace("dd") as span:
            assert ot_tracer.active_span is not None
            # object identity is intended: same underlying datadog span
            assert ot_tracer.active_span._dd_span is span
@pytest.fixture
def nop_span_ctx():
    """Return a minimal span context (AUTO_KEEP sampling, no ids) for the propagation tests."""
    return SpanContext(sampling_priority=AUTO_KEEP)
class TestTracerSpanContextPropagation(object):
    """Test the injection and extraction of a span context from a tracer."""

    def test_invalid_format(self, ot_tracer, nop_span_ctx):
        """An invalid format should raise an UnsupportedFormatException."""
        # test inject
        with pytest.raises(UnsupportedFormatException):
            ot_tracer.inject(nop_span_ctx, None, {})
        # test extract
        with pytest.raises(UnsupportedFormatException):
            ot_tracer.extract(None, {})

    def test_inject_invalid_carrier(self, ot_tracer, nop_span_ctx):
        """Only dicts should be supported as a carrier."""
        with pytest.raises(InvalidCarrierException):
            ot_tracer.inject(nop_span_ctx, Format.HTTP_HEADERS, None)

    def test_extract_invalid_carrier(self, ot_tracer):
        """Only dicts should be supported as a carrier."""
        with pytest.raises(InvalidCarrierException):
            ot_tracer.extract(Format.HTTP_HEADERS, None)

    def test_http_headers_base(self, ot_tracer):
        """extract should undo inject for http headers."""
        span_ctx = SpanContext(trace_id=123, span_id=456)
        carrier = {}

        ot_tracer.inject(span_ctx, Format.HTTP_HEADERS, carrier)
        # inject must have written propagation headers into the carrier
        assert len(carrier.keys()) > 0

        ext_span_ctx = ot_tracer.extract(Format.HTTP_HEADERS, carrier)
        assert ext_span_ctx._dd_context.trace_id == 123
        assert ext_span_ctx._dd_context.span_id == 456

    def test_http_headers_baggage(self, ot_tracer):
        """extract should undo inject for http headers."""
        span_ctx = SpanContext(trace_id=123, span_id=456, baggage={"test": 4, "test2": "string"})
        carrier = {}

        ot_tracer.inject(span_ctx, Format.HTTP_HEADERS, carrier)
        assert len(carrier.keys()) > 0

        ext_span_ctx = ot_tracer.extract(Format.HTTP_HEADERS, carrier)
        assert ext_span_ctx._dd_context.trace_id == 123
        assert ext_span_ctx._dd_context.span_id == 456
        # baggage must round-trip through the carrier as well
        assert ext_span_ctx.baggage == span_ctx.baggage

    def test_empty_propagated_context(self, ot_tracer):
        """An empty propagated context should raise a
        SpanContextCorruptedException when extracted.
        """
        carrier = {}
        with pytest.raises(SpanContextCorruptedException):
            ot_tracer.extract(Format.HTTP_HEADERS, carrier)

    def test_text(self, ot_tracer):
        """extract should undo inject for http headers"""
        span_ctx = SpanContext(trace_id=123, span_id=456, baggage={"test": 4, "test2": "string"})
        carrier = {}

        ot_tracer.inject(span_ctx, Format.TEXT_MAP, carrier)
        assert len(carrier.keys()) > 0

        ext_span_ctx = ot_tracer.extract(Format.TEXT_MAP, carrier)
        assert ext_span_ctx._dd_context.trace_id == 123
        assert ext_span_ctx._dd_context.span_id == 456
        assert ext_span_ctx.baggage == span_ctx.baggage

    def test_corrupted_propagated_context(self, ot_tracer):
        """Corrupted context should raise a SpanContextCorruptedException."""
        span_ctx = SpanContext(trace_id=123, span_id=456, baggage={"test": 4, "test2": "string"})
        carrier = {}

        ot_tracer.inject(span_ctx, Format.TEXT_MAP, carrier)
        assert len(carrier.keys()) > 0

        # manually alter a key in the carrier baggage
        # (drop the trace-id header and re-add it under a mangled name)
        del carrier[HTTP_HEADER_TRACE_ID]
        corrupted_key = HTTP_HEADER_TRACE_ID[2:]
        carrier[corrupted_key] = 123

        with pytest.raises(SpanContextCorruptedException):
            ot_tracer.extract(Format.TEXT_MAP, carrier)

    def test_immutable_span_context(self, ot_tracer):
        """Span contexts should be immutable."""
        with ot_tracer.start_span("root") as root:
            ctx_before = root.context
            root.set_baggage_item("test", 2)
            # setting baggage must produce a NEW context object
            assert ctx_before is not root.context

            with ot_tracer.start_span("child") as level1:
                with ot_tracer.start_span("child") as level2:
                    pass
            # each span carries its own distinct context object
            assert root.context is not level1.context
            assert level2.context is not level1.context
            assert level2.context is not root.context

    def test_inherited_baggage(self, ot_tracer):
        """Baggage should be inherited by child spans."""
        with ot_tracer.start_active_span("root") as root:
            # this should be passed down to the child
            root.span.set_baggage_item("root", 1)
            root.span.set_baggage_item("root2", 1)
            with ot_tracer.start_active_span("child") as level1:
                level1.span.set_baggage_item("level1", 1)
                with ot_tracer.start_active_span("child") as level2:
                    level2.span.set_baggage_item("level2", 1)
        # ensure immutability
        assert level1.span.context is not root.span.context
        assert level2.span.context is not level1.span.context
        # level1 should have inherited the baggage of root
        assert level1.span.get_baggage_item("root")
        assert level1.span.get_baggage_item("root2")
        # level2 should have inherited the baggage of both level1 and level2
        assert level2.span.get_baggage_item("root")
        assert level2.span.get_baggage_item("root2")
        assert level2.span.get_baggage_item("level1")
        assert level2.span.get_baggage_item("level2")
class TestTracerCompatibility(object):
    """Ensure that our opentracer produces results in the underlying datadog tracer."""

    def test_required_dd_fields(self):
        """The fields required for successful tracing must be present on the
        underlying datadog span.
        """
        # a service name is required
        ot = Tracer("service")
        with ot.start_span("my_span") as current:
            assert current._dd_span.service
def test_set_global_tracer():
    """Sanity check for set_global_tracer"""
    tracer = Tracer("service")
    set_global_tracer(tracer)
    # both the opentracing and ddtrace globals must now point at our tracer
    assert opentracing.tracer is tracer
    assert ddtrace.tracer is tracer._dd_tracer
|
test_s3.py | import boto3
import botocore.session
from botocore.exceptions import ClientError
from botocore.exceptions import ParamValidationError
from nose.tools import eq_ as eq
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
import isodate
import email.utils
import datetime
import threading
import re
import pytz
from cStringIO import StringIO
from ordereddict import OrderedDict
import requests
import json
import base64
import hmac
import sha
import xml.etree.ElementTree as ET
import time
import operator
import nose
import os
import string
import random
import socket
import ssl
from collections import namedtuple
from email.header import decode_header
from .utils import assert_raises
from .utils import generate_random
from .utils import _get_status_and_error_code
from .utils import _get_status
from .policy import Policy, Statement, make_json_policy
from . import (
get_client,
get_prefix,
get_unauthenticated_client,
get_bad_auth_client,
get_v2_client,
get_new_bucket,
get_new_bucket_name,
get_new_bucket_resource,
get_config_is_secure,
get_config_host,
get_config_port,
get_config_endpoint,
get_main_aws_access_key,
get_main_aws_secret_key,
get_main_display_name,
get_main_user_id,
get_main_email,
get_main_api_name,
get_alt_aws_access_key,
get_alt_aws_secret_key,
get_alt_display_name,
get_alt_user_id,
get_alt_email,
get_alt_client,
get_tenant_client,
get_buckets_list,
get_objects_list,
get_main_kms_keyid,
get_secondary_kms_keyid,
nuke_prefixed_buckets,
)
def _bucket_is_empty(bucket):
is_empty = True
for obj in bucket.objects.all():
is_empty = False
break
return is_empty
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='empty buckets return no contents')
def test_bucket_list_empty():
    # a freshly created bucket must contain no objects
    fresh_bucket = get_new_bucket_resource()
    eq(_bucket_is_empty(fresh_bucket), True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='distinct buckets have different contents')
def test_bucket_list_distinct():
    # writing an object into one bucket must not affect another bucket
    bucket1 = get_new_bucket_resource()
    bucket2 = get_new_bucket_resource()
    # (dropped the unused ``obj = `` assignment from the original)
    bucket1.put_object(Body='str', Key='asdf')
    is_empty = _bucket_is_empty(bucket2)
    eq(is_empty, True)
def _create_objects(bucket=None, bucket_name=None, keys=[]):
"""
Populate a (specified or new) bucket with objects with
specified names (and contents identical to their names).
"""
if bucket_name is None:
bucket_name = get_new_bucket_name()
if bucket is None:
bucket = get_new_bucket_resource(name=bucket_name)
for key in keys:
obj = bucket.put_object(Body=key, Key=key)
return bucket_name
def _get_keys(response):
"""
return lists of strings that are the keys from a client.list_objects() response
"""
keys = []
if 'Contents' in response:
objects_list = response['Contents']
keys = [obj['Key'] for obj in objects_list]
return keys
def _get_prefixes(response):
"""
return lists of strings that are prefixes from a client.list_objects() response
"""
prefixes = []
if 'CommonPrefixes' in response:
prefix_list = response['CommonPrefixes']
prefixes = [prefix['Prefix'] for prefix in prefix_list]
return prefixes
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/max_keys=2, no marker')
def test_bucket_list_many():
    name = _create_objects(keys=['foo', 'bar', 'baz'])
    client = get_client()

    # first page: the two lexicographically smallest keys, listing truncated
    resp = client.list_objects(Bucket=name, MaxKeys=2)
    page = _get_keys(resp)
    eq(len(page), 2)
    eq(page, ['bar', 'baz'])
    eq(resp['IsTruncated'], True)

    # second page: resume after 'baz'; only 'foo' remains
    resp = client.list_objects(Bucket=name, Marker='baz', MaxKeys=2)
    page = _get_keys(resp)
    eq(len(page), 1)
    eq(resp['IsTruncated'], False)
    eq(page, ['foo'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/max_keys=2, no marker')
@attr('list-objects-v2')
def test_bucket_listv2_many():
    name = _create_objects(keys=['foo', 'bar', 'baz'])
    client = get_client()

    # first page: the two lexicographically smallest keys, listing truncated
    resp = client.list_objects_v2(Bucket=name, MaxKeys=2)
    page = _get_keys(resp)
    eq(len(page), 2)
    eq(page, ['bar', 'baz'])
    eq(resp['IsTruncated'], True)

    # second page: resume after 'baz'; only 'foo' remains
    resp = client.list_objects_v2(Bucket=name, StartAfter='baz', MaxKeys=2)
    page = _get_keys(resp)
    eq(len(page), 1)
    eq(resp['IsTruncated'], False)
    eq(page, ['foo'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='keycount in listobjectsv2')
@attr('list-objects-v2')
def test_basic_key_count():
    # ListObjectsV2 must report the number of returned keys via KeyCount
    client = get_client()
    # (removed the unused ``bucket_names = []`` accumulator from the original)
    bucket_name = get_new_bucket_name()
    client.create_bucket(Bucket=bucket_name)
    for j in range(5):
        client.put_object(Bucket=bucket_name, Key=str(j))
    response1 = client.list_objects_v2(Bucket=bucket_name)
    eq(response1['KeyCount'], 5)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes in multi-component object names')
def test_bucket_list_delimiter_basic():
    name = _create_objects(keys=['foo/bar', 'foo/bar/xyzzy', 'quux/thud', 'asdf'])
    resp = get_client().list_objects(Bucket=name, Delimiter='/')
    eq(resp['Delimiter'], '/')

    # only 'asdf' has no '/', so it is the sole complete key
    eq(_get_keys(resp), ['asdf'])

    # everything else rolls up into its first path component
    prefixes = _get_prefixes(resp)
    eq(len(prefixes), 2)
    eq(prefixes, ['foo/', 'quux/'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes in multi-component object names')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_basic():
    name = _create_objects(keys=['foo/bar', 'foo/bar/xyzzy', 'quux/thud', 'asdf'])
    resp = get_client().list_objects_v2(Bucket=name, Delimiter='/')
    eq(resp['Delimiter'], '/')

    # only 'asdf' has no '/', so it is the sole complete key
    eq(_get_keys(resp), ['asdf'])

    # everything else rolls up into its first path component
    prefixes = _get_prefixes(resp)
    eq(len(prefixes), 2)
    eq(prefixes, ['foo/', 'quux/'])
def validate_bucket_list(bucket_name, prefix, delimiter, marker, max_keys,
                         is_truncated, check_objs, check_prefixes, next_marker):
    """Issue one ListObjects call and validate every interesting field of the page."""
    response = get_client().list_objects(
        Bucket=bucket_name, Delimiter=delimiter, Marker=marker,
        MaxKeys=max_keys, Prefix=prefix)
    eq(response['IsTruncated'], is_truncated)
    # a final page carries no NextMarker
    eq(response.get('NextMarker'), next_marker)

    keys = _get_keys(response)
    prefixes = _get_prefixes(response)
    eq(len(keys), len(check_objs))
    eq(len(prefixes), len(check_prefixes))
    eq(keys, check_objs)
    eq(prefixes, check_prefixes)

    return response.get('NextMarker')
def validate_bucket_listv2(bucket_name, prefix, delimiter, continuation_token, max_keys,
                           is_truncated, check_objs, check_prefixes, last=False):
    """Issue one ListObjectsV2 call and validate every interesting field of the page."""
    client = get_client()

    params = dict(Bucket=bucket_name, Delimiter=delimiter, MaxKeys=max_keys, Prefix=prefix)
    if continuation_token is None:
        # first page of a listing: start from the beginning
        params['StartAfter'] = ''
    else:
        params['ContinuationToken'] = continuation_token
    response = client.list_objects_v2(**params)

    eq(response['IsTruncated'], is_truncated)
    token = response.get('NextContinuationToken')
    if last:
        # the final page must not hand out another token
        eq(token, None)

    keys = _get_keys(response)
    prefixes = _get_prefixes(response)
    eq(len(keys), len(check_objs))
    eq(len(prefixes), len(check_prefixes))
    eq(keys, check_objs)
    eq(prefixes, check_prefixes)

    return token
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes in multi-component object names')
def test_bucket_list_delimiter_prefix():
    bucket_name = _create_objects(keys=['asdf', 'boo/bar', 'boo/baz/xyzzy', 'cquux/thud', 'cquux/bla'])
    delim = '/'

    # page through the unprefixed listing one entry at a time
    cursor = validate_bucket_list(bucket_name, '', delim, '', 1, True, ['asdf'], [], 'asdf')
    cursor = validate_bucket_list(bucket_name, '', delim, cursor, 1, True, [], ['boo/'], 'boo/')
    validate_bucket_list(bucket_name, '', delim, cursor, 1, False, [], ['cquux/'], None)

    # same listing, two entries per page
    cursor = validate_bucket_list(bucket_name, '', delim, '', 2, True, ['asdf'], ['boo/'], 'boo/')
    validate_bucket_list(bucket_name, '', delim, cursor, 2, False, [], ['cquux/'], None)

    # restrict the listing to the 'boo/' prefix
    cursor = validate_bucket_list(bucket_name, 'boo/', delim, '', 1, True, ['boo/bar'], [], 'boo/bar')
    validate_bucket_list(bucket_name, 'boo/', delim, cursor, 1, False, [], ['boo/baz/'], None)
    validate_bucket_list(bucket_name, 'boo/', delim, '', 2, False, ['boo/bar'], ['boo/baz/'], None)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes in multi-component object names')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_prefix():
    bucket_name = _create_objects(keys=['asdf', 'boo/bar', 'boo/baz/xyzzy', 'cquux/thud', 'cquux/bla'])
    delim = '/'

    # page through the unprefixed listing one entry at a time
    token = validate_bucket_listv2(bucket_name, '', delim, None, 1, True, ['asdf'], [])
    token = validate_bucket_listv2(bucket_name, '', delim, token, 1, True, [], ['boo/'])
    validate_bucket_listv2(bucket_name, '', delim, token, 1, False, [], ['cquux/'], last=True)

    # same listing, two entries per page
    token = validate_bucket_listv2(bucket_name, '', delim, None, 2, True, ['asdf'], ['boo/'])
    validate_bucket_listv2(bucket_name, '', delim, token, 2, False, [], ['cquux/'], last=True)

    # restrict the listing to the 'boo/' prefix
    token = validate_bucket_listv2(bucket_name, 'boo/', delim, None, 1, True, ['boo/bar'], [])
    validate_bucket_listv2(bucket_name, 'boo/', delim, token, 1, False, [], ['boo/baz/'], last=True)
    validate_bucket_listv2(bucket_name, 'boo/', delim, None, 2, False, ['boo/bar'], ['boo/baz/'], last=True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefix and delimiter handling when object ends with delimiter')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_prefix_ends_with_delimiter():
    # a key that equals the prefix and ends with the delimiter is still a key
    name = _create_objects(keys=['asdf/'])
    validate_bucket_listv2(name, 'asdf/', '/', None, 1000, False, ['asdf/'], [], last=True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefix and delimiter handling when object ends with delimiter')
def test_bucket_list_delimiter_prefix_ends_with_delimiter():
    # a key that equals the prefix and ends with the delimiter is still a key
    name = _create_objects(keys=['asdf/'])
    validate_bucket_list(name, 'asdf/', '/', '', 1000, False, ['asdf/'], [], None)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='non-slash delimiter characters')
def test_bucket_list_delimiter_alt():
    name = _create_objects(keys=['bar', 'baz', 'cab', 'foo'])
    resp = get_client().list_objects(Bucket=name, Delimiter='a')
    eq(resp['Delimiter'], 'a')

    # 'foo' has no 'a' in it, so it is listed as a complete key ...
    eq(_get_keys(resp), ['foo'])

    # ... while bar, baz and cab collapse into prefixes at the first 'a'
    prefixes = _get_prefixes(resp)
    eq(len(prefixes), 2)
    eq(prefixes, ['ba', 'ca'])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='non-slash delimiter characters')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_alt():
    name = _create_objects(keys=['bar', 'baz', 'cab', 'foo'])
    resp = get_client().list_objects_v2(Bucket=name, Delimiter='a')
    eq(resp['Delimiter'], 'a')

    # 'foo' has no 'a' in it, so it is listed as a complete key ...
    eq(_get_keys(resp), ['foo'])

    # ... while bar, baz and cab collapse into prefixes at the first 'a'
    prefixes = _get_prefixes(resp)
    eq(len(prefixes), 2)
    eq(prefixes, ['ba', 'ca'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes starting with underscore')
def test_bucket_list_delimiter_prefix_underscore():
    bucket_name = _create_objects(keys=['_obj1_','_under1/bar', '_under1/baz/xyzzy', '_under2/thud', '_under2/bla'])
    delim = '/'

    # page through the unprefixed listing one entry at a time
    cursor = validate_bucket_list(bucket_name, '', delim, '', 1, True, ['_obj1_'], [], '_obj1_')
    cursor = validate_bucket_list(bucket_name, '', delim, cursor, 1, True, [], ['_under1/'], '_under1/')
    validate_bucket_list(bucket_name, '', delim, cursor, 1, False, [], ['_under2/'], None)

    # same listing, two entries per page
    cursor = validate_bucket_list(bucket_name, '', delim, '', 2, True, ['_obj1_'], ['_under1/'], '_under1/')
    validate_bucket_list(bucket_name, '', delim, cursor, 2, False, [], ['_under2/'], None)

    # restrict the listing to the '_under1/' prefix
    cursor = validate_bucket_list(bucket_name, '_under1/', delim, '', 1, True, ['_under1/bar'], [], '_under1/bar')
    validate_bucket_list(bucket_name, '_under1/', delim, cursor, 1, False, [], ['_under1/baz/'], None)
    validate_bucket_list(bucket_name, '_under1/', delim, '', 2, False, ['_under1/bar'], ['_under1/baz/'], None)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes starting with underscore')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_prefix_underscore():
    bucket_name = _create_objects(keys=['_obj1_','_under1/bar', '_under1/baz/xyzzy', '_under2/thud', '_under2/bla'])
    delim = '/'

    # page through the unprefixed listing one entry at a time
    token = validate_bucket_listv2(bucket_name, '', delim, None, 1, True, ['_obj1_'], [])
    token = validate_bucket_listv2(bucket_name, '', delim, token, 1, True, [], ['_under1/'])
    validate_bucket_listv2(bucket_name, '', delim, token, 1, False, [], ['_under2/'], last=True)

    # same listing, two entries per page
    token = validate_bucket_listv2(bucket_name, '', delim, None, 2, True, ['_obj1_'], ['_under1/'])
    validate_bucket_listv2(bucket_name, '', delim, token, 2, False, [], ['_under2/'], last=True)

    # restrict the listing to the '_under1/' prefix
    token = validate_bucket_listv2(bucket_name, '_under1/', delim, None, 1, True, ['_under1/bar'], [])
    validate_bucket_listv2(bucket_name, '_under1/', delim, token, 1, False, [], ['_under1/baz/'], last=True)
    validate_bucket_listv2(bucket_name, '_under1/', delim, None, 2, False, ['_under1/bar'], ['_under1/baz/'], last=True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='percentage delimiter characters')
def test_bucket_list_delimiter_percentage():
    bucket_name = _create_objects(keys=['b%ar', 'b%az', 'c%ab', 'foo'])
    client = get_client()
    response = client.list_objects(Bucket=bucket_name, Delimiter='%')
    eq(response['Delimiter'], '%')
    keys = _get_keys(response)
    # 'foo' contains no '%' and so is returned as a complete key
    eq(keys, ['foo'])
    prefixes = _get_prefixes(response)
    eq(len(prefixes), 2)
    # b%ar, b%az, and c%ab roll up into common prefixes at the '%' delimiter
    eq(prefixes, ['b%', 'c%'])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='percentage delimiter characters')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_percentage():
    bucket_name = _create_objects(keys=['b%ar', 'b%az', 'c%ab', 'foo'])
    client = get_client()
    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='%')
    eq(response['Delimiter'], '%')
    keys = _get_keys(response)
    # 'foo' contains no '%' and so is returned as a complete key
    eq(keys, ['foo'])
    prefixes = _get_prefixes(response)
    eq(len(prefixes), 2)
    # b%ar, b%az, and c%ab roll up into common prefixes at the '%' delimiter
    eq(prefixes, ['b%', 'c%'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='whitespace delimiter characters')
def test_bucket_list_delimiter_whitespace():
    bucket_name = _create_objects(keys=['b ar', 'b az', 'c ab', 'foo'])
    client = get_client()
    response = client.list_objects(Bucket=bucket_name, Delimiter=' ')
    eq(response['Delimiter'], ' ')
    keys = _get_keys(response)
    # 'foo' contains no space and so is returned as a complete key
    eq(keys, ['foo'])
    prefixes = _get_prefixes(response)
    eq(len(prefixes), 2)
    # 'b ar', 'b az', and 'c ab' roll up into common prefixes at the space delimiter
    eq(prefixes, ['b ', 'c '])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='whitespace delimiter characters')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_whitespace():
    """ListObjectsV2 with a space delimiter groups keys at the first space."""
    bucket = _create_objects(keys=['b ar', 'b az', 'c ab', 'foo'])
    resp = get_client().list_objects_v2(Bucket=bucket, Delimiter=' ')
    eq(resp['Delimiter'], ' ')
    # 'foo' contains no space and so is returned as a complete key
    eq(_get_keys(resp), ['foo'])
    grouped = _get_prefixes(resp)
    eq(len(grouped), 2)
    # the other keys roll up into common prefixes ending at the space
    eq(grouped, ['b ', 'c '])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='dot delimiter characters')
def test_bucket_list_delimiter_dot():
    """Listing with a '.' delimiter groups keys at the first '.'."""
    bucket = _create_objects(keys=['b.ar', 'b.az', 'c.ab', 'foo'])
    resp = get_client().list_objects(Bucket=bucket, Delimiter='.')
    eq(resp['Delimiter'], '.')
    # 'foo' contains no '.' and so is returned as a complete key
    eq(_get_keys(resp), ['foo'])
    grouped = _get_prefixes(resp)
    eq(len(grouped), 2)
    # the other keys roll up into common prefixes ending at the '.'
    eq(grouped, ['b.', 'c.'])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='dot delimiter characters')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_dot():
    """ListObjectsV2 with a '.' delimiter groups keys at the first '.'."""
    bucket = _create_objects(keys=['b.ar', 'b.az', 'c.ab', 'foo'])
    resp = get_client().list_objects_v2(Bucket=bucket, Delimiter='.')
    eq(resp['Delimiter'], '.')
    # 'foo' contains no '.' and so is returned as a complete key
    eq(_get_keys(resp), ['foo'])
    grouped = _get_prefixes(resp)
    eq(len(grouped), 2)
    # the other keys roll up into common prefixes ending at the '.'
    eq(grouped, ['b.', 'c.'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='non-printable delimiter can be specified')
def test_bucket_list_delimiter_unreadable():
    """A non-printable delimiter (newline) is accepted and echoed back."""
    names = ['bar', 'baz', 'cab', 'foo']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects(Bucket=bucket, Delimiter='\x0a')
    eq(resp['Delimiter'], '\x0a')
    # no key contains a newline, so every key is complete and nothing groups
    eq(_get_keys(resp), names)
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='non-printable delimiter can be specified')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_unreadable():
    """ListObjectsV2 accepts a non-printable delimiter (newline) and echoes it."""
    names = ['bar', 'baz', 'cab', 'foo']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects_v2(Bucket=bucket, Delimiter='\x0a')
    eq(resp['Delimiter'], '\x0a')
    # no key contains a newline, so every key is complete and nothing groups
    eq(_get_keys(resp), names)
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='empty delimiter can be specified')
def test_bucket_list_delimiter_empty():
    """An empty Delimiter behaves as if no delimiter were given."""
    names = ['bar', 'baz', 'cab', 'foo']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects(Bucket=bucket, Delimiter='')
    # an empty Delimiter value is not echoed back in the response
    eq('Delimiter' in resp, False)
    eq(_get_keys(resp), names)
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='empty delimiter can be specified')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_empty():
    """ListObjectsV2 treats an empty Delimiter as if none were given."""
    names = ['bar', 'baz', 'cab', 'foo']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects_v2(Bucket=bucket, Delimiter='')
    # an empty Delimiter value is not echoed back in the response
    eq('Delimiter' in resp, False)
    eq(_get_keys(resp), names)
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='unspecified delimiter defaults to none')
def test_bucket_list_delimiter_none():
    """Omitting Delimiter entirely lists every key with no grouping."""
    names = ['bar', 'baz', 'cab', 'foo']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects(Bucket=bucket)
    # with no Delimiter supplied, none appears in the response
    eq('Delimiter' in resp, False)
    eq(_get_keys(resp), names)
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='unspecified delimiter defaults to none')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_none():
    """ListObjectsV2 without a Delimiter lists every key with no grouping."""
    names = ['bar', 'baz', 'cab', 'foo']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects_v2(Bucket=bucket)
    # with no Delimiter supplied, none appears in the response
    eq('Delimiter' in resp, False)
    eq(_get_keys(resp), names)
    eq(_get_prefixes(resp), [])
@attr('list-objects-v2')
def test_bucket_listv2_fetchowner_notempty():
    """FetchOwner=True makes ListObjectsV2 include Owner metadata."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz', 'quux'])
    resp = get_client().list_objects_v2(Bucket=bucket, FetchOwner=True)
    contents = resp['Contents']
    eq('Owner' in contents[0], True)
@attr('list-objects-v2')
def test_bucket_listv2_fetchowner_defaultempty():
    """By default ListObjectsV2 omits Owner metadata from Contents."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz', 'quux'])
    resp = get_client().list_objects_v2(Bucket=bucket)
    contents = resp['Contents']
    eq('Owner' in contents[0], False)
@attr('list-objects-v2')
def test_bucket_listv2_fetchowner_empty():
    """An explicit FetchOwner=False also omits Owner metadata."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz', 'quux'])
    resp = get_client().list_objects_v2(Bucket=bucket, FetchOwner=False)
    contents = resp['Contents']
    eq('Owner' in contents[0], False)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='unused delimiter is not found')
def test_bucket_list_delimiter_not_exist():
    """A delimiter no key contains is echoed back but groups nothing."""
    names = ['bar', 'baz', 'cab', 'foo']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects(Bucket=bucket, Delimiter='/')
    # the delimiter is echoed even though it never matches
    eq(resp['Delimiter'], '/')
    eq(_get_keys(resp), names)
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='unused delimiter is not found')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_not_exist():
    """ListObjectsV2 echoes a non-matching delimiter but groups nothing."""
    names = ['bar', 'baz', 'cab', 'foo']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects_v2(Bucket=bucket, Delimiter='/')
    # the delimiter is echoed even though it never matches
    eq(resp['Delimiter'], '/')
    eq(_get_keys(resp), names)
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='list with delimiter not skip special keys')
def test_bucket_list_delimiter_not_skip_special():
    """Keys sorting after a large delimited group must still be listed."""
    special = ['1999', '1999#', '1999+', '2000']
    names = ['0/'] + ['0/%s' % i for i in range(1000, 1999)] + special
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects(Bucket=bucket, Delimiter='/')
    eq(resp['Delimiter'], '/')
    # everything under 0/ collapses into a single prefix; the
    # special keys after it come back individually
    eq(_get_keys(resp), special)
    eq(_get_prefixes(resp), ['0/'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='returns only objects under prefix')
def test_bucket_list_prefix_basic():
    """Listing with Prefix='foo/' returns only keys under that prefix."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz', 'quux'])
    resp = get_client().list_objects(Bucket=bucket, Prefix='foo/')
    eq(resp['Prefix'], 'foo/')
    # 'quux' is outside the prefix; no delimiter, so no common prefixes
    eq(_get_keys(resp), ['foo/bar', 'foo/baz'])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='returns only objects under prefix')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_basic():
    """ListObjectsV2 with Prefix='foo/' returns only keys under that prefix."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz', 'quux'])
    resp = get_client().list_objects_v2(Bucket=bucket, Prefix='foo/')
    eq(resp['Prefix'], 'foo/')
    # 'quux' is outside the prefix; no delimiter, so no common prefixes
    eq(_get_keys(resp), ['foo/bar', 'foo/baz'])
    eq(_get_prefixes(resp), [])
# just testing that we can do the delimiter and prefix logic on non-slash characters
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='prefixes w/o delimiters')
def test_bucket_list_prefix_alt():
    """A non-slash prefix ('ba') filters keys without any delimiter."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo'])
    resp = get_client().list_objects(Bucket=bucket, Prefix='ba')
    eq(resp['Prefix'], 'ba')
    # only 'bar' and 'baz' start with 'ba'
    eq(_get_keys(resp), ['bar', 'baz'])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='prefixes w/o delimiters')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_alt():
    """ListObjectsV2 with a non-slash prefix ('ba') filters keys."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo'])
    resp = get_client().list_objects_v2(Bucket=bucket, Prefix='ba')
    eq(resp['Prefix'], 'ba')
    # only 'bar' and 'baz' start with 'ba'
    eq(_get_keys(resp), ['bar', 'baz'])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='empty prefix returns everything')
def test_bucket_list_prefix_empty():
    """An explicit empty Prefix matches every key."""
    names = ['foo/bar', 'foo/baz', 'quux']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects(Bucket=bucket, Prefix='')
    # the empty prefix is echoed and filters nothing
    eq(resp['Prefix'], '')
    eq(_get_keys(resp), names)
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='empty prefix returns everything')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_empty():
    """ListObjectsV2 with an explicit empty Prefix matches every key."""
    names = ['foo/bar', 'foo/baz', 'quux']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects_v2(Bucket=bucket, Prefix='')
    # the empty prefix is echoed and filters nothing
    eq(resp['Prefix'], '')
    eq(_get_keys(resp), names)
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='unspecified prefix returns everything')
def test_bucket_list_prefix_none():
    """An unspecified prefix behaves like the empty prefix: everything matches."""
    names = ['foo/bar', 'foo/baz', 'quux']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects(Bucket=bucket, Prefix='')
    eq(resp['Prefix'], '')
    eq(_get_keys(resp), names)
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='unspecified prefix returns everything')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_none():
    """ListObjectsV2: an unspecified prefix matches everything."""
    names = ['foo/bar', 'foo/baz', 'quux']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects_v2(Bucket=bucket, Prefix='')
    eq(resp['Prefix'], '')
    eq(_get_keys(resp), names)
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='nonexistent prefix returns nothing')
def test_bucket_list_prefix_not_exist():
    """A prefix matching no keys yields an empty listing."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz', 'quux'])
    resp = get_client().list_objects(Bucket=bucket, Prefix='d')
    eq(resp['Prefix'], 'd')
    eq(_get_keys(resp), [])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='nonexistent prefix returns nothing')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_not_exist():
    """ListObjectsV2: a prefix matching no keys yields an empty listing."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz', 'quux'])
    resp = get_client().list_objects_v2(Bucket=bucket, Prefix='d')
    eq(resp['Prefix'], 'd')
    eq(_get_keys(resp), [])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='non-printable prefix can be specified')
def test_bucket_list_prefix_unreadable():
    """A non-printable prefix (newline) is accepted and matches nothing here."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz', 'quux'])
    resp = get_client().list_objects(Bucket=bucket, Prefix='\x0a')
    eq(resp['Prefix'], '\x0a')
    # no key begins with a newline
    eq(_get_keys(resp), [])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='non-printable prefix can be specified')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_unreadable():
    """ListObjectsV2 accepts a non-printable prefix (newline); matches nothing here."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz', 'quux'])
    resp = get_client().list_objects_v2(Bucket=bucket, Prefix='\x0a')
    eq(resp['Prefix'], '\x0a')
    # no key begins with a newline
    eq(_get_keys(resp), [])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='returns only objects directly under prefix')
def test_bucket_list_prefix_delimiter_basic():
    """Prefix plus delimiter returns only keys directly under the prefix."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz/xyzzy', 'quux/thud', 'asdf'])
    resp = get_client().list_objects(Bucket=bucket, Delimiter='/', Prefix='foo/')
    eq(resp['Prefix'], 'foo/')
    eq(resp['Delimiter'], '/')
    # foo/bar sits directly under the prefix; foo/baz/xyzzy rolls up
    # into the common prefix foo/baz/
    eq(_get_keys(resp), ['foo/bar'])
    eq(_get_prefixes(resp), ['foo/baz/'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list-objects-v2 under prefix w/delimiter')
@attr(assertion='returns only objects directly under prefix')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_delimiter_basic():
    """ListObjectsV2 with prefix + delimiter returns only direct children."""
    bucket = _create_objects(keys=['foo/bar', 'foo/baz/xyzzy', 'quux/thud', 'asdf'])
    resp = get_client().list_objects_v2(Bucket=bucket, Delimiter='/', Prefix='foo/')
    eq(resp['Prefix'], 'foo/')
    eq(resp['Delimiter'], '/')
    # foo/bar sits directly under the prefix; foo/baz/xyzzy rolls up
    # into the common prefix foo/baz/
    eq(_get_keys(resp), ['foo/bar'])
    eq(_get_prefixes(resp), ['foo/baz/'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='non-slash delimiters')
def test_bucket_list_prefix_delimiter_alt():
    """Non-slash prefix and delimiter combine the same way as slashes."""
    bucket = _create_objects(keys=['bar', 'bazar', 'cab', 'foo'])
    resp = get_client().list_objects(Bucket=bucket, Delimiter='a', Prefix='ba')
    eq(resp['Prefix'], 'ba')
    eq(resp['Delimiter'], 'a')
    # 'bar' has no 'a' after the prefix; 'bazar' groups at the next 'a'
    eq(_get_keys(resp), ['bar'])
    eq(_get_prefixes(resp), ['baza'])
@attr('list-objects-v2')
def test_bucket_listv2_prefix_delimiter_alt():
    """ListObjectsV2: non-slash prefix and delimiter combine like slashes."""
    bucket = _create_objects(keys=['bar', 'bazar', 'cab', 'foo'])
    resp = get_client().list_objects_v2(Bucket=bucket, Delimiter='a', Prefix='ba')
    eq(resp['Prefix'], 'ba')
    eq(resp['Delimiter'], 'a')
    # 'bar' has no 'a' after the prefix; 'bazar' groups at the next 'a'
    eq(_get_keys(resp), ['bar'])
    eq(_get_prefixes(resp), ['baza'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='finds nothing w/unmatched prefix')
def test_bucket_list_prefix_delimiter_prefix_not_exist():
    """An unmatched prefix returns nothing even with a delimiter set."""
    bucket = _create_objects(keys=['b/a/r', 'b/a/c', 'b/a/g', 'g'])
    resp = get_client().list_objects(Bucket=bucket, Delimiter='d', Prefix='/')
    # no key starts with '/'
    eq(_get_keys(resp), [])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list-objects-v2 under prefix w/delimiter')
@attr(assertion='finds nothing w/unmatched prefix')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_delimiter_prefix_not_exist():
    """ListObjectsV2: an unmatched prefix returns nothing even with a delimiter."""
    bucket = _create_objects(keys=['b/a/r', 'b/a/c', 'b/a/g', 'g'])
    resp = get_client().list_objects_v2(Bucket=bucket, Delimiter='d', Prefix='/')
    # no key starts with '/'
    eq(_get_keys(resp), [])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='over-ridden slash ceases to be a delimiter')
def test_bucket_list_prefix_delimiter_delimiter_not_exist():
    """With a non-matching delimiter, '/' in keys no longer groups them."""
    bucket = _create_objects(keys=['b/a/c', 'b/a/g', 'b/a/r', 'g'])
    resp = get_client().list_objects(Bucket=bucket, Delimiter='z', Prefix='b')
    # 'z' appears in no key, so every 'b'-prefixed key comes back whole
    eq(_get_keys(resp), ['b/a/c', 'b/a/g', 'b/a/r'])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list-objects-v2 under prefix w/delimiter')
@attr(assertion='over-ridden slash ceases to be a delimiter')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_delimiter_delimiter_not_exist():
    """ListObjectsV2: a non-matching delimiter means '/' no longer groups keys."""
    bucket = _create_objects(keys=['b/a/c', 'b/a/g', 'b/a/r', 'g'])
    resp = get_client().list_objects_v2(Bucket=bucket, Delimiter='z', Prefix='b')
    # 'z' appears in no key, so every 'b'-prefixed key comes back whole
    eq(_get_keys(resp), ['b/a/c', 'b/a/g', 'b/a/r'])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='finds nothing w/unmatched prefix and delimiter')
def test_bucket_list_prefix_delimiter_prefix_delimiter_not_exist():
    """When neither the prefix nor the delimiter matches, nothing is listed."""
    bucket = _create_objects(keys=['b/a/c', 'b/a/g', 'b/a/r', 'g'])
    resp = get_client().list_objects(Bucket=bucket, Delimiter='z', Prefix='y')
    eq(_get_keys(resp), [])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list-objects-v2 under prefix w/delimiter')
@attr(assertion='finds nothing w/unmatched prefix and delimiter')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist():
    """ListObjectsV2: neither prefix nor delimiter matches, so nothing is listed."""
    bucket = _create_objects(keys=['b/a/c', 'b/a/g', 'b/a/r', 'g'])
    resp = get_client().list_objects_v2(Bucket=bucket, Delimiter='z', Prefix='y')
    eq(_get_keys(resp), [])
    eq(_get_prefixes(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/max_keys=1, marker')
def test_bucket_list_maxkeys_one():
    """MaxKeys=1 truncates after one key; Marker resumes the listing."""
    names = ['bar', 'baz', 'foo', 'quxx']
    bucket = _create_objects(keys=names)
    client = get_client()
    page1 = client.list_objects(Bucket=bucket, MaxKeys=1)
    eq(page1['IsTruncated'], True)
    eq(_get_keys(page1), names[0:1])
    # resume just after the first key; the remainder fits in one page
    page2 = client.list_objects(Bucket=bucket, Marker=names[0])
    eq(page2['IsTruncated'], False)
    eq(_get_keys(page2), names[1:])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='pagination w/max_keys=1, marker')
@attr('list-objects-v2')
def test_bucket_listv2_maxkeys_one():
    """ListObjectsV2: MaxKeys=1 truncates; StartAfter resumes the listing."""
    names = ['bar', 'baz', 'foo', 'quxx']
    bucket = _create_objects(keys=names)
    client = get_client()
    page1 = client.list_objects_v2(Bucket=bucket, MaxKeys=1)
    eq(page1['IsTruncated'], True)
    eq(_get_keys(page1), names[0:1])
    # resume just after the first key; the remainder fits in one page
    page2 = client.list_objects_v2(Bucket=bucket, StartAfter=names[0])
    eq(page2['IsTruncated'], False)
    eq(_get_keys(page2), names[1:])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/max_keys=0')
def test_bucket_list_maxkeys_zero():
    """MaxKeys=0 returns no keys and is not reported as truncated."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo', 'quxx'])
    resp = get_client().list_objects(Bucket=bucket, MaxKeys=0)
    eq(resp['IsTruncated'], False)
    eq(_get_keys(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='pagination w/max_keys=0')
@attr('list-objects-v2')
def test_bucket_listv2_maxkeys_zero():
    """ListObjectsV2: MaxKeys=0 returns no keys and is not truncated."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo', 'quxx'])
    resp = get_client().list_objects_v2(Bucket=bucket, MaxKeys=0)
    eq(resp['IsTruncated'], False)
    eq(_get_keys(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/o max_keys')
def test_bucket_list_maxkeys_none():
    """Without MaxKeys the default of 1000 applies and all keys fit."""
    names = ['bar', 'baz', 'foo', 'quxx']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects(Bucket=bucket)
    eq(resp['IsTruncated'], False)
    eq(_get_keys(resp), names)
    # the server reports the default page size
    eq(resp['MaxKeys'], 1000)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='pagination w/o max_keys')
@attr('list-objects-v2')
def test_bucket_listv2_maxkeys_none():
    """ListObjectsV2: without MaxKeys the default of 1000 applies."""
    names = ['bar', 'baz', 'foo', 'quxx']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects_v2(Bucket=bucket)
    eq(resp['IsTruncated'], False)
    eq(_get_keys(resp), names)
    # the server reports the default page size
    eq(resp['MaxKeys'], 1000)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='bucket list unordered')
@attr('fails_on_aws') # allow-unordered is a non-standard extension
def test_bucket_list_unordered():
    """Exercise RGW's non-standard allow-unordered listing extension.

    An unordered listing may return keys in any order, so only
    order-insensitive properties (counts, membership) are asserted.
    """
    # boto3.set_stream_logger(name='botocore')
    keys_in = ['ado', 'bot', 'cob', 'dog', 'emu', 'fez', 'gnu', 'hex',
               'abc/ink', 'abc/jet', 'abc/kin', 'abc/lax', 'abc/mux',
               'def/nim', 'def/owl', 'def/pie', 'def/qed', 'def/rye',
               'ghi/sew', 'ghi/tor', 'ghi/uke', 'ghi/via', 'ghi/wit',
               'xix', 'yak', 'zoo']
    bucket_name = _create_objects(keys=keys_in)
    client = get_client()

    # adds the unordered query parameter to every ListObjects request
    def add_unordered(**kwargs):
        kwargs['params']['url'] += "&allow-unordered=true"
    client.meta.events.register('before-call.s3.ListObjects', add_unordered)

    # test simple retrieval
    response = client.list_objects(Bucket=bucket_name, MaxKeys=1000)
    unordered_keys_out = _get_keys(response)
    eq(len(keys_in), len(unordered_keys_out))
    # BUG FIX: list.sort() sorts in place and returns None, so the old
    # eq(keys_in.sort(), unordered_keys_out.sort()) compared None == None
    # and could never fail. Compare sorted copies instead.
    eq(sorted(keys_in), sorted(unordered_keys_out))

    # test retrieval with prefix
    response = client.list_objects(Bucket=bucket_name,
                                   MaxKeys=1000,
                                   Prefix="abc/")
    unordered_keys_out = _get_keys(response)
    eq(5, len(unordered_keys_out))

    # test incremental retrieval with marker
    response = client.list_objects(Bucket=bucket_name, MaxKeys=6)
    unordered_keys_out = _get_keys(response)
    eq(6, len(unordered_keys_out))

    # now get the next bunch
    response = client.list_objects(Bucket=bucket_name,
                                   MaxKeys=6,
                                   Marker=unordered_keys_out[-1])
    unordered_keys_out2 = _get_keys(response)
    eq(6, len(unordered_keys_out2))

    # make sure there's no overlap between the incremental retrievals
    intersect = set(unordered_keys_out).intersection(unordered_keys_out2)
    eq(0, len(intersect))

    # verify that unordered used with delimiter results in error
    e = assert_raises(ClientError,
                      client.list_objects, Bucket=bucket_name, Delimiter="/")
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='bucket list unordered')
@attr('fails_on_aws') # allow-unordered is a non-standard extension
@attr('list-objects-v2')
def test_bucket_listv2_unordered():
    """Exercise RGW's non-standard allow-unordered extension via ListObjectsV2.

    An unordered listing may return keys in any order, so only
    order-insensitive properties (counts, membership) are asserted.
    """
    # boto3.set_stream_logger(name='botocore')
    keys_in = ['ado', 'bot', 'cob', 'dog', 'emu', 'fez', 'gnu', 'hex',
               'abc/ink', 'abc/jet', 'abc/kin', 'abc/lax', 'abc/mux',
               'def/nim', 'def/owl', 'def/pie', 'def/qed', 'def/rye',
               'ghi/sew', 'ghi/tor', 'ghi/uke', 'ghi/via', 'ghi/wit',
               'xix', 'yak', 'zoo']
    bucket_name = _create_objects(keys=keys_in)
    client = get_client()

    # adds the unordered query parameter to every ListObjectsV2 request.
    # BUG FIX: this was registered on 'before-call.s3.ListObjects', which
    # never fires for list_objects_v2 calls, so the extension was not
    # actually being exercised by this test.
    def add_unordered(**kwargs):
        kwargs['params']['url'] += "&allow-unordered=true"
    client.meta.events.register('before-call.s3.ListObjectsV2', add_unordered)

    # test simple retrieval
    response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1000)
    unordered_keys_out = _get_keys(response)
    eq(len(keys_in), len(unordered_keys_out))
    # BUG FIX: list.sort() returns None, so the old comparison of the two
    # .sort() results could never fail. Compare sorted copies instead.
    eq(sorted(keys_in), sorted(unordered_keys_out))

    # test retrieval with prefix
    response = client.list_objects_v2(Bucket=bucket_name,
                                      MaxKeys=1000,
                                      Prefix="abc/")
    unordered_keys_out = _get_keys(response)
    eq(5, len(unordered_keys_out))

    # test incremental retrieval with marker
    response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=6)
    unordered_keys_out = _get_keys(response)
    eq(6, len(unordered_keys_out))

    # now get the next bunch
    response = client.list_objects_v2(Bucket=bucket_name,
                                      MaxKeys=6,
                                      StartAfter=unordered_keys_out[-1])
    unordered_keys_out2 = _get_keys(response)
    eq(6, len(unordered_keys_out2))

    # make sure there's no overlap between the incremental retrievals
    intersect = set(unordered_keys_out).intersection(unordered_keys_out2)
    eq(0, len(intersect))

    # verify that unordered used with delimiter results in error
    # BUG FIX: this previously called the v1 client.list_objects, leaving
    # the v2 path untested.
    e = assert_raises(ClientError,
                      client.list_objects_v2, Bucket=bucket_name, Delimiter="/")
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='invalid max_keys')
def test_bucket_list_maxkeys_invalid():
    """A non-numeric max-keys query parameter is rejected with InvalidArgument."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo', 'quxx'])
    client = get_client()

    # botocore validates MaxKeys client-side, so smuggle the bad value
    # into the request URL just before the call goes out
    def add_invalid_maxkeys(**kwargs):
        kwargs['params']['url'] += "&max-keys=blah"
    client.meta.events.register('before-call.s3.ListObjects', add_invalid_maxkeys)

    e = assert_raises(ClientError, client.list_objects, Bucket=bucket)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='no pagination, no marker')
def test_bucket_list_marker_none():
    """With no Marker supplied, the response echoes an empty marker."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo', 'quxx'])
    resp = get_client().list_objects(Bucket=bucket)
    eq(resp['Marker'], '')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='no pagination, empty marker')
def test_bucket_list_marker_empty():
    """An explicit empty Marker lists everything from the beginning."""
    names = ['bar', 'baz', 'foo', 'quxx']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects(Bucket=bucket, Marker='')
    eq(resp['Marker'], '')
    eq(resp['IsTruncated'], False)
    eq(_get_keys(resp), names)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='no pagination, empty continuationtoken')
@attr('list-objects-v2')
def test_bucket_listv2_continuationtoken_empty():
    """An empty ContinuationToken lists everything from the beginning."""
    names = ['bar', 'baz', 'foo', 'quxx']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects_v2(Bucket=bucket, ContinuationToken='')
    eq(resp['ContinuationToken'], '')
    eq(resp['IsTruncated'], False)
    eq(_get_keys(resp), names)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list keys with list-objects-v2')
@attr(assertion='no pagination, non-empty continuationtoken')
@attr('list-objects-v2')
def test_bucket_listv2_continuationtoken():
    """NextContinuationToken from a truncated listing resumes after 'bar'."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo', 'quxx'])
    client = get_client()
    first = client.list_objects_v2(Bucket=bucket, MaxKeys=1)
    token = first['NextContinuationToken']
    second = client.list_objects_v2(Bucket=bucket, ContinuationToken=token)
    eq(second['ContinuationToken'], token)
    eq(second['IsTruncated'], False)
    # the continuation picks up after 'bar', the only key on page one
    eq(_get_keys(second), ['baz', 'foo', 'quxx'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list keys with list-objects-v2')
@attr(assertion='no pagination, non-empty continuationtoken and startafter')
@attr('list-objects-v2')
def test_bucket_listv2_both_continuationtoken_startafter():
    """When both are supplied, ContinuationToken takes effect alongside StartAfter."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo', 'quxx'])
    client = get_client()
    first = client.list_objects_v2(Bucket=bucket, StartAfter='bar', MaxKeys=1)
    token = first['NextContinuationToken']
    second = client.list_objects_v2(Bucket=bucket, StartAfter='bar',
                                    ContinuationToken=token)
    eq(second['ContinuationToken'], token)
    eq(second['StartAfter'], 'bar')
    eq(second['IsTruncated'], False)
    # page one (after 'bar') held only 'baz'; the token resumes after it
    eq(_get_keys(second), ['foo', 'quxx'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='non-printing marker')
def test_bucket_list_marker_unreadable():
    """A non-printable Marker (newline) sorts before all keys; all are listed."""
    names = ['bar', 'baz', 'foo', 'quxx']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects(Bucket=bucket, Marker='\x0a')
    eq(resp['Marker'], '\x0a')
    eq(resp['IsTruncated'], False)
    eq(_get_keys(resp), names)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='non-printing startafter')
@attr('list-objects-v2')
def test_bucket_listv2_startafter_unreadable():
    """A non-printable StartAfter (newline) sorts before all keys; all are listed."""
    names = ['bar', 'baz', 'foo', 'quxx']
    bucket = _create_objects(keys=names)
    resp = get_client().list_objects_v2(Bucket=bucket, StartAfter='\x0a')
    eq(resp['StartAfter'], '\x0a')
    eq(resp['IsTruncated'], False)
    eq(_get_keys(resp), names)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='marker not-in-list')
def test_bucket_list_marker_not_in_list():
    """A Marker between existing keys resumes at the next key in sort order."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo', 'quxx'])
    resp = get_client().list_objects(Bucket=bucket, Marker='blah')
    eq(resp['Marker'], 'blah')
    # 'blah' sorts between 'baz' and 'foo'
    eq(_get_keys(resp), ['foo', 'quxx'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='startafter not-in-list')
@attr('list-objects-v2')
def test_bucket_listv2_startafter_not_in_list():
    """A StartAfter between existing keys resumes at the next key in sort order."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo', 'quxx'])
    resp = get_client().list_objects_v2(Bucket=bucket, StartAfter='blah')
    eq(resp['StartAfter'], 'blah')
    # 'blah' sorts between 'baz' and 'foo'
    eq(_get_keys(resp), ['foo', 'quxx'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='marker after list')
def test_bucket_list_marker_after_list():
    """A Marker sorting after every key yields an empty, non-truncated listing."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo', 'quxx'])
    resp = get_client().list_objects(Bucket=bucket, Marker='zzz')
    eq(resp['Marker'], 'zzz')
    eq(resp['IsTruncated'], False)
    eq(_get_keys(resp), [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='startafter after list')
@attr('list-objects-v2')
def test_bucket_listv2_startafter_after_list():
    """A StartAfter sorting after every key yields an empty, non-truncated listing."""
    bucket = _create_objects(keys=['bar', 'baz', 'foo', 'quxx'])
    resp = get_client().list_objects_v2(Bucket=bucket, StartAfter='zzz')
    eq(resp['StartAfter'], 'zzz')
    eq(resp['IsTruncated'], False)
    eq(_get_keys(resp), [])
def _compare_dates(datetime1, datetime2):
    """Assert two datetimes are equal after dropping datetime1's microseconds.

    datetime1 carries microsecond precision while datetime2 does not, so the
    former's microseconds are zeroed before the comparison.
    """
    eq(datetime1.replace(microsecond=0), datetime2)
@attr(resource='object')
@attr(method='head')
@attr(operation='compare w/bucket list')
@attr(assertion='return same metadata')
def test_bucket_list_return_data():
    """Each entry in a bucket listing matches the object's own HEAD/ACL metadata."""
    names = ['bar', 'baz', 'foo']
    bucket = _create_objects(keys=names)
    client = get_client()
    # collect per-key metadata straight from the objects themselves
    expected = {}
    for name in names:
        head = client.head_object(Bucket=bucket, Key=name)
        acl = client.get_object_acl(Bucket=bucket, Key=name)
        expected[name] = {
            'DisplayName': acl['Owner']['DisplayName'],
            'ID': acl['Owner']['ID'],
            'ETag': head['ETag'],
            'LastModified': head['LastModified'],
            'ContentLength': head['ContentLength'],
        }
    # the listing must agree with the per-object metadata
    for obj in client.list_objects(Bucket=bucket)['Contents']:
        want = expected[obj['Key']]
        eq(obj['ETag'], want['ETag'])
        eq(obj['Size'], want['ContentLength'])
        eq(obj['Owner']['DisplayName'], want['DisplayName'])
        eq(obj['Owner']['ID'], want['ID'])
        _compare_dates(obj['LastModified'], want['LastModified'])
# amazon is eventually consistent, retry a bit if failed
def check_configure_versioning_retry(bucket_name, status, expected_string):
    """Set bucket versioning to *status* and poll until the bucket reports it.

    Retries get_bucket_versioning up to 5 times, one second apart, because the
    setting propagates asynchronously on AWS. Asserts the final observed status
    equals *expected_string*.
    """
    client = get_client()
    client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'MFADelete': 'Disabled','Status': status})
    read_status = None
    for i in xrange(5):
        try:
            response = client.get_bucket_versioning(Bucket=bucket_name)
            read_status = response['Status']
        except KeyError:
            # 'Status' is absent until the configuration has propagated
            read_status = None
        if (expected_string == read_status):
            break
        if i < 4:
            # fix: don't sleep after the final attempt — there is no retry left
            time.sleep(1)
    eq(expected_string, read_status)
@attr(resource='object')
@attr(method='head')
@attr(operation='compare w/bucket list when bucket versioning is configured')
@attr(assertion='return same metadata')
@attr('versioning')
def test_bucket_list_return_data_versioning():
    """Version-listing entries match per-object HEAD/ACL metadata on a versioned bucket."""
    bucket = get_new_bucket()
    check_configure_versioning_retry(bucket, "Enabled", "Enabled")
    names = ['bar', 'baz', 'foo']
    bucket = _create_objects(bucket_name=bucket, keys=names)
    client = get_client()
    # gather the per-object truth including version ids
    expected = {}
    for name in names:
        head = client.head_object(Bucket=bucket, Key=name)
        acl = client.get_object_acl(Bucket=bucket, Key=name)
        expected[name] = {
            'ID': acl['Owner']['ID'],
            'DisplayName': acl['Owner']['DisplayName'],
            'ETag': head['ETag'],
            'LastModified': head['LastModified'],
            'ContentLength': head['ContentLength'],
            'VersionId': head['VersionId'],
        }
    # every version entry must agree with the object's own metadata
    for obj in client.list_object_versions(Bucket=bucket)['Versions']:
        want = expected[obj['Key']]
        eq(obj['Owner']['DisplayName'], want['DisplayName'])
        eq(obj['ETag'], want['ETag'])
        eq(obj['Size'], want['ContentLength'])
        eq(obj['Owner']['ID'], want['ID'])
        eq(obj['VersionId'], want['VersionId'])
        _compare_dates(obj['LastModified'], want['LastModified'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all objects (anonymous)')
@attr(assertion='succeeds')
def test_bucket_list_objects_anonymous():
    """An unauthenticated client can list a public-read bucket."""
    bucket = get_new_bucket()
    get_client().put_bucket_acl(Bucket=bucket, ACL='public-read')
    anon = get_unauthenticated_client()
    anon.list_objects(Bucket=bucket)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all objects (anonymous) with list-objects-v2')
@attr(assertion='succeeds')
@attr('list-objects-v2')
def test_bucket_listv2_objects_anonymous():
    """An unauthenticated client can v2-list a public-read bucket."""
    bucket = get_new_bucket()
    get_client().put_bucket_acl(Bucket=bucket, ACL='public-read')
    anon = get_unauthenticated_client()
    anon.list_objects_v2(Bucket=bucket)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all objects (anonymous)')
@attr(assertion='fails')
def test_bucket_list_objects_anonymous_fail():
    """Anonymous listing of a private bucket is rejected with 403 AccessDenied."""
    bucket = get_new_bucket()
    anon = get_unauthenticated_client()
    e = assert_raises(ClientError, anon.list_objects, Bucket=bucket)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
    eq(error_code, 'AccessDenied')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all objects (anonymous) with list-objects-v2')
@attr(assertion='fails')
@attr('list-objects-v2')
def test_bucket_listv2_objects_anonymous_fail():
    """Anonymous v2 listing of a private bucket is rejected with 403 AccessDenied."""
    bucket = get_new_bucket()
    anon = get_unauthenticated_client()
    e = assert_raises(ClientError, anon.list_objects_v2, Bucket=bucket)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
    eq(error_code, 'AccessDenied')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='non-existant bucket')
@attr(assertion='fails 404')
def test_bucket_notexist():
    """Listing a never-created bucket yields 404 NoSuchBucket."""
    client = get_client()
    e = assert_raises(ClientError, client.list_objects, Bucket=get_new_bucket_name())
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchBucket')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='non-existant bucket with list-objects-v2')
@attr(assertion='fails 404')
@attr('list-objects-v2')
def test_bucketv2_notexist():
    """v2-listing a never-created bucket yields 404 NoSuchBucket."""
    client = get_client()
    e = assert_raises(ClientError, client.list_objects_v2, Bucket=get_new_bucket_name())
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchBucket')
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='non-existant bucket')
@attr(assertion='fails 404')
def test_bucket_delete_notexist():
    """Deleting a never-created bucket yields 404 NoSuchBucket."""
    client = get_client()
    e = assert_raises(ClientError, client.delete_bucket, Bucket=get_new_bucket_name())
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchBucket')
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='non-empty bucket')
@attr(assertion='fails 409')
def test_bucket_delete_nonempty():
    """Deleting a bucket that still holds an object fails with 409 BucketNotEmpty."""
    bucket = _create_objects(keys=['foo'])
    client = get_client()
    e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 409)
    eq(error_code, 'BucketNotEmpty')
def _do_set_bucket_canned_acl(client, bucket_name, canned_acl, i, results):
try:
client.put_bucket_acl(ACL=canned_acl, Bucket=bucket_name)
results[i] = True
except:
results[i] = False
def _do_set_bucket_canned_acl_concurrent(client, bucket_name, canned_acl, num, results):
    """Spawn *num* threads that each apply *canned_acl* to the bucket.

    Each worker writes its outcome into results[i]; the (unjoined) thread list
    is returned so the caller can wait for completion.
    """
    workers = [
        threading.Thread(target=_do_set_bucket_canned_acl,
                         args=(client, bucket_name, canned_acl, i, results))
        for i in range(num)
    ]
    for worker in workers:
        worker.start()
    return workers
def _do_wait_completion(t):
for thr in t:
thr.join()
@attr(resource='bucket')
@attr(method='put')
@attr(operation='concurrent set of acls on a bucket')
@attr(assertion='works')
def test_bucket_concurrent_set_canned_acl():
    """Fifty concurrent put_bucket_acl calls on one bucket must all succeed."""
    bucket = get_new_bucket()
    client = get_client()
    # boto2 retry defaults to 5, so 50 threads gives a request room to fail
    # through every retry if the underlying race exists
    num_threads = 50
    results = [None] * num_threads
    workers = _do_set_bucket_canned_acl_concurrent(client, bucket, 'public-read', num_threads, results)
    _do_wait_completion(workers)
    for outcome in results:
        eq(outcome, True)
@attr(resource='object')
@attr(method='put')
@attr(operation='non-existant bucket')
@attr(assertion='fails 404')
def test_object_write_to_nonexist_bucket():
    """Writing an object into a bucket that does not exist yields 404 NoSuchBucket."""
    # fix: removed unused local `key_names` — this test never creates objects
    bucket_name = 'whatchutalkinboutwillis'
    client = get_client()
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='foo')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchBucket')
@attr(resource='bucket')
@attr(method='del')
@attr(operation='deleted bucket')
@attr(assertion='fails 404')
def test_bucket_create_delete():
    """Deleting an already-deleted bucket reports 404 NoSuchBucket."""
    bucket = get_new_bucket()
    client = get_client()
    client.delete_bucket(Bucket=bucket)
    # second delete must fail — the bucket is gone
    e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='get')
@attr(operation='read contents that were never written')
@attr(assertion='fails 404')
def test_object_read_notexist():
    """GET on a key that was never written yields 404 NoSuchKey."""
    bucket = get_new_bucket()
    client = get_client()
    e = assert_raises(ClientError, client.get_object, Bucket=bucket, Key='bar')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchKey')
# Module-level capture slot filled by get_http_response(); tests that need the
# raw wire response register the hook and then read this global afterwards.
http_response = None

def get_http_response(**kwargs):
    # boto3 'after-call' event hook: stash the raw http_response's attribute
    # dict (status, headers, '_content', ...) for later inspection by the test.
    global http_response
    http_response = kwargs['http_response'].__dict__
@attr(resource='object')
@attr(method='get')
@attr(operation='read contents that were never written to raise one error response')
@attr(assertion='RequestId appears in the error response')
def test_object_requestid_matches_header_on_error():
    """The RequestId in an error body matches the one in the response metadata."""
    bucket_name = get_new_bucket()
    client = get_client()
    # capture the raw http response after the failed request
    client.meta.events.register('after-call.s3.GetObject', get_http_response)
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='bar')
    response_body = http_response['_content']
    # fix: guard the match object before .group(1) — previously a non-matching
    # body raised AttributeError before the assert could report anything useful
    match = re.search(r'<RequestId>(.*)</RequestId>', response_body.encode('utf-8'))
    assert match is not None, 'no RequestId found in error response body'
    request_id = match.group(1)
    assert request_id is not None
    eq(request_id, e.response['ResponseMetadata']['RequestId'])
def _make_objs_dict(key_names):
objs_list = []
for key in key_names:
obj_dict = {'Key': key}
objs_list.append(obj_dict)
objs_dict = {'Objects': objs_list}
return objs_dict
@attr(resource='object')
@attr(method='post')
@attr(operation='delete multiple objects')
@attr(assertion='deletes multiple objects with a single call')
def test_multi_object_delete():
    """delete_objects removes all keys at once and a repeat call is harmless."""
    names = ['key0', 'key1', 'key2']
    bucket = _create_objects(keys=names)
    client = get_client()
    eq(len(client.list_objects(Bucket=bucket)['Contents']), 3)
    delete_payload = _make_objs_dict(key_names=names)
    # first pass actually deletes everything
    response = client.delete_objects(Bucket=bucket, Delete=delete_payload)
    eq(len(response['Deleted']), 3)
    assert 'Errors' not in response
    assert 'Contents' not in client.list_objects(Bucket=bucket)
    # deleting already-missing keys still reports per-key success
    response = client.delete_objects(Bucket=bucket, Delete=delete_payload)
    eq(len(response['Deleted']), 3)
    assert 'Errors' not in response
    assert 'Contents' not in client.list_objects(Bucket=bucket)
@attr(resource='object')
@attr(method='post')
@attr(operation='delete multiple objects with list-objects-v2')
@attr(assertion='deletes multiple objects with a single call')
@attr('list-objects-v2')
def test_multi_objectv2_delete():
    """delete_objects removes all keys at once (verified via list-objects-v2)."""
    names = ['key0', 'key1', 'key2']
    bucket = _create_objects(keys=names)
    client = get_client()
    eq(len(client.list_objects_v2(Bucket=bucket)['Contents']), 3)
    delete_payload = _make_objs_dict(key_names=names)
    # first pass actually deletes everything
    response = client.delete_objects(Bucket=bucket, Delete=delete_payload)
    eq(len(response['Deleted']), 3)
    assert 'Errors' not in response
    assert 'Contents' not in client.list_objects_v2(Bucket=bucket)
    # deleting already-missing keys still reports per-key success
    response = client.delete_objects(Bucket=bucket, Delete=delete_payload)
    eq(len(response['Deleted']), 3)
    assert 'Errors' not in response
    assert 'Contents' not in client.list_objects_v2(Bucket=bucket)
@attr(resource='object')
@attr(method='put')
@attr(operation='write zero-byte key')
@attr(assertion='correct content length')
def test_object_head_zero_bytes():
    """HEAD on an empty object reports ContentLength 0."""
    bucket = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket, Key='foo', Body='')
    head = client.head_object(Bucket=bucket, Key='foo')
    eq(head['ContentLength'], 0)
@attr(resource='object')
@attr(method='put')
@attr(operation='write key')
@attr(assertion='correct etag')
def test_object_write_check_etag():
    """PUT returns HTTP 200 and the quoted MD5-of-body as the ETag."""
    bucket = get_new_bucket()
    response = get_client().put_object(Bucket=bucket, Key='foo', Body='bar')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    eq(response['ETag'], '"37b51d194a7513e45b56f6524f2d51f2"')
@attr(resource='object')
@attr(method='put')
@attr(operation='write key')
@attr(assertion='correct cache control header')
def test_object_write_cache_control():
    """The Cache-Control value set on PUT is echoed back by HEAD."""
    bucket = get_new_bucket()
    client = get_client()
    cache_control = 'public, max-age=14400'
    client.put_object(Bucket=bucket, Key='foo', Body='bar', CacheControl=cache_control)
    head = client.head_object(Bucket=bucket, Key='foo')
    eq(head['ResponseMetadata']['HTTPHeaders']['cache-control'], cache_control)
@attr(resource='object')
@attr(method='put')
@attr(operation='write key')
@attr(assertion='correct expires header')
def test_object_write_expires():
    """The Expires timestamp set on PUT is returned by HEAD (to second precision)."""
    bucket = get_new_bucket()
    client = get_client()
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=+6000)
    client.put_object(Bucket=bucket, Key='foo', Body='bar', Expires=expires)
    head = client.head_object(Bucket=bucket, Key='foo')
    _compare_dates(expires, head['Expires'])
def _get_body(response):
body = response['Body']
got = body.read()
return got
@attr(resource='object')
@attr(method='all')
@attr(operation='complete object life cycle')
@attr(assertion='read back what we wrote and rewrote')
def test_object_write_read_update_read_delete():
    """Write, read back, overwrite, read back again, then delete."""
    bucket = get_new_bucket()
    client = get_client()
    # initial write and read-back
    client.put_object(Bucket=bucket, Key='foo', Body='bar')
    eq(_get_body(client.get_object(Bucket=bucket, Key='foo')), 'bar')
    # overwrite and read-back
    client.put_object(Bucket=bucket, Key='foo', Body='soup')
    eq(_get_body(client.get_object(Bucket=bucket, Key='foo')), 'soup')
    # clean up
    client.delete_object(Bucket=bucket, Key='foo')
def _set_get_metadata(metadata, bucket_name=None):
    """Store an object whose 'meta1' metadata is *metadata*, then return the
    value read back via GetObject.

    Creates a fresh bucket unless *bucket_name* names an existing one.
    """
    if bucket_name is None:
        bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar',
                      Metadata={'meta1': metadata})
    response = client.get_object(Bucket=bucket_name, Key='foo')
    return response['Metadata']['meta1']
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-read')
@attr(assertion='reread what we wrote')
def test_object_set_get_metadata_none_to_good():
    """A plain metadata value round-trips unchanged."""
    eq(_set_get_metadata('mymeta'), 'mymeta')
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-read')
@attr(assertion='write empty value, returns empty value')
def test_object_set_get_metadata_none_to_empty():
    """An empty metadata value round-trips as empty."""
    eq(_set_get_metadata(''), '')
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-write')
@attr(assertion='empty value replaces old')
def test_object_set_get_metadata_overwrite_to_empty():
    """Rewriting with an empty metadata value replaces the previous value."""
    bucket = get_new_bucket()
    eq(_set_get_metadata('oldmeta', bucket), 'oldmeta')
    eq(_set_get_metadata('', bucket), '')
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-write')
@attr(assertion='UTF-8 values passed through')
def test_object_set_get_unicode_metadata():
    """A UTF-8 metadata value injected as a raw header round-trips intact."""
    bucket_name = get_new_bucket()
    client = get_client()

    def set_unicode_metadata(**kwargs):
        # inject the raw header directly so boto3 does not re-encode it
        kwargs['params']['headers']['x-amz-meta-meta1'] = u"Hello World\xe9"

    client.meta.events.register('before-call.s3.PutObject', set_unicode_metadata)
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    response = client.get_object(Bucket=bucket_name, Key='foo')
    eq(response['Metadata']['meta1'].decode('utf-8'), u"Hello World\xe9")
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-write')
@attr(assertion='non-UTF-8 values detected, but preserved')
@attr('fails_strict_rfc2616')
def test_object_set_get_non_utf8_metadata():
    """A metadata value with a control byte comes back RFC 2047 encoded-word wrapped."""
    bucket = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket, Key='foo', Body='bar',
                      Metadata={'meta1': '\x04mymeta'})
    response = client.get_object(Bucket=bucket, Key='foo')
    eq(response['Metadata']['meta1'], '=?UTF-8?Q?=04mymeta?=')
def _set_get_metadata_unreadable(metadata, bucket_name=None):
    """Round-trip a metadata value containing non-printable characters.

    Returns decode_header()'s list of (value, charset) pairs for the value
    that the server handed back, so callers can check both the stored bytes
    and the encoding used to transport them.
    """
    stored = _set_get_metadata(metadata, bucket_name)
    return decode_header(stored)
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write')
@attr(assertion='non-priting prefixes noted and preserved')
@attr('fails_strict_rfc2616')
def test_object_set_get_metadata_empty_to_unreadable_prefix():
    """A metadata value with a non-printable leading byte is preserved."""
    value = '\x04w'
    eq(_set_get_metadata_unreadable(value), [(value, 'utf-8')])
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write')
@attr(assertion='non-priting suffixes noted and preserved')
@attr('fails_strict_rfc2616')
def test_object_set_get_metadata_empty_to_unreadable_suffix():
    """A metadata value with a non-printable trailing byte is preserved."""
    value = 'h\x04'
    eq(_set_get_metadata_unreadable(value), [(value, 'utf-8')])
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write')
@attr(assertion='non-priting in-fixes noted and preserved')
@attr('fails_strict_rfc2616')
def test_object_set_get_metadata_empty_to_unreadable_infix():
    """A metadata value with an embedded non-printable byte is preserved."""
    value = 'h\x04w'
    eq(_set_get_metadata_unreadable(value), [(value, 'utf-8')])
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata re-write')
@attr(assertion='non-priting prefixes noted and preserved')
@attr('fails_strict_rfc2616')
def test_object_set_get_metadata_overwrite_to_unreadable_prefix():
    """Rewriting with a different non-printable prefix preserves the new value."""
    first = '\x04w'
    eq(_set_get_metadata_unreadable(first), [(first, 'utf-8')])
    second = '\x05w'
    eq(_set_get_metadata_unreadable(second), [(second, 'utf-8')])
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata re-write')
@attr(assertion='non-priting suffixes noted and preserved')
@attr('fails_strict_rfc2616')
def test_object_set_get_metadata_overwrite_to_unreadable_suffix():
    """Rewriting with a different non-printable suffix preserves the new value."""
    first = 'h\x04'
    eq(_set_get_metadata_unreadable(first), [(first, 'utf-8')])
    second = 'h\x05'
    eq(_set_get_metadata_unreadable(second), [(second, 'utf-8')])
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata re-write')
@attr(assertion='non-priting in-fixes noted and preserved')
@attr('fails_strict_rfc2616')
def test_object_set_get_metadata_overwrite_to_unreadable_infix():
    """Rewriting with a different embedded non-printable byte preserves the new value."""
    first = 'h\x04w'
    eq(_set_get_metadata_unreadable(first), [(first, 'utf-8')])
    second = 'h\x05w'
    eq(_set_get_metadata_unreadable(second), [(second, 'utf-8')])
@attr(resource='object')
@attr(method='put')
@attr(operation='data re-write')
@attr(assertion='replaces previous metadata')
def test_object_metadata_replaced_on_put():
    """A second PUT without metadata wipes the metadata from the first PUT."""
    bucket = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket, Key='foo', Body='bar',
                      Metadata={'meta1': 'bar'})
    # overwrite with no metadata — the old metadata must not survive
    client.put_object(Bucket=bucket, Key='foo', Body='bar')
    response = client.get_object(Bucket=bucket, Key='foo')
    eq(response['Metadata'], {})
@attr(resource='object')
@attr(method='put')
@attr(operation='data write from file (w/100-Continue)')
@attr(assertion='succeeds and returns written data')
def test_object_write_file():
    """A file-like Body is written and read back as its contents."""
    bucket = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket, Key='foo', Body=StringIO('bar'))
    eq(_get_body(client.get_object(Bucket=bucket, Key='foo')), 'bar')
def _get_post_url(bucket_name):
    """Return the endpoint URL used for browser-style POST uploads to *bucket_name*."""
    return '{endpoint}/{bucket_name}'.format(
        endpoint=get_config_endpoint(), bucket_name=bucket_name)
@attr(resource='object')
@attr(method='post')
@attr(operation='anonymous browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
def test_post_object_anonymous_request():
    """An anonymous browser-style POST to a public-read-write bucket succeeds."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    url = _get_post_url(bucket_name)
    payload = OrderedDict([
        ("key", "foo.txt"),
        ("acl", "public-read"),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])
    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
    r = requests.post(url, files=payload)
    eq(r.status_code, 204)
    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
    eq(_get_body(response), 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
def test_post_object_authenticated_request():
    """A signed browser-style POST upload succeeds and the data reads back."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=+6000)
    # policy restricting key prefix, acl, content type and size
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }
    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())
    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])
    r = requests.post(url, files=payload)
    eq(r.status_code, 204)
    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
    eq(_get_body(response), 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request, no content-type header')
@attr(assertion='succeeds and returns written data')
def test_post_object_authenticated_no_content_type():
    """A signed POST upload without a Content-Type field still succeeds."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=+6000)
    # note: no Content-Type condition in the policy for this variant
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["content-length-range", 0, 1024],
        ],
    }
    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())
    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ('file', ('bar')),
    ])
    r = requests.post(url, files=payload)
    eq(r.status_code, 204)
    response = client.get_object(Bucket=bucket_name, Key="foo.txt")
    eq(_get_body(response), 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request, bad access key')
@attr(assertion='fails')
def test_post_object_authenticated_request_bad_access_key():
    """A signed POST upload presenting a bogus access key is rejected with 403."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=+6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }
    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())
    # valid signature, but the access key id is deliberately wrong
    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", 'foo'),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])
    r = requests.post(url, files=payload)
    eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='anonymous browser based upload via POST request')
@attr(assertion='succeeds with status 201')
def test_post_object_set_success_code():
    """success_action_status=201 yields a 201 response carrying the key in XML."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
    url = _get_post_url(bucket_name)
    payload = OrderedDict([
        ("key", "foo.txt"),
        ("acl", "public-read"),
        ("success_action_status", "201"),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])
    r = requests.post(url, files=payload)
    eq(r.status_code, 201)
    message = ET.fromstring(r.content).find('Key')
    eq(message.text, 'foo.txt')
@attr(resource='object')
@attr(method='post')
@attr(operation='anonymous browser based upload via POST request')
@attr(assertion='succeeds with status 204')
def test_post_object_set_invalid_success_code():
    """An unsupported success_action_status falls back to the default 204/no-body."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
    url = _get_post_url(bucket_name)
    payload = OrderedDict([
        ("key", "foo.txt"),
        ("acl", "public-read"),
        ("success_action_status", "404"),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])
    r = requests.post(url, files=payload)
    eq(r.status_code, 204)
    eq(r.content, '')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
def test_post_object_upload_larger_than_chunk():
    """A multi-megabyte signed POST upload (larger than one chunk) round-trips."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=+6000)
    # allow bodies up to 5 MiB for the large payload below
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 5*1024*1024],
        ],
    }
    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())
    foo_string = 'foo' * 1024*1024
    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ('file', foo_string),
    ])
    r = requests.post(url, files=payload)
    eq(r.status_code, 204)
    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
    eq(_get_body(response), foo_string)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
def test_post_object_set_key_from_filename():
    """A $(unknown) key placeholder is replaced by the uploaded file's name."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=+6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }
    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())
    # the file tuple supplies the filename that substitutes the key placeholder
    payload = OrderedDict([
        ("key", "$(unknown)"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ('file', ('foo.txt', 'bar')),
    ])
    r = requests.post(url, files=payload)
    eq(r.status_code, 204)
    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
    eq(_get_body(response), 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds with status 204')
def test_post_object_ignored_header():
    """A form field not covered by the policy (x-ignore-*) does not break the upload."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=+6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }
    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())
    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("x-ignore-foo", "bar"),
        ('file', ('bar')),
    ])
    r = requests.post(url, files=payload)
    eq(r.status_code, 204)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds with status 204')
def test_post_object_case_insensitive_condition_fields():
    """Policy condition names and form field names are matched case-insensitively."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    utc = pytz.utc
    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
    # mixed-case condition keys must still be honoured by the server
    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
    "conditions": [\
    {"bUcKeT": bucket_name},\
    ["StArTs-WiTh", "$KeY", "foo"],\
    {"AcL": "private"},\
    ["StArTs-WiTh", "$CoNtEnT-TyPe", "text/plain"],\
    ["content-length-range", 0, 1024]\
    ]\
    }
    json_policy_document = json.JSONEncoder().encode(policy_document)
    policy = base64.b64encode(json_policy_document)
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())
    # fix: removed unused local `foo_string` (copy-paste leftover from the
    # larger-than-chunk test; this test uploads only 'bar')
    payload = OrderedDict([ ("kEy" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
    ("aCl" , "private"),("signature" , signature),("pOLICy" , policy),\
    ("Content-Type" , "text/plain"),('file', ('bar'))])
    r = requests.post(url, files = payload)
    eq(r.status_code, 204)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds with escaped leading $ and returns written data')
def test_post_object_escaped_field_values():
    """A key beginning with an escaped '$' passes policy matching and round-trips."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=+6000)
    # the backslash-dollar prefix is deliberate: it exercises escaping of '$'
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "\$foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }
    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())
    payload = OrderedDict([
        ("key", "\$foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])
    r = requests.post(url, files=payload)
    eq(r.status_code, 204)
    response = client.get_object(Bucket=bucket_name, Key='\$foo.txt')
    eq(_get_body(response), 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns redirect url')
def test_post_object_success_redirect_action():
    """POST upload with success_action_redirect answers 200 and redirects to
    the given URL carrying bucket, key and etag query parameters."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)

    url = _get_post_url(bucket_name)
    redirect_url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["eq", "$success_action_redirect", redirect_url],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("success_action_redirect", redirect_url),
        ("file", "bar"),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 200)

    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
    expected = '{rurl}?bucket={bucket}&key={key}&etag=%22{etag}%22'.format(
        rurl=redirect_url, bucket=bucket_name, key='foo.txt',
        etag=response['ETag'].strip('"'))
    eq(r.url, expected)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with invalid signature error')
def test_post_object_invalid_signature():
    """POST upload with a corrupted (reversed) signature is rejected with 403.

    Fix: "\$foo" used the invalid escape '\$'; "\\$foo" is the same runtime
    string without the SyntaxWarning on modern Python 3.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "\\$foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    # Reverse the valid signature to make it invalid.
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())[::-1]

    payload = OrderedDict([
        ("key", "\\$foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", "bar"),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with access key does not exist error')
def test_post_object_invalid_access_key():
    """POST upload with a corrupted (reversed) access key id is rejected 403.

    Fix: "\$foo" used the invalid escape '\$'; "\\$foo" is the same runtime
    string without the SyntaxWarning on modern Python 3.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "\\$foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())

    payload = OrderedDict([
        ("key", "\\$foo.txt"),
        # Reversed access key id — should not match any existing key.
        ("AWSAccessKeyId", aws_access_key_id[::-1]),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", "bar"),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with invalid expiration error')
def test_post_object_invalid_date_format():
    """POST upload whose policy expiration is not ISO-8601 (plain str() of a
    datetime) is rejected with 400.

    Fix: "\$foo" used the invalid escape '\$'; "\\$foo" is the same runtime
    string without the SyntaxWarning on modern Python 3.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        # str(expires) is not the required "%Y-%m-%dT%H:%M:%SZ" format.
        "expiration": str(expires),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "\\$foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())

    payload = OrderedDict([
        ("key", "\\$foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", "bar"),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with missing key error')
def test_post_object_no_key_specified():
    """A POST upload whose form omits the 'key' field is rejected with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())

    # Deliberately no ("key", ...) entry in the form.
    payload = OrderedDict([
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", "bar"),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with missing signature error')
def test_post_object_missing_signature():
    """A POST upload whose form omits the 'signature' field is rejected 400.

    Fixes: "\$foo" used the invalid escape '\$' (now "\\$foo", identical
    runtime value); the original also computed a signature it never sent —
    that dead computation has been removed.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "\\$foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_access_key_id = get_main_aws_access_key()

    # Deliberately no ("signature", ...) entry in the form.
    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", "bar"),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with extra input fields policy error')
def test_post_object_missing_policy_condition():
    """POST upload whose policy lacks a condition for the 'bucket' form field
    is rejected with 403 (the request carries a field the policy does not
    cover).

    Fix: "\$foo" used the invalid escape '\$'; "\\$foo" is the same runtime
    string without the SyntaxWarning on modern Python 3.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        # Deliberately no {"bucket": ...} condition.
        "conditions": [
            ["starts-with", "$key", "\\$foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", "bar"),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds using starts-with restriction on metadata header')
def test_post_object_user_specified_header():
    """POST upload with a starts-with policy condition on an x-amz-meta-*
    header succeeds and the metadata is stored on the object."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
            ["starts-with", "$x-amz-meta-foo", "bar"]
        ],
    }

    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("x-amz-meta-foo", "barclamp"),  # satisfies the starts-with "bar" rule
        ("file", "bar"),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 204)

    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
    eq(response['Metadata']['foo'], 'barclamp')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with policy condition failed error due to missing field in POST request')
def test_post_object_request_missing_policy_specified_field():
    """POST upload that omits a form field required by a policy condition
    (x-amz-meta-foo) is rejected with 403."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
            ["starts-with", "$x-amz-meta-foo", "bar"]
        ],
    }

    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())

    # The x-amz-meta-foo field demanded by the policy is deliberately absent.
    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", "bar"),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with conditions must be list error')
def test_post_object_condition_is_case_sensitive():
    """The top-level policy key must be exactly 'conditions'; an upper-case
    'CONDITIONS' key is not recognized, so the request fails with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        # Wrong case on purpose — the server treats the policy as having
        # no conditions list.
        "CONDITIONS": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", "bar"),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with expiration must be string error')
def test_post_object_expires_is_case_sensitive():
    """The top-level policy key must be exactly 'expiration'; an upper-case
    'EXPIRATION' key is not recognized, so the request fails with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        # Wrong case on purpose — the server sees no 'expiration' entry.
        "EXPIRATION": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", "bar"),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with policy expired error')
def test_post_object_expired_policy():
    """POST upload with a policy whose expiration lies in the past (now minus
    6000 seconds) is rejected with 403."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)

    # Expiration deliberately in the past.
    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=-6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", "bar"),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails using equality restriction on metadata header')
def test_post_object_invalid_request_field_value():
    """POST upload violating an eq policy condition (x-amz-meta-foo must be
    empty but 'barclamp' is sent) is rejected with 403."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
            ["eq", "$x-amz-meta-foo", ""]
        ],
    }

    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("x-amz-meta-foo", "barclamp"),  # violates the eq "" condition
        ("file", "bar"),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with policy missing expiration error')
def test_post_object_missing_expires_condition():
    """POST upload whose policy omits the 'expiration' entry is rejected 400.

    Fix: the original computed an 'expires' timestamp (and a pytz tz object)
    that were never used — the whole point of this test is that no expiration
    is sent; the dead code has been removed.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)

    # Deliberately no "expiration" key in the policy.
    policy_document = {
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            ["content-length-range", 0, 1024],
        ],
    }

    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", "bar"),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with policy missing conditions error')
def test_post_object_missing_conditions_list():
    """POST upload whose policy has no 'conditions' list at all is rejected
    with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    # Only an expiration — deliberately no "conditions" key.
    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ")}

    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", "bar"),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with allowable upload size exceeded error')
def test_post_object_upload_size_limit_exceeded():
    """POST upload larger than the policy's content-length-range maximum
    (0 bytes here) is rejected with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            # Maximum allowed size is zero bytes; 'bar' exceeds it.
            ["content-length-range", 0, 0],
        ],
    }

    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", "bar"),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with invalid content length error')
def test_post_object_missing_content_length_argument():
    """POST upload whose content-length-range condition is missing its upper
    bound is rejected with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            # Malformed: only one bound instead of [min, max].
            ["content-length-range", 0],
        ],
    }

    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", "bar"),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with invalid JSON error')
def test_post_object_invalid_content_length_argument():
    """POST upload with a negative content-length-range lower bound is
    rejected with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            # Invalid: negative lower bound.
            ["content-length-range", -1, 0],
        ],
    }

    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", "bar"),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with upload size less than minimum allowable error')
def test_post_object_upload_size_below_minimum():
    """POST upload smaller than the policy's content-length-range minimum
    (512 bytes here, versus a 3-byte body) is rejected with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {"bucket": bucket_name},
            ["starts-with", "$key", "foo"],
            {"acl": "private"},
            ["starts-with", "$Content-Type", "text/plain"],
            # Minimum 512 bytes; the 'bar' body is far below it.
            ["content-length-range", 512, 1000],
        ],
    }

    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", "bar"),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='empty conditions return appropriate error response')
def test_post_object_empty_conditions():
    """POST upload whose conditions list contains only an empty dict is
    rejected with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)

    expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=6000)
    policy_document = {
        "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "conditions": [
            {}  # deliberately empty condition
        ],
    }

    policy = base64.b64encode(json.JSONEncoder().encode(policy_document))
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())

    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("Content-Type", "text/plain"),
        ("file", "bar"),
    ])

    r = requests.post(url, files=payload)
    eq(r.status_code, 400)
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Match: the latest ETag')
@attr(assertion='succeeds')
def test_get_object_ifmatch_good():
    """GET with If-Match set to the object's current ETag returns the body."""
    bucket_name = get_new_bucket()
    client = get_client()

    put_response = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    get_response = client.get_object(Bucket=bucket_name, Key='foo',
                                     IfMatch=put_response['ETag'])
    eq(_get_body(get_response), 'bar')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Match: bogus ETag')
@attr(assertion='fails 412')
def test_get_object_ifmatch_failed():
    """GET with an If-Match ETag that does not match fails 412."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    e = assert_raises(ClientError, client.get_object,
                      Bucket=bucket_name, Key='foo', IfMatch='"ABCORZ"')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 412)
    eq(error_code, 'PreconditionFailed')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-None-Match: the latest ETag')
@attr(assertion='fails 304')
def test_get_object_ifnonematch_good():
    """GET with If-None-Match set to the current ETag fails 304."""
    bucket_name = get_new_bucket()
    client = get_client()

    put_response = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    e = assert_raises(ClientError, client.get_object,
                      Bucket=bucket_name, Key='foo',
                      IfNoneMatch=put_response['ETag'])
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 304)
    eq(e.response['Error']['Message'], 'Not Modified')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-None-Match: bogus ETag')
@attr(assertion='succeeds')
def test_get_object_ifnonematch_failed():
    """GET with a non-matching If-None-Match ETag returns the body."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    response = client.get_object(Bucket=bucket_name, Key='foo',
                                 IfNoneMatch='ABCORZ')
    eq(_get_body(response), 'bar')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Modified-Since: before')
@attr(assertion='succeeds')
def test_get_object_ifmodifiedsince_good():
    """GET with If-Modified-Since far in the past returns the body."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    response = client.get_object(Bucket=bucket_name, Key='foo',
                                 IfModifiedSince='Sat, 29 Oct 1994 19:43:31 GMT')
    eq(_get_body(response), 'bar')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Modified-Since: after')
@attr(assertion='fails 304')
def test_get_object_ifmodifiedsince_failed():
    """GET with If-Modified-Since just after the object's mtime fails 304."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    response = client.get_object(Bucket=bucket_name, Key='foo')
    # Strip the timezone offset, parse, and step one second past the mtime.
    last_modified = str(response['LastModified']).split('+')[0]
    mtime = datetime.datetime.strptime(last_modified, '%Y-%m-%d %H:%M:%S')
    after = mtime + datetime.timedelta(seconds=1)
    after_str = time.strftime("%a, %d %b %Y %H:%M:%S GMT", after.timetuple())

    # Make sure wall-clock time has passed 'after' before issuing the GET.
    time.sleep(1)

    e = assert_raises(ClientError, client.get_object,
                      Bucket=bucket_name, Key='foo', IfModifiedSince=after_str)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 304)
    eq(e.response['Error']['Message'], 'Not Modified')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Unmodified-Since: before')
@attr(assertion='fails 412')
def test_get_object_ifunmodifiedsince_good():
    """GET with If-Unmodified-Since before the object's mtime fails 412."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    e = assert_raises(ClientError, client.get_object,
                      Bucket=bucket_name, Key='foo',
                      IfUnmodifiedSince='Sat, 29 Oct 1994 19:43:31 GMT')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 412)
    eq(error_code, 'PreconditionFailed')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Unmodified-Since: after')
@attr(assertion='succeeds')
def test_get_object_ifunmodifiedsince_failed():
    """GET with If-Unmodified-Since far in the future returns the body."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    response = client.get_object(Bucket=bucket_name, Key='foo',
                                 IfUnmodifiedSince='Sat, 29 Oct 2100 19:43:31 GMT')
    eq(_get_body(response), 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='data re-write w/ If-Match: the latest ETag')
@attr(assertion='replaces previous data and metadata')
@attr('fails_on_aws')
def test_put_object_ifmatch_good():
    """PUT with If-Match set to the object's current ETag overwrites it."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    response = client.get_object(Bucket=bucket_name, Key='foo')
    eq(_get_body(response), 'bar')
    etag = response['ETag'].replace('"', '')

    # Inject the conditional header into the upcoming PutObject call.
    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': etag}))
    client.meta.events.register('before-call.s3.PutObject', lf)
    client.put_object(Bucket=bucket_name, Key='foo', Body='zar')

    response = client.get_object(Bucket=bucket_name, Key='foo')
    eq(_get_body(response), 'zar')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Match: bogus ETag')
@attr(assertion='fails 412')
def test_put_object_ifmatch_failed():
    """PUT with a non-matching If-Match ETag fails 412 and leaves the
    original object data intact."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    response = client.get_object(Bucket=bucket_name, Key='foo')
    eq(_get_body(response), 'bar')

    # Inject a bogus conditional header into the upcoming PutObject call.
    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '"ABCORZ"'}))
    client.meta.events.register('before-call.s3.PutObject', lf)

    e = assert_raises(ClientError, client.put_object,
                      Bucket=bucket_name, Key='foo', Body='zar')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 412)
    eq(error_code, 'PreconditionFailed')

    # The original data must be unchanged.
    response = client.get_object(Bucket=bucket_name, Key='foo')
    eq(_get_body(response), 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite existing object w/ If-Match: *')
@attr(assertion='replaces previous data and metadata')
@attr('fails_on_aws')
def test_put_object_ifmatch_overwrite_existed_good():
    """PUT with 'If-Match: *' succeeds when the object already exists."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')

    response = client.get_object(Bucket=bucket_name, Key='foo')
    eq(_get_body(response), 'bar')

    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '*'}))
    client.meta.events.register('before-call.s3.PutObject', lf)
    client.put_object(Bucket=bucket_name, Key='foo', Body='zar')

    response = client.get_object(Bucket=bucket_name, Key='foo')
    eq(_get_body(response), 'zar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite non-existing object w/ If-Match: *')
@attr(assertion='fails 412')
@attr('fails_on_aws')
def test_put_object_ifmatch_nonexisted_failed():
    """PUT with 'If-Match: *' on a nonexistent key fails 412 and creates nothing."""
    bucket_name = get_new_bucket()
    client = get_client()
    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '*'}))
    client.meta.events.register('before-call.s3.PutObject', lf)
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='bar')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 412)
    eq(error_code, 'PreconditionFailed')
    # the precondition failure must not have created the object
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchKey')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite existing object w/ If-None-Match: outdated ETag')
@attr(assertion='replaces previous data and metadata')
@attr('fails_on_aws')
def test_put_object_ifnonmatch_good():
    """PUT with If-None-Match set to a non-matching ETag succeeds and replaces the data."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    response = client.get_object(Bucket=bucket_name, Key='foo')
    body = _get_body(response)
    eq(body, 'bar')
    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': 'ABCORZ'}))
    client.meta.events.register('before-call.s3.PutObject', lf)
    response = client.put_object(Bucket=bucket_name,Key='foo', Body='zar')
    response = client.get_object(Bucket=bucket_name, Key='foo')
    body = _get_body(response)
    eq(body, 'zar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite existing object w/ If-None-Match: the latest ETag')
@attr(assertion='fails 412')
@attr('fails_on_aws')
def test_put_object_ifnonmatch_failed():
    """PUT with If-None-Match equal to the current ETag fails 412 and leaves the object untouched."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    response = client.get_object(Bucket=bucket_name, Key='foo')
    body = _get_body(response)
    eq(body, 'bar')
    # ETag comes back quoted; strip the quotes before reuse in the header
    etag = response['ETag'].replace('"', '')
    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': etag}))
    client.meta.events.register('before-call.s3.PutObject', lf)
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='zar')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 412)
    eq(error_code, 'PreconditionFailed')
    # the failed overwrite must not have altered the stored body
    response = client.get_object(Bucket=bucket_name, Key='foo')
    body = _get_body(response)
    eq(body, 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite non-existing object w/ If-None-Match: *')
@attr(assertion='succeeds')
@attr('fails_on_aws')
def test_put_object_ifnonmatch_nonexisted_good():
    """PUT with 'If-None-Match: *' on a nonexistent key succeeds and creates the object."""
    bucket_name = get_new_bucket()
    client = get_client()
    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': '*'}))
    client.meta.events.register('before-call.s3.PutObject', lf)
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    response = client.get_object(Bucket=bucket_name, Key='foo')
    body = _get_body(response)
    eq(body, 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite existing object w/ If-None-Match: *')
@attr(assertion='fails 412')
@attr('fails_on_aws')
def test_put_object_ifnonmatch_overwrite_existed_failed():
    """PUT with 'If-None-Match: *' on an existing object fails 412 and leaves the data untouched."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    response = client.get_object(Bucket=bucket_name, Key='foo')
    body = _get_body(response)
    eq(body, 'bar')
    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': '*'}))
    client.meta.events.register('before-call.s3.PutObject', lf)
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='zar')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 412)
    eq(error_code, 'PreconditionFailed')
    # the failed overwrite must not have altered the stored body
    response = client.get_object(Bucket=bucket_name, Key='foo')
    body = _get_body(response)
    eq(body, 'bar')
def _setup_bucket_object_acl(bucket_acl, object_acl):
    """
    Create a fresh bucket with *bucket_acl*, put an empty 'foo' key into
    it with *object_acl*, and return the new bucket's name.
    """
    name = get_new_bucket_name()
    conn = get_client()
    conn.create_bucket(ACL=bucket_acl, Bucket=name)
    conn.put_object(ACL=object_acl, Bucket=name, Key='foo')
    return name
def _setup_bucket_acl(bucket_acl=None):
    """Create a new bucket whose canned ACL is *bucket_acl* and return its name."""
    name = get_new_bucket_name()
    get_client().create_bucket(ACL=bucket_acl, Bucket=name)
    return name
@attr(resource='object')
@attr(method='get')
@attr(operation='publically readable bucket')
@attr(assertion='bucket is readable')
def test_object_raw_get():
    """An unauthenticated GET on a public-read bucket/object returns 200."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    unauthenticated_client = get_unauthenticated_client()
    response = unauthenticated_client.get_object(Bucket=bucket_name, Key='foo')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='get')
@attr(operation='deleted object and bucket')
@attr(assertion='fails 404')
def test_object_raw_get_bucket_gone():
    """Unauthenticated GET after object and bucket deletion fails 404 NoSuchBucket."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    client = get_client()
    client.delete_object(Bucket=bucket_name, Key='foo')
    client.delete_bucket(Bucket=bucket_name)
    unauthenticated_client = get_unauthenticated_client()
    e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='get')
@attr(operation='deleted object and bucket')
# NOTE(review): the method attr says 'get' but this exercises DeleteObject —
# confirm whether the attr is stale.
@attr(assertion='fails 404')
def test_object_delete_key_bucket_gone():
    """Unauthenticated DELETE after object and bucket deletion fails 404 NoSuchBucket."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    client = get_client()
    client.delete_object(Bucket=bucket_name, Key='foo')
    client.delete_bucket(Bucket=bucket_name)
    unauthenticated_client = get_unauthenticated_client()
    e = assert_raises(ClientError, unauthenticated_client.delete_object, Bucket=bucket_name, Key='foo')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='get')
@attr(operation='deleted object')
@attr(assertion='fails 404')
def test_object_raw_get_object_gone():
    """Unauthenticated GET of a deleted object (bucket still exists) fails 404 NoSuchKey."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    client = get_client()
    client.delete_object(Bucket=bucket_name, Key='foo')
    unauthenticated_client = get_unauthenticated_client()
    e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchKey')
@attr(resource='bucket')
@attr(method='head')
@attr(operation='head bucket')
@attr(assertion='succeeds')
def test_bucket_head():
    """HEAD on an existing bucket returns 200."""
    bucket_name = get_new_bucket()
    client = get_client()
    response = client.head_bucket(Bucket=bucket_name)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr('fails_on_aws')
@attr(resource='bucket')
@attr(method='head')
@attr(operation='read bucket extended information')
@attr(assertion='extended information is getting updated')
def test_bucket_head_extended():
    """RGW-specific x-rgw-object-count / x-rgw-bytes-used headers track bucket contents."""
    bucket_name = get_new_bucket()
    client = get_client()
    response = client.head_bucket(Bucket=bucket_name)
    eq(int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-object-count']), 0)
    eq(int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-bytes-used']), 0)
    # three keys whose names total 9 bytes of content per _create_objects' convention
    # (presumably each object's body is its key name — confirm against helper)
    _create_objects(bucket_name=bucket_name, keys=['foo','bar','baz'])
    response = client.head_bucket(Bucket=bucket_name)
    eq(int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-object-count']), 3)
    eq(int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-bytes-used']), 9)
@attr(resource='bucket.acl')
@attr(method='get')
@attr(operation='unauthenticated on private bucket')
@attr(assertion='succeeds')
def test_object_raw_get_bucket_acl():
    """Object ACL governs object GET: public-read object in a private bucket is readable."""
    bucket_name = _setup_bucket_object_acl('private', 'public-read')
    unauthenticated_client = get_unauthenticated_client()
    response = unauthenticated_client.get_object(Bucket=bucket_name, Key='foo')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object.acl')
@attr(method='get')
@attr(operation='unauthenticated on private object')
@attr(assertion='fails 403')
def test_object_raw_get_object_acl():
    """Private object in a public-read bucket denies unauthenticated GET with 403."""
    bucket_name = _setup_bucket_object_acl('public-read', 'private')
    unauthenticated_client = get_unauthenticated_client()
    e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
    eq(error_code, 'AccessDenied')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='authenticated on public bucket/object')
@attr(assertion='succeeds')
def test_object_raw_authenticated():
    """Authenticated GET on a public-read bucket/object returns 200."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    client = get_client()
    response = client.get_object(Bucket=bucket_name, Key='foo')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='get')
@attr(operation='authenticated on private bucket/private object with modified response headers')
@attr(assertion='succeeds')
def test_object_raw_response_headers():
    """The response-* query overrides are echoed back in the GET response headers."""
    bucket_name = _setup_bucket_object_acl('private', 'private')
    client = get_client()
    response = client.get_object(Bucket=bucket_name, Key='foo', ResponseCacheControl='no-cache', ResponseContentDisposition='bla', ResponseContentEncoding='aaa', ResponseContentLanguage='esperanto', ResponseContentType='foo/bar', ResponseExpires='123')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], 'foo/bar')
    eq(response['ResponseMetadata']['HTTPHeaders']['content-disposition'], 'bla')
    eq(response['ResponseMetadata']['HTTPHeaders']['content-language'], 'esperanto')
    eq(response['ResponseMetadata']['HTTPHeaders']['content-encoding'], 'aaa')
    eq(response['ResponseMetadata']['HTTPHeaders']['cache-control'], 'no-cache')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='authenticated on private bucket/public object')
@attr(assertion='succeeds')
def test_object_raw_authenticated_bucket_acl():
    """Authenticated owner can GET a public-read object in a private bucket."""
    bucket_name = _setup_bucket_object_acl('private', 'public-read')
    client = get_client()
    response = client.get_object(Bucket=bucket_name, Key='foo')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='authenticated on public bucket/private object')
@attr(assertion='succeeds')
def test_object_raw_authenticated_object_acl():
    """Authenticated owner can GET a private object in a public-read bucket."""
    bucket_name = _setup_bucket_object_acl('public-read', 'private')
    client = get_client()
    response = client.get_object(Bucket=bucket_name, Key='foo')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='get')
@attr(operation='authenticated on deleted object and bucket')
@attr(assertion='fails 404')
def test_object_raw_authenticated_bucket_gone():
    """Authenticated GET after object and bucket deletion fails 404 NoSuchBucket."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    client = get_client()
    client.delete_object(Bucket=bucket_name, Key='foo')
    client.delete_bucket(Bucket=bucket_name)
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='get')
@attr(operation='authenticated on deleted object')
@attr(assertion='fails 404')
def test_object_raw_authenticated_object_gone():
    """Authenticated GET of a deleted object fails 404 NoSuchKey."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    client = get_client()
    client.delete_object(Bucket=bucket_name, Key='foo')
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchKey')
@attr(resource='object')
@attr(method='get')
@attr(operation='x-amz-expires check not expired')
@attr(assertion='succeeds')
def test_object_raw_get_x_amz_expires_not_expired():
    """A presigned GET URL with a still-valid expiry returns 200."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    client = get_client()
    params = {'Bucket': bucket_name, 'Key': 'foo'}
    url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=100000, HttpMethod='GET')
    res = requests.get(url).__dict__
    eq(res['status_code'], 200)
@attr(resource='object')
@attr(method='get')
@attr(operation='check x-amz-expires value out of range zero')
@attr(assertion='fails 403')
def test_object_raw_get_x_amz_expires_out_range_zero():
    """A presigned GET URL with ExpiresIn=0 is rejected with 403."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    client = get_client()
    params = {'Bucket': bucket_name, 'Key': 'foo'}
    url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=0, HttpMethod='GET')
    res = requests.get(url).__dict__
    eq(res['status_code'], 403)
@attr(resource='object')
@attr(method='get')
@attr(operation='check x-amz-expires value out of max range')
@attr(assertion='fails 403')
def test_object_raw_get_x_amz_expires_out_max_range():
    """A presigned GET URL with ExpiresIn above the allowed maximum is rejected with 403."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    client = get_client()
    params = {'Bucket': bucket_name, 'Key': 'foo'}
    # 609901 exceeds the SigV4 presign limit (presumably 604800s / 7 days — confirm)
    url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=609901, HttpMethod='GET')
    res = requests.get(url).__dict__
    eq(res['status_code'], 403)
@attr(resource='object')
@attr(method='get')
@attr(operation='check x-amz-expires value out of positive range')
@attr(assertion='succeeds')
# NOTE(review): the assertion attr says 'succeeds' but the test expects 403 —
# confirm whether the attr is stale.
def test_object_raw_get_x_amz_expires_out_positive_range():
    """A presigned GET URL with a negative ExpiresIn is rejected with 403."""
    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
    client = get_client()
    params = {'Bucket': bucket_name, 'Key': 'foo'}
    url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=-7, HttpMethod='GET')
    res = requests.get(url).__dict__
    eq(res['status_code'], 403)
@attr(resource='object')
@attr(method='put')
@attr(operation='unauthenticated, no object acls')
@attr(assertion='fails 403')
def test_object_anon_put():
    """Anonymous PUT to an object with default ACLs fails 403 AccessDenied."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo')
    unauthenticated_client = get_unauthenticated_client()
    e = assert_raises(ClientError, unauthenticated_client.put_object, Bucket=bucket_name, Key='foo', Body='foo')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
    eq(error_code, 'AccessDenied')
@attr(resource='object')
@attr(method='put')
@attr(operation='unauthenticated, publically writable object')
@attr(assertion='succeeds')
def test_object_anon_put_write_access():
    """Anonymous PUT succeeds when the bucket ACL is public-read-write."""
    bucket_name = _setup_bucket_acl('public-read-write')
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo')
    unauthenticated_client = get_unauthenticated_client()
    response = unauthenticated_client.put_object(Bucket=bucket_name, Key='foo', Body='foo')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='put')
@attr(operation='authenticated, no object acls')
@attr(assertion='succeeds')
def test_object_put_authenticated():
    """Authenticated PUT with default ACLs returns 200."""
    bucket_name = get_new_bucket()
    client = get_client()
    response = client.put_object(Bucket=bucket_name, Key='foo', Body='foo')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='put')
@attr(operation='authenticated, no object acls')
@attr(assertion='succeeds')
# NOTE(review): the assertion attr says 'succeeds' but the test expects 403 —
# confirm whether the attr is stale.
def test_object_raw_put_authenticated_expired():
    """A presigned PUT URL generated already-expired is rejected with 403."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo')
    params = {'Bucket': bucket_name, 'Key': 'foo'}
    url = client.generate_presigned_url(ClientMethod='put_object', Params=params, ExpiresIn=-1000, HttpMethod='PUT')
    # params wouldn't take a 'Body' parameter so we're passing it in here
    res = requests.put(url,data="foo").__dict__
    eq(res['status_code'], 403)
def check_bad_bucket_name(bucket_name):
    """
    Try to create a bucket named *bucket_name* and assert the request
    is rejected with 400 InvalidBucketName.
    """
    conn = get_client()
    err = assert_raises(ClientError, conn.create_bucket, Bucket=bucket_name)
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 400)
    eq(error_code, 'InvalidBucketName')
# AWS does not enforce all documented bucket restrictions.
# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/index.html?BucketRestrictions.html
@attr('fails_on_aws')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='name begins with underscore')
@attr(assertion='fails with subdomain: 400')
def test_bucket_create_naming_bad_starts_nonalpha():
    """A bucket name beginning with '_' is rejected as invalid."""
    bucket_name = get_new_bucket_name()
    check_bad_bucket_name('_' + bucket_name)
def check_invalid_bucketname(invalid_name):
    """
    Send a CreateBucket request carrying *invalid_name*, rewriting the
    request URL via a before-call hook so the name bypasses botocore's
    client-side ParamValidationError. Return the (status, error_code)
    pair extracted from the resulting failure.
    """
    conn = get_client()
    valid_bucket_name = get_new_bucket_name()

    def _swap_name(**kwargs):
        # substitute the invalid name into the already-built request URL
        params = kwargs['params']
        params['url'] = params['url'].replace(valid_bucket_name, invalid_name)

    conn.meta.events.register('before-call.s3.CreateBucket', _swap_name)
    err = assert_raises(ClientError, conn.create_bucket, Bucket=invalid_name)
    return _get_status_and_error_code(err.response)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='empty name')
@attr(assertion='fails 405')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_create_naming_bad_short_empty():
    """Creating a bucket with an empty name fails 405 MethodNotAllowed."""
    invalid_bucketname = ''
    status, error_code = check_invalid_bucketname(invalid_bucketname)
    eq(status, 405)
    eq(error_code, 'MethodNotAllowed')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='short (one character) name')
@attr(assertion='fails 400')
def test_bucket_create_naming_bad_short_one():
    """A one-character bucket name is rejected as invalid."""
    check_bad_bucket_name('a')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='short (two character) name')
@attr(assertion='fails 400')
def test_bucket_create_naming_bad_short_two():
    """A two-character bucket name is rejected as invalid."""
    check_bad_bucket_name('aa')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='excessively long names')
@attr(assertion='fails with subdomain: 400')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_create_naming_bad_long():
    """Bucket names of 256, 280 and 3000 characters are all rejected with 400."""
    invalid_bucketname = 256*'a'
    status, error_code = check_invalid_bucketname(invalid_bucketname)
    eq(status, 400)
    invalid_bucketname = 280*'a'
    status, error_code = check_invalid_bucketname(invalid_bucketname)
    eq(status, 400)
    invalid_bucketname = 3000*'a'
    status, error_code = check_invalid_bucketname(invalid_bucketname)
    eq(status, 400)
def check_good_bucket_name(name, _prefix=None):
    """
    Create a bucket named *_prefix* + *name* (the prefix defaults to
    get_prefix()) and assert the creation succeeds with HTTP 200.
    """
    # Tests using this with the default prefix must *not* rely on being
    # able to set the initial character, or exceed the max length.
    # Tests passing a custom prefix are responsible for their own
    # setup/teardown nukes with that prefix; this should be very rare.
    if _prefix is None:
        _prefix = get_prefix()
    full_name = '{prefix}{name}'.format(prefix=_prefix, name=name)
    conn = get_client()
    response = conn.create_bucket(Bucket=full_name)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
def _test_bucket_create_naming_good_long(length):
    """
    Create a bucket whose full name (prefix included) is exactly
    *length* characters long and assert the creation succeeds.
    """
    # Tests using this with the default prefix must *not* rely on being
    # able to set the initial character, or exceed the max length.
    # Custom-prefix callers own their own setup/teardown nukes.
    prefix = get_new_bucket_name()
    assert len(prefix) < 63
    padding = 'a' * (length - len(prefix))
    full_name = '{prefix}{name}'.format(prefix=prefix, name=padding)
    conn = get_client()
    response = conn.create_bucket(Bucket=full_name)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/60 byte name')
@attr(assertion='fails with subdomain')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_good_long_60():
    """A 60-byte bucket name is accepted."""
    _test_bucket_create_naming_good_long(60)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/61 byte name')
@attr(assertion='fails with subdomain')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_good_long_61():
    """A 61-byte bucket name is accepted."""
    _test_bucket_create_naming_good_long(61)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/62 byte name')
@attr(assertion='fails with subdomain')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_good_long_62():
    """A 62-byte bucket name is accepted."""
    _test_bucket_create_naming_good_long(62)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/63 byte name')
@attr(assertion='fails with subdomain')
def test_bucket_create_naming_good_long_63():
    """A 63-byte bucket name (the documented maximum) is accepted."""
    _test_bucket_create_naming_good_long(63)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list w/61 byte name')
@attr(assertion='fails with subdomain')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_list_long_name():
    """A freshly created 61-byte-named bucket lists as empty."""
    prefix = get_new_bucket_name()
    length = 61
    num = length - len(prefix)
    name=num*'a'
    bucket_name = '{prefix}{name}'.format(
            prefix=prefix,
            name=name,
            )
    bucket = get_new_bucket_resource(name=bucket_name)
    is_empty = _bucket_is_empty(bucket)
    eq(is_empty, True)
# AWS does not enforce all documented bucket restrictions.
# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/index.html?BucketRestrictions.html
@attr('fails_on_aws')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/ip address for name')
@attr(assertion='fails on aws')
def test_bucket_create_naming_bad_ip():
    """A bucket name formatted as an IP address is rejected as invalid."""
    check_bad_bucket_name('192.168.5.123')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/! in name')
@attr(assertion='fails with subdomain')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_create_naming_bad_punctuation():
    """A bucket name containing '!' is rejected with 400 InvalidBucketName."""
    # characters other than [a-zA-Z0-9._-]
    invalid_bucketname = 'alpha!soup'
    status, error_code = check_invalid_bucketname(invalid_bucketname)
    # TODO: figure out why a 403 is coming out in boto3 but not in boto2.
    eq(status, 400)
    eq(error_code, 'InvalidBucketName')
# test_bucket_create_naming_dns_* are valid but not recommended
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/underscore in name')
@attr(assertion='fails')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_dns_underscore():
    """A bucket name containing '_' is rejected with 400 InvalidBucketName."""
    invalid_bucketname = 'foo_bar'
    status, error_code = check_invalid_bucketname(invalid_bucketname)
    eq(status, 400)
    eq(error_code, 'InvalidBucketName')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/100 byte name')
@attr(assertion='fails with subdomain')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
def test_bucket_create_naming_dns_long():
    """A 63-byte bucket name built from the test prefix is accepted."""
    prefix = get_prefix()
    assert len(prefix) < 50
    num = 63 - len(prefix)
    check_good_bucket_name(num * 'a')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/dash at end of name')
@attr(assertion='fails')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_dns_dash_at_end():
    """A bucket name ending in '-' is rejected with 400 InvalidBucketName."""
    invalid_bucketname = 'foo-'
    status, error_code = check_invalid_bucketname(invalid_bucketname)
    eq(status, 400)
    eq(error_code, 'InvalidBucketName')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/.. in name')
@attr(assertion='fails')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_dns_dot_dot():
    """A bucket name containing '..' is rejected with 400 InvalidBucketName."""
    invalid_bucketname = 'foo..bar'
    status, error_code = check_invalid_bucketname(invalid_bucketname)
    eq(status, 400)
    eq(error_code, 'InvalidBucketName')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/.- in name')
@attr(assertion='fails')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_dns_dot_dash():
    """A bucket name containing '.-' is rejected with 400 InvalidBucketName."""
    invalid_bucketname = 'foo.-bar'
    status, error_code = check_invalid_bucketname(invalid_bucketname)
    eq(status, 400)
    eq(error_code, 'InvalidBucketName')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/-. in name')
@attr(assertion='fails')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_dns_dash_dot():
    """A bucket name containing '-.' is rejected with 400 InvalidBucketName."""
    invalid_bucketname = 'foo-.bar'
    status, error_code = check_invalid_bucketname(invalid_bucketname)
    eq(status, 400)
    eq(error_code, 'InvalidBucketName')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='re-create')
def test_bucket_create_exists():
    """
    Re-creating a bucket you already own: the aws-s3 default region
    allows it, while all other regions fail 409 BucketAlreadyOwnedByYou.
    """
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name)
    try:
        client.create_bucket(Bucket=bucket_name)
    # bug fix: was 'except ClientError, e:' (Python-2-only syntax)
    except ClientError as e:
        status, error_code = _get_status_and_error_code(e.response)
        # bug fix: the original asserted on e.status / e.error_code,
        # attributes botocore's ClientError does not have; assert on the
        # values extracted from e.response instead.
        eq(status, 409)
        eq(error_code, 'BucketAlreadyOwnedByYou')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get location')
def test_bucket_get_location():
    """GetBucketLocation returns the LocationConstraint the bucket was created with."""
    location_constraint = get_main_api_name()
    # skip when the test configuration defines no API/location name
    if not location_constraint:
        raise SkipTest
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': location_constraint})
    response = client.get_bucket_location(Bucket=bucket_name)
    # boto3 reports the default region's empty constraint as None
    if location_constraint == "":
        location_constraint = None
    eq(response['LocationConstraint'], location_constraint)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='re-create by non-owner')
@attr(assertion='fails 409')
def test_bucket_create_exists_nonowner():
    """A second user creating an already-taken bucket name fails 409 BucketAlreadyExists."""
    # Names are shared across a global namespace. As such, no two
    # users can create a bucket with that same name.
    bucket_name = get_new_bucket_name()
    client = get_client()
    alt_client = get_alt_client()
    client.create_bucket(Bucket=bucket_name)
    e = assert_raises(ClientError, alt_client.create_bucket, Bucket=bucket_name)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 409)
    eq(error_code, 'BucketAlreadyExists')
def check_access_denied(fn, *args, **kwargs):
    """Call *fn* with the given arguments and assert it fails with HTTP 403."""
    err = assert_raises(ClientError, fn, *args, **kwargs)
    eq(_get_status(err.response), 403)
def check_grants(got, want):
    """
    Check that the grants list *got* (from a Get*Acl response) matches
    the expectation dicts in *want*, in any order.

    Entries of *got* have the boto3 response shape
    {'Grantee': {...}, 'Permission': ...}; entries of *want* are flat
    dicts with keys Permission, ID, DisplayName, URI, EmailAddress, Type.
    """
    eq(len(got), len(want))

    # bug fix: the docstring promises order-independent matching, but the
    # original zipped the lists pairwise in whatever order the server
    # returned them. Sort both sides on the same composite key first so
    # equal grant sets always compare equal.
    def _got_key(g):
        grantee = g.get('Grantee', {})
        return (str(g.get('Permission')), str(grantee.get('ID')), str(grantee.get('URI')))

    def _want_key(w):
        return (str(w.get('Permission')), str(w.get('ID')), str(w.get('URI')))

    for g, w in zip(sorted(got, key=_got_key), sorted(want, key=_want_key)):
        w = dict(w)
        g = dict(g)
        eq(g.pop('Permission', None), w['Permission'])
        eq(g['Grantee'].pop('DisplayName', None), w['DisplayName'])
        eq(g['Grantee'].pop('ID', None), w['ID'])
        eq(g['Grantee'].pop('Type', None), w['Type'])
        eq(g['Grantee'].pop('URI', None), w['URI'])
        eq(g['Grantee'].pop('EmailAddress', None), w['EmailAddress'])
        # nothing unexpected may remain in the grant after popping
        eq(g, {'Grantee': {}})
@attr(resource='bucket')
@attr(method='get')
@attr(operation='default acl')
@attr(assertion='read back expected defaults')
def test_bucket_acl_default():
    """A new bucket's ACL grants only FULL_CONTROL to its owner."""
    bucket_name = get_new_bucket()
    client = get_client()
    response = client.get_bucket_acl(Bucket=bucket_name)
    display_name = get_main_display_name()
    user_id = get_main_user_id()
    eq(response['Owner']['DisplayName'], display_name)
    eq(response['Owner']['ID'], user_id)
    grants = response['Grants']
    check_grants(
        grants,
        [
            dict(
                Permission='FULL_CONTROL',
                ID=user_id,
                DisplayName=display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
                ),
            ],
        )
@attr(resource='bucket')
@attr(method='get')
@attr(operation='public-read acl')
@attr(assertion='read back expected defaults')
@attr('fails_on_aws') # <Error><Code>IllegalLocationConstraintException</Code><Message>The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.</Message>
def test_bucket_acl_canned_during_create():
    """Creating a bucket with ACL='public-read' yields AllUsers READ plus owner FULL_CONTROL."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(ACL='public-read', Bucket=bucket_name)
    response = client.get_bucket_acl(Bucket=bucket_name)
    display_name = get_main_display_name()
    user_id = get_main_user_id()
    grants = response['Grants']
    check_grants(
        grants,
        [
            dict(
                Permission='READ',
                ID=None,
                DisplayName=None,
                URI='http://acs.amazonaws.com/groups/global/AllUsers',
                EmailAddress=None,
                Type='Group',
                ),
            dict(
                Permission='FULL_CONTROL',
                ID=user_id,
                DisplayName=display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
                ),
            ],
        )
@attr(resource='bucket')
@attr(method='put')
@attr(operation='acl: public-read,private')
@attr(assertion='read back expected values')
def test_bucket_acl_canned():
    """Switching a bucket's canned ACL public-read -> private updates the grants accordingly."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(ACL='public-read', Bucket=bucket_name)
    response = client.get_bucket_acl(Bucket=bucket_name)
    display_name = get_main_display_name()
    user_id = get_main_user_id()
    grants = response['Grants']
    # public-read: AllUsers READ + owner FULL_CONTROL
    check_grants(
        grants,
        [
            dict(
                Permission='READ',
                ID=None,
                DisplayName=None,
                URI='http://acs.amazonaws.com/groups/global/AllUsers',
                EmailAddress=None,
                Type='Group',
                ),
            dict(
                Permission='FULL_CONTROL',
                ID=user_id,
                DisplayName=display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
                ),
            ],
        )
    client.put_bucket_acl(ACL='private', Bucket=bucket_name)
    response = client.get_bucket_acl(Bucket=bucket_name)
    grants = response['Grants']
    # private: the AllUsers grant must be gone, leaving only the owner
    check_grants(
        grants,
        [
            dict(
                Permission='FULL_CONTROL',
                ID=user_id,
                DisplayName=display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
                ),
            ],
        )
@attr(resource='bucket.acls')
@attr(method='put')
@attr(operation='acl: public-read-write')
@attr(assertion='read back expected values')
def test_bucket_acl_canned_publicreadwrite():
    """Create a public-read-write bucket and verify READ+WRITE for AllUsers."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
    owner_id = get_main_user_id()
    owner_name = get_main_display_name()
    all_users = 'http://acs.amazonaws.com/groups/global/AllUsers'
    # Anonymous READ and WRITE group grants, then the owner's FULL_CONTROL.
    expected = [
        {'Permission': perm, 'ID': None, 'DisplayName': None, 'URI': all_users,
         'EmailAddress': None, 'Type': 'Group'}
        for perm in ('READ', 'WRITE')
    ]
    expected.append({'Permission': 'FULL_CONTROL', 'ID': owner_id,
                     'DisplayName': owner_name, 'URI': None,
                     'EmailAddress': None, 'Type': 'CanonicalUser'})
    resp = client.get_bucket_acl(Bucket=bucket_name)
    check_grants(resp['Grants'], expected)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='acl: authenticated-read')
@attr(assertion='read back expected values')
def test_bucket_acl_canned_authenticatedread():
    """Create an authenticated-read bucket and verify the group READ grant."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(ACL='authenticated-read', Bucket=bucket_name)
    owner_id = get_main_user_id()
    owner_name = get_main_display_name()
    auth_users_read = {'Permission': 'READ', 'ID': None, 'DisplayName': None,
                       'URI': 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers',
                       'EmailAddress': None, 'Type': 'Group'}
    owner_full_control = {'Permission': 'FULL_CONTROL', 'ID': owner_id,
                          'DisplayName': owner_name, 'URI': None,
                          'EmailAddress': None, 'Type': 'CanonicalUser'}
    resp = client.get_bucket_acl(Bucket=bucket_name)
    check_grants(resp['Grants'], [auth_users_read, owner_full_control])
@attr(resource='object.acls')
@attr(method='get')
@attr(operation='default acl')
@attr(assertion='read back expected defaults')
def test_object_acl_default():
    """A freshly written object should carry only the owner's FULL_CONTROL grant."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    owner_id = get_main_user_id()
    owner_name = get_main_display_name()
    resp = client.get_object_acl(Bucket=bucket_name, Key='foo')
    check_grants(
        resp['Grants'],
        [{'Permission': 'FULL_CONTROL', 'ID': owner_id, 'DisplayName': owner_name,
          'URI': None, 'EmailAddress': None, 'Type': 'CanonicalUser'}],
    )
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl public-read')
@attr(assertion='read back expected values')
def test_object_acl_canned_during_create():
    """Write an object with the public-read canned ACL and verify its grants."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(ACL='public-read', Bucket=bucket_name, Key='foo', Body='bar')
    owner_id = get_main_user_id()
    owner_name = get_main_display_name()
    all_users_read = {'Permission': 'READ', 'ID': None, 'DisplayName': None,
                      'URI': 'http://acs.amazonaws.com/groups/global/AllUsers',
                      'EmailAddress': None, 'Type': 'Group'}
    owner_full_control = {'Permission': 'FULL_CONTROL', 'ID': owner_id,
                          'DisplayName': owner_name, 'URI': None,
                          'EmailAddress': None, 'Type': 'CanonicalUser'}
    resp = client.get_object_acl(Bucket=bucket_name, Key='foo')
    check_grants(resp['Grants'], [all_users_read, owner_full_control])
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl public-read,private')
@attr(assertion='read back expected values')
def test_object_acl_canned():
    """Write a public-read object, verify its grants, then set it private."""
    bucket_name = get_new_bucket()
    client = get_client()
    # Objects default to private, so start from public-read.
    client.put_object(ACL='public-read', Bucket=bucket_name, Key='foo', Body='bar')
    owner_id = get_main_user_id()
    owner_name = get_main_display_name()
    all_users_read = {'Permission': 'READ', 'ID': None, 'DisplayName': None,
                      'URI': 'http://acs.amazonaws.com/groups/global/AllUsers',
                      'EmailAddress': None, 'Type': 'Group'}
    owner_full_control = {'Permission': 'FULL_CONTROL', 'ID': owner_id,
                          'DisplayName': owner_name, 'URI': None,
                          'EmailAddress': None, 'Type': 'CanonicalUser'}
    resp = client.get_object_acl(Bucket=bucket_name, Key='foo')
    check_grants(resp['Grants'], [dict(all_users_read), dict(owner_full_control)])
    # Then back to private: only the owner grant should remain.
    client.put_object_acl(ACL='private',Bucket=bucket_name, Key='foo')
    resp = client.get_object_acl(Bucket=bucket_name, Key='foo')
    check_grants(resp['Grants'], [dict(owner_full_control)])
@attr(resource='object')
@attr(method='put')
@attr(operation='acl public-read-write')
@attr(assertion='read back expected values')
def test_object_acl_canned_publicreadwrite():
    """Write an object with public-read-write and verify READ+WRITE group grants."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(ACL='public-read-write', Bucket=bucket_name, Key='foo', Body='bar')
    owner_id = get_main_user_id()
    owner_name = get_main_display_name()
    all_users = 'http://acs.amazonaws.com/groups/global/AllUsers'
    expected = [
        {'Permission': perm, 'ID': None, 'DisplayName': None, 'URI': all_users,
         'EmailAddress': None, 'Type': 'Group'}
        for perm in ('READ', 'WRITE')
    ]
    expected.append({'Permission': 'FULL_CONTROL', 'ID': owner_id,
                     'DisplayName': owner_name, 'URI': None,
                     'EmailAddress': None, 'Type': 'CanonicalUser'})
    resp = client.get_object_acl(Bucket=bucket_name, Key='foo')
    check_grants(resp['Grants'], expected)
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl authenticated-read')
@attr(assertion='read back expected values')
def test_object_acl_canned_authenticatedread():
    """Write an object with authenticated-read and verify the group READ grant."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(ACL='authenticated-read', Bucket=bucket_name, Key='foo', Body='bar')
    owner_id = get_main_user_id()
    owner_name = get_main_display_name()
    auth_users_read = {'Permission': 'READ', 'ID': None, 'DisplayName': None,
                       'URI': 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers',
                       'EmailAddress': None, 'Type': 'Group'}
    owner_full_control = {'Permission': 'FULL_CONTROL', 'ID': owner_id,
                          'DisplayName': owner_name, 'URI': None,
                          'EmailAddress': None, 'Type': 'CanonicalUser'}
    resp = client.get_object_acl(Bucket=bucket_name, Key='foo')
    check_grants(resp['Grants'], [auth_users_read, owner_full_control])
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl bucket-owner-read')
@attr(assertion='read back expected values')
def test_object_acl_canned_bucketownerread():
    """An alt user's object set to bucket-owner-read grants READ to the bucket owner."""
    bucket_name = get_new_bucket_name()
    main_client = get_client()
    alt_client = get_alt_client()
    main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
    alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    # The third bucket grant is the bucket owner's FULL_CONTROL entry.
    bucket_owner = main_client.get_bucket_acl(Bucket=bucket_name)['Grants'][2]['Grantee']
    alt_client.put_object(ACL='bucket-owner-read', Bucket=bucket_name, Key='foo')
    resp = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')
    alt_user_id = get_alt_user_id()
    alt_display_name = get_alt_display_name()
    check_grants(
        resp['Grants'],
        [
            {'Permission': 'FULL_CONTROL', 'ID': alt_user_id,
             'DisplayName': alt_display_name, 'URI': None,
             'EmailAddress': None, 'Type': 'CanonicalUser'},
            {'Permission': 'READ', 'ID': bucket_owner['ID'],
             'DisplayName': bucket_owner['DisplayName'], 'URI': None,
             'EmailAddress': None, 'Type': 'CanonicalUser'},
        ],
    )
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl bucket-owner-read')
@attr(assertion='read back expected values')
def test_object_acl_canned_bucketownerfullcontrol():
    """bucket-owner-full-control grants the bucket owner FULL_CONTROL on an alt user's object."""
    bucket_name = get_new_bucket_name()
    main_client = get_client()
    alt_client = get_alt_client()
    main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
    alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    # The third bucket grant is the bucket owner's FULL_CONTROL entry.
    bucket_owner = main_client.get_bucket_acl(Bucket=bucket_name)['Grants'][2]['Grantee']
    alt_client.put_object(ACL='bucket-owner-full-control', Bucket=bucket_name, Key='foo')
    resp = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')
    alt_user_id = get_alt_user_id()
    alt_display_name = get_alt_display_name()
    check_grants(
        resp['Grants'],
        [
            {'Permission': 'FULL_CONTROL', 'ID': alt_user_id,
             'DisplayName': alt_display_name, 'URI': None,
             'EmailAddress': None, 'Type': 'CanonicalUser'},
            {'Permission': 'FULL_CONTROL', 'ID': bucket_owner['ID'],
             'DisplayName': bucket_owner['DisplayName'], 'URI': None,
             'EmailAddress': None, 'Type': 'CanonicalUser'},
        ],
    )
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='set write-acp')
@attr(assertion='does not modify owner')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_object_acl_full_control_verify_owner():
    """Granting the alt user FULL_CONTROL, then having them rewrite the ACL,
    must not change the object's owner."""
    bucket_name = get_new_bucket_name()
    main_client = get_client()
    alt_client = get_alt_client()
    main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
    main_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    alt_user_id = get_alt_user_id()
    main_user_id = get_main_user_id()
    main_display_name = get_main_display_name()
    # Owner grants the alt user FULL_CONTROL on the object.
    grant = { 'Grants': [{'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}], 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
    main_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=grant)
    # The alt user, now holding FULL_CONTROL, replaces the ACL entirely.
    grant = { 'Grants': [{'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'READ_ACP'}], 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
    alt_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=grant)
    response = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')
    # Ownership must still be the main user's, regardless of who set the ACL.
    eq(response['Owner']['ID'], main_user_id)
def add_obj_user_grant(bucket_name, key, grant):
    """
    Append *grant* to the object's current grants and return a complete
    AccessControlPolicy dict suitable for put_object_acl, for an object
    owned by the main user (not the alt user).
    A grant is a dictionary in the form of:
    {u'Grantee': {u'Type': 'type', u'DisplayName': 'name', u'ID': 'id'}, u'Permission': 'PERM'}
    """
    client = get_client()
    existing = client.get_object_acl(Bucket=bucket_name, Key=key)['Grants']
    existing.append(grant)
    return {
        'Grants': existing,
        'Owner': {'DisplayName': get_main_display_name(), 'ID': get_main_user_id()},
    }
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='set write-acp')
@attr(assertion='does not modify other attributes')
def test_object_acl_full_control_verify_attributes():
    """Changing an object's ACL must leave its content-type and ETag intact."""
    bucket_name = get_new_bucket_name()
    main_client = get_client()
    alt_client = get_alt_client()
    main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
    header = {'x-amz-foo': 'bar'}
    # Inject an arbitrary header into the upcoming PutObject request.
    add_header = (lambda **kwargs: kwargs['params']['headers'].update(header))
    main_client.meta.events.register('before-call.s3.PutObject', add_header)
    main_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    before = main_client.get_object(Bucket=bucket_name, Key='foo')
    new_grant = {'Grantee': {'ID': get_alt_user_id(), 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}
    policy = add_obj_user_grant(bucket_name, 'foo', new_grant)
    main_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=policy)
    after = main_client.get_object(Bucket=bucket_name, Key='foo')
    eq(before['ContentType'], after['ContentType'])
    eq(before['ETag'], after['ETag'])
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl private')
@attr(assertion='a private object can be set to private')
def test_bucket_acl_canned_private_to_private():
    """Re-applying the private canned ACL to an already-private bucket succeeds."""
    client = get_client()
    bucket_name = get_new_bucket()
    resp = client.put_bucket_acl(Bucket=bucket_name, ACL='private')
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
def add_bucket_user_grant(bucket_name, grant):
    """
    Adds a grant to the existing grants meant to be passed into
    the AccessControlPolicy argument of put_bucket_acl for a bucket
    owned by the main user, not the alt user
    A grant is a dictionary in the form of:
    {u'Grantee': {u'Type': 'type', u'DisplayName': 'name', u'ID': 'id'}, u'Permission': 'PERM'}
    """
    client = get_client()
    main_user_id = get_main_user_id()
    main_display_name = get_main_display_name()
    # Start from the bucket's current grants so existing access is preserved.
    response = client.get_bucket_acl(Bucket=bucket_name)
    grants = response['Grants']
    grants.append(grant)
    grant = {'Grants': grants, 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
    return grant
def _check_object_acl(permission):
    """
    Grant *permission* on a fresh object to its owner via an explicit
    AccessControlPolicy, then verify the grant reads back.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    current = client.get_object_acl(Bucket=bucket_name, Key='foo')
    # Rewrite the first (owner) grant's permission and push the policy back.
    policy = {'Owner': current['Owner'], 'Grants': current['Grants']}
    policy['Grants'][0]['Permission'] = permission
    client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=policy)
    resp = client.get_object_acl(Bucket=bucket_name, Key='foo')
    check_grants(
        resp['Grants'],
        [{'Permission': permission, 'ID': get_main_user_id(),
          'DisplayName': get_main_display_name(), 'URI': None,
          'EmailAddress': None, 'Type': 'CanonicalUser'}],
    )
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl FULL_CONTROL')
@attr(assertion='reads back correctly')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_object_acl():
    """Set FULL_CONTROL on an object's owner grant and verify it reads back."""
    _check_object_acl('FULL_CONTROL')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl WRITE')
@attr(assertion='reads back correctly')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_object_acl_write():
    # Delegates to the shared helper: grant WRITE to the owner, read it back.
    _check_object_acl('WRITE')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl WRITE_ACP')
@attr(assertion='reads back correctly')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_object_acl_writeacp():
    # Delegates to the shared helper: grant WRITE_ACP to the owner, read it back.
    _check_object_acl('WRITE_ACP')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl READ')
@attr(assertion='reads back correctly')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_object_acl_read():
    # Delegates to the shared helper: grant READ to the owner, read it back.
    _check_object_acl('READ')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl READ_ACP')
@attr(assertion='reads back correctly')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_object_acl_readacp():
    # Delegates to the shared helper: grant READ_ACP to the owner, read it back.
    _check_object_acl('READ_ACP')
def _bucket_acl_grant_userid(permission):
    """
    Create a bucket, grant the alt user *permission* on it, verify the
    resulting grant list, and return the bucket name for further checks.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    alt_user_id = get_alt_user_id()
    alt_display_name = get_alt_display_name()
    # Append the alt-user grant to the bucket's existing grants.
    new_grant = {'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': permission}
    policy = add_bucket_user_grant(bucket_name, new_grant)
    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
    resp = client.get_bucket_acl(Bucket=bucket_name)
    check_grants(
        resp['Grants'],
        [
            {'Permission': permission, 'ID': alt_user_id,
             'DisplayName': alt_display_name, 'URI': None,
             'EmailAddress': None, 'Type': 'CanonicalUser'},
            {'Permission': 'FULL_CONTROL', 'ID': get_main_user_id(),
             'DisplayName': get_main_display_name(), 'URI': None,
             'EmailAddress': None, 'Type': 'CanonicalUser'},
        ],
    )
    return bucket_name
def _check_bucket_acl_grant_can_read(bucket_name):
    """
    verify ability to read the specified bucket
    """
    alt_client = get_alt_client()
    # head_bucket raises ClientError on denial, so merely completing is the check.
    alt_client.head_bucket(Bucket=bucket_name)
def _check_bucket_acl_grant_cant_read(bucket_name):
    """Assert the alt user is denied HEAD access to *bucket_name*."""
    check_access_denied(get_alt_client().head_bucket, Bucket=bucket_name)
def _check_bucket_acl_grant_can_readacp(bucket_name):
    """Assert the alt user can fetch the ACL of *bucket_name*."""
    client = get_alt_client()
    client.get_bucket_acl(Bucket=bucket_name)
def _check_bucket_acl_grant_cant_readacp(bucket_name):
    """Assert the alt user is denied reading the ACL of *bucket_name*."""
    check_access_denied(get_alt_client().get_bucket_acl, Bucket=bucket_name)
def _check_bucket_acl_grant_can_write(bucket_name):
    """Assert the alt user can write an object into *bucket_name*."""
    client = get_alt_client()
    client.put_object(Bucket=bucket_name, Key='foo-write', Body='bar')
def _check_bucket_acl_grant_cant_write(bucket_name):
    """Assert the alt user is denied writing an object into *bucket_name*."""
    check_access_denied(get_alt_client().put_object, Bucket=bucket_name, Key='foo-write', Body='bar')
def _check_bucket_acl_grant_can_writeacp(bucket_name):
    """Assert the alt user can replace the ACL of *bucket_name*."""
    client = get_alt_client()
    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
def _check_bucket_acl_grant_cant_writeacp(bucket_name):
    """Assert the alt user is denied replacing the ACL of *bucket_name*."""
    check_access_denied(get_alt_client().put_bucket_acl, Bucket=bucket_name, ACL='public-read')
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid FULL_CONTROL')
@attr(assertion='can read/write data/acls')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_bucket_acl_grant_userid_fullcontrol():
    """FULL_CONTROL grants read, write, read-acp, and write-acp — but not ownership."""
    bucket_name = _bucket_acl_grant_userid('FULL_CONTROL')
    # The alt user can do everything ...
    _check_bucket_acl_grant_can_read(bucket_name)
    _check_bucket_acl_grant_can_readacp(bucket_name)
    _check_bucket_acl_grant_can_write(bucket_name)
    _check_bucket_acl_grant_can_writeacp(bucket_name)
    # ... yet ownership must remain with the main user.
    client = get_client()
    owner = client.get_bucket_acl(Bucket=bucket_name)['Owner']
    eq(owner['ID'], get_main_user_id())
    eq(owner['DisplayName'], get_main_display_name())
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid READ')
@attr(assertion='can read data, no other r/w')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_bucket_acl_grant_userid_read():
    """READ allows listing the bucket and nothing else."""
    bucket_name = _bucket_acl_grant_userid('READ')
    _check_bucket_acl_grant_can_read(bucket_name)       # data read: allowed
    _check_bucket_acl_grant_cant_readacp(bucket_name)   # acl read: denied
    _check_bucket_acl_grant_cant_write(bucket_name)     # data write: denied
    _check_bucket_acl_grant_cant_writeacp(bucket_name)  # acl write: denied
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid READ_ACP')
@attr(assertion='can read acl, no other r/w')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_bucket_acl_grant_userid_readacp():
    """READ_ACP allows reading the ACL and nothing else."""
    bucket_name = _bucket_acl_grant_userid('READ_ACP')
    _check_bucket_acl_grant_cant_read(bucket_name)      # data read: denied
    _check_bucket_acl_grant_can_readacp(bucket_name)    # acl read: allowed
    _check_bucket_acl_grant_cant_write(bucket_name)     # data write: denied
    _check_bucket_acl_grant_cant_writeacp(bucket_name)  # acl write: denied
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid WRITE')
@attr(assertion='can write data, no other r/w')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_bucket_acl_grant_userid_write():
    """WRITE allows storing objects and nothing else."""
    bucket_name = _bucket_acl_grant_userid('WRITE')
    _check_bucket_acl_grant_cant_read(bucket_name)      # data read: denied
    _check_bucket_acl_grant_cant_readacp(bucket_name)   # acl read: denied
    _check_bucket_acl_grant_can_write(bucket_name)      # data write: allowed
    _check_bucket_acl_grant_cant_writeacp(bucket_name)  # acl write: denied
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid WRITE_ACP')
@attr(assertion='can write acls, no other r/w')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_bucket_acl_grant_userid_writeacp():
    """WRITE_ACP allows rewriting the ACL and nothing else."""
    bucket_name = _bucket_acl_grant_userid('WRITE_ACP')
    _check_bucket_acl_grant_cant_read(bucket_name)      # data read: denied
    _check_bucket_acl_grant_cant_readacp(bucket_name)   # acl read: denied
    _check_bucket_acl_grant_cant_write(bucket_name)     # data write: denied
    _check_bucket_acl_grant_can_writeacp(bucket_name)   # acl write: allowed
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/invalid userid')
@attr(assertion='fails 400')
def test_bucket_acl_grant_nonexist_user():
    """Granting to a nonexistent canonical user id must fail with 400 InvalidArgument."""
    bucket_name = get_new_bucket()
    client = get_client()
    bogus_grant = {'Grantee': {'ID': '_foo', 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}
    policy = add_bucket_user_grant(bucket_name, bogus_grant)
    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, AccessControlPolicy=policy)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='revoke all ACLs')
@attr(assertion='can: read obj, get/set bucket acl, cannot write objs')
def test_bucket_acl_no_grants():
    """With every grant revoked, the owner keeps implicit acl access and object
    reads, but loses object writes."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    response = client.get_bucket_acl(Bucket=bucket_name)
    old_grants = response['Grants']
    # Strip every grant while keeping the owner.
    policy = {'Owner': response['Owner'], 'Grants': []}
    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
    # Owner can still read objects ...
    client.get_object(Bucket=bucket_name, Key='foo')
    # ... but can no longer write them.
    check_access_denied(client.put_object, Bucket=bucket_name, Key='baz', Body='a')
    #TODO fix this test once a fix is in for same issues in
    # test_access_bucket_private_object_private
    client2 = get_client()
    # The owner retains implicit permission to read and write the ACL itself.
    client2.get_bucket_acl(Bucket=bucket_name)
    client2.put_bucket_acl(Bucket=bucket_name, ACL='private')
    # Restore the original grants so teardown can clean the bucket up.
    policy['Grants'] = old_grants
    client2.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
def _get_acl_header(user_id=None, perms=None):
all_headers = ["read", "write", "read-acp", "write-acp", "full-control"]
headers = []
if user_id == None:
user_id = get_alt_user_id()
if perms != None:
for perm in perms:
header = ("x-amz-grant-{perm}".format(perm=perm), "id={uid}".format(uid=user_id))
headers.append(header)
else:
for perm in all_headers:
header = ("x-amz-grant-{perm}".format(perm=perm), "id={uid}".format(uid=user_id))
headers.append(header)
return headers
@attr(resource='object')
@attr(method='PUT')
@attr(operation='add all grants to user through headers')
@attr(assertion='adds all grants individually to second user')
@attr('fails_on_dho')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_object_header_acl_grants():
    """PUT an object with every x-amz-grant-* header and verify each grant landed."""
    bucket_name = get_new_bucket()
    client = get_client()
    alt_user_id = get_alt_user_id()
    alt_display_name = get_alt_display_name()
    headers = _get_acl_header()
    def add_headers_before_sign(**kwargs):
        # Splice the raw grant headers into the request just before signing.
        updated_headers = (kwargs['request'].__dict__['headers'].__dict__['_headers'] + headers)
        kwargs['request'].__dict__['headers'].__dict__['_headers'] = updated_headers
    client.meta.events.register('before-sign.s3.PutObject', add_headers_before_sign)
    client.put_object(Bucket=bucket_name, Key='foo_key', Body='bar')
    resp = client.get_object_acl(Bucket=bucket_name, Key='foo_key')
    # One grant per header, all pointing at the alt user.
    expected = [
        {'Permission': perm, 'ID': alt_user_id, 'DisplayName': alt_display_name,
         'URI': None, 'EmailAddress': None, 'Type': 'CanonicalUser'}
        for perm in ('READ', 'WRITE', 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL')
    ]
    check_grants(resp['Grants'], expected)
@attr(resource='bucket')
@attr(method='PUT')
@attr(operation='add all grants to user through headers')
@attr(assertion='adds all grants individually to second user')
@attr('fails_on_dho')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_bucket_header_acl_grants():
    """Create a bucket with every x-amz-grant-* header and verify each grant landed.

    Fix: the original computed ``headers = _get_acl_header()`` twice; the first
    result was immediately overwritten.
    """
    bucket_name = get_new_bucket_name()
    client = get_client()
    headers = _get_acl_header()
    def add_headers_before_sign(**kwargs):
        # Splice the raw grant headers into the request just before signing.
        updated_headers = (kwargs['request'].__dict__['headers'].__dict__['_headers'] + headers)
        kwargs['request'].__dict__['headers'].__dict__['_headers'] = updated_headers
    client.meta.events.register('before-sign.s3.CreateBucket', add_headers_before_sign)
    client.create_bucket(Bucket=bucket_name)
    response = client.get_bucket_acl(Bucket=bucket_name)
    grants = response['Grants']
    alt_user_id = get_alt_user_id()
    alt_display_name = get_alt_display_name()
    # One grant per header, all pointing at the alt user.
    expected = [
        {'Permission': perm, 'ID': alt_user_id, 'DisplayName': alt_display_name,
         'URI': None, 'EmailAddress': None, 'Type': 'CanonicalUser'}
        for perm in ('READ', 'WRITE', 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL')
    ]
    check_grants(grants, expected)
    # Exercise the granted WRITE permission from the alt account.
    alt_client = get_alt_client()
    alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    # set bucket acl to public-read-write so that teardown can work
    alt_client.put_bucket_acl(Bucket=bucket_name, ACL='public-read-write')
# This test will fail on DH Objects. DHO allows multiple users with one account, which
# would violate the uniqueness requirement of a user's email. As such, DHO users are
# created without an email.
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='add second FULL_CONTROL user')
@attr(assertion='works for S3, fails for DHO')
@attr('fails_on_aws') # <Error><Code>AmbiguousGrantByEmailAddress</Code><Message>The e-mail address you provided is associated with more than one account. Please retry your request using a different identification method or after resolving the ambiguity.</Message>
def test_bucket_acl_grant_email():
    """Grant FULL_CONTROL by email address; the grant reads back as a canonical user."""
    bucket_name = get_new_bucket()
    client = get_client()
    email_grant = {'Grantee': {'EmailAddress': get_alt_email(), 'Type': 'AmazonCustomerByEmail' }, 'Permission': 'FULL_CONTROL'}
    policy = add_bucket_user_grant(bucket_name, email_grant)
    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
    resp = client.get_bucket_acl(Bucket=bucket_name)
    # The email grantee is resolved to the alt user's canonical identity.
    check_grants(
        resp['Grants'],
        [
            {'Permission': 'FULL_CONTROL', 'ID': get_alt_user_id(),
             'DisplayName': get_alt_display_name(), 'URI': None,
             'EmailAddress': None, 'Type': 'CanonicalUser'},
            {'Permission': 'FULL_CONTROL', 'ID': get_main_user_id(),
             'DisplayName': get_main_display_name(), 'URI': None,
             'EmailAddress': None, 'Type': 'CanonicalUser'},
        ],
    )
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='add acl for nonexistent user')
@attr(assertion='fail 400')
def test_bucket_acl_grant_email_notexist():
    """Granting by an unresolvable email must fail with 400
    UnresolvableGrantByEmailAddress.

    Fix: dropped the unused ``alt_user_id``/``alt_display_name`` locals.
    """
    # behavior not documented by amazon
    bucket_name = get_new_bucket()
    client = get_client()
    NONEXISTENT_EMAIL = 'doesnotexist@dreamhost.com.invalid'
    grant = {'Grantee': {'EmailAddress': NONEXISTENT_EMAIL, 'Type': 'AmazonCustomerByEmail'}, 'Permission': 'FULL_CONTROL'}
    grant = add_bucket_user_grant(bucket_name, grant)
    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, AccessControlPolicy = grant)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'UnresolvableGrantByEmailAddress')
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='revoke all ACLs')
@attr(assertion='acls read back as empty')
def test_bucket_acl_revoke_all():
    """Revoke every grant — including the owner's — and confirm the list is empty."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    response = client.get_bucket_acl(Bucket=bucket_name)
    old_grants = response['Grants']
    # Push a policy containing the owner but zero grants.
    policy = {'Owner': response['Owner'], 'Grants': []}
    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
    eq(len(client.get_bucket_acl(Bucket=bucket_name)['Grants']), 0)
    # Restore the original grants so teardown can clean the bucket up.
    policy['Grants'] = old_grants
    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
# TODO rgw log_bucket.set_as_logging_target() gives 403 Forbidden
# http://tracker.newdream.net/issues/984
@attr(resource='bucket.log')
@attr(method='put')
@attr(operation='set/enable/disable logging target')
@attr(assertion='operations succeed')
@attr('fails_on_rgw')
def test_logging_toggle():
    """Enable, read, and disable bucket logging; only checks the calls succeed."""
    bucket_name = get_new_bucket()
    client = get_client()
    main_display_name = get_main_display_name()
    main_user_id = get_main_user_id()
    # Enable logging into the bucket itself with a FULL_CONTROL target grant.
    status = {'LoggingEnabled': {'TargetBucket': bucket_name, 'TargetGrants': [{'Grantee': {'DisplayName': main_display_name, 'ID': main_user_id,'Type': 'CanonicalUser'},'Permission': 'FULL_CONTROL'}], 'TargetPrefix': 'foologgingprefix'}}
    client.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus=status)
    client.get_bucket_logging(Bucket=bucket_name)
    # Disable logging again.
    status = {'LoggingEnabled': {}}
    client.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus=status)
    # NOTE: this does not verify that log records are actually produced.
def _setup_access(bucket_acl, object_acl):
    """
    Build the shared access-test fixture: one bucket with *bucket_acl* holding
    - key1 ('foo'): explicitly given *object_acl*
    - key2 ('bar'): the bucket's default object ACL
    Returns (bucket_name, key1, key2, newkey) where newkey ('new') is unused,
    for write-denied checks.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    key1, key2, newkey = 'foo', 'bar', 'new'
    client.put_bucket_acl(Bucket=bucket_name, ACL=bucket_acl)
    client.put_object(Bucket=bucket_name, Key=key1, Body='foocontent')
    client.put_object_acl(Bucket=bucket_name, Key=key1, ACL=object_acl)
    client.put_object(Bucket=bucket_name, Key=key2, Body='barcontent')
    return bucket_name, key1, key2, newkey
def get_bucket_key_names(bucket_name):
    """Return the key names present in *bucket_name* as a frozenset."""
    # get_objects_list already yields the key names directly, so the
    # original pass-through generator expression was redundant.
    return frozenset(get_objects_list(bucket_name))
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/private')
@attr(assertion='public has no access to bucket or objects')
def test_access_bucket_private_object_private():
    # all the test_access_* tests follow this template
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='private')
    alt = get_alt_client()
    # neither the ACLed object nor the default-ACL one may be read
    check_access_denied(alt.get_object, Bucket=bucket_name, Key=key1)
    check_access_denied(alt.get_object, Bucket=bucket_name, Key=key2)
    # listing the bucket is denied as well
    check_access_denied(alt.list_objects, Bucket=bucket_name)
    # overwriting the ACLed object is denied
    check_access_denied(alt.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
    # NOTE: the denied put above wedges the connection, so that client can't
    # be reused.  Workarounds: send an empty Body ('') in those put_object
    # calls, or grab a fresh client — hence alt2/alt3 below.
    # TODO: Test it from another host and on AWS, Report this to Amazon, if findings are identical
    alt2 = get_alt_client()
    # overwriting the default-ACL object is denied
    check_access_denied(alt2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
    alt3 = get_alt_client()
    # creating a brand-new key is denied
    check_access_denied(alt3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/private with list-objects-v2')
@attr(assertion='public has no access to bucket or objects')
@attr('list-objects-v2')
def test_access_bucket_private_objectv2_private():
    # all the test_access_* tests follow this template
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='private')
    alt = get_alt_client()
    # neither the ACLed object nor the default-ACL one may be read
    check_access_denied(alt.get_object, Bucket=bucket_name, Key=key1)
    check_access_denied(alt.get_object, Bucket=bucket_name, Key=key2)
    # listing the bucket (v2 API) is denied as well
    check_access_denied(alt.list_objects_v2, Bucket=bucket_name)
    # overwriting the ACLed object is denied
    check_access_denied(alt.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
    # NOTE: the denied put above wedges the connection, so that client can't
    # be reused.  Workarounds: send an empty Body ('') in those put_object
    # calls, or grab a fresh client — hence alt2/alt3 below.
    # TODO: Test it from another host and on AWS, Report this to Amazon, if findings are identical
    alt2 = get_alt_client()
    # overwriting the default-ACL object is denied
    check_access_denied(alt2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
    alt3 = get_alt_client()
    # creating a brand-new key is denied
    check_access_denied(alt3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/public-read')
@attr(assertion='public can only read readable object')
def test_access_bucket_private_object_publicread():
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read')
    alt = get_alt_client()
    # key1 is public-read, key2 keeps the default (private) ACL
    eq(_get_body(alt.get_object(Bucket=bucket_name, Key=key1)), 'foocontent')
    check_access_denied(alt.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
    alt2 = get_alt_client()
    check_access_denied(alt2.get_object, Bucket=bucket_name, Key=key2)
    check_access_denied(alt2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
    alt3 = get_alt_client()
    # bucket itself is private: no listing, no new keys
    check_access_denied(alt3.list_objects, Bucket=bucket_name)
    check_access_denied(alt3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/public-read with list-objects-v2')
@attr(assertion='public can only read readable object')
@attr('list-objects-v2')
def test_access_bucket_private_objectv2_publicread():
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read')
    alt = get_alt_client()
    # key1 is public-read, key2 keeps the default (private) ACL
    eq(_get_body(alt.get_object(Bucket=bucket_name, Key=key1)), 'foocontent')
    check_access_denied(alt.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
    alt2 = get_alt_client()
    check_access_denied(alt2.get_object, Bucket=bucket_name, Key=key2)
    check_access_denied(alt2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
    alt3 = get_alt_client()
    # bucket itself is private: no listing (v2 API), no new keys
    check_access_denied(alt3.list_objects_v2, Bucket=bucket_name)
    check_access_denied(alt3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/public-read/write')
@attr(assertion='public can only read the readable object')
def test_access_bucket_private_object_publicreadwrite():
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read-write')
    alt = get_alt_client()
    # key1 behaves as public-read-only because the bucket is private;
    # key2 keeps the default (private) ACL
    eq(_get_body(alt.get_object(Bucket=bucket_name, Key=key1)), 'foocontent')
    check_access_denied(alt.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
    alt2 = get_alt_client()
    check_access_denied(alt2.get_object, Bucket=bucket_name, Key=key2)
    check_access_denied(alt2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
    alt3 = get_alt_client()
    # bucket itself is private: no listing, no new keys
    check_access_denied(alt3.list_objects, Bucket=bucket_name)
    check_access_denied(alt3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/public-read/write with list-objects-v2')
@attr(assertion='public can only read the readable object')
@attr('list-objects-v2')
def test_access_bucket_private_objectv2_publicreadwrite():
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read-write')
    alt = get_alt_client()
    # key1 behaves as public-read-only because the bucket is private;
    # key2 keeps the default (private) ACL
    eq(_get_body(alt.get_object(Bucket=bucket_name, Key=key1)), 'foocontent')
    check_access_denied(alt.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
    alt2 = get_alt_client()
    check_access_denied(alt2.get_object, Bucket=bucket_name, Key=key2)
    check_access_denied(alt2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
    alt3 = get_alt_client()
    # bucket itself is private: no listing (v2 API), no new keys
    check_access_denied(alt3.list_objects_v2, Bucket=bucket_name)
    check_access_denied(alt3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read/private')
@attr(assertion='public can only list the bucket')
def test_access_bucket_publicread_object_private():
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='private')
    alt = get_alt_client()
    # both objects are private: no reads, no overwrites
    check_access_denied(alt.get_object, Bucket=bucket_name, Key=key1)
    check_access_denied(alt.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
    alt2 = get_alt_client()
    check_access_denied(alt2.get_object, Bucket=bucket_name, Key=key2)
    check_access_denied(alt2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
    alt3 = get_alt_client()
    # the bucket is public-read, so listing succeeds...
    eq(get_objects_list(bucket=bucket_name, client=alt3), [u'bar', u'foo'])
    # ...but writing a new key is still denied
    check_access_denied(alt3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read/public-read')
@attr(assertion='public can read readable objects and list bucket')
def test_access_bucket_publicread_object_publicread():
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='public-read')
    alt = get_alt_client()
    # key1 is public-read, key2 keeps the default (private) ACL
    eq(_get_body(alt.get_object(Bucket=bucket_name, Key=key1)), 'foocontent')
    check_access_denied(alt.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
    alt2 = get_alt_client()
    check_access_denied(alt2.get_object, Bucket=bucket_name, Key=key2)
    check_access_denied(alt2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
    alt3 = get_alt_client()
    # the bucket is public-read, so listing succeeds but writes do not
    eq(get_objects_list(bucket=bucket_name, client=alt3), [u'bar', u'foo'])
    check_access_denied(alt3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read/public-read-write')
@attr(assertion='public can read readable objects and list bucket')
def test_access_bucket_publicread_object_publicreadwrite():
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='public-read-write')
    alt = get_alt_client()
    # key1 behaves as public-read-only because the bucket is only r/o;
    # key2 keeps the default (private) ACL
    eq(_get_body(alt.get_object(Bucket=bucket_name, Key=key1)), 'foocontent')
    check_access_denied(alt.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
    alt2 = get_alt_client()
    check_access_denied(alt2.get_object, Bucket=bucket_name, Key=key2)
    check_access_denied(alt2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
    alt3 = get_alt_client()
    # the bucket is public-read, so listing succeeds but writes do not
    eq(get_objects_list(bucket=bucket_name, client=alt3), [u'bar', u'foo'])
    check_access_denied(alt3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read-write/private')
@attr(assertion='private objects cannot be read, but can be overwritten')
def test_access_bucket_publicreadwrite_object_private():
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='private')
    alt = get_alt_client()
    # reads of the private objects are denied...
    check_access_denied(alt.get_object, Bucket=bucket_name, Key=key1)
    # ...but the public-read-write bucket still allows the overwrites
    alt.put_object(Bucket=bucket_name, Key=key1, Body='barcontent')
    check_access_denied(alt.get_object, Bucket=bucket_name, Key=key2)
    alt.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')
    # listing and creating a new key also succeed
    eq(get_objects_list(bucket=bucket_name, client=alt), [u'bar', u'foo'])
    alt.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read-write/public-read')
@attr(assertion='private objects cannot be read, but can be overwritten')
def test_access_bucket_publicreadwrite_object_publicread():
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='public-read')
    alt = get_alt_client()
    # key1 is public-read, key2 keeps the default (private) ACL
    eq(_get_body(alt.get_object(Bucket=bucket_name, Key=key1)), 'foocontent')
    # the public-read-write bucket allows every write below
    alt.put_object(Bucket=bucket_name, Key=key1, Body='barcontent')
    check_access_denied(alt.get_object, Bucket=bucket_name, Key=key2)
    alt.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')
    eq(get_objects_list(bucket=bucket_name, client=alt), [u'bar', u'foo'])
    alt.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read-write/public-read-write')
@attr(assertion='private objects cannot be read, but can be overwritten')
def test_access_bucket_publicreadwrite_object_publicreadwrite():
    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='public-read-write')
    alt = get_alt_client()
    # key1 is public-read-write, key2 keeps the default (private) ACL
    eq(_get_body(alt.get_object(Bucket=bucket_name, Key=key1)), 'foocontent')
    # the public-read-write bucket allows every write below
    alt.put_object(Bucket=bucket_name, Key=key1, Body='foooverwrite')
    check_access_denied(alt.get_object, Bucket=bucket_name, Key=key2)
    alt.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')
    eq(get_objects_list(bucket=bucket_name, client=alt), [u'bar', u'foo'])
    alt.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets')
@attr(assertion='returns all expected buckets')
def test_buckets_create_then_list():
    """Create several buckets and verify GET Service returns each of them."""
    client = get_client()
    bucket_names = [get_new_bucket_name() for _ in xrange(5)]
    for name in bucket_names:
        client.create_bucket(Bucket=name)
    buckets_list = get_buckets_list()
    for name in bucket_names:
        if name not in buckets_list:
            # bugfix: the original referenced an undefined `bucket.name`
            # (NameError) and passed the %r argument to RuntimeError
            # instead of formatting it into the message
            raise RuntimeError("S3 implementation's GET on Service did not return bucket we created: %r" % name)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets (anonymous)')
@attr(assertion='succeeds')
@attr('fails_on_aws')
def test_list_buckets_anonymous():
    # Get a connection with bad authorization, then swap in our new
    # Anonymous auth mechanism, emulating standard HTTP access.
    #
    # Going through the client (instead of raw httplib) also lets us vary
    # the calling format in testing.
    unauthenticated_client = get_unauthenticated_client()
    # an anonymous caller should see an empty bucket list, not an error
    eq(len(unauthenticated_client.list_buckets()['Buckets']), 0)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets (bad auth)')
@attr(assertion='fails 403')
def test_list_buckets_invalid_auth():
    # a completely unknown access key must be rejected
    err = assert_raises(ClientError, get_bad_auth_client().list_buckets)
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 403)
    eq(error_code, 'InvalidAccessKeyId')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets (bad auth)')
@attr(assertion='fails 403')
def test_list_buckets_bad_auth():
    # a valid access key with a bogus secret -> signature mismatch
    bad_auth_client = get_bad_auth_client(aws_access_key_id=get_main_aws_access_key())
    err = assert_raises(ClientError, bad_auth_client.list_buckets)
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 403)
    eq(error_code, 'SignatureDoesNotMatch')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket')
@attr(assertion='name starts with alphabetic works')
# this test goes outside the user-configure prefix because it needs to
# control the initial character of the bucket name
@nose.with_setup(
    setup=lambda: nuke_prefixed_buckets(prefix='a'+get_prefix()),
    teardown=lambda: nuke_prefixed_buckets(prefix='a'+get_prefix()),
    )
def test_bucket_create_naming_good_starts_alpha():
    """A bucket name starting with a letter is accepted."""
    prefix = 'a' + get_prefix()
    check_good_bucket_name('foo', _prefix=prefix)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket')
@attr(assertion='name starts with numeric works')
# this test goes outside the user-configure prefix because it needs to
# control the initial character of the bucket name
@nose.with_setup(
    setup=lambda: nuke_prefixed_buckets(prefix='0'+get_prefix()),
    teardown=lambda: nuke_prefixed_buckets(prefix='0'+get_prefix()),
    )
def test_bucket_create_naming_good_starts_digit():
    """A bucket name starting with a digit is accepted."""
    prefix = '0' + get_prefix()
    check_good_bucket_name('foo', _prefix=prefix)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket')
@attr(assertion='name containing dot works')
def test_bucket_create_naming_good_contains_period():
    """A bucket name containing a '.' is accepted."""
    check_good_bucket_name('aaa.111')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket')
@attr(assertion='name containing hyphen works')
def test_bucket_create_naming_good_contains_hyphen():
    """A bucket name containing a '-' is accepted."""
    check_good_bucket_name('aaa-111')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket with objects and recreate it')
@attr(assertion='bucket recreation not overriding index')
def test_bucket_recreate_not_overriding():
    key_names = ['mykey1', 'mykey2']
    bucket_name = _create_objects(keys=key_names)
    eq(key_names, get_objects_list(bucket_name))
    # re-creating the bucket must leave the existing index intact
    get_client().create_bucket(Bucket=bucket_name)
    eq(key_names, get_objects_list(bucket_name))
@attr(resource='object')
@attr(method='put')
@attr(operation='create and list objects with special names')
@attr(assertion='special names work')
def test_bucket_create_special_key_names():
    key_names = [
        ' ',
        '"',
        '$',
        '%',
        '&',
        '\'',
        '<',
        '>',
        '_',
        '_ ',
        '_ _',
        '__',
    ]
    bucket_name = _create_objects(keys=key_names)
    objs_list = get_objects_list(bucket_name)
    eq(key_names, objs_list)
    client = get_client()
    # every special key must be listable, readable (body == key name by
    # _create_objects convention), and accept an ACL update
    for name in key_names:
        eq((name in objs_list), True)
        body = _get_body(client.get_object(Bucket=bucket_name, Key=name))
        eq(name, body)
        client.put_object_acl(Bucket=bucket_name, Key=name, ACL='private')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='create and list objects with underscore as prefix, list using prefix')
@attr(assertion='listing works correctly')
def test_bucket_list_special_prefix():
    key_names = ['_bla/1', '_bla/2', '_bla/3', '_bla/4', 'abcd']
    bucket_name = _create_objects(keys=key_names)
    # all five keys come back without a prefix filter
    eq(len(get_objects_list(bucket_name)), 5)
    # only the four '_bla/' keys match the prefix
    eq(len(get_objects_list(bucket_name, prefix='_bla/')), 4)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy zero sized object in same bucket')
@attr(assertion='works')
def test_object_copy_zero_size():
    key = 'foo123bar'
    bucket_name = _create_objects(keys=[key])
    client = get_client()
    # truncate the source to zero bytes before copying
    client.put_object(Bucket=bucket_name, Key=key, Body=FakeWriteFile(0, ''))
    client.copy({'Bucket': bucket_name, 'Key': key}, bucket_name, 'bar321foo')
    resp = client.get_object(Bucket=bucket_name, Key='bar321foo')
    eq(resp['ContentLength'], 0)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object in same bucket')
@attr(assertion='works')
def test_object_copy_same_bucket():
    client = get_client()
    bucket_name = get_new_bucket()
    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
    # copy within the same bucket and verify the body travelled with it
    client.copy({'Bucket': bucket_name, 'Key': 'foo123bar'}, bucket_name, 'bar321foo')
    resp = client.get_object(Bucket=bucket_name, Key='bar321foo')
    eq('foo', _get_body(resp))
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object with content-type')
@attr(assertion='works')
def test_object_copy_verify_contenttype():
    client = get_client()
    bucket_name = get_new_bucket()
    content_type = 'text/bla'
    client.put_object(Bucket=bucket_name, ContentType=content_type, Key='foo123bar', Body='foo')
    client.copy({'Bucket': bucket_name, 'Key': 'foo123bar'}, bucket_name, 'bar321foo')
    resp = client.get_object(Bucket=bucket_name, Key='bar321foo')
    eq('foo', _get_body(resp))
    # the content type must travel with the copy
    eq(resp['ContentType'], content_type)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object to itself')
@attr(assertion='fails')
def test_object_copy_to_itself():
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
    src = {'Bucket': bucket_name, 'Key': 'foo123bar'}
    # copying an object onto itself without changing anything is rejected
    err = assert_raises(ClientError, client.copy, src, bucket_name, 'foo123bar')
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 400)
    eq(error_code, 'InvalidRequest')
@attr(resource='object')
@attr(method='put')
@attr(operation='modify object metadata by copying')
@attr(assertion='fails')
def test_object_copy_to_itself_with_metadata():
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
    src = {'Bucket': bucket_name, 'Key': 'foo123bar'}
    # a self-copy is allowed when the metadata is being replaced
    metadata = {'foo': 'bar'}
    client.copy_object(Bucket=bucket_name, CopySource=src, Key='foo123bar', Metadata=metadata, MetadataDirective='REPLACE')
    resp = client.get_object(Bucket=bucket_name, Key='foo123bar')
    eq(resp['Metadata'], metadata)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object from different bucket')
@attr(assertion='works')
def test_object_copy_diff_bucket():
    src_bucket = get_new_bucket()
    dst_bucket = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=src_bucket, Key='foo123bar', Body='foo')
    # copy across buckets and verify the body travelled with it
    client.copy({'Bucket': src_bucket, 'Key': 'foo123bar'}, dst_bucket, 'bar321foo')
    resp = client.get_object(Bucket=dst_bucket, Key='bar321foo')
    eq('foo', _get_body(resp))
@attr(resource='object')
@attr(method='put')
@attr(operation='copy to an inaccessible bucket')
@attr(assertion='fails w/AttributeError')
def test_object_copy_not_owned_bucket():
    client = get_client()
    alt_client = get_alt_client()
    src_bucket = get_new_bucket_name()
    dst_bucket = get_new_bucket_name()
    client.create_bucket(Bucket=src_bucket)
    alt_client.create_bucket(Bucket=dst_bucket)
    client.put_object(Bucket=src_bucket, Key='foo123bar', Body='foo')
    # the alt user has no read access to the source object -> 403
    err = assert_raises(ClientError, alt_client.copy, {'Bucket': src_bucket, 'Key': 'foo123bar'}, dst_bucket, 'bar321foo')
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 403)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy a non-owned object in a non-owned bucket, but with perms')
@attr(assertion='works')
def test_object_copy_not_owned_object_bucket():
    client = get_client()
    alt_client = get_alt_client()
    bucket_name = get_new_bucket_name()
    client.create_bucket(Bucket=bucket_name)
    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
    # grant the alt user FULL_CONTROL on both the object and the bucket
    grant = {'Grantee': {'ID': get_alt_user_id(), 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}
    obj_policy = add_obj_user_grant(bucket_name, 'foo123bar', grant)
    client.put_object_acl(Bucket=bucket_name, Key='foo123bar', AccessControlPolicy=obj_policy)
    bucket_policy = add_bucket_user_grant(bucket_name, grant)
    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=bucket_policy)
    # with those grants in place the alt user can read and copy the object
    alt_client.get_object(Bucket=bucket_name, Key='foo123bar')
    alt_client.copy({'Bucket': bucket_name, 'Key': 'foo123bar'}, bucket_name, 'bar321foo')
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object and change acl')
@attr(assertion='works')
def test_object_copy_canned_acl():
    bucket_name = get_new_bucket()
    client = get_client()
    alt_client = get_alt_client()
    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
    # copy with a canned public-read ACL...
    client.copy_object(Bucket=bucket_name, CopySource={'Bucket': bucket_name, 'Key': 'foo123bar'}, Key='bar321foo', ACL='public-read')
    # ...and verify it took effect by reading as a different user
    alt_client.get_object(Bucket=bucket_name, Key='bar321foo')
    # same check for a metadata-replacing copy back to the original key
    client.copy_object(ACL='public-read', Bucket=bucket_name, CopySource={'Bucket': bucket_name, 'Key': 'bar321foo'}, Key='foo123bar', Metadata={'abc': 'def'}, MetadataDirective='REPLACE')
    alt_client.get_object(Bucket=bucket_name, Key='foo123bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object and retain metadata')
def test_object_copy_retaining_metadata():
    for size in [3, 1024 * 1024]:
        bucket_name = get_new_bucket()
        client = get_client()
        content_type = 'audio/ogg'
        metadata = {'key1': 'value1', 'key2': 'value2'}
        client.put_object(Bucket=bucket_name, Key='foo123bar', Metadata=metadata, ContentType=content_type, Body=str(bytearray(size)))
        client.copy_object(Bucket=bucket_name, CopySource={'Bucket': bucket_name, 'Key': 'foo123bar'}, Key='bar321foo')
        # without a MetadataDirective the copy keeps type, metadata and size
        resp = client.get_object(Bucket=bucket_name, Key='bar321foo')
        eq(content_type, resp['ContentType'])
        eq(metadata, resp['Metadata'])
        eq(size, resp['ContentLength'])
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object and replace metadata')
def test_object_copy_replacing_metadata():
    for size in [3, 1024 * 1024]:
        bucket_name = get_new_bucket()
        client = get_client()
        client.put_object(Bucket=bucket_name, Key='foo123bar', Metadata={'key1': 'value1', 'key2': 'value2'}, ContentType='audio/ogg', Body=str(bytearray(size)))
        # copy with MetadataDirective=REPLACE, swapping both the metadata
        # and the content type
        metadata = {'key3': 'value3', 'key2': 'value2'}
        content_type = 'audio/mpeg'
        client.copy_object(Bucket=bucket_name, CopySource={'Bucket': bucket_name, 'Key': 'foo123bar'}, Key='bar321foo', Metadata=metadata, MetadataDirective='REPLACE', ContentType=content_type)
        resp = client.get_object(Bucket=bucket_name, Key='bar321foo')
        eq(content_type, resp['ContentType'])
        eq(metadata, resp['Metadata'])
        eq(size, resp['ContentLength'])
@attr(resource='object')
@attr(method='put')
@attr(operation='copy from non-existent bucket')
def test_object_copy_bucket_not_found():
    bucket_name = get_new_bucket()
    client = get_client()
    # the source bucket does not exist -> 404
    src = {'Bucket': bucket_name + "-fake", 'Key': 'foo123bar'}
    err = assert_raises(ClientError, client.copy, src, bucket_name, 'bar321foo')
    eq(_get_status(err.response), 404)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy from non-existent object')
def test_object_copy_key_not_found():
    bucket_name = get_new_bucket()
    client = get_client()
    # the source key does not exist -> 404
    src = {'Bucket': bucket_name, 'Key': 'foo123bar'}
    err = assert_raises(ClientError, client.copy, src, bucket_name, 'bar321foo')
    eq(_get_status(err.response), 404)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object to/from versioned bucket')
@attr(assertion='works')
@attr('versioning')
def test_object_copy_versioned_bucket():
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    size = 1*1024*124
    data = str(bytearray(size))
    key1 = 'foo123bar'
    client.put_object(Bucket=bucket_name, Key=key1, Body=data)
    version_id = client.get_object(Bucket=bucket_name, Key=key1)['VersionId']

    def _copy_and_check(dst_bucket, dst_key, src):
        # copy *src* to dst_bucket/dst_key and verify body and size survive
        client.copy_object(Bucket=dst_bucket, CopySource=src, Key=dst_key)
        resp = client.get_object(Bucket=dst_bucket, Key=dst_key)
        eq(data, _get_body(resp))
        eq(size, resp['ContentLength'])
        return resp

    # copy object in the same bucket
    key2 = 'bar321foo'
    resp = _copy_and_check(bucket_name, key2, {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id})
    # second copy, sourced from the first copy's version
    version_id2 = resp['VersionId']
    key3 = 'bar321foo2'
    _copy_and_check(bucket_name, key3, {'Bucket': bucket_name, 'Key': key2, 'VersionId': version_id2})
    # copy to another versioned bucket
    bucket_name2 = get_new_bucket()
    check_configure_versioning_retry(bucket_name2, "Enabled", "Enabled")
    key4 = 'bar321foo3'
    _copy_and_check(bucket_name2, key4, {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id})
    # copy to another non versioned bucket
    bucket_name3 = get_new_bucket()
    key5 = 'bar321foo4'
    _copy_and_check(bucket_name3, key5, {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id})
    # copy from the non versioned bucket back into the versioned one
    key6 = 'foo123bar2'
    _copy_and_check(bucket_name, key6, {'Bucket': bucket_name3, 'Key': key5})
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object to/from versioned bucket with url-encoded name')
@attr(assertion='works')
@attr('versioning')
def test_object_copy_versioned_url_encoding():
    bucket = get_new_bucket_resource()
    check_configure_versioning_retry(bucket.name, "Enabled", "Enabled")
    # both the source and destination key names need url-encoding
    src = bucket.put_object(Key='foo?bar')
    src.load() # HEAD request tests that the key exists
    dst = bucket.Object('bar&foo')
    dst.copy_from(CopySource={'Bucket': src.bucket_name, 'Key': src.key, 'VersionId': src.version_id})
    dst.load() # HEAD request tests that the key exists
def generate_random(size, part_size=5*1024*1024):
    """
    Yield successive chunks of random letters totalling *size* characters,
    each chunk at most *part_size* characters long.

    For speed, only the first KB of each chunk is actually random; the rest
    of the chunk repeats that same 1 KB pattern.
    """
    chunk = 1024
    allowed = string.ascii_letters
    for x in range(0, size, part_size):
        strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in range(chunk)])
        this_part_size = min(size - x, part_size)
        # bugfix: use floor division — under Python 3 `/` on ints yields a
        # float and range() would raise TypeError; repeating the pattern in
        # one multiplication also avoids the quadratic append loop
        s = strpart * (this_part_size // chunk)
        if this_part_size > len(s):
            s = s + strpart[0:this_part_size - len(s)]
        yield s
        if (x == size):
            return
def _multipart_upload(bucket_name, key, size, part_size=5*1024*1024, client=None, content_type=None, metadata=None, resend_parts=None):
    """
    Perform a multipart upload of *size* characters of random data to
    *bucket_name*/*key*, in parts of *part_size*.

    Parts whose 0-based index appears in *resend_parts* are uploaded a
    second time.  Returns (upload_id, full_body, parts), where *parts* is
    the list suitable for complete_multipart_upload.
    """
    # bugfix: a None default instead of a shared mutable list literal, and
    # identity comparison with `is None` rather than `== None`
    if resend_parts is None:
        resend_parts = []
    if client is None:
        client = get_client()
    if content_type is None and metadata is None:
        response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
    else:
        response = client.create_multipart_upload(Bucket=bucket_name, Key=key, Metadata=metadata, ContentType=content_type)
    upload_id = response['UploadId']
    s = ''
    parts = []
    for i, part in enumerate(generate_random(size, part_size)):
        # PartNumber must start at 1, while enumerate starts at 0
        part_num = i + 1
        s += part
        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num})
        if i in resend_parts:
            client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
    return (upload_id, s, parts)
@attr(resource='object')
@attr(method='put')
@attr(operation='test copy object of a multipart upload')
@attr(assertion='successful')
@attr('versioning')
def test_object_copy_versioning_multipart_upload():
    """
    Copy a versioned, multipart-uploaded object by explicit VersionId:
    within its own bucket, to a second versioned bucket, and to/from a
    non-versioned bucket.  After every copy the body, size, metadata and
    content type must match the source version.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    # source: a 30 MiB multipart upload with metadata and a content type
    key1 = "srcmultipart"
    key1_metadata = {'foo': 'bar'}
    content_type = 'text/bla'
    objlen = 30 * 1024 * 1024
    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=objlen, content_type=content_type, metadata=key1_metadata)
    client.complete_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload_id, MultipartUpload={'Parts': parts})
    response = client.get_object(Bucket=bucket_name, Key=key1)
    key1_size = response['ContentLength']
    version_id = response['VersionId']
    # copy object in the same bucket
    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
    key2 = 'dstmultipart'
    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key2)
    response = client.get_object(Bucket=bucket_name, Key=key2)
    version_id2 = response['VersionId']
    body = _get_body(response)
    eq(data, body)
    eq(key1_size, response['ContentLength'])
    eq(key1_metadata, response['Metadata'])
    eq(content_type, response['ContentType'])
    # second copy: copy the first copy, again pinned to its version id
    copy_source = {'Bucket': bucket_name, 'Key': key2, 'VersionId': version_id2}
    key3 = 'dstmultipart2'
    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key3)
    response = client.get_object(Bucket=bucket_name, Key=key3)
    body = _get_body(response)
    eq(data, body)
    eq(key1_size, response['ContentLength'])
    eq(key1_metadata, response['Metadata'])
    eq(content_type, response['ContentType'])
    # copy to another versioned bucket
    bucket_name2 = get_new_bucket()
    check_configure_versioning_retry(bucket_name2, "Enabled", "Enabled")
    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
    key4 = 'dstmultipart3'
    client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key=key4)
    response = client.get_object(Bucket=bucket_name2, Key=key4)
    body = _get_body(response)
    eq(data, body)
    eq(key1_size, response['ContentLength'])
    eq(key1_metadata, response['Metadata'])
    eq(content_type, response['ContentType'])
    # copy to another non versioned bucket
    bucket_name3 = get_new_bucket()
    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
    key5 = 'dstmultipart4'
    client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key5)
    response = client.get_object(Bucket=bucket_name3, Key=key5)
    body = _get_body(response)
    eq(data, body)
    eq(key1_size, response['ContentLength'])
    eq(key1_metadata, response['Metadata'])
    eq(content_type, response['ContentType'])
    # copy from a non versioned bucket (no VersionId in the copy source)
    copy_source = {'Bucket': bucket_name3, 'Key': key5}
    key6 = 'dstmultipart5'
    client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key6)
    response = client.get_object(Bucket=bucket_name3, Key=key6)
    body = _get_body(response)
    eq(data, body)
    eq(key1_size, response['ContentLength'])
    eq(key1_metadata, response['Metadata'])
    eq(content_type, response['ContentType'])
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart upload without parts')
def test_multipart_upload_empty():
    # Completing a multipart upload that has no parts must be rejected
    # with 400 MalformedXML.
    bucket = get_new_bucket()
    client = get_client()
    key = "mymultipart"
    upload_id, body, parts = _multipart_upload(bucket_name=bucket, key=key, size=0)
    e = assert_raises(ClientError, client.complete_multipart_upload,
                      Bucket=bucket, Key=key, UploadId=upload_id)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'MalformedXML')
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart uploads with single small part')
def test_multipart_upload_small():
    # A multipart upload consisting of a single 1-byte part must complete
    # and yield a 1-byte object.
    bucket = get_new_bucket()
    client = get_client()
    key = "mymultipart"
    size = 1
    upload_id, body, parts = _multipart_upload(bucket_name=bucket, key=key, size=size)
    client.complete_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id,
                                     MultipartUpload={'Parts': parts})
    response = client.get_object(Bucket=bucket, Key=key)
    eq(response['ContentLength'], size)
def _create_key_with_random_content(keyname, size=7*1024*1024, bucket_name=None, client=None):
    """
    Put an object of `size` random bytes at `keyname` and return the
    bucket name used (a fresh bucket unless one is supplied).
    """
    if bucket_name is None:
        bucket_name = get_new_bucket()
    if client is None:
        client = get_client()
    # generate_random() with part_size == size yields exactly one piece;
    # next() builtin instead of the Python-2-only .next() method
    data = StringIO(str(next(generate_random(size, size))))
    client.put_object(Bucket=bucket_name, Key=keyname, Body=data)
    return bucket_name
def _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size, client=None, part_size=5*1024*1024, version_id=None):
    """
    Copy the first `size` bytes of src into dest via multipart
    upload_part_copy, in ranges of `part_size` bytes.

    :param version_id: optional source VersionId to copy from
    :returns: (upload_id, parts) ready for complete_multipart_upload
    """
    if client is None:
        client = get_client()
    response = client.create_multipart_upload(Bucket=dest_bucket_name, Key=dest_key)
    upload_id = response['UploadId']
    # build the copy source once instead of duplicating both branches
    copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
    if version_id is not None:
        copy_source['VersionId'] = version_id
    parts = []
    # enumerate replaces the hand-maintained `i` counter
    for i, start_offset in enumerate(range(0, size, part_size)):
        # ranges are inclusive; the last part may be short
        end_offset = min(start_offset + part_size - 1, size - 1)
        part_num = i + 1
        copy_source_range = 'bytes={start}-{end}'.format(start=start_offset, end=end_offset)
        response = client.upload_part_copy(Bucket=dest_bucket_name, Key=dest_key, CopySource=copy_source, PartNumber=part_num, UploadId=upload_id, CopySourceRange=copy_source_range)
        parts.append({'ETag': response['CopyPartResult']['ETag'], 'PartNumber': part_num})
    return (upload_id, parts)
def _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name, version_id=None):
    """
    Verify that dest's body equals the first len(dest) bytes of src
    (optionally a specific src version).  dest may be a truncated copy
    of src, but never longer than it.
    """
    client = get_client()
    # pass VersionId only when requested, instead of duplicating each call
    version_args = {} if version_id is None else {'VersionId': version_id}
    response = client.get_object(Bucket=src_bucket_name, Key=src_key, **version_args)
    src_size = response['ContentLength']
    response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
    dest_size = response['ContentLength']
    dest_data = _get_body(response)
    assert(src_size >= dest_size)
    # re-read just the prefix of src that dest is expected to match
    r = 'bytes={s}-{e}'.format(s=0, e=dest_size-1)
    response = client.get_object(Bucket=src_bucket_name, Key=src_key, Range=r, **version_args)
    src_data = _get_body(response)
    eq(src_data, dest_data)
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies with single small part')
def test_multipart_copy_small():
    # Multipart-copy a single byte and confirm the destination matches.
    src_key = 'foo'
    src_bucket = _create_key_with_random_content(src_key)
    dest_bucket = get_new_bucket()
    dest_key = "mymultipart"
    size = 1
    client = get_client()
    upload_id, parts = _multipart_copy(src_bucket, src_key, dest_bucket, dest_key, size)
    client.complete_multipart_upload(Bucket=dest_bucket, Key=dest_key,
                                     UploadId=upload_id, MultipartUpload={'Parts': parts})
    response = client.get_object(Bucket=dest_bucket, Key=dest_key)
    eq(size, response['ContentLength'])
    _check_key_content(src_key, src_bucket, dest_key, dest_bucket)
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies with an invalid range')
def test_multipart_copy_invalid_range():
    """
    upload_part_copy with a range beyond the end of the source object
    must fail with InvalidRange (AWS returns 416; some implementations
    return 400 -- both are accepted).
    """
    client = get_client()
    src_key = 'source'
    src_bucket_name = _create_key_with_random_content(src_key, size=5)
    response = client.create_multipart_upload(Bucket=src_bucket_name, Key='dest')
    upload_id = response['UploadId']
    copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
    # the source object is only 5 bytes, so bytes=0-21 is unsatisfiable
    copy_source_range = 'bytes={start}-{end}'.format(start=0, end=21)
    e = assert_raises(ClientError, client.upload_part_copy,Bucket=src_bucket_name, Key='dest', UploadId=upload_id, CopySource=copy_source, CopySourceRange=copy_source_range, PartNumber=1)
    status, error_code = _get_status_and_error_code(e.response)
    valid_status = [400, 416]
    # `status not in ...` instead of the awkward `not status in ...`
    if status not in valid_status:
        raise AssertionError("Invalid response " + str(status))
    eq(error_code, 'InvalidRange')
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copy with an improperly formatted range')
def test_multipart_copy_improper_range():
    # Every syntactically invalid CopySourceRange must be rejected with
    # 400 InvalidArgument.
    client = get_client()
    src_key = 'source'
    src_bucket_name = _create_key_with_random_content(src_key, size=5)
    response = client.create_multipart_upload(
        Bucket=src_bucket_name, Key='dest')
    upload_id = response['UploadId']
    copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
    test_ranges = [
        '{start}-{end}'.format(start=0, end=2),   # missing 'bytes=' prefix
        'bytes={start}'.format(start=0),          # no end offset
        'bytes=hello-world',                      # non-numeric offsets
        'bytes=0-bar',
        'bytes=hello-',
        'bytes=0-2,3-5',                          # multiple ranges
    ]
    for test_range in test_ranges:
        e = assert_raises(ClientError, client.upload_part_copy,
                          Bucket=src_bucket_name, Key='dest',
                          UploadId=upload_id,
                          CopySource=copy_source,
                          CopySourceRange=test_range,
                          PartNumber=1)
        status, error_code = _get_status_and_error_code(e.response)
        eq(status, 400)
        eq(error_code, 'InvalidArgument')
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies without x-amz-copy-source-range')
def test_multipart_copy_without_range():
    """
    upload_part_copy with no CopySourceRange must copy the entire source
    object (10 bytes here).
    """
    client = get_client()
    src_key = 'source'
    src_bucket_name = _create_key_with_random_content(src_key, size=10)
    dest_bucket_name = get_new_bucket_name()
    get_new_bucket(name=dest_bucket_name)
    dest_key = "mymultipartcopy"
    response = client.create_multipart_upload(Bucket=dest_bucket_name, Key=dest_key)
    upload_id = response['UploadId']
    parts = []
    copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
    part_num = 1
    # CopySourceRange is deliberately not passed -- that is the point of
    # this test.  (A stray, unused range string was removed here.)
    response = client.upload_part_copy(Bucket=dest_bucket_name, Key=dest_key, CopySource=copy_source, PartNumber=part_num, UploadId=upload_id)
    parts.append({'ETag': response['CopyPartResult']['ETag'], 'PartNumber': part_num})
    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
    response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
    eq(response['ContentLength'], 10)
    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies with single small part')
def test_multipart_copy_special_names():
    # Keys with awkward names (space, underscores, a query-string
    # look-alike) must still work as multipart copy sources.
    src_bucket = get_new_bucket()
    dest_bucket = get_new_bucket()
    dest_key = "mymultipart"
    size = 1
    client = get_client()
    for src_key in (' ', '_', '__', '?versionId'):
        _create_key_with_random_content(src_key, bucket_name=src_bucket)
        upload_id, parts = _multipart_copy(src_bucket, src_key, dest_bucket, dest_key, size)
        client.complete_multipart_upload(Bucket=dest_bucket, Key=dest_key,
                                         UploadId=upload_id, MultipartUpload={'Parts': parts})
        response = client.get_object(Bucket=dest_bucket, Key=dest_key)
        eq(size, response['ContentLength'])
        _check_key_content(src_key, src_bucket, dest_key, dest_bucket)
def _check_content_using_range(key, bucket_name, data, step):
    """
    Fetch `key` in `step`-sized ranged GETs and verify every chunk
    matches the corresponding slice of `data`.
    """
    client = get_client()
    response = client.get_object(Bucket=bucket_name, Key=key)
    size = response['ContentLength']
    for offset in xrange(0, size, step):
        # the final chunk may be shorter than `step`
        toread = min(size - offset, step)
        end = offset + toread - 1
        range_hdr = 'bytes={s}-{e}'.format(s=offset, e=end)
        response = client.get_object(Bucket=bucket_name, Key=key, Range=range_hdr)
        eq(response['ContentLength'], toread)
        eq(_get_body(response), data[offset:end + 1])
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multi-part upload')
@attr(assertion='successful')
@attr('fails_on_aws')
def test_multipart_upload():
    """
    Complete a 30 MiB multipart upload and verify bucket usage counters,
    object metadata/content type, the full body, and ranged reads.
    """
    bucket_name = get_new_bucket()
    key="mymultipart"
    content_type='text/bla'
    objlen = 30 * 1024 * 1024
    metadata = {'foo': 'bar'}
    client = get_client()
    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, content_type=content_type, metadata=metadata)
    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
    # rgw-specific usage headers -- this is why the test fails_on_aws
    response = client.head_bucket(Bucket=bucket_name)
    rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-bytes-used'])
    eq(rgw_bytes_used, objlen)
    rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-object-count'])
    eq(rgw_object_count, 1)
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(response['ContentType'], content_type)
    eq(response['Metadata'], metadata)
    body = _get_body(response)
    eq(len(body), response['ContentLength'])
    eq(body, data)
    # spot-check the content with two different range-read step sizes
    _check_content_using_range(key, bucket_name, data, 1000000)
    _check_content_using_range(key, bucket_name, data, 10000000)
def check_versioning(bucket_name, status):
    """
    Assert the bucket's versioning Status equals `status`.  A bucket that
    was never configured returns no 'Status' key at all, which maps to
    status=None.
    """
    client = get_client()
    try:
        eq(client.get_bucket_versioning(Bucket=bucket_name)['Status'], status)
    except KeyError:
        # 'Status' absent: versioning was never configured
        eq(status, None)
# amazon is eventual consistent, retry a bit if failed
def check_configure_versioning_retry(bucket_name, status, expected_string):
    """
    Set the bucket's versioning Status to `status`, then poll (up to 5
    times, 1s apart) until get_bucket_versioning reports
    `expected_string`; assert the final observed value.
    """
    client = get_client()
    client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': status})
    read_status = None
    for i in xrange(5):
        try:
            response = client.get_bucket_versioning(Bucket=bucket_name)
            read_status = response['Status']
        except KeyError:
            # 'Status' key is absent until the configuration is visible
            read_status = None
        if (expected_string == read_status):
            break
        time.sleep(1)
    eq(expected_string, read_status)
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies of versioned objects')
@attr('versioning')
def test_multipart_copy_versioned():
    """
    Create three versions of the same source key, then multipart-copy
    each version (by VersionId) and verify the copied content matches
    that exact version.
    """
    src_bucket_name = get_new_bucket()
    dest_bucket_name = get_new_bucket()
    dest_key = "mymultipart"
    check_versioning(src_bucket_name, None)
    src_key = 'foo'
    check_configure_versioning_retry(src_bucket_name, "Enabled", "Enabled")
    size = 15 * 1024 * 1024
    # three puts to the same key -> three distinct versions
    _create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
    _create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
    _create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
    version_id = []
    client = get_client()
    response = client.list_object_versions(Bucket=src_bucket_name)
    for ver in response['Versions']:
        version_id.append(ver['VersionId'])
    # copy each version and check it against the same source version
    for vid in version_id:
        (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size, version_id=vid)
        response = client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
        response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
        eq(size, response['ContentLength'])
        _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name, version_id=vid)
def _check_upload_multipart_resend(bucket_name, key, objlen, resend_parts):
    """
    Upload `objlen` bytes to `key` as a multipart object, re-sending the
    0-based part indexes listed in `resend_parts`, then verify content
    type, metadata, full body and ranged reads of the result.
    """
    content_type = 'text/bla'
    metadata = {'foo': 'bar'}
    client = get_client()
    upload_id, data, parts = _multipart_upload(
        bucket_name=bucket_name, key=key, size=objlen,
        content_type=content_type, metadata=metadata, resend_parts=resend_parts)
    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id,
                                     MultipartUpload={'Parts': parts})
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(response['ContentType'], content_type)
    eq(response['Metadata'], metadata)
    body = _get_body(response)
    eq(len(body), response['ContentLength'])
    eq(body, data)
    _check_content_using_range(key, bucket_name, data, 1000000)
    _check_content_using_range(key, bucket_name, data, 10000000)
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multi-part upload')
@attr(assertion='successful')
def test_multipart_upload_resend_part():
    """
    Re-uploading individual parts before completing the upload must not
    corrupt the final object.
    """
    # NOTE: a second, misplaced decorator stack for the "different sizes"
    # test was stacked on this function; it has been removed here.
    bucket_name = get_new_bucket()
    key = "mymultipart"
    objlen = 30 * 1024 * 1024
    _check_upload_multipart_resend(bucket_name, key, objlen, [0])
    _check_upload_multipart_resend(bucket_name, key, objlen, [1])
    _check_upload_multipart_resend(bucket_name, key, objlen, [2])
    _check_upload_multipart_resend(bucket_name, key, objlen, [1,2])
    _check_upload_multipart_resend(bucket_name, key, objlen, [0,1,2,3,4,5])
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multiple multi-part upload with different sizes')
@attr(assertion='successful')
def test_multipart_upload_multiple_sizes():
    """
    Complete multipart uploads of the same key at several total sizes,
    chosen to hit exact part multiples and both small and large
    remainders.  (Descriptive @attr decorators restored; they had been
    misplaced onto the previous test.)
    """
    bucket_name = get_new_bucket()
    key = "mymultipart"
    client = get_client()
    mb = 1024 * 1024
    kb = 1024
    for objlen in (5 * mb,
                   5 * mb + 100 * kb,
                   5 * mb + 600 * kb,
                   10 * mb + 100 * kb,
                   10 * mb + 600 * kb,
                   10 * mb):
        (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
        client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
@attr(assertion='successful')
def test_multipart_copy_multiple_sizes():
    """
    Multipart-copy prefixes of a 12 MiB source at several sizes (exact
    part multiples plus small/large remainders) and verify each copy's
    content against the source.
    """
    src_key = 'foo'
    src_bucket_name = _create_key_with_random_content(src_key, 12*1024*1024)
    dest_bucket_name = get_new_bucket()
    dest_key = "mymultipart"
    client = get_client()
    mb = 1024 * 1024
    kb = 1024
    # same size matrix as test_multipart_upload_multiple_sizes, deduped
    # from six copy-pasted stanzas into one loop
    for size in (5 * mb,
                 5 * mb + 100 * kb,
                 5 * mb + 600 * kb,
                 10 * mb + 100 * kb,
                 10 * mb + 600 * kb,
                 10 * mb):
        (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
        client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
        _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
@attr(resource='object')
@attr(method='put')
@attr(operation='check failure on multiple multi-part upload with size too small')
@attr(assertion='fails 400')
def test_multipart_upload_size_too_small():
    # Every part except the last must be at least 5 MiB; completing an
    # upload built from 10 KiB parts must fail with EntityTooSmall.
    bucket = get_new_bucket()
    key = "mymultipart"
    client = get_client()
    total = 100*1024
    upload_id, data, parts = _multipart_upload(bucket_name=bucket, key=key,
                                               size=total, part_size=10*1024)
    e = assert_raises(ClientError, client.complete_multipart_upload,
                      Bucket=bucket, Key=key, UploadId=upload_id,
                      MultipartUpload={'Parts': parts})
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'EntityTooSmall')
def gen_rand_string(size, chars=string.ascii_uppercase + string.digits):
    """Return a random string of `size` characters drawn from `chars`."""
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
def _do_test_multipart_upload_contents(bucket_name, key, num_parts):
    """
    Upload `num_parts` identical 5 MiB parts plus a smaller 3 MiB final
    part, complete the upload, and verify the assembled object equals
    the concatenation of the payloads.  Returns the expected payload.
    """
    # 5 random characters repeated 1024*1024 times -> 5 MiB per part
    payload=gen_rand_string(5)*1024*1024
    client = get_client()
    response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
    upload_id = response['UploadId']
    parts = []
    for part_num in range(0, num_parts):
        part = StringIO(payload)
        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num+1, Body=part)
        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num+1})
    # a different, smaller final part (parts need not be equal sized)
    last_payload = '123'*1024*1024
    last_part = StringIO(last_payload)
    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=num_parts+1, Body=last_part)
    parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': num_parts+1})
    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
    response = client.get_object(Bucket=bucket_name, Key=key)
    test_string = _get_body(response)
    all_payload = payload*num_parts + last_payload
    assert test_string == all_payload
    return all_payload
@attr(resource='object')
@attr(method='put')
@attr(operation='check contents of multi-part upload')
@attr(assertion='successful')
def test_multipart_upload_contents():
    # Three equal parts plus the helper's smaller trailing part.
    _do_test_multipart_upload_contents(get_new_bucket(), 'mymultipart', 3)
@attr(resource='object')
@attr(method='put')
@attr(operation=' multi-part upload overwrites existing key')
@attr(assertion='successful')
def test_multipart_upload_overwrite_existing_object():
    # A completed multipart upload must replace the plain object already
    # stored under the same key.
    bucket = get_new_bucket()
    client = get_client()
    key = 'mymultipart'
    payload = '12345'*1024*1024
    num_parts = 2
    client.put_object(Bucket=bucket, Key=key, Body=payload)
    upload_id = client.create_multipart_upload(Bucket=bucket, Key=key)['UploadId']
    parts = []
    for part_num in range(1, num_parts + 1):
        response = client.upload_part(UploadId=upload_id, Bucket=bucket, Key=key,
                                      PartNumber=part_num, Body=payload)
        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num})
    client.complete_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id,
                                     MultipartUpload={'Parts': parts})
    response = client.get_object(Bucket=bucket, Key=key)
    assert _get_body(response) == payload*num_parts
@attr(resource='object')
@attr(method='put')
@attr(operation='abort multi-part upload')
@attr(assertion='successful')
def test_abort_multipart_upload():
    # After aborting an in-progress upload, the bucket's rgw usage
    # counters must report zero bytes and zero objects.
    bucket = get_new_bucket()
    key = "mymultipart"
    size = 10 * 1024 * 1024
    client = get_client()
    upload_id, data, parts = _multipart_upload(bucket_name=bucket, key=key, size=size)
    client.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id)
    headers = client.head_bucket(Bucket=bucket)['ResponseMetadata']['HTTPHeaders']
    eq(int(headers['x-rgw-bytes-used']), 0)
    eq(int(headers['x-rgw-object-count']), 0)
@attr(resource='object')
@attr(method='put')
@attr(operation='abort non-existent multi-part upload')
@attr(assertion='fails 404')
def test_abort_multipart_upload_not_found():
    # Aborting with an UploadId that does not exist must fail with
    # 404 NoSuchUpload.
    bucket = get_new_bucket()
    client = get_client()
    key = "mymultipart"
    client.put_object(Bucket=bucket, Key=key)
    e = assert_raises(ClientError, client.abort_multipart_upload,
                      Bucket=bucket, Key=key, UploadId='56788')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchUpload')
@attr(resource='object')
@attr(method='put')
@attr(operation='concurrent multi-part uploads')
@attr(assertion='successful')
def test_list_multipart_upload():
    """
    Start three concurrent multipart uploads (two on the same key) and
    verify list_multipart_uploads reports all of their upload ids, then
    abort them.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    key = "mymultipart"
    mb = 1024 * 1024
    upload_ids = []
    (upload_id1, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=5*mb)
    upload_ids.append(upload_id1)
    (upload_id2, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=6*mb)
    upload_ids.append(upload_id2)
    key2 = "mymultipart2"
    (upload_id3, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key2, size=5*mb)
    upload_ids.append(upload_id3)
    response = client.list_multipart_uploads(Bucket=bucket_name)
    # comprehension / direct iteration instead of index loops
    resp_uploadids = [upload['UploadId'] for upload in response['Uploads']]
    for upload_id in upload_ids:
        eq(True, (upload_id in resp_uploadids))
    # clean up so the uploads don't linger
    client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id1)
    client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id2)
    client.abort_multipart_upload(Bucket=bucket_name, Key=key2, UploadId=upload_id3)
@attr(resource='object')
@attr(method='put')
@attr(operation='multi-part upload with missing part')
def test_multipart_upload_missing_part():
    # Completing with a PartNumber that was never uploaded must fail
    # with 400 InvalidPart.
    bucket = get_new_bucket()
    client = get_client()
    key = "mymultipart"
    upload_id = client.create_multipart_upload(Bucket=bucket, Key=key)['UploadId']
    response = client.upload_part(UploadId=upload_id, Bucket=bucket, Key=key,
                                  PartNumber=1, Body=StringIO('\x00'))
    # deliberately claim part 9999 instead of the real part 1
    parts = [{'ETag': response['ETag'].strip('"'), 'PartNumber': 9999}]
    e = assert_raises(ClientError, client.complete_multipart_upload,
                      Bucket=bucket, Key=key, UploadId=upload_id,
                      MultipartUpload={'Parts': parts})
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidPart')
@attr(resource='object')
@attr(method='put')
@attr(operation='multi-part upload with incorrect ETag')
def test_multipart_upload_incorrect_etag():
    # Completing with a wrong ETag for an uploaded part must fail with
    # 400 InvalidPart.
    bucket = get_new_bucket()
    client = get_client()
    key = "mymultipart"
    upload_id = client.create_multipart_upload(Bucket=bucket, Key=key)['UploadId']
    client.upload_part(UploadId=upload_id, Bucket=bucket, Key=key,
                       PartNumber=1, Body=StringIO('\x00'))
    # the real ETag would be "93b885adfe0da089cdf634904fd59f71"
    parts = [{'ETag': "ffffffffffffffffffffffffffffffff", 'PartNumber': 1}]
    e = assert_raises(ClientError, client.complete_multipart_upload,
                      Bucket=bucket, Key=key, UploadId=upload_id,
                      MultipartUpload={'Parts': parts})
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidPart')
def _simple_http_req_100_cont(host, port, is_secure, method, resource):
    """
    Send the specified request w/expect 100-continue and return the
    status code from the server's first response line (e.g. '100',
    '403').  Raises on socket errors.
    """
    req = '{method} {resource} HTTP/1.1\r\nHost: {host}\r\nAccept-Encoding: identity\r\nContent-Length: 123\r\nExpect: 100-continue\r\n\r\n'.format(
            method=method,
            resource=resource,
            host=host,
            )
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if is_secure:
        s = ssl.wrap_socket(s)
    s.settimeout(5)
    s.connect((host, port))
    s.send(req)
    try:
        data = s.recv(1024)
    except socket.error as msg:
        # previously this fell through and hit a NameError on `data`;
        # report the likely cause and fail loudly instead
        print('got response: %s' % (msg,))
        print('most likely server doesn\'t support 100-continue')
        raise
    finally:
        # close on every path (the success path used to leak the socket)
        s.close()
    l = data.split(' ')
    assert l[0].startswith('HTTP')
    return l[1]
@attr(resource='object')
@attr(method='put')
@attr(operation='w/expect continue')
@attr(assertion='succeeds if object is public-read-write')
@attr('100_continue')
@attr('fails_on_mod_proxy_fcgi')
def test_100_continue():
    # A PUT with Expect: 100-continue is refused (403) on a private
    # bucket, but answered with 100 once the bucket is public-read-write.
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name)
    objname = 'testobj'
    resource = '/{bucket}/{obj}'.format(bucket=bucket_name, obj=objname)
    host = get_config_host()
    port = get_config_port()
    is_secure = get_config_is_secure()
    #NOTES: this test needs to be tested when is_secure is True
    eq(_simple_http_req_100_cont(host, port, is_secure, 'PUT', resource), '403')
    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read-write')
    eq(_simple_http_req_100_cont(host, port, is_secure, 'PUT', resource), '100')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set cors')
@attr(assertion='succeeds')
@attr('cors')
def test_set_cors():
    # put/get/delete round-trip of a bucket CORS configuration; GET must
    # return 404 both before it is set and after it is deleted.
    bucket_name = get_new_bucket()
    client = get_client()
    allowed_methods = ['GET', 'PUT']
    allowed_origins = ['*.get', '*.put']
    cors_config = {
        'CORSRules': [
            {'AllowedMethods': allowed_methods,
             'AllowedOrigins': allowed_origins,
            },
        ]
    }
    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
    eq(_get_status(e.response), 404)
    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
    rule = client.get_bucket_cors(Bucket=bucket_name)['CORSRules'][0]
    eq(rule['AllowedMethods'], allowed_methods)
    eq(rule['AllowedOrigins'], allowed_origins)
    client.delete_bucket_cors(Bucket=bucket_name)
    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
    eq(_get_status(e.response), 404)
def _cors_request_and_check(func, url, headers, expect_status, expect_allow_origin, expect_allow_methods):
    """
    Issue func(url, headers=headers) -- func is e.g. requests.get /
    requests.put / requests.options -- and verify the response status
    and the CORS allow-origin / allow-methods headers.
    """
    r = func(url, headers=headers)
    eq(r.status_code, expect_status)
    # bare asserts previously gave no diagnostics on failure
    actual_origin = r.headers.get('access-control-allow-origin', None)
    assert actual_origin == expect_allow_origin, \
        'allow-origin: expected %r, got %r' % (expect_allow_origin, actual_origin)
    actual_methods = r.headers.get('access-control-allow-methods', None)
    assert actual_methods == expect_allow_methods, \
        'allow-methods: expected %r, got %r' % (expect_allow_methods, actual_methods)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='check cors response when origin header set')
@attr(assertion='returning cors header')
@attr('cors')
def test_cors_origin_response():
    # Exercise CORS origin matching with suffix ('*suffix'), infix
    # ('start*end') and prefix ('prefix*') wildcards plus a PUT-only
    # rule, for simple requests and for OPTIONS preflights.
    bucket_name = _setup_bucket_acl(bucket_acl='public-read')
    client = get_client()
    cors_config = {
        'CORSRules': [
            {'AllowedMethods': ['GET'], 'AllowedOrigins': ['*suffix']},
            {'AllowedMethods': ['GET'], 'AllowedOrigins': ['start*end']},
            {'AllowedMethods': ['GET'], 'AllowedOrigins': ['prefix*']},
            {'AllowedMethods': ['PUT'], 'AllowedOrigins': ['*.put']},
        ]
    }
    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
    eq(_get_status(e.response), 404)
    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
    time.sleep(3)  # let the CORS configuration propagate
    url = _get_post_url(bucket_name)
    # simple GETs: CORS headers are echoed only for a matching origin
    _cors_request_and_check(requests.get, url, None, 200, None, None)
    for origin, allow_origin, allow_methods in [
            ('foo.suffix', 'foo.suffix', 'GET'),
            ('foo.bar', None, None),
            ('foo.suffix.get', None, None),
            ('startend', 'startend', 'GET'),
            ('start1end', 'start1end', 'GET'),
            ('start12end', 'start12end', 'GET'),
            ('0start12end', None, None),
            ('prefix', 'prefix', 'GET'),
            ('prefix.suffix', 'prefix.suffix', 'GET'),
            ('bla.prefix', None, None),
            ]:
        _cors_request_and_check(requests.get, url, {'Origin': origin}, 200, allow_origin, allow_methods)
    obj_url = '{u}/{o}'.format(u=url, o='bar')
    _cors_request_and_check(requests.get, obj_url, {'Origin': 'foo.suffix'}, 404, 'foo.suffix', 'GET')
    # simple PUTs on a missing object: always 403; CORS headers follow the rules
    _cors_request_and_check(requests.put, obj_url,
                            {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'GET',
                             'content-length': '0'}, 403, 'foo.suffix', 'GET')
    _cors_request_and_check(requests.put, obj_url,
                            {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'PUT',
                             'content-length': '0'}, 403, None, None)
    _cors_request_and_check(requests.put, obj_url,
                            {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'DELETE',
                             'content-length': '0'}, 403, None, None)
    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'content-length': '0'}, 403, None, None)
    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.put', 'content-length': '0'}, 403, 'foo.put', 'PUT')
    _cors_request_and_check(requests.get, obj_url, {'Origin': 'foo.suffix'}, 404, 'foo.suffix', 'GET')
    # OPTIONS preflights: 400 without a requested method, else 200/403
    _cors_request_and_check(requests.options, url, None, 400, None, None)
    _cors_request_and_check(requests.options, url, {'Origin': 'foo.suffix'}, 400, None, None)
    _cors_request_and_check(requests.options, url, {'Origin': 'bla'}, 400, None, None)
    _cors_request_and_check(requests.options, obj_url,
                            {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'GET',
                             'content-length': '0'}, 200, 'foo.suffix', 'GET')
    for origin, acrm, status, allow_origin, allow_methods in [
            ('foo.bar', 'GET', 403, None, None),
            ('foo.suffix.get', 'GET', 403, None, None),
            ('startend', 'GET', 200, 'startend', 'GET'),
            ('start1end', 'GET', 200, 'start1end', 'GET'),
            ('start12end', 'GET', 200, 'start12end', 'GET'),
            ('0start12end', 'GET', 403, None, None),
            ('prefix', 'GET', 200, 'prefix', 'GET'),
            ('prefix.suffix', 'GET', 200, 'prefix.suffix', 'GET'),
            ('bla.prefix', 'GET', 403, None, None),
            ('foo.put', 'GET', 403, None, None),
            ('foo.put', 'PUT', 200, 'foo.put', 'PUT'),
            ]:
        _cors_request_and_check(requests.options, url,
                                {'Origin': origin, 'Access-Control-Request-Method': acrm},
                                status, allow_origin, allow_methods)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='check cors response when origin is set to wildcard')
@attr(assertion='returning cors header')
@attr('cors')
def test_cors_origin_wildcard():
    """A wildcard AllowedOrigins rule must echo '*' for any Origin on GET."""
    bucket_name = _setup_bucket_acl(bucket_acl='public-read')
    client = get_client()
    wildcard_rules = {
        'CORSRules': [
            {'AllowedMethods': ['GET'],
             'AllowedOrigins': ['*'],
            },
        ]
    }
    # A fresh bucket has no CORS configuration at all.
    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
    eq(_get_status(e.response), 404)
    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=wildcard_rules)
    # Give the new configuration time to take effect before issuing requests.
    time.sleep(3)
    url = _get_post_url(bucket_name)
    # No Origin header -> no CORS headers expected; any origin -> '*'.
    _cors_request_and_check(requests.get, url, None, 200, None, None)
    _cors_request_and_check(requests.get, url, {'Origin': 'example.origin'}, 200, '*', 'GET')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='check cors response when Access-Control-Request-Headers is set in option request')
@attr(assertion='returning cors header')
@attr('cors')
def test_cors_header_option():
    """A preflight requesting a header the rule does not allow must be refused."""
    bucket_name = _setup_bucket_acl(bucket_acl='public-read')
    client = get_client()
    rules = {
        'CORSRules': [
            {'AllowedMethods': ['GET'],
             'AllowedOrigins': ['*'],
             'ExposeHeaders': ['x-amz-meta-header1'],
            },
        ]
    }
    # A fresh bucket has no CORS configuration at all.
    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
    eq(_get_status(e.response), 404)
    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=rules)
    # Give the new configuration time to take effect.
    time.sleep(3)
    url = _get_post_url(bucket_name)
    obj_url = '{u}/{o}'.format(u=url, o='bar')
    # x-amz-meta-header2 is not among the rule's allowed headers -> 403.
    _cors_request_and_check(requests.options, obj_url, {'Origin': 'example.origin','Access-Control-Request-Headers':'x-amz-meta-header2','Access-Control-Request-Method':'GET'}, 403, None, None)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='put tags')
@attr(assertion='succeeds')
@attr('tagging')
def test_set_tagging():
    """Round-trip a bucket tag set: absent -> present -> deleted again."""
    bucket_name = get_new_bucket()
    client = get_client()
    tag_set = {
        'TagSet': [
            {
                'Key': 'Hello',
                'Value': 'World'
            },
        ]
    }
    def expect_no_tags():
        # An untagged bucket answers GetBucketTagging with NoSuchTagSetError.
        e = assert_raises(ClientError, client.get_bucket_tagging, Bucket=bucket_name)
        status, error_code = _get_status_and_error_code(e.response)
        eq(status, 404)
        eq(error_code, 'NoSuchTagSetError')
    expect_no_tags()
    client.put_bucket_tagging(Bucket=bucket_name, Tagging=tag_set)
    response = client.get_bucket_tagging(Bucket=bucket_name)
    eq(len(response['TagSet']), 1)
    eq(response['TagSet'][0]['Key'], 'Hello')
    eq(response['TagSet'][0]['Value'], 'World')
    client.delete_bucket_tagging(Bucket=bucket_name)
    expect_no_tags()
class FakeFile(object):
    """
    File-like stub supporting seek/tell over a stream of one repeated
    character, with an optional interrupt callback for subclasses to fire.
    """
    def __init__(self, char='A', interrupt=None):
        # Current position; subclasses maintain self.size for SEEK_END.
        self.offset = 0
        self.char = char
        self.interrupt = interrupt
    def seek(self, offset, whence=os.SEEK_SET):
        # Mirror the three whence modes of a real file object.
        if whence == os.SEEK_CUR:
            self.offset += offset
        elif whence == os.SEEK_END:
            # Relies on self.size, which subclasses define.
            self.offset = self.size + offset
        elif whence == os.SEEK_SET:
            self.offset = offset
    def tell(self):
        return self.offset
class FakeWriteFile(FakeFile):
    """
    Readable fake file producing `size` copies of one character, with an
    optional interrupt callback fired just before the final chunk returns.
    """
    def __init__(self, size, char='A', interrupt=None):
        FakeFile.__init__(self, char, interrupt)
        self.size = size
    def read(self, size=-1):
        # Negative size means "read to the end", like a real file.
        if size < 0:
            size = self.size - self.offset
        count = min(size, self.size - self.offset)
        self.offset += count
        # Sneaky! run the callback right before handing back the last chunk.
        if count > 0 and self.offset == self.size and self.interrupt is not None:
            self.interrupt()
        return self.char * count
class FakeReadFile(FakeFile):
    """
    Writable fake file that checks incoming chunks are all one character
    and fires its interrupt callback once, after the first data arrives.
    """
    def __init__(self, size, char='A', interrupt=None):
        FakeFile.__init__(self, char, interrupt)
        self.interrupted = False
        # size counts bytes written so far; expected_size is the target.
        self.size = 0
        self.expected_size = size
    def write(self, chars):
        # Every chunk must consist solely of the expected character.
        eq(chars, self.char * len(chars))
        self.offset += len(chars)
        self.size += len(chars)
        # Sneaky! fire the interrupt exactly once, as soon as data lands.
        if not self.interrupted and self.interrupt is not None \
                and self.offset > 0:
            self.interrupt()
            self.interrupted = True
    def close(self):
        # On close the full expected payload must have been received.
        eq(self.size, self.expected_size)
class FakeFileVerifier(object):
    """
    Write sink asserting that all incoming data is one repeated character.
    """
    def __init__(self, char=None):
        # char=None means "lock onto the first byte seen".
        self.char = char
        self.size = 0
    def write(self, data):
        if self.char is None:
            self.char = data[0]
        self.size += len(data)
        eq(data, self.char * len(data))
def _verify_atomic_key_data(bucket_name, key, size=-1, char=None):
    """
    Download *key* and confirm its (simulated) one-character content, and
    its exact size when *size* is non-negative.  A None *char* lets the
    verifier lock onto whichever character it sees first.
    """
    verifier = FakeFileVerifier(char)
    get_client().download_fileobj(bucket_name, key, verifier)
    if size >= 0:
        eq(verifier.size, size)
def _test_atomic_read(file_size):
    """
    Upload an object of all A's, then overwrite it with all B's while a
    reader is mid-download, and confirm a final read-back sees all B's.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    fp_a = FakeWriteFile(file_size, 'A')
    client.put_object(Bucket=bucket_name, Key='testobj', Body=fp_a)
    # The reader's interrupt callback re-puts the object as B's while the
    # download of the A version is still in flight.
    fp_b = FakeWriteFile(file_size, 'B')
    reader = FakeReadFile(file_size, 'A',
        lambda: client.put_object(Bucket=bucket_name, Key='testobj', Body=fp_b)
        )
    read_client = get_client()
    read_client.download_fileobj(bucket_name, 'testobj', reader)
    reader.close()
    _verify_atomic_key_data(bucket_name, 'testobj', file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='read atomicity')
@attr(assertion='1MB successful')
def test_atomic_read_1mb():
    """Read atomicity with a 1 MiB object."""
    _test_atomic_read(1024*1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='read atomicity')
@attr(assertion='4MB successful')
def test_atomic_read_4mb():
    """Read atomicity with a 4 MiB object."""
    _test_atomic_read(1024*1024*4)
@attr(resource='object')
@attr(method='put')
@attr(operation='read atomicity')
@attr(assertion='8MB successful')
def test_atomic_read_8mb():
    """Read atomicity with an 8 MiB object."""
    _test_atomic_read(1024*1024*8)
def _test_atomic_write(file_size):
    """
    Upload an object of A's and verify it, then overwrite with B's while
    verifying mid-upload that readers still see one consistent generation,
    and finally confirm the object reads back as all B's.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    objname = 'testobj'
    # First generation: all A's, verified after the upload.
    fp_a = FakeWriteFile(file_size, 'A')
    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
    _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
    # Second generation: all B's, with a mid-upload verification hook.
    # The char argument is omitted there so the verifier accepts whichever
    # complete generation it happens to observe.
    fp_b = FakeWriteFile(file_size, 'B',
        lambda: _verify_atomic_key_data(bucket_name, objname, file_size)
        )
    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
    # Once the overwrite completes, only B's may remain.
    _verify_atomic_key_data(bucket_name, objname, file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='write atomicity')
@attr(assertion='1MB successful')
def test_atomic_write_1mb():
    """Write atomicity with a 1 MiB object."""
    _test_atomic_write(1024*1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='write atomicity')
@attr(assertion='4MB successful')
def test_atomic_write_4mb():
    """Write atomicity with a 4 MiB object."""
    _test_atomic_write(1024*1024*4)
@attr(resource='object')
@attr(method='put')
@attr(operation='write atomicity')
@attr(assertion='8MB successful')
def test_atomic_write_8mb():
    """Write atomicity with an 8 MiB object."""
    _test_atomic_write(1024*1024*8)
def _test_atomic_dual_write(file_size):
    """
    Race two writers putting different contents to the same key; the
    final object must be entirely one writer's data, never a mixture.
    """
    bucket_name = get_new_bucket()
    objname = 'testobj'
    client = get_client()
    client.put_object(Bucket=bucket_name, Key=objname)
    # Writer B's fake file triggers writer A just before B's upload
    # finishes, so the two puts overlap on the same key.
    fp_a = FakeWriteFile(file_size, 'A')
    def rewind_put_fp_a():
        fp_a.seek(0)
        client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
    fp_b = FakeWriteFile(file_size, 'B', rewind_put_fp_a)
    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
    # char=None: accept all-A's or all-B's, but never interleaved data.
    _verify_atomic_key_data(bucket_name, objname, file_size)
@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='1MB successful')
def test_atomic_dual_write_1mb():
    """Dual-writer race with a 1 MiB object."""
    _test_atomic_dual_write(1024*1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='4MB successful')
def test_atomic_dual_write_4mb():
    """Dual-writer race with a 4 MiB object."""
    _test_atomic_dual_write(1024*1024*4)
@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='8MB successful')
def test_atomic_dual_write_8mb():
    """Dual-writer race with an 8 MiB object."""
    _test_atomic_dual_write(1024*1024*8)
def _test_atomic_conditional_write(file_size):
    """
    Like _test_atomic_write, but the overwrite carries an 'If-Match: *'
    header injected through a botocore before-call hook.
    """
    bucket_name = get_new_bucket()
    objname = 'testobj'
    client = get_client()
    # Seed the object with A's.
    fp_a = FakeWriteFile(file_size, 'A')
    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
    # Overwrite with B's, verifying mid-upload that the visible content is
    # still one consistent generation (char omitted on purpose).
    fp_b = FakeWriteFile(file_size, 'B',
        lambda: _verify_atomic_key_data(bucket_name, objname, file_size)
        )
    # Attach 'If-Match: *' to the PutObject request.
    add_if_match = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '*'}))
    client.meta.events.register('before-call.s3.PutObject', add_if_match)
    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
    # After the conditional overwrite, only B's may remain.
    _verify_atomic_key_data(bucket_name, objname, file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='write atomicity')
@attr(assertion='1MB successful')
@attr('fails_on_aws')
def test_atomic_conditional_write_1mb():
    """Conditional (If-Match: *) write atomicity with a 1 MiB object."""
    _test_atomic_conditional_write(1024*1024)
def _test_atomic_dual_conditional_write(file_size):
    """
    create an object, two sessions writing different contents
    confirm that it is all one or the other

    Writer C uploads guarded by 'If-Match: <etag of the A version>'.
    Mid-upload, fp_c's interrupt runs a nested put of B's; that nested put
    passes through the same registered before-call hook, so it carries the
    same If-Match header (the object is still the A version at that point,
    so it succeeds).  The B upload changes the ETag, so the outer C upload
    must then fail with 412 PreconditionFailed, leaving all B's.
    """
    bucket_name = get_new_bucket()
    objname = 'testobj'
    client = get_client()
    # Seed the object with A's and remember its ETag for the If-Match guard.
    fp_a = FakeWriteFile(file_size, 'A')
    response = client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
    _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
    etag_fp_a = response['ETag'].replace('"', '')
    # write <file_size> file of C's
    # but before we're done, try to write all B's
    fp_b = FakeWriteFile(file_size, 'B')
    # NOTE: the hook stays registered, so it applies to the nested B put too.
    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': etag_fp_a}))
    client.meta.events.register('before-call.s3.PutObject', lf)
    def rewind_put_fp_b():
        fp_b.seek(0)
        client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
    fp_c = FakeWriteFile(file_size, 'C', rewind_put_fp_b)
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=objname, Body=fp_c)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 412)
    eq(error_code, 'PreconditionFailed')
    # verify the file
    _verify_atomic_key_data(bucket_name, objname, file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='1MB successful')
@attr('fails_on_aws')
# TODO: test not passing with SSL, fix this
@attr('fails_on_rgw')
def test_atomic_dual_conditional_write_1mb():
    """Dual conditional-write race with a 1 MiB object."""
    _test_atomic_dual_conditional_write(1024*1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='write file in deleted bucket')
@attr(assertion='fail 404')
@attr('fails_on_aws')
# TODO: test not passing with SSL, fix this
@attr('fails_on_rgw')
def test_atomic_write_bucket_gone():
    """Deleting the bucket mid-upload must fail the PUT with NoSuchBucket."""
    bucket_name = get_new_bucket()
    client = get_client()
    def remove_bucket():
        client.delete_bucket(Bucket=bucket_name)
    objname = 'foo'
    # The fake file deletes the bucket just before its final chunk is read.
    fp_a = FakeWriteFile(1024*1024, 'A', remove_bucket)
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=objname, Body=fp_a)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='put')
@attr(operation='begin to overwrite file with multipart upload then abort')
@attr(assertion='read back original key contents')
def test_atomic_multipart_upload_write():
    """An aborted multipart overwrite must leave the original content intact."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    response = client.create_multipart_upload(Bucket=bucket_name, Key='foo')
    upload_id = response['UploadId']
    # The original content stays visible while the upload is pending...
    response = client.get_object(Bucket=bucket_name, Key='foo')
    eq(_get_body(response), 'bar')
    client.abort_multipart_upload(Bucket=bucket_name, Key='foo', UploadId=upload_id)
    # ...and after the upload has been aborted.
    response = client.get_object(Bucket=bucket_name, Key='foo')
    eq(_get_body(response), 'bar')
class Counter:
    """Minimal mutable counter used to tally callback invocations."""
    def __init__(self, default_val):
        self.val = default_val
    def inc(self):
        """Bump the counter by one."""
        self.val += 1
class ActionOnCount:
    """Run *action* exactly when trigger() reaches *trigger_count* calls."""
    def __init__(self, trigger_count, action):
        self.count = 0
        self.trigger_count = trigger_count
        self.action = action
        # Holds the action's return value once it has fired; 0 before that.
        self.result = 0
    def trigger(self):
        self.count += 1
        # Fire only on the exact matching call, not on any later ones.
        if self.count == self.trigger_count:
            self.result = self.action()
@attr(resource='object')
@attr(method='put')
@attr(operation='multipart check for two writes of the same part, first write finishes last')
@attr(assertion='object contains correct content')
def test_multipart_resend_first_finishes_last():
    """
    Upload the same part twice so that the first upload finishes after the
    second; the content of the first (last-to-finish) upload must win.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    key_name = "mymultipart"
    response = client.create_multipart_upload(Bucket=bucket_name, Key=key_name)
    upload_id = response['UploadId']
    #file_size = 8*1024*1024
    file_size = 8
    counter = Counter(0)
    # upload_part might read multiple times from the object
    # first time when it calculates md5, second time when it writes data
    # out. We want to interject only on the last time, but we can't be
    # sure how many times it's going to read, so let's have a test run
    # and count the number of reads
    fp_dry_run = FakeWriteFile(file_size, 'C',
        lambda: counter.inc()
        )
    parts = []
    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, PartNumber=1, Body=fp_dry_run)
    parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
    client.complete_multipart_upload(Bucket=bucket_name, Key=key_name, UploadId=upload_id, MultipartUpload={'Parts': parts})
    client.delete_object(Bucket=bucket_name, Key=key_name)
    # clear parts
    parts[:] = []
    # ok, now for the actual test
    fp_b = FakeWriteFile(file_size, 'B')
    def upload_fp_b():
        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, Body=fp_b, PartNumber=1)
        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
    # Fire the B upload on the dry-run-measured final read of the A upload,
    # so B starts after A but finishes before it.
    action = ActionOnCount(counter.val, lambda: upload_fp_b())
    response = client.create_multipart_upload(Bucket=bucket_name, Key=key_name)
    upload_id = response['UploadId']
    fp_a = FakeWriteFile(file_size, 'A',
        lambda: action.trigger()
        )
    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, PartNumber=1, Body=fp_a)
    parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
    client.complete_multipart_upload(Bucket=bucket_name, Key=key_name, UploadId=upload_id, MultipartUpload={'Parts': parts})
    # A was sent first but finished last, so A's content must be visible.
    _verify_atomic_key_data(bucket_name, key_name, file_size, 'A')
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns correct data, 206')
def test_ranged_request_response_code():
    """A bounded byte range returns 206 with exactly the requested slice."""
    content = 'testcontent'
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
    response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=4-7')
    # HTTP ranges are inclusive, so bytes 4-7 is the Python slice [4:8].
    eq(_get_body(response), content[4:8])
    headers = response['ResponseMetadata']['HTTPHeaders']
    eq(headers['content-range'], 'bytes 4-7/11')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns correct data, 206')
def test_ranged_big_request_response_code():
    """A multi-megabyte range on an 8 MiB random object returns 206 with the slice."""
    content = os.urandom(8*1024*1024)
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
    response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=3145728-5242880')
    # The range end is inclusive, hence the +1 in the slice bound.
    eq(_get_body(response), content[3145728:5242881])
    headers = response['ResponseMetadata']['HTTPHeaders']
    eq(headers['content-range'], 'bytes 3145728-5242880/8388608')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns correct data, 206')
def test_ranged_request_skip_leading_bytes_response_code():
    """An open-ended range 'bytes=4-' returns everything from offset 4 on."""
    content = 'testcontent'
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
    response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=4-')
    eq(_get_body(response), content[4:])
    headers = response['ResponseMetadata']['HTTPHeaders']
    # 'testcontent' is 11 bytes, so the open range resolves to 4-10.
    eq(headers['content-range'], 'bytes 4-10/11')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns correct data, 206')
def test_ranged_request_return_trailing_bytes_response_code():
    """A suffix range 'bytes=-7' returns the last seven bytes."""
    content = 'testcontent'
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
    response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=-7')
    eq(_get_body(response), content[-7:])
    headers = response['ResponseMetadata']['HTTPHeaders']
    # The last 7 of 11 bytes resolve to the inclusive range 4-10.
    eq(headers['content-range'], 'bytes 4-10/11')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns invalid range, 416')
def test_ranged_request_invalid_range():
    """A range beyond the object's size yields 416 InvalidRange."""
    content = 'testcontent'
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
    # 'testcontent' is only 11 bytes, so bytes=40-50 is unsatisfiable.
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='testobj', Range='bytes=40-50')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 416)
    eq(error_code, 'InvalidRange')
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns invalid range, 416')
def test_ranged_request_empty_object():
    """Any byte range against an empty object is unsatisfiable (416)."""
    content = ''
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
    # Every range is out of bounds for a zero-byte object.
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='testobj', Range='bytes=40-50')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 416)
    eq(error_code, 'InvalidRange')
@attr(resource='bucket')
@attr(method='create')
@attr(operation='create versioned bucket')
@attr(assertion='can create and suspend bucket versioning')
@attr('versioning')
def test_versioning_bucket_create_suspend():
    """Versioning can be suspended, enabled (idempotently), and re-suspended."""
    bucket_name = get_new_bucket()
    # A fresh bucket reports no versioning status at all.
    check_versioning(bucket_name, None)
    for state in ("Suspended", "Enabled", "Enabled", "Suspended"):
        check_configure_versioning_retry(bucket_name, state, state)
def check_obj_content(client, bucket_name, key, version_id, content):
    """
    Assert that a specific version of *key* holds *content*, or is a
    delete marker when *content* is None.
    """
    response = client.get_object(Bucket=bucket_name, Key=key, VersionId=version_id)
    if content is None:
        eq(response['DeleteMarker'], True)
    else:
        eq(_get_body(response), content)
def check_obj_versions(client, bucket_name, key, version_ids, contents):
    """
    Verify the bucket's version listing matches the expected ids and
    contents for *key*, comparing in creation order.
    """
    response = client.list_object_versions(Bucket=bucket_name)
    versions = response['Versions']
    # The listing is newest-first; reverse it so indexes line up with the
    # creation-ordered version_ids/contents lists.
    versions.reverse()
    for i, version in enumerate(versions):
        eq(version['VersionId'], version_ids[i])
        eq(version['Key'], key)
        check_obj_content(client, bucket_name, key, version['VersionId'], contents[i])
def create_multiple_versions(client, bucket_name, key, num_versions, version_ids = None, contents = None, check_versions = True):
    """
    Write *num_versions* distinct bodies ('content-0', 'content-1', ...) to
    *key*, appending each body and its version id to *contents* and
    *version_ids* (mutated in place when the caller supplies them).

    Returns (version_ids, contents).
    """
    # Test for None explicitly: the previous 'contents = contents or []'
    # silently replaced a caller-supplied EMPTY list with a fresh one, so
    # the caller's list never received the appended entries.
    if contents is None:
        contents = []
    if version_ids is None:
        version_ids = []
    for i in xrange(num_versions):
        body = 'content-{i}'.format(i=i)
        response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
        contents.append(body)
        version_ids.append(response['VersionId'])
    if check_versions:
        check_obj_versions(client, bucket_name, key, version_ids, contents)
    return (version_ids, contents)
def remove_obj_version(client, bucket_name, key, version_ids, contents, index):
    """
    Delete one version of *key* (chosen by *index*, wrapped modulo the
    number of remaining versions) and re-verify the survivors.
    """
    eq(len(version_ids), len(contents))
    # Wrap so negative and out-of-range indexes pick a valid slot.
    index = index % len(version_ids)
    rm_version_id = version_ids.pop(index)
    rm_content = contents.pop(index)
    # Sanity-check the doomed version before removing it.
    check_obj_content(client, bucket_name, key, rm_version_id, rm_content)
    client.delete_object(Bucket=bucket_name, Key=key, VersionId=rm_version_id)
    if version_ids:
        check_obj_versions(client, bucket_name, key, version_ids, contents)
def clean_up_bucket(client, bucket_name, key, version_ids):
    """Delete every listed version of *key*, then the bucket itself."""
    for vid in version_ids:
        client.delete_object(Bucket=bucket_name, Key=key, VersionId=vid)
    client.delete_bucket(Bucket=bucket_name)
def _do_test_create_remove_versions(client, bucket_name, key, num_versions, remove_start_idx, idx_inc):
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
idx = remove_start_idx
for j in xrange(num_versions):
remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
idx += idx_inc
response = client.list_object_versions(Bucket=bucket_name)
if 'Versions' in response:
print response['Versions']
@attr(resource='object')
@attr(method='create')
@attr(operation='create and remove versioned object')
@attr(assertion='can create access and remove appropriate versions')
@attr('versioning')
def test_versioning_obj_create_read_remove():
    """Create and drain object versions using several removal orders and strides."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'MFADelete': 'Disabled', 'Status': 'Enabled'})
    key = 'testobj'
    num_versions = 5
    # The trailing arguments are (remove_start_idx, idx_inc);
    # remove_obj_version wraps the index modulo the remaining count.
    # NOTE(review): the (-1, 0) case is run twice — presumably intentional
    # repetition of the same scenario, but worth confirming.
    _do_test_create_remove_versions(client, bucket_name, key, num_versions, -1, 0)
    _do_test_create_remove_versions(client, bucket_name, key, num_versions, -1, 0)
    _do_test_create_remove_versions(client, bucket_name, key, num_versions, 0, 0)
    _do_test_create_remove_versions(client, bucket_name, key, num_versions, 1, 0)
    _do_test_create_remove_versions(client, bucket_name, key, num_versions, 4, -1)
    _do_test_create_remove_versions(client, bucket_name, key, num_versions, 3, 3)
@attr(resource='object')
@attr(method='create')
@attr(operation='create and remove versioned object and head')
@attr(assertion='can create access and remove appropriate versions')
@attr('versioning')
def test_versioning_obj_create_read_remove_head():
    """Removing the head version exposes the previous one; a plain delete adds a marker."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'MFADelete': 'Disabled', 'Status': 'Enabled'})
    key = 'testobj'
    num_versions = 5
    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
    # Drop the newest (head) version; the next-newest becomes visible.
    removed_version_id = version_ids.pop()
    contents.pop()
    num_versions = num_versions-1
    response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=removed_version_id)
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(_get_body(response), contents[-1])
    # A delete without a version id creates a delete marker on top.
    response = client.delete_object(Bucket=bucket_name, Key=key)
    eq(response['DeleteMarker'], True)
    delete_marker_version_id = response['VersionId']
    version_ids.append(delete_marker_version_id)
    response = client.list_object_versions(Bucket=bucket_name)
    eq(len(response['Versions']), num_versions)
    eq(len(response['DeleteMarkers']), 1)
    eq(response['DeleteMarkers'][0]['VersionId'], delete_marker_version_id)
    clean_up_bucket(client, bucket_name, key, version_ids)
@attr(resource='object')
@attr(method='create')
@attr(operation='create object, then switch to versioning')
@attr(assertion='behaves correctly')
@attr('versioning')
def test_versioning_obj_plain_null_version_removal():
    """An object created pre-versioning gets version id 'null' and can be deleted by it."""
    bucket_name = get_new_bucket()
    check_versioning(bucket_name, None)
    client = get_client()
    key = 'testobjfoo'
    content = 'fooz'
    client.put_object(Bucket=bucket_name, Key=key, Body=content)
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    # Deleting VersionId='null' removes the pre-versioning object entirely.
    client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchKey')
    # No versions at all should remain in the listing.
    response = client.list_object_versions(Bucket=bucket_name)
    eq(('Versions' in response), False)
@attr(resource='object')
@attr(method='create')
@attr(operation='create object, then switch to versioning')
@attr(assertion='behaves correctly')
@attr('versioning')
def test_versioning_obj_plain_null_version_overwrite():
    """Overwriting a pre-versioning object keeps the original reachable as 'null'."""
    bucket_name = get_new_bucket()
    check_versioning(bucket_name, None)
    client = get_client()
    key = 'testobjfoo'
    content = 'fooz'
    client.put_object(Bucket=bucket_name, Key=key, Body=content)
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    # Overwrite after enabling versioning; the new body becomes the head.
    content2 = 'zzz'
    response = client.put_object(Bucket=bucket_name, Key=key, Body=content2)
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(_get_body(response), content2)
    # Deleting the new version re-exposes the original 'null' version.
    version_id = response['VersionId']
    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id)
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(_get_body(response), content)
    # Removing the 'null' version empties the bucket completely.
    client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchKey')
    response = client.list_object_versions(Bucket=bucket_name)
    eq(('Versions' in response), False)
@attr(resource='object')
@attr(method='create')
@attr(operation='create object, then switch to versioning')
@attr(assertion='behaves correctly')
@attr('versioning')
def test_versioning_obj_plain_null_version_overwrite_suspended():
    """Overwriting under suspended versioning replaces the 'null' version in place."""
    bucket_name = get_new_bucket()
    check_versioning(bucket_name, None)
    client = get_client()
    key = 'testobjbar'
    content = 'foooz'
    client.put_object(Bucket=bucket_name, Key=key, Body=content)
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
    content2 = 'zzz'
    client.put_object(Bucket=bucket_name, Key=key, Body=content2)
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(_get_body(response), content2)
    response = client.list_object_versions(Bucket=bucket_name)
    # The suspended-mode overwrite reuses the 'null' slot, so exactly one
    # version remains listed.
    eq(len(response['Versions']), 1)
    # Deleting the 'null' version leaves the bucket empty.
    client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 404)
    eq(error_code, 'NoSuchKey')
    response = client.list_object_versions(Bucket=bucket_name)
    eq(('Versions' in response), False)
def delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents):
    """
    Delete *key* while versioning is suspended and drop any stale 'null'
    version entries from the bookkeeping lists (mutated in place).

    Returns (version_ids, contents).
    """
    client.delete_object(Bucket=bucket_name, Key=key)
    eq(len(version_ids), len(contents))
    # The previous code popped entries out of the list it was iterating,
    # which skips the element following each removal; rebuild the paired
    # lists instead, preserving in-place semantics via slice assignment
    # (callers rely on mutation and ignore the return value).
    kept = [(vid, body) for vid, body in zip(version_ids, contents)
            if vid != 'null']
    version_ids[:] = [vid for vid, _ in kept]
    contents[:] = [body for _, body in kept]
    return (version_ids, contents)
def overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, content):
    """
    Overwrite *key* while versioning is suspended and update the
    bookkeeping lists in place: stale 'null' entries are dropped and the
    new content is appended under the 'null' version id.

    Returns (version_ids, contents).
    """
    client.put_object(Bucket=bucket_name, Key=key, Body=content)
    eq(len(version_ids), len(contents))
    # The previous code popped entries out of the list it was iterating,
    # which skips the element following each removal; rebuild the paired
    # lists instead, preserving in-place semantics via slice assignment
    # (callers rely on mutation and ignore the return value).
    kept = [(vid, body) for vid, body in zip(version_ids, contents)
            if vid != 'null']
    version_ids[:] = [vid for vid, _ in kept]
    contents[:] = [body for _, body in kept]
    # The suspended-mode overwrite lands in the 'null' version slot.
    contents.append(content)
    version_ids.append('null')
    return (version_ids, contents)
@attr(resource='object')
@attr(method='create')
@attr(operation='suspend versioned bucket')
@attr(assertion='suspended versioning behaves correctly')
@attr('versioning')
def test_versioning_obj_suspend_versions():
    """Exercise deletes/overwrites under suspended versioning, then resume and drain."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    key = 'testobj'
    num_versions = 5
    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
    # While suspended, deletes/overwrites only churn the 'null' version;
    # the helpers keep version_ids/contents in sync in place.
    delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
    delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
    overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 1')
    overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 2')
    delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
    overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 3')
    delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
    # Re-enable versioning and stack a few more real versions on top.
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, 3, version_ids, contents)
    num_versions += 3
    # Drain every remaining version; the lists must end up empty.
    for idx in xrange(num_versions):
        remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
    eq(len(version_ids), 0)
    eq(len(version_ids), len(contents))
@attr(resource='object')
@attr(method='remove')
@attr(operation='create and remove versions')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_obj_create_versions_remove_all():
    """Create ten versions and remove them all, cycling the removal index."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    key = 'testobj'
    num_versions = 10
    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
    # remove_obj_version wraps the index, so passing idx walks varied slots.
    for idx in xrange(num_versions):
        remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
    eq(len(version_ids), 0)
    eq(len(version_ids), len(contents))
@attr(resource='object')
@attr(method='remove')
@attr(operation='create and remove versions')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_obj_create_versions_remove_special_names():
    """Versioning round-trip for keys with unusual names."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    # Keys with a leading underscore, punctuation, and whitespace.
    keys = ['_testobj', '_', ':', ' ']
    num_versions = 10
    for key in keys:
        (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
        for idx in xrange(num_versions):
            remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
        eq(len(version_ids), 0)
        eq(len(version_ids), len(contents))
@attr(resource='object')
@attr(method='multipart')
@attr(operation='create and test multipart object')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_obj_create_overwrite_multipart():
    """Multipart uploads create object versions just like plain puts."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    key = 'testobj'
    num_versions = 3
    # Each multipart upload produces one new version of the object.
    contents = [_do_test_multipart_upload_contents(bucket_name, key, 3)
                for _ in xrange(num_versions)]
    # Collect version ids; the listing is newest-first, so reverse to get
    # creation order.
    response = client.list_object_versions(Bucket=bucket_name)
    version_ids = [version['VersionId'] for version in response['Versions']]
    version_ids.reverse()
    check_obj_versions(client, bucket_name, key, version_ids, contents)
    for idx in xrange(num_versions):
        remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
    eq(len(version_ids), 0)
    eq(len(version_ids), len(contents))
@attr(resource='object')
@attr(method='multipart')
@attr(operation='list versioned objects')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_obj_list_marker():
    """Version listing groups per key, with versions newest-first inside each group."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    key = 'testobj'
    key2 = 'testobj-1'
    num_versions = 5
    def put_versions(target_key):
        # Upload num_versions distinct bodies, recording ids/contents in
        # creation order.
        ids = []
        bodies = []
        for i in xrange(num_versions):
            body = 'content-{i}'.format(i=i)
            response = client.put_object(Bucket=bucket_name, Key=target_key, Body=body)
            bodies.append(body)
            ids.append(response['VersionId'])
        return ids, bodies
    version_ids, contents = put_versions(key)
    version_ids2, contents2 = put_versions(key2)
    response = client.list_object_versions(Bucket=bucket_name)
    versions = response['Versions']
    # The listing is newest-first; reverse so it lines up with creation order.
    versions.reverse()
    # After reversing, key2's five versions come first (in creation order),
    # followed by key's five.
    expected = [(key2, vid, body) for vid, body in zip(version_ids2, contents2)]
    expected += [(key, vid, body) for vid, body in zip(version_ids, contents)]
    for i in xrange(2 * num_versions):
        version = versions[i]
        exp_key, exp_vid, exp_body = expected[i]
        eq(version['VersionId'], exp_vid)
        eq(version['Key'], exp_key)
        check_obj_content(client, bucket_name, exp_key, version['VersionId'], exp_body)
@attr(resource='object')
@attr(method='multipart')
@attr(operation='create and test versioned object copying')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_copy_obj_version():
    """Copy specific object versions within and across buckets; verify bodies."""
    bucket = get_new_bucket()
    conn = get_client()
    check_configure_versioning_retry(bucket, "Enabled", "Enabled")
    src_key = 'testobj'
    n = 3
    (ids, bodies) = create_multiple_versions(conn, bucket, src_key, n)

    def _copy_and_check(dest_bucket, i):
        # copy version i into dest_bucket and confirm the payload survived
        dest_key = 'key_{i}'.format(i=i)
        src = {'Bucket': bucket, 'Key': src_key, 'VersionId': ids[i]}
        conn.copy_object(Bucket=dest_bucket, CopySource=src, Key=dest_key)
        got = _get_body(conn.get_object(Bucket=dest_bucket, Key=dest_key))
        eq(got, bodies[i])

    # same-bucket copies, one per version
    for i in xrange(n):
        _copy_and_check(bucket, i)
    # cross-bucket copies, one per version
    other_bucket = get_new_bucket()
    for i in xrange(n):
        _copy_and_check(other_bucket, i)
    # a copy without VersionId must take the current (latest) version
    conn.copy_object(Bucket=other_bucket, CopySource={'Bucket': bucket, 'Key': src_key}, Key='new_key')
    got = _get_body(conn.get_object(Bucket=other_bucket, Key='new_key'))
    eq(got, bodies[-1])
@attr(resource='object')
@attr(method='delete')
@attr(operation='delete multiple versions')
@attr(assertion='deletes multiple versions of an object with a single call')
@attr('versioning')
def test_versioning_multi_object_delete():
    """Delete every version of a key, then repeat to confirm idempotency."""
    bucket = get_new_bucket()
    conn = get_client()
    check_configure_versioning_retry(bucket, "Enabled", "Enabled")
    obj = 'key'
    (ids, bodies) = create_multiple_versions(conn, bucket, obj, 2)
    listing = conn.list_object_versions(Bucket=bucket)['Versions']
    listing.reverse()
    # two passes: the second deletes already-gone versions and must still
    # succeed (version deletion is idempotent)
    for _ in range(2):
        for ver in listing:
            conn.delete_object(Bucket=bucket, Key=obj, VersionId=ver['VersionId'])
        resp = conn.list_object_versions(Bucket=bucket)
        eq(('Versions' in resp), False)
@attr(resource='object')
@attr(method='delete')
@attr(operation='delete multiple versions')
@attr(assertion='deletes multiple versions of an object and delete marker with a single call')
@attr('versioning')
def test_versioning_multi_object_delete_with_marker():
    """Remove all versions plus the delete marker; repeat for idempotency."""
    bucket = get_new_bucket()
    conn = get_client()
    check_configure_versioning_retry(bucket, "Enabled", "Enabled")
    obj = 'key'
    (ids, bodies) = create_multiple_versions(conn, bucket, obj, 2)
    # a plain delete on a versioned bucket lays down a delete marker
    conn.delete_object(Bucket=bucket, Key=obj)
    resp = conn.list_object_versions(Bucket=bucket)
    versions = resp['Versions']
    markers = resp['DeleteMarkers']
    ids.append(markers[0]['VersionId'])
    eq(len(ids), 3)
    eq(len(markers), 1)
    # two passes: the second verifies deleting already-gone versions and
    # markers still succeeds (idempotency)
    for _ in range(2):
        for ver in versions:
            conn.delete_object(Bucket=bucket, Key=obj, VersionId=ver['VersionId'])
        for marker in markers:
            conn.delete_object(Bucket=bucket, Key=obj, VersionId=marker['VersionId'])
        resp = conn.list_object_versions(Bucket=bucket)
        eq(('Versions' in resp), False)
        eq(('DeleteMarkers' in resp), False)
@attr(resource='object')
@attr(method='delete')
@attr(operation='multi delete create marker')
@attr(assertion='returns correct marker version id')
@attr('versioning')
def test_versioning_multi_object_delete_with_marker_create():
    """Deleting a nonexistent key must create a delete marker and report its id."""
    bucket = get_new_bucket()
    conn = get_client()
    check_configure_versioning_retry(bucket, "Enabled", "Enabled")
    obj = 'key'
    marker_id = conn.delete_object(Bucket=bucket, Key=obj)['VersionId']
    markers = conn.list_object_versions(Bucket=bucket)['DeleteMarkers']
    eq(len(markers), 1)
    # the id returned by the delete must match the one in the listing
    eq(marker_id, markers[0]['VersionId'])
    eq(obj, markers[0]['Key'])
@attr(resource='object')
@attr(method='put')
@attr(operation='change acl on an object version changes specific version')
@attr(assertion='works')
@attr('versioning')
def test_versioned_object_acl():
    """Putting an ACL with an explicit VersionId must affect only that version.

    Creates three versions, grants public-read to the middle one, then
    overwrites the key and checks the new current version still carries
    the default owner-only FULL_CONTROL ACL.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    key = 'xyz'
    num_versions = 3
    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
    # target the middle (non-current) version explicitly
    version_id = version_ids[1]
    response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
    display_name = get_main_display_name()
    user_id = get_main_user_id()
    eq(response['Owner']['DisplayName'], display_name)
    eq(response['Owner']['ID'], user_id)
    grants = response['Grants']
    # freshly written versions carry only the owner's FULL_CONTROL grant
    default_policy = [
        dict(
            Permission='FULL_CONTROL',
            ID=user_id,
            DisplayName=display_name,
            URI=None,
            EmailAddress=None,
            Type='CanonicalUser',
        ),
    ]
    check_grants(grants, default_policy)
    # grant public-read to the targeted version only
    client.put_object_acl(ACL='public-read',Bucket=bucket_name, Key=key, VersionId=version_id)
    response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
    grants = response['Grants']
    # public-read adds an AllUsers READ grant alongside the owner grant
    check_grants(
        grants,
        [
            dict(
                Permission='READ',
                ID=None,
                DisplayName=None,
                URI='http://acs.amazonaws.com/groups/global/AllUsers',
                EmailAddress=None,
                Type='Group',
            ),
            dict(
                Permission='FULL_CONTROL',
                ID=user_id,
                DisplayName=display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
            ),
        ],
    )
    # a new current version must come back with the default ACL, proving
    # the public-read grant was scoped to version_id only
    client.put_object(Bucket=bucket_name, Key=key)
    response = client.get_object_acl(Bucket=bucket_name, Key=key)
    grants = response['Grants']
    check_grants(grants, default_policy)
@attr(resource='object')
@attr(method='put')
@attr(operation='change acl on an object with no version specified changes latest version')
@attr(assertion='works')
@attr('versioning')
def test_versioned_object_acl_no_version_specified():
    """Putting an ACL without a VersionId must change the latest version's ACL."""
    bucket_name = get_new_bucket()
    client = get_client()
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    key = 'xyz'
    num_versions = 3
    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
    # discover the current version's id via a plain GET
    response = client.get_object(Bucket=bucket_name, Key=key)
    version_id = response['VersionId']
    response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
    display_name = get_main_display_name()
    user_id = get_main_user_id()
    eq(response['Owner']['DisplayName'], display_name)
    eq(response['Owner']['ID'], user_id)
    grants = response['Grants']
    # freshly written versions carry only the owner's FULL_CONTROL grant
    default_policy = [
        dict(
            Permission='FULL_CONTROL',
            ID=user_id,
            DisplayName=display_name,
            URI=None,
            EmailAddress=None,
            Type='CanonicalUser',
        ),
    ]
    check_grants(grants, default_policy)
    # no VersionId here: the ACL change should land on the latest version
    client.put_object_acl(ACL='public-read',Bucket=bucket_name, Key=key)
    response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
    grants = response['Grants']
    # reading the latest version's ACL back by its explicit id must show
    # the AllUsers READ grant added above
    check_grants(
        grants,
        [
            dict(
                Permission='READ',
                ID=None,
                DisplayName=None,
                URI='http://acs.amazonaws.com/groups/global/AllUsers',
                EmailAddress=None,
                Type='Group',
            ),
            dict(
                Permission='FULL_CONTROL',
                ID=user_id,
                DisplayName=display_name,
                URI=None,
                EmailAddress=None,
                Type='CanonicalUser',
            ),
        ],
    )
def _do_create_object(client, bucket_name, key, i):
body = 'data {i}'.format(i=i)
client.put_object(Bucket=bucket_name, Key=key, Body=body)
def _do_remove_ver(client, bucket_name, key, version_id):
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id)
def _do_create_versioned_obj_concurrent(client, bucket_name, key, num):
    """Kick off `num` writer threads for `key`; caller joins the returned threads."""
    workers = []
    for seq in range(num):
        worker = threading.Thread(target=_do_create_object,
                                  args=(client, bucket_name, key, seq))
        worker.start()
        workers.append(worker)
    return workers
def _do_clear_versioned_bucket_concurrent(client, bucket_name):
    """Spawn one deleter thread per listed version; caller joins the returned threads."""
    workers = []
    listing = client.list_object_versions(Bucket=bucket_name)
    # .get(): a bucket with no versions has no 'Versions' key at all
    for ver in listing.get('Versions', []):
        worker = threading.Thread(target=_do_remove_ver,
                                  args=(client, bucket_name, ver['Key'], ver['VersionId']))
        worker.start()
        workers.append(worker)
    return workers
def _do_wait_completion(t):
for thr in t:
thr.join()
@attr(resource='object')
@attr(method='put')
@attr(operation='concurrent creation of objects, concurrent removal')
@attr(assertion='works')
@attr('versioning')
def test_versioned_concurrent_object_create_concurrent_remove():
    """Repeatedly create versions from parallel threads, then clear them in parallel."""
    bucket = get_new_bucket()
    conn = get_client()
    check_configure_versioning_retry(bucket, "Enabled", "Enabled")
    obj = 'myobj'
    n = 5
    for _round in xrange(5):
        # concurrent writers, then verify exactly n versions landed
        _do_wait_completion(_do_create_versioned_obj_concurrent(conn, bucket, obj, n))
        eq(len(conn.list_object_versions(Bucket=bucket)['Versions']), n)
        # concurrent deleters, then verify the bucket is empty again
        _do_wait_completion(_do_clear_versioned_bucket_concurrent(conn, bucket))
        eq(('Versions' in conn.list_object_versions(Bucket=bucket)), False)
@attr(resource='object')
@attr(method='put')
@attr(operation='concurrent creation and removal of objects')
@attr(assertion='works')
@attr('versioning')
def test_versioned_concurrent_object_create_and_remove():
    """Interleave parallel version creation with parallel removal, then clean up."""
    bucket = get_new_bucket()
    conn = get_client()
    check_configure_versioning_retry(bucket, "Enabled", "Enabled")
    obj = 'myobj'
    n = 3
    batches = []
    for _round in xrange(3):
        # writers and deleters deliberately race each other
        batches.append(_do_create_versioned_obj_concurrent(conn, bucket, obj, n))
        batches.append(_do_clear_versioned_bucket_concurrent(conn, bucket))
    for batch in batches:
        _do_wait_completion(batch)
    # final sweep: whatever survived the races gets removed here
    _do_wait_completion(_do_clear_versioned_bucket_concurrent(conn, bucket))
    eq(('Versions' in conn.list_object_versions(Bucket=bucket)), False)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config')
@attr('lifecycle')
def test_lifecycle_set():
    """A two-rule lifecycle configuration should be accepted with HTTP 200."""
    bucket = get_new_bucket()
    conn = get_client()
    lifecycle = {'Rules': [
        {'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status':'Enabled'},
        {'ID': 'rule2', 'Expiration': {'Days': 2}, 'Prefix': 'test2/', 'Status':'Disabled'},
    ]}
    resp = conn.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get lifecycle config')
@attr('lifecycle')
def test_lifecycle_get():
    """Rules read back via GET must equal the rules that were PUT."""
    bucket = get_new_bucket()
    conn = get_client()
    rules = [
        {'ID': 'test1/', 'Expiration': {'Days': 31}, 'Prefix': 'test1/', 'Status':'Enabled'},
        {'ID': 'test2/', 'Expiration': {'Days': 120}, 'Prefix': 'test2/', 'Status':'Enabled'},
    ]
    conn.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration={'Rules': rules})
    resp = conn.get_bucket_lifecycle_configuration(Bucket=bucket)
    eq(resp['Rules'], rules)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get lifecycle config no id')
@attr('lifecycle')
def test_lifecycle_get_no_id():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'Expiration': {'Days': 31}, 'Prefix': 'test1/', 'Status':'Enabled'},
{'Expiration': {'Days': 120}, 'Prefix': 'test2/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
response = client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
current_lc = response['Rules']
Rule = namedtuple('Rule',['prefix','status','days'])
rules = {'rule1' : Rule('test1/','Enabled',31),
'rule2' : Rule('test2/','Enabled',120)}
for lc_rule in current_lc:
if lc_rule['Prefix'] == rules['rule1'].prefix:
eq(lc_rule['Expiration']['Days'], rules['rule1'].days)
eq(lc_rule['Status'], rules['rule1'].status)
assert 'ID' in lc_rule
elif lc_rule['Prefix'] == rules['rule2'].prefix:
eq(lc_rule['Expiration']['Days'], rules['rule2'].days)
eq(lc_rule['Status'], rules['rule2'].status)
assert 'ID' in lc_rule
else:
# neither of the rules we supplied was returned, something wrong
print "rules not right"
assert False
# The test harness for lifecycle is configured to treat days as 10 second intervals.
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration():
    """Objects under expiring prefixes must disappear on the rule's schedule.

    The harness maps one lifecycle 'day' to 10 seconds, so rule1 (1 day)
    fires around t=10s and rule2 (4 days) around t=40s; keep2/* is not
    covered by any rule and must survive throughout.
    """
    bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
                                        'keep2/bar', 'expire3/foo', 'expire3/bar'])
    client = get_client()
    rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'expire1/', 'Status':'Enabled'},
           {'ID': 'rule2', 'Expiration': {'Days': 4}, 'Prefix': 'expire3/', 'Status':'Enabled'}]
    lifecycle = {'Rules': rules}
    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    response = client.list_objects(Bucket=bucket_name)
    init_objects = response['Contents']
    time.sleep(28)
    response = client.list_objects(Bucket=bucket_name)
    expire1_objects = response['Contents']
    time.sleep(10)
    response = client.list_objects(Bucket=bucket_name)
    keep2_objects = response['Contents']
    time.sleep(20)
    response = client.list_objects(Bucket=bucket_name)
    expire3_objects = response['Contents']
    eq(len(init_objects), 6)
    # was eq(..., 6) for all four listings, i.e. asserting nothing ever
    # expires — contradicting the test's purpose and the identical
    # list-objects-v2 variant; by t=28s rule1 removed the two expire1/* objects
    eq(len(expire1_objects), 4)
    # keep2/* has no rule and must still be present
    eq(len(keep2_objects), 4)
    # by t=58s rule2 removed the two expire3/* objects too
    eq(len(expire3_objects), 2)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration with list-objects-v2')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
@attr('list-objects-v2')
def test_lifecyclev2_expiration():
    """Same schedule as test_lifecycle_expiration, observed via list_objects_v2."""
    bucket = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
                                   'keep2/bar', 'expire3/foo', 'expire3/bar'])
    conn = get_client()
    lifecycle = {'Rules': [
        {'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'expire1/', 'Status':'Enabled'},
        {'ID': 'rule2', 'Expiration': {'Days': 4}, 'Prefix': 'expire3/', 'Status':'Enabled'},
    ]}
    conn.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)

    def _contents():
        return conn.list_objects_v2(Bucket=bucket)['Contents']

    init_objects = _contents()
    time.sleep(28)
    expire1_objects = _contents()
    time.sleep(10)
    keep2_objects = _contents()
    time.sleep(20)
    expire3_objects = _contents()
    # rule1 fires ~t=10s (drops expire1/*), rule2 ~t=40s (drops expire3/*)
    eq(len(init_objects), 6)
    eq(len(expire1_objects), 4)
    eq(len(keep2_objects), 4)
    eq(len(expire3_objects), 2)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration on versining enabled bucket')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration_versioning_enabled():
    """On a versioned bucket, expiration must not erase existing versions."""
    bucket = get_new_bucket()
    conn = get_client()
    check_configure_versioning_retry(bucket, "Enabled", "Enabled")
    create_multiple_versions(conn, bucket, "test1/a", 1)
    conn.delete_object(Bucket=bucket, Key="test1/a")
    lifecycle = {'Rules': [{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status':'Enabled'}]}
    conn.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
    time.sleep(30)
    listing = conn.list_object_versions(Bucket=bucket)
    # the original version and the delete marker both survive the rule
    eq(len(listing['Versions']), 1)
    eq(len(listing['DeleteMarkers']), 1)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='id too long in lifecycle rule')
@attr('lifecycle')
@attr(assertion='fails 400')
def test_lifecycle_id_too_long():
    """A 256-character rule ID exceeds the limit and must be rejected."""
    bucket = get_new_bucket()
    conn = get_client()
    oversized_id = 256*'a'
    lifecycle = {'Rules': [{'ID': oversized_id, 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'Enabled'}]}
    e = assert_raises(ClientError, conn.put_bucket_lifecycle_configuration, Bucket=bucket, LifecycleConfiguration=lifecycle)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='same id')
@attr('lifecycle')
@attr(assertion='fails 400')
def test_lifecycle_same_id():
    """Two rules sharing an ID must be rejected as InvalidArgument."""
    bucket = get_new_bucket()
    conn = get_client()
    lifecycle = {'Rules': [
        {'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status':'Enabled'},
        {'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test2/', 'Status':'Enabled'},
    ]}
    e = assert_raises(ClientError, conn.put_bucket_lifecycle_configuration, Bucket=bucket, LifecycleConfiguration=lifecycle)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='invalid status in lifecycle rule')
@attr('lifecycle')
@attr(assertion='fails 400')
def test_lifecycle_invalid_status():
    """Status must be exactly 'Enabled' or 'Disabled'; anything else is MalformedXML.

    Exercises wrong-case and wholly invalid values. All three sub-cases now
    go through put_bucket_lifecycle_configuration for consistency (the
    'disabled' case previously used the legacy put_bucket_lifecycle alias).
    """
    bucket_name = get_new_bucket()
    client = get_client()
    for bad_status in ('enabled', 'disabled', 'invalid'):
        rules=[{'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status': bad_status}]
        lifecycle = {'Rules': rules}
        e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
        status, error_code = _get_status_and_error_code(e.response)
        eq(status, 400)
        eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with expiration date')
@attr('lifecycle')
def test_lifecycle_set_date():
    """An ISO8601 expiration Date should be accepted with HTTP 200."""
    bucket = get_new_bucket()
    conn = get_client()
    lifecycle = {'Rules': [{'ID': 'rule1', 'Expiration': {'Date': '2017-09-27'}, 'Prefix': 'test1/', 'Status':'Enabled'}]}
    resp = conn.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with not iso8601 date')
@attr('lifecycle')
@attr(assertion='fails 400')
def test_lifecycle_set_invalid_date():
    """A non-ISO8601 Date ('20200101') must be rejected with HTTP 400."""
    bucket = get_new_bucket()
    conn = get_client()
    lifecycle = {'Rules': [{'ID': 'rule1', 'Expiration': {'Date': '20200101'}, 'Prefix': 'test1/', 'Status':'Enabled'}]}
    e = assert_raises(ClientError, conn.put_bucket_lifecycle_configuration, Bucket=bucket, LifecycleConfiguration=lifecycle)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration with date')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration_date():
    """A past-dated rule expires its object promptly; a future-dated one does not."""
    bucket = _create_objects(keys=['past/foo', 'future/bar'])
    conn = get_client()
    lifecycle = {'Rules': [
        {'ID': 'rule1', 'Expiration': {'Date': '2015-01-01'}, 'Prefix': 'past/', 'Status':'Enabled'},
        {'ID': 'rule2', 'Expiration': {'Date': '2030-01-01'}, 'Prefix': 'future/', 'Status':'Enabled'},
    ]}
    conn.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
    init_objects = conn.list_objects(Bucket=bucket)['Contents']
    time.sleep(20)
    expire_objects = conn.list_objects(Bucket=bucket)['Contents']
    # only past/foo (rule1) is gone; future/bar must remain
    eq(len(init_objects), 2)
    eq(len(expire_objects), 1)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration days 0')
@attr('lifecycle')
@attr('lifecycle_expiration')
def test_lifecycle_expiration_days0():
    """A Days=0 rule should expire matching objects almost immediately."""
    bucket_name = _create_objects(keys=['days0/foo', 'days0/bar'])
    client = get_client()
    rules=[{'ID': 'rule1', 'Expiration': {'Days': 0}, 'Prefix': 'days0/',
            'Status':'Enabled'}]
    lifecycle = {'Rules': rules}
    response = client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    time.sleep(20)
    response = client.list_objects(Bucket=bucket_name)
    # when every object has expired, the listing omits the 'Contents' key
    # entirely, so the previous response['Contents'] raised KeyError on
    # the success path and the len == 0 assertion was unreachable
    expire_objects = response.get('Contents', [])
    eq(len(expire_objects), 0)
def setup_lifecycle_expiration(bucket_name, rule_id, delta_days,
                               rule_prefix):
    """Install a Days-based expiration rule and PUT one object under its prefix.

    Returns the PutObject response so callers can inspect the
    x-amz-expiration header.
    """
    # was missing: 'client' was referenced but never defined (NameError)
    client = get_client()
    rules=[{'ID': rule_id,
            'Expiration': {'Days': delta_days}, 'Prefix': rule_prefix,
            'Status':'Enabled'}]
    lifecycle = {'Rules': rules}
    response = client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    # NOTE(review): rule_prefix usually already ends in '/', so this yields
    # a '//' in the key — confirm intended before changing
    key = rule_prefix + '/foo'
    body = 'bar'
    # was Body=bar (undefined name); the intended payload is the 'body' string
    response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    return response
def check_lifecycle_expiration_header(response, start_time, rule_id,
                                      delta_days):
    """Validate the x-amz-expiration header: expiry delta and rule id must match."""
    exp_header = response['ResponseMetadata']['HTTPHeaders']['x-amz-expiration']
    m = re.search(r'expiry-date="(.+)", rule-id="(.+)"', exp_header)
    # header date looks like e.g. 'Mon Sep 01 12:00:00 2031'
    expiry = datetime.datetime.strptime(m.group(1), '%a %b %d %H:%M:%S %Y')
    eq((expiry - start_time).days, delta_days)
    eq(m.group(2), rule_id)
    return True
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration header put')
@attr('lifecycle')
@attr('lifecycle_expiration')
def test_lifecycle_expiration_header_put():
    """
    Check for valid x-amz-expiration header after PUT
    """
    bucket = get_new_bucket()
    client = get_client()
    start = datetime.datetime.now(None)
    resp = setup_lifecycle_expiration(bucket, 'rule1', 1, 'days1/')
    # the PUT response must advertise a 1-day expiry attributed to rule1
    eq(check_lifecycle_expiration_header(resp, start, 'rule1', 1), True)
@attr(resource='bucket')
@attr(method='head')
@attr(operation='test lifecycle expiration header head')
@attr('lifecycle')
@attr('lifecycle_expiration')
def test_lifecycle_expiration_header_head():
    """
    Check for valid x-amz-expiration header on HEAD request
    """
    bucket_name = get_new_bucket()
    client = get_client()
    now = datetime.datetime.now(None)
    response = setup_lifecycle_expiration(
        bucket_name, 'rule1', 1, 'days1/')
    # 'key' was previously undefined here (NameError); rebuild it exactly
    # the way setup_lifecycle_expiration does: rule_prefix + '/foo'
    key = 'days1/' + '/foo'
    # stat the object, check header
    response = client.head_object(Bucket=bucket_name, Key=key)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    eq(check_lifecycle_expiration_header(response, now, 'rule1', 1), True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with noncurrent version expiration')
@attr('lifecycle')
def test_lifecycle_set_noncurrent():
    """Rules using NoncurrentVersionExpiration should be accepted with HTTP 200."""
    bucket = _create_objects(keys=['past/foo', 'future/bar'])
    conn = get_client()
    lifecycle = {'Rules': [
        {'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 2}, 'Prefix': 'past/', 'Status':'Enabled'},
        {'ID': 'rule2', 'NoncurrentVersionExpiration': {'NoncurrentDays': 3}, 'Prefix': 'future/', 'Status':'Enabled'},
    ]}
    resp = conn.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle non-current version expiration')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_noncur_expiration():
    """Noncurrent-version expiration must prune old versions only under its prefix."""
    bucket = get_new_bucket()
    conn = get_client()
    check_configure_versioning_retry(bucket, "Enabled", "Enabled")
    create_multiple_versions(conn, bucket, "test1/a", 3)
    # not checking the object contents on the second run, because the function doesn't support multiple checks
    create_multiple_versions(conn, bucket, "test2/abc", 3, check_versions=False)
    init_versions = conn.list_object_versions(Bucket=bucket)['Versions']
    lifecycle = {'Rules': [{'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 2}, 'Prefix': 'test1/', 'Status':'Enabled'}]}
    conn.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
    time.sleep(50)
    expire_versions = conn.list_object_versions(Bucket=bucket)['Versions']
    # 3 versions per key initially; rule1 drops test1/a's two noncurrent ones
    eq(len(init_versions), 6)
    eq(len(expire_versions), 4)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with delete marker expiration')
@attr('lifecycle')
def test_lifecycle_set_deletemarker():
    """A rule using ExpiredObjectDeleteMarker should be accepted with HTTP 200."""
    bucket = get_new_bucket()
    conn = get_client()
    lifecycle = {'Rules': [{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Prefix': 'test1/', 'Status':'Enabled'}]}
    resp = conn.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with Filter')
@attr('lifecycle')
def test_lifecycle_set_filter():
    """A rule using a Filter (rather than top-level Prefix) should be accepted."""
    bucket = get_new_bucket()
    conn = get_client()
    lifecycle = {'Rules': [{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Filter': {'Prefix': 'foo'}, 'Status':'Enabled'}]}
    resp = conn.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with empty Filter')
@attr('lifecycle')
def test_lifecycle_set_empty_filter():
    """A rule with an empty Filter dict should be accepted with HTTP 200."""
    bucket = get_new_bucket()
    conn = get_client()
    lifecycle = {'Rules': [{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Filter': {}, 'Status':'Enabled'}]}
    resp = conn.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle delete marker expiration')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_deletemarker_expiration():
    """Expired delete markers under the rule's prefix must be cleaned up."""
    bucket = get_new_bucket()
    conn = get_client()
    check_configure_versioning_retry(bucket, "Enabled", "Enabled")
    create_multiple_versions(conn, bucket, "test1/a", 1)
    create_multiple_versions(conn, bucket, "test2/abc", 1, check_versions=False)
    # lay down a delete marker on top of both keys
    conn.delete_object(Bucket=bucket, Key="test1/a")
    conn.delete_object(Bucket=bucket, Key="test2/abc")
    listing = conn.list_object_versions(Bucket=bucket)
    total_init_versions = listing['Versions'] + listing['DeleteMarkers']
    lifecycle = {'Rules': [{'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 1}, 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Prefix': 'test1/', 'Status':'Enabled'}]}
    conn.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
    time.sleep(50)
    listing = conn.list_object_versions(Bucket=bucket)
    total_expire_versions = listing['Versions'] + listing['DeleteMarkers']
    # rule1 removes test1/a's version and its marker; test2/* is untouched
    eq(len(total_init_versions), 4)
    eq(len(total_expire_versions), 2)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with multipart expiration')
@attr('lifecycle')
def test_lifecycle_set_multipart():
    """Rules with AbortIncompleteMultipartUpload should be accepted with HTTP 200."""
    bucket = get_new_bucket()
    conn = get_client()
    lifecycle = {'Rules': [
        {'ID': 'rule1', 'Prefix': 'test1/', 'Status': 'Enabled',
         'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 2}},
        {'ID': 'rule2', 'Prefix': 'test2/', 'Status': 'Disabled',
         'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 3}},
    ]}
    resp = conn.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle multipart expiration')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_multipart_expiration():
    """Incomplete multipart uploads under the rule's prefix must be aborted on schedule."""
    bucket = get_new_bucket()
    conn = get_client()
    upload_ids = []
    # one dangling (never completed) upload per key
    for obj in ['test1/a', 'test2/']:
        resp = conn.create_multipart_upload(Bucket=bucket, Key=obj)
        upload_ids.append(resp['UploadId'])
    init_uploads = conn.list_multipart_uploads(Bucket=bucket)['Uploads']
    lifecycle = {'Rules': [
        {'ID': 'rule1', 'Prefix': 'test1/', 'Status': 'Enabled',
         'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 2}},
    ]}
    conn.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
    time.sleep(50)
    expired_uploads = conn.list_multipart_uploads(Bucket=bucket)['Uploads']
    # only the upload under test1/ is covered by rule1
    eq(len(init_uploads), 2)
    eq(len(expired_uploads), 1)
def _test_encryption_sse_customer_write(file_size):
    """Round-trip `file_size` bytes through SSE-C.

    Uploads a payload with customer-key headers injected into the PUT,
    downloads it with the same headers on the GET, and verifies the body
    comes back intact.
    """
    bucket = get_new_bucket()
    conn = get_client()
    obj = 'testobj'
    payload = 'A'*file_size
    sse_client_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
    }
    # inject the SSE-C headers into both the upload and the download
    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
    conn.meta.events.register('before-call.s3.PutObject', add_headers)
    conn.put_object(Bucket=bucket, Key=obj, Body=payload)
    conn.meta.events.register('before-call.s3.GetObject', add_headers)
    resp = conn.get_object(Bucket=bucket, Key=obj)
    eq(_get_body(resp), payload)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-C encrypted transfer 1 byte')
@attr(assertion='success')
@attr('encryption')
def test_encrypted_transfer_1b():
    """Round-trip a 1-byte object through SSE-C."""
    file_size = 1
    _test_encryption_sse_customer_write(file_size)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-C encrypted transfer 1KB')
@attr(assertion='success')
@attr('encryption')
def test_encrypted_transfer_1kb():
    """Round-trip a 1 KiB object through SSE-C."""
    file_size = 1024
    _test_encryption_sse_customer_write(file_size)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-C encrypted transfer 1MB')
@attr(assertion='success')
@attr('encryption')
def test_encrypted_transfer_1MB():
    """Round-trip a 1 MiB object through SSE-C."""
    file_size = 1024*1024
    _test_encryption_sse_customer_write(file_size)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-C encrypted transfer 13 bytes')
@attr(assertion='success')
@attr('encryption')
def test_encrypted_transfer_13b():
    """Round-trip a 13-byte (non-power-of-two) object through SSE-C."""
    file_size = 13
    _test_encryption_sse_customer_write(file_size)
@attr(assertion='success')
@attr('encryption')
def test_encryption_sse_c_method_head():
    """
    HEAD on an SSE-C object fails (400) without the SSE-C headers and
    succeeds (200) once the same key material is supplied.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    data = 'A'*1000
    key = 'testobj'
    sse_client_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
    }
    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
    client.meta.events.register('before-call.s3.PutObject', lf)
    client.put_object(Bucket=bucket_name, Key=key, Body=data)
    # No handler is registered for HeadObject yet, so this HEAD carries no
    # SSE-C headers and must be rejected.  Order matters: registrations persist.
    e = assert_raises(ClientError, client.head_object, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    # Now inject the key material on HeadObject; HEAD must succeed.
    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
    client.meta.events.register('before-call.s3.HeadObject', lf)
    response = client.head_object(Bucket=bucket_name, Key=key)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='put')
@attr(operation='write encrypted with SSE-C and read without SSE-C')
@attr(assertion='operation fails')
@attr('encryption')
def test_encryption_sse_c_present():
    """Reading an SSE-C object without supplying the key must fail with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    data = 'A'*1000
    key = 'testobj'
    sse_client_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
    }
    # SSE-C headers are only injected on PutObject, not on GetObject.
    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
    client.meta.events.register('before-call.s3.PutObject', lf)
    client.put_object(Bucket=bucket_name, Key=key, Body=data)
    # GET without key material: server cannot decrypt the object.
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='write encrypted with SSE-C but read with other key')
@attr(assertion='operation fails')
@attr('encryption')
def test_encryption_sse_c_other_key():
    """Reading an SSE-C object with a different (valid) key must fail with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    data = 'A'*100
    key = 'testobj'
    # Key A: used for the write.
    sse_client_headers_A = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
    }
    # Key B: a well-formed but different key, used for the read.
    sse_client_headers_B = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
        'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
    }
    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers_A))
    client.meta.events.register('before-call.s3.PutObject', lf)
    client.put_object(Bucket=bucket_name, Key=key, Body=data)
    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers_B))
    client.meta.events.register('before-call.s3.GetObject', lf)
    # Wrong key on read -> rejected.
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='write encrypted with SSE-C, but md5 is bad')
@attr(assertion='operation fails')
@attr('encryption')
def test_encryption_sse_c_invalid_md5():
    """A PUT whose key-MD5 header does not match the key must fail with 400."""
    bucket_name = get_new_bucket()
    client = get_client()
    data = 'A'*100
    key = 'testobj'
    sse_client_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        # Deliberately wrong MD5 for the key above.
        'x-amz-server-side-encryption-customer-key-md5': 'AAAAAAAAAAAAAAAAAAAAAA=='
    }
    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
    client.meta.events.register('before-call.s3.PutObject', lf)
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='write encrypted with SSE-C, but dont provide MD5')
@attr(assertion='operation fails')
@attr('encryption')
def test_encryption_sse_c_no_md5():
    """
    A PUT that supplies an SSE-C key without the mandatory key-MD5 header
    must be rejected with a ClientError.

    Unlike the invalid-MD5 sibling, no specific HTTP status is pinned here.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    data = 'A'*100
    key = 'testobj'
    # Key present, companion ...-customer-key-md5 header deliberately omitted.
    sse_client_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
    }
    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
    client.meta.events.register('before-call.s3.PutObject', lf)
    # Fix: the exception was previously bound to an unused local `e`.
    assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
@attr(resource='object')
@attr(method='put')
@attr(operation='declare SSE-C but do not provide key')
@attr(assertion='operation fails')
@attr('encryption')
def test_encryption_sse_c_no_key():
    """
    A PUT that declares the SSE-C algorithm but supplies no customer key
    must be rejected with a ClientError (no specific status pinned).
    """
    bucket_name = get_new_bucket()
    client = get_client()
    data = 'A'*100
    key = 'testobj'
    # Algorithm declared; key and key-MD5 headers deliberately omitted.
    sse_client_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
    }
    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
    client.meta.events.register('before-call.s3.PutObject', lf)
    # Fix: the exception was previously bound to an unused local `e`.
    assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
@attr(resource='object')
@attr(method='put')
@attr(operation='Do not declare SSE-C but provide key and MD5')
@attr(assertion='operation successfull, no encryption')
@attr('encryption')
def test_encryption_key_no_sse_c():
    """
    PUT with SSE-C key/MD5 headers but without the algorithm header.

    NOTE(review): the @attr assertion text says "successfull, no
    encryption", but the code expects the request to be rejected with
    400 — the attr text appears stale; the asserted behavior is the 400.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    data = 'A'*100
    key = 'testobj'
    # Key and MD5 supplied; ...-customer-algorithm deliberately omitted.
    sse_client_headers = {
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
    }
    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
    client.meta.events.register('before-call.s3.PutObject', lf)
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
def _multipart_upload_enc(client, bucket_name, key, size, part_size, init_headers, part_headers, metadata, resend_parts):
    """
    Perform a multipart upload of `size` random bytes in `part_size` chunks,
    injecting `init_headers` on CreateMultipartUpload and `part_headers` on
    every UploadPart via botocore event handlers.

    :param client: boto3 client to use; a fresh one is created when None
    :param metadata: optional object metadata dict (omitted when None)
    :param resend_parts: container of 0-based part indices to upload twice
    :returns: (upload_id, full payload string, parts list suitable for
               CompleteMultipartUpload)
    """
    if client is None:  # fix: identity comparison, not == None
        client = get_client()
    # The lambda closes over the headers dict by reference (late binding).
    lf = (lambda **kwargs: kwargs['params']['headers'].update(init_headers))
    client.meta.events.register('before-call.s3.CreateMultipartUpload', lf)
    if metadata is None:  # fix: identity comparison, not == None
        response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
    else:
        response = client.create_multipart_upload(Bucket=bucket_name, Key=key, Metadata=metadata)
    upload_id = response['UploadId']
    s = ''
    parts = []
    for i, part in enumerate(generate_random(size, part_size)):
        # part_num is necessary because PartNumber for upload_part and in parts must start at 1 and i starts at 0
        part_num = i+1
        s += part
        # NOTE: a new (identical) handler is registered every iteration;
        # handlers accumulate on the client but the update() is idempotent.
        lf = (lambda **kwargs: kwargs['params']['headers'].update(part_headers))
        client.meta.events.register('before-call.s3.UploadPart', lf)
        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num})
        if i in resend_parts:
            # Re-send the same part to exercise part-overwrite paths.
            lf = (lambda **kwargs: kwargs['params']['headers'].update(part_headers))
            client.meta.events.register('before-call.s3.UploadPart', lf)
            client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
    return (upload_id, s, parts)
def _check_content_using_range_enc(client, bucket_name, key, data, step, enc_headers=None):
    """
    Read the object back in `step`-byte ranged GETs, injecting `enc_headers`
    (e.g. SSE-C key material) on each request, and verify every range
    matches the corresponding slice of `data`.
    """
    response = client.get_object(Bucket=bucket_name, Key=key)
    size = response['ContentLength']
    for ofs in xrange(0, size, step):
        # Clamp the final (possibly short) range.
        toread = size - ofs
        if toread > step:
            toread = step
        end = ofs + toread - 1
        # Fix: the documented default enc_headers=None previously crashed
        # inside the handler (dict.update(None) raises TypeError).
        if enc_headers is not None:
            lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
            client.meta.events.register('before-call.s3.GetObject', lf)
        r = 'bytes={s}-{e}'.format(s=ofs, e=end)
        response = client.get_object(Bucket=bucket_name, Key=key, Range=r)
        read_range = response['ContentLength']
        body = _get_body(response)
        eq(read_range, toread)
        eq(body, data[ofs:end+1])
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multi-part upload')
@attr(assertion='successful')
@attr('encryption')
@attr('fails_on_aws') # allow-unordered is a non-standard extension
def test_encryption_sse_c_multipart_upload():
    """
    Multipart-upload a 30 MiB SSE-C object (same key on every phase), then
    verify rgw bucket accounting headers, metadata, content type, the full
    body, and ranged reads.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    key = "multipart_enc"
    content_type = 'text/plain'
    objlen = 30 * 1024 * 1024
    metadata = {'foo': 'bar'}
    enc_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
        'Content-Type': content_type
    }
    resend_parts = []
    (upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
            part_size=5*1024*1024, init_headers=enc_headers, part_headers=enc_headers, metadata=metadata, resend_parts=resend_parts)
    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
    client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
    # rgw-specific accounting headers: one object, exactly objlen bytes.
    response = client.head_bucket(Bucket=bucket_name)
    rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-object-count'])
    eq(rgw_object_count, 1)
    rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-bytes-used'])
    eq(rgw_bytes_used, objlen)
    # SSE-C reads require the same key material on GetObject.
    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
    client.meta.events.register('before-call.s3.GetObject', lf)
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(response['Metadata'], metadata)
    eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], content_type)
    body = _get_body(response)
    eq(body, data)
    size = response['ContentLength']
    eq(len(body), size)
    # Ranged reads at two different granularities.
    _check_content_using_range_enc(client, bucket_name, key, data, 1000000, enc_headers=enc_headers)
    _check_content_using_range_enc(client, bucket_name, key, data, 10000000, enc_headers=enc_headers)
@attr(resource='object')
@attr(method='put')
@attr(operation='multipart upload with bad key for uploading chunks')
@attr(assertion='successful')
@attr('encryption')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_encryption_sse_c_multipart_invalid_chunks_1():
    """
    Initiating an SSE-C multipart upload with one key and uploading parts
    with a different key must fail with 400.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    key = "multipart_enc"
    content_type = 'text/plain'
    objlen = 30 * 1024 * 1024
    metadata = {'foo': 'bar'}
    # Key used on CreateMultipartUpload.
    init_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
        'Content-Type': content_type
    }
    # A different (valid) key used on UploadPart -> mismatch.
    part_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
        'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
    }
    resend_parts = []
    e = assert_raises(ClientError, _multipart_upload_enc, client=client, bucket_name=bucket_name,
            key=key, size=objlen, part_size=5*1024*1024, init_headers=init_headers, part_headers=part_headers, metadata=metadata, resend_parts=resend_parts)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='multipart upload with bad md5 for chunks')
@attr(assertion='successful')
@attr('encryption')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_encryption_sse_c_multipart_invalid_chunks_2():
    """
    Uploading parts of an SSE-C multipart upload with the right key but a
    wrong key-MD5 header must fail with 400.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    key = "multipart_enc"
    content_type = 'text/plain'
    objlen = 30 * 1024 * 1024
    metadata = {'foo': 'bar'}
    init_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
        'Content-Type': content_type
    }
    # Same key as init, but a deliberately bogus MD5.
    part_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'AAAAAAAAAAAAAAAAAAAAAA=='
    }
    resend_parts = []
    e = assert_raises(ClientError, _multipart_upload_enc, client=client, bucket_name=bucket_name,
            key=key, size=objlen, part_size=5*1024*1024, init_headers=init_headers, part_headers=part_headers, metadata=metadata, resend_parts=resend_parts)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multi-part upload and download with bad key')
@attr(assertion='successful')
@attr('encryption')
def test_encryption_sse_c_multipart_bad_download():
    """
    Complete an SSE-C multipart upload with one key; reading with the same
    key succeeds, reading with a different key fails with 400.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    key = "multipart_enc"
    content_type = 'text/plain'
    objlen = 30 * 1024 * 1024
    metadata = {'foo': 'bar'}
    # Key used for the whole upload.
    put_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
        'Content-Type': content_type
    }
    # A different (valid) key used for the failing download.
    get_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
        'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
    }
    resend_parts = []
    (upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
            part_size=5*1024*1024, init_headers=put_headers, part_headers=put_headers, metadata=metadata, resend_parts=resend_parts)
    lf = (lambda **kwargs: kwargs['params']['headers'].update(put_headers))
    client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
    # rgw-specific accounting headers.
    response = client.head_bucket(Bucket=bucket_name)
    rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-object-count'])
    eq(rgw_object_count, 1)
    rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-bytes-used'])
    eq(rgw_bytes_used, objlen)
    # Read with the correct key: succeeds.
    lf = (lambda **kwargs: kwargs['params']['headers'].update(put_headers))
    client.meta.events.register('before-call.s3.GetObject', lf)
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(response['Metadata'], metadata)
    eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], content_type)
    # Read with the wrong key (this second handler runs after the first and
    # overwrites the headers): must be rejected.
    lf = (lambda **kwargs: kwargs['params']['headers'].update(get_headers))
    client.meta.events.register('before-call.s3.GetObject', lf)
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
@attr('encryption')
def test_encryption_sse_c_post_object_authenticated_request():
    """
    Browser-style authenticated POST upload carrying SSE-C fields in the
    signed policy; the object is then read back with the same SSE-C key.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    utc = pytz.utc
    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
    # Policy must whitelist every form field the POST sends, including the
    # three SSE-C fields.
    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
    "conditions": [\
    {"bucket": bucket_name},\
    ["starts-with", "$key", "foo"],\
    {"acl": "private"},\
    ["starts-with", "$Content-Type", "text/plain"],\
    ["starts-with", "$x-amz-server-side-encryption-customer-algorithm", ""], \
    ["starts-with", "$x-amz-server-side-encryption-customer-key", ""], \
    ["starts-with", "$x-amz-server-side-encryption-customer-key-md5", ""], \
    ["content-length-range", 0, 1024]\
    ]\
    }
    # Sign the base64 policy with the account's secret key (AWS v2 style).
    json_policy_document = json.JSONEncoder().encode(policy_document)
    policy = base64.b64encode(json_policy_document)
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())
    # Field order matters for multipart/form-data POST: 'file' must be last.
    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
    ("acl" , "private"),("signature" , signature),("policy" , policy),\
    ("Content-Type" , "text/plain"),
    ('x-amz-server-side-encryption-customer-algorithm', 'AES256'), \
    ('x-amz-server-side-encryption-customer-key', 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs='), \
    ('x-amz-server-side-encryption-customer-key-md5', 'DWygnHRtgiJ77HCm+1rvHw=='), \
    ('file', ('bar'))])
    r = requests.post(url, files = payload)
    eq(r.status_code, 204)
    # Reading back requires the same SSE-C key material.
    get_headers = {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
    }
    lf = (lambda **kwargs: kwargs['params']['headers'].update(get_headers))
    client.meta.events.register('before-call.s3.GetObject', lf)
    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
    body = _get_body(response)
    eq(body, 'bar')
def _test_sse_kms_customer_write(file_size, key_id = 'testkey-1'):
    """
    Upload a body of `file_size` A's with SSE-KMS headers using `key_id`,
    read it back (SSE-KMS reads need no extra headers), and confirm the
    body round-trips unchanged.

    Fix: dropped the stray @attr test decorators that had been copy-pasted
    onto this private helper; it is not a test and should not carry them.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    sse_kms_client_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        'x-amz-server-side-encryption-aws-kms-key-id': key_id
    }
    data = 'A'*file_size
    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
    client.meta.events.register('before-call.s3.PutObject', lf)
    client.put_object(Bucket=bucket_name, Key='testobj', Body=data)
    response = client.get_object(Bucket=bucket_name, Key='testobj')
    body = _get_body(response)
    eq(body, data)
@attr(resource='object')
@attr(method='head')
@attr(operation='Test SSE-KMS encrypted does perform head properly')
@attr(assertion='success')
@attr('encryption')
def test_sse_kms_method_head():
    """
    HEAD on an SSE-KMS object succeeds without extra headers and reports
    the encryption mode and key id; sending the SSE-KMS request headers on
    HEAD is rejected with 400.
    """
    kms_keyid = get_main_kms_keyid()
    bucket_name = get_new_bucket()
    client = get_client()
    sse_kms_client_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid
    }
    data = 'A'*1000
    key = 'testobj'
    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
    client.meta.events.register('before-call.s3.PutObject', lf)
    client.put_object(Bucket=bucket_name, Key=key, Body=data)
    # Plain HEAD: succeeds and echoes the SSE-KMS response headers.
    response = client.head_object(Bucket=bucket_name, Key=key)
    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'aws:kms')
    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'], kms_keyid)
    # HEAD carrying the SSE-KMS *request* headers: rejected.
    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
    client.meta.events.register('before-call.s3.HeadObject', lf)
    e = assert_raises(ClientError, client.head_object, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='write encrypted with SSE-KMS and read without SSE-KMS')
@attr(assertion='operation success')
@attr('encryption')
def test_sse_kms_present():
    """
    Unlike SSE-C, an SSE-KMS object can be read back with a plain GET —
    the gateway fetches the key itself.
    """
    kms_keyid = get_main_kms_keyid()
    bucket_name = get_new_bucket()
    client = get_client()
    sse_kms_client_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid
    }
    data = 'A'*100
    key = 'testobj'
    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
    client.meta.events.register('before-call.s3.PutObject', lf)
    client.put_object(Bucket=bucket_name, Key=key, Body=data)
    # No handler registered for GetObject: plain read must succeed.
    response = client.get_object(Bucket=bucket_name, Key=key)
    body = _get_body(response)
    eq(body, data)
@attr(resource='object')
@attr(method='put')
@attr(operation='declare SSE-KMS but do not provide key_id')
@attr(assertion='operation fails')
@attr('encryption')
def test_sse_kms_no_key():
    """
    A PUT that declares aws:kms encryption without a kms-key-id must be
    rejected with a ClientError (no specific status pinned).
    """
    bucket_name = get_new_bucket()
    client = get_client()
    # Encryption declared; ...-aws-kms-key-id deliberately omitted.
    sse_kms_client_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
    }
    data = 'A'*100
    key = 'testobj'
    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
    client.meta.events.register('before-call.s3.PutObject', lf)
    # Fix: the exception was previously bound to an unused local `e`.
    assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
@attr(resource='object')
@attr(method='put')
@attr(operation='Do not declare SSE-KMS but provide key_id')
@attr(assertion='operation successfull, no encryption')
@attr('encryption')
def test_sse_kms_not_declared():
    """
    PUT with a kms-key-id header but no x-amz-server-side-encryption.

    NOTE(review): the @attr assertion text says "successfull, no
    encryption", but the code expects a 400 rejection — the attr text
    appears stale; the asserted behavior is the 400.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    # Key id supplied; encryption mode header deliberately omitted.
    sse_kms_client_headers = {
        'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-2'
    }
    data = 'A'*100
    key = 'testobj'
    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
    client.meta.events.register('before-call.s3.PutObject', lf)
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='complete KMS multi-part upload')
@attr(assertion='successful')
@attr('encryption')
def test_sse_kms_multipart_upload():
    """
    Multipart-upload a 30 MiB object with SSE-KMS headers on every phase,
    then verify rgw bucket accounting, metadata, content type, the full
    body, and ranged reads (SSE-KMS reads need no extra headers).
    """
    kms_keyid = get_main_kms_keyid()
    bucket_name = get_new_bucket()
    client = get_client()
    key = "multipart_enc"
    content_type = 'text/plain'
    objlen = 30 * 1024 * 1024
    metadata = {'foo': 'bar'}
    enc_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
        'Content-Type': content_type
    }
    resend_parts = []
    (upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
            part_size=5*1024*1024, init_headers=enc_headers, part_headers=enc_headers, metadata=metadata, resend_parts=resend_parts)
    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
    client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
    # rgw-specific accounting headers: one object, exactly objlen bytes.
    response = client.head_bucket(Bucket=bucket_name)
    rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-object-count'])
    eq(rgw_object_count, 1)
    rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-bytes-used'])
    eq(rgw_bytes_used, objlen)
    # Fix: a handler closing over the undefined name `part_headers` was
    # registered on UploadPart here.  No further parts are uploaded in this
    # test, so it could only ever fire as a NameError; it has been removed.
    # SSE-KMS reads require no extra headers.
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(response['Metadata'], metadata)
    eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], content_type)
    body = _get_body(response)
    eq(body, data)
    size = response['ContentLength']
    eq(len(body), size)
    _check_content_using_range(key, bucket_name, data, 1000000)
    _check_content_using_range(key, bucket_name, data, 10000000)
@attr(resource='object')
@attr(method='put')
@attr(operation='multipart KMS upload with bad key_id for uploading chunks')
@attr(assertion='successful')
@attr('encryption')
def test_sse_kms_multipart_invalid_chunks_1():
    """
    Initiate an SSE-KMS multipart upload with one key id and upload parts
    with a different key id.

    NOTE(review): no exception or status is asserted — per the @attr
    assertion the mismatched upload is expected to simply complete;
    confirm that is the intended contract.
    """
    kms_keyid = get_main_kms_keyid()
    kms_keyid2 = get_secondary_kms_keyid()
    bucket_name = get_new_bucket()
    client = get_client()
    key = "multipart_enc"
    content_type = 'text/bla'
    objlen = 30 * 1024 * 1024
    metadata = {'foo': 'bar'}
    init_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
        'Content-Type': content_type
    }
    # Different key id for the parts.
    part_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid2
    }
    resend_parts = []
    _multipart_upload_enc(client, bucket_name, key, objlen, part_size=5*1024*1024,
            init_headers=init_headers, part_headers=part_headers, metadata=metadata,
            resend_parts=resend_parts)
@attr(resource='object')
@attr(method='put')
@attr(operation='multipart KMS upload with unexistent key_id for chunks')
@attr(assertion='successful')
@attr('encryption')
def test_sse_kms_multipart_invalid_chunks_2():
    """
    Initiate an SSE-KMS multipart upload with a real key id and upload
    parts naming a nonexistent key id.

    NOTE(review): as in invalid_chunks_1, no exception is asserted — the
    upload is expected to complete; confirm that is the intended contract.
    """
    kms_keyid = get_main_kms_keyid()
    bucket_name = get_new_bucket()
    client = get_client()
    key = "multipart_enc"
    content_type = 'text/plain'
    objlen = 30 * 1024 * 1024
    metadata = {'foo': 'bar'}
    init_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
        'Content-Type': content_type
    }
    # Key id that does not exist in KMS.
    part_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-not-present'
    }
    resend_parts = []
    _multipart_upload_enc(client, bucket_name, key, objlen, part_size=5*1024*1024,
            init_headers=init_headers, part_headers=part_headers, metadata=metadata,
            resend_parts=resend_parts)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated KMS browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
@attr('encryption')
def test_sse_kms_post_object_authenticated_request():
    """
    Browser-style authenticated POST upload carrying SSE-KMS fields in the
    signed policy; the object is then read back with a plain GET.
    """
    kms_keyid = get_main_kms_keyid()
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    utc = pytz.utc
    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
    # Policy must whitelist every form field the POST sends, including the
    # two SSE-KMS fields.
    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
    "conditions": [\
    {"bucket": bucket_name},\
    ["starts-with", "$key", "foo"],\
    {"acl": "private"},\
    ["starts-with", "$Content-Type", "text/plain"],\
    ["starts-with", "$x-amz-server-side-encryption", ""], \
    ["starts-with", "$x-amz-server-side-encryption-aws-kms-key-id", ""], \
    ["content-length-range", 0, 1024]\
    ]\
    }
    # Sign the base64 policy with the account's secret key (AWS v2 style).
    json_policy_document = json.JSONEncoder().encode(policy_document)
    policy = base64.b64encode(json_policy_document)
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())
    # Field order matters for multipart/form-data POST: 'file' must be last.
    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
    ("acl" , "private"),("signature" , signature),("policy" , policy),\
    ("Content-Type" , "text/plain"),
    ('x-amz-server-side-encryption', 'aws:kms'), \
    ('x-amz-server-side-encryption-aws-kms-key-id', kms_keyid), \
    ('file', ('bar'))])
    r = requests.post(url, files = payload)
    eq(r.status_code, 204)
    # SSE-KMS reads need no extra headers.
    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
    body = _get_body(response)
    eq(body, 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-KMS encrypted transfer 1 byte')
@attr(assertion='success')
@attr('encryption')
def test_sse_kms_transfer_1b():
    """Round-trip a 1-byte object through SSE-KMS; skip when no key is configured."""
    keyid = get_main_kms_keyid()
    if keyid is None:
        raise SkipTest
    _test_sse_kms_customer_write(1, key_id=keyid)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-KMS encrypted transfer 1KB')
@attr(assertion='success')
@attr('encryption')
def test_sse_kms_transfer_1kb():
    """Round-trip a 1 KiB object through SSE-KMS; skip when no key is configured."""
    keyid = get_main_kms_keyid()
    if keyid is None:
        raise SkipTest
    _test_sse_kms_customer_write(1024, key_id=keyid)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-KMS encrypted transfer 1MB')
@attr(assertion='success')
@attr('encryption')
def test_sse_kms_transfer_1MB():
    """Round-trip a 1 MiB object through SSE-KMS; skip when no key is configured."""
    keyid = get_main_kms_keyid()
    if keyid is None:
        raise SkipTest
    _test_sse_kms_customer_write(1024*1024, key_id=keyid)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-KMS encrypted transfer 13 bytes')
@attr(assertion='success')
@attr('encryption')
def test_sse_kms_transfer_13b():
    """Round-trip a 13-byte object through SSE-KMS; skip when no key is configured."""
    keyid = get_main_kms_keyid()
    if keyid is None:
        raise SkipTest
    _test_sse_kms_customer_write(13, key_id=keyid)
@attr(resource='object')
@attr(method='get')
@attr(operation='write encrypted with SSE-KMS and read with SSE-KMS')
@attr(assertion='operation fails')
@attr('encryption')
def test_sse_kms_read_declare():
    """
    Sending SSE-KMS *request* headers on a GET is invalid and must be
    rejected with 400 (the object here is written unencrypted).
    """
    bucket_name = get_new_bucket()
    client = get_client()
    sse_kms_client_headers = {
        'x-amz-server-side-encryption': 'aws:kms',
        'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-1'
    }
    data = 'A'*100
    key = 'testobj'
    # Plain, unencrypted write.
    client.put_object(Bucket=bucket_name, Key=key, Body=data)
    # GET carrying SSE-KMS request headers: rejected.
    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
    client.meta.events.register('before-call.s3.GetObject', lf)
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy')
@attr(assertion='succeeds')
@attr('bucket-policy')
def test_bucket_policy():
    """A wildcard-principal Allow policy on s3:ListBucket lets another user list the bucket."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='asdf', Body='asdf')
    bucket_arn = "arn:aws:s3:::" + bucket_name
    policy_document = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {"AWS": "*"},
            "Action": "s3:ListBucket",
            "Resource": [bucket_arn, bucket_arn + "/*"],
        }],
    })
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
    # A different account must now be able to list the bucket.
    alt_client = get_alt_client()
    listing = alt_client.list_objects(Bucket=bucket_name)
    eq(len(listing['Contents']), 1)
@attr('bucket-policy')
@attr('list-objects-v2')
def test_bucketv2_policy():
    """Same as test_bucket_policy, exercised through ListObjectsV2."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='asdf', Body='asdf')
    bucket_arn = "arn:aws:s3:::" + bucket_name
    policy_document = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {"AWS": "*"},
            "Action": "s3:ListBucket",
            "Resource": [bucket_arn, bucket_arn + "/*"],
        }],
    })
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
    # A different account must now be able to list the bucket (v2 API).
    alt_client = get_alt_client()
    listing = alt_client.list_objects_v2(Bucket=bucket_name)
    eq(len(listing['Contents']), 1)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy and ACL')
@attr(assertion='fails')
@attr('bucket-policy')
def test_bucket_policy_acl():
    """An explicit Deny policy overrides an authenticated-read ACL: the alt user gets AccessDenied."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='asdf', Body='asdf')
    bucket_arn = "arn:aws:s3:::" + bucket_name
    policy_document = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Deny",
            "Principal": {"AWS": "*"},
            "Action": "s3:ListBucket",
            "Resource": [bucket_arn, bucket_arn + "/*"],
        }],
    })
    # ACL would allow the listing; the Deny policy must win.
    client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
    alt_client = get_alt_client()
    e = assert_raises(ClientError, alt_client.list_objects, Bucket=bucket_name)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
    eq(error_code, 'AccessDenied')
    # Undo policy/ACL so teardown can clean up the bucket.
    client.delete_bucket_policy(Bucket=bucket_name)
    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy and ACL with list-objects-v2')
@attr(assertion='fails')
@attr('bucket-policy')
@attr('list-objects-v2')
def test_bucketv2_policy_acl():
    """An explicit Deny policy overrides a permissive ACL for ListObjectsV2."""
    bucket_name = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='asdf', Body='asdf')

    bucket_arn = "arn:aws:s3:::" + bucket_name
    policy_document = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Deny",
            "Principal": {"AWS": "*"},
            "Action": "s3:ListBucket",
            "Resource": [bucket_arn, bucket_arn + "/*"]
        }]
    })

    # Even with an ACL that allows authenticated reads...
    client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    # ...the Deny policy must win for the alternate user.
    alt_client = get_alt_client()
    e = assert_raises(ClientError, alt_client.list_objects_v2, Bucket=bucket_name)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
    eq(error_code, 'AccessDenied')

    # Restore access so cleanup of the bucket is not blocked by the Deny.
    client.delete_bucket_policy(Bucket=bucket_name)
    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy for a user belonging to a different tenant')
@attr(assertion='succeeds')
@attr('bucket-policy')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_policy_different_tenant():
    """Grant s3:ListBucket to everyone via bucket policy, then list the
    bucket as a user belonging to a different tenant.

    The tenanted request addresses the bucket as ':<bucket>' (empty tenant
    prefix) by rewriting the request URL and signing context in a
    before-call event handler.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    key = 'asdf'
    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')

    # ARNs use a wildcard in the account/tenant field so the policy matches
    # requests arriving from any tenant.
    resource1 = "arn:aws:s3::*:" + bucket_name
    resource2 = "arn:aws:s3::*:" + bucket_name + "/*"
    policy_document = json.dumps(
    {
        "Version": "2012-10-17",
        "Statement": [{
        "Effect": "Allow",
        "Principal": {"AWS": "*"},
        "Action": "s3:ListBucket",
        "Resource": [
            "{}".format(resource1),
            "{}".format(resource2)
        ]
        }]
    })

    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    # TODO: figure out how to change the bucketname
    def change_bucket_name(**kwargs):
        # Rewrite the outgoing request so it targets ':<bucket>'.
        # NOTE(review): the endpoint is hard-coded — presumably only valid
        # against a local RGW on port 8000; confirm before relying on it.
        kwargs['params']['url'] = "http://localhost:8000/:{bucket_name}?encoding-type=url".format(bucket_name=bucket_name)
        kwargs['params']['url_path'] = "/:{bucket_name}".format(bucket_name=bucket_name)
        kwargs['params']['context']['signing']['bucket'] = ":{bucket_name}".format(bucket_name=bucket_name)
        # Debug output deliberately left in while the test carries fails_on_rgw.
        print kwargs['request_signer']
        print kwargs

    #bucket_name = ":" + bucket_name
    tenant_client = get_tenant_client()
    tenant_client.meta.events.register('before-call.s3.ListObjects', change_bucket_name)
    response = tenant_client.list_objects(Bucket=bucket_name)
    #alt_client = get_alt_client()
    #response = alt_client.list_objects(Bucket=bucket_name)

    eq(len(response['Contents']), 1)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy for a user belonging to a different tenant')
@attr(assertion='succeeds')
@attr('bucket-policy')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
@attr('list-objects-v2')
def test_bucketv2_policy_different_tenant():
    """Grant s3:ListBucket to everyone via bucket policy, then ListObjectsV2
    the bucket as a user belonging to a different tenant.

    The tenanted request addresses the bucket as ':<bucket>' (empty tenant
    prefix) by rewriting the request URL and signing context in a
    before-call event handler.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    key = 'asdf'
    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')

    # ARNs use a wildcard in the account/tenant field so the policy matches
    # requests arriving from any tenant.
    resource1 = "arn:aws:s3::*:" + bucket_name
    resource2 = "arn:aws:s3::*:" + bucket_name + "/*"
    policy_document = json.dumps(
    {
        "Version": "2012-10-17",
        "Statement": [{
        "Effect": "Allow",
        "Principal": {"AWS": "*"},
        "Action": "s3:ListBucket",
        "Resource": [
            "{}".format(resource1),
            "{}".format(resource2)
        ]
        }]
    })

    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    # TODO: figure out how to change the bucketname
    def change_bucket_name(**kwargs):
        # Rewrite the outgoing request so it targets ':<bucket>'.
        # NOTE(review): the endpoint is hard-coded — presumably only valid
        # against a local RGW on port 8000; confirm before relying on it.
        kwargs['params']['url'] = "http://localhost:8000/:{bucket_name}?encoding-type=url".format(bucket_name=bucket_name)
        kwargs['params']['url_path'] = "/:{bucket_name}".format(bucket_name=bucket_name)
        kwargs['params']['context']['signing']['bucket'] = ":{bucket_name}".format(bucket_name=bucket_name)

    tenant_client = get_tenant_client()
    # Fix: this test issues ListObjectsV2, so the handler must be registered
    # on the ListObjectsV2 operation. It was previously registered on
    # 'before-call.s3.ListObjects', so the URL rewrite never ran for the
    # request actually being sent.
    tenant_client.meta.events.register('before-call.s3.ListObjectsV2', change_bucket_name)
    response = tenant_client.list_objects_v2(Bucket=bucket_name)

    eq(len(response['Contents']), 1)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy on another bucket')
@attr(assertion='succeeds')
@attr('bucket-policy')
def test_bucket_policy_another_bucket():
    """A wildcard-resource policy fetched from one bucket can be re-applied
    to a second bucket, granting listing on both."""
    bucket_name = get_new_bucket()
    bucket_name2 = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='asdf', Body='asdf')
    client.put_object(Bucket=bucket_name2, Key='abcd', Body='abcd')

    policy_document = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {"AWS": "*"},
            "Action": "s3:ListBucket",
            "Resource": ["arn:aws:s3:::*", "arn:aws:s3:::*/*"]
        }]
    })

    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
    # Round-trip the policy through GetBucketPolicy before reusing it on the
    # second bucket.
    fetched_policy = client.get_bucket_policy(Bucket=bucket_name)['Policy']
    client.put_bucket_policy(Bucket=bucket_name2, Policy=fetched_policy)

    # Both buckets must now be listable by the alternate user.
    for bucket in (bucket_name, bucket_name2):
        alt_client = get_alt_client()
        listing = alt_client.list_objects(Bucket=bucket)
        eq(len(listing['Contents']), 1)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy on another bucket with list-objects-v2')
@attr(assertion='succeeds')
@attr('bucket-policy')
@attr('list-objects-v2')
def test_bucketv2_policy_another_bucket():
    """A wildcard-resource policy fetched from one bucket can be re-applied
    to a second bucket, granting ListObjectsV2 on both."""
    bucket_name = get_new_bucket()
    bucket_name2 = get_new_bucket()
    client = get_client()
    client.put_object(Bucket=bucket_name, Key='asdf', Body='asdf')
    client.put_object(Bucket=bucket_name2, Key='abcd', Body='abcd')

    policy_document = json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {"AWS": "*"},
            "Action": "s3:ListBucket",
            "Resource": ["arn:aws:s3:::*", "arn:aws:s3:::*/*"]
        }]
    })

    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
    # Round-trip the policy through GetBucketPolicy before reusing it on the
    # second bucket.
    fetched_policy = client.get_bucket_policy(Bucket=bucket_name)['Policy']
    client.put_bucket_policy(Bucket=bucket_name2, Policy=fetched_policy)

    # Both buckets must now be listable (v2) by the alternate user.
    for bucket in (bucket_name, bucket_name2):
        alt_client = get_alt_client()
        listing = alt_client.list_objects_v2(Bucket=bucket)
        eq(len(listing['Contents']), 1)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put condition operator end with ifExists')
@attr('bucket-policy')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_policy_set_condition_operator_end_with_IfExists():
    """StringLikeIfExists on aws:Referer: requests whose Referer matches the
    pattern are allowed; a non-matching Referer is denied with 403.

    Fix: removes the leftover `boto3.set_stream_logger(...)` debug logging
    and the trailing `get_bucket_policy` + print, which were debugging
    residue and not part of the assertion flow.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    key = 'foo'
    client.put_object(Bucket=bucket_name, Key=key)
    policy = '''{
  "Version":"2012-10-17",
  "Statement": [{
    "Sid": "Allow Public Access to All Objects",
    "Effect": "Allow",
    "Principal": "*",
    "Action": "s3:GetObject",
    "Condition": {
      "StringLikeIfExists": {
        "aws:Referer": "http://www.example.com/*"
      }
    },
    "Resource": "arn:aws:s3:::%s/*"
  }
 ]
}''' % bucket_name
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy)

    # NOTE: each register() below adds another handler; all of them run on
    # every GetObject, each overwriting the 'referer' header, so the most
    # recently registered value is the one actually sent.
    request_headers={'referer': 'http://www.example.com/'}
    lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
    client.meta.events.register('before-call.s3.GetObject', lf)
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    request_headers={'referer': 'http://www.example.com/index.html'}
    lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
    client.meta.events.register('before-call.s3.GetObject', lf)
    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # the 'referer' headers need to be removed for this one
    #response = client.get_object(Bucket=bucket_name, Key=key)
    #eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # A Referer outside the allowed pattern must be rejected.
    request_headers={'referer': 'http://example.com'}
    lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
    client.meta.events.register('before-call.s3.GetObject', lf)
    # TODO: Compare Requests sent in Boto3, Wireshark, RGW Log for both boto and boto3
    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
def _create_simple_tagset(count):
tagset = []
for i in range(count):
tagset.append({'Key': str(i), 'Value': str(i)})
return {'TagSet': tagset}
def _make_random_string(size):
return ''.join(random.choice(string.ascii_letters) for _ in range(size))
@attr(resource='object')
@attr(method='get')
@attr(operation='Test Get/PutObjTagging output')
@attr(assertion='success')
@attr('tagging')
def test_get_obj_tagging():
    """PutObjectTagging followed by GetObjectTagging round-trips the tag set."""
    key = 'testputtags'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    tags = _create_simple_tagset(2)
    put_response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=tags)
    eq(put_response['ResponseMetadata']['HTTPStatusCode'], 200)

    get_response = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(get_response['TagSet'], tags['TagSet'])
@attr(resource='object')
@attr(method='get')
@attr(operation='Test HEAD obj tagging output')
@attr(assertion='success')
@attr('tagging')
def test_get_obj_head_tagging():
    """HeadObject reports the number of tags via the x-amz-tagging-count header."""
    key = 'testputtags'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    count = 2
    put_response = client.put_object_tagging(Bucket=bucket_name, Key=key,
                                             Tagging=_create_simple_tagset(count))
    eq(put_response['ResponseMetadata']['HTTPStatusCode'], 200)

    head_response = client.head_object(Bucket=bucket_name, Key=key)
    eq(head_response['ResponseMetadata']['HTTPStatusCode'], 200)
    eq(head_response['ResponseMetadata']['HTTPHeaders']['x-amz-tagging-count'], str(count))
@attr(resource='object')
@attr(method='get')
@attr(operation='Test Put max allowed tags')
@attr(assertion='success')
@attr('tagging')
def test_put_max_tags():
    """Exactly 10 tags (the S3 per-object maximum) can be put and read back."""
    key = 'testputmaxtags'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    tags = _create_simple_tagset(10)
    put_response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=tags)
    eq(put_response['ResponseMetadata']['HTTPStatusCode'], 200)

    get_response = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(get_response['TagSet'], tags['TagSet'])
@attr(resource='object')
@attr(method='get')
@attr(operation='Test Put max allowed tags')
@attr(assertion='fails')
@attr('tagging')
def test_put_excess_tags():
    """Putting 11 tags (one over the limit) fails with InvalidTag, leaving
    the object untagged."""
    key = 'testputmaxtags'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    err = assert_raises(ClientError, client.put_object_tagging,
                        Bucket=bucket_name, Key=key, Tagging=_create_simple_tagset(11))
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 400)
    eq(error_code, 'InvalidTag')

    # The failed put must not have left any tags behind.
    get_response = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(len(get_response['TagSet']), 0)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test Put max allowed k-v size')
@attr(assertion='success')
@attr('tagging')
def test_put_max_kvsize_tags():
    """Tags at the maximum key (128) and value (256) lengths are accepted
    and read back intact."""
    key = 'testputmaxkeysize'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    input_tagset = {'TagSet': [
        {'Key': _make_random_string(128), 'Value': _make_random_string(256)}
        for _ in range(10)
    ]}
    put_response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
    eq(put_response['ResponseMetadata']['HTTPStatusCode'], 200)

    # Every tag returned must be one we sent (order not assumed).
    get_response = client.get_object_tagging(Bucket=bucket_name, Key=key)
    for returned_tag in get_response['TagSet']:
        eq((returned_tag in input_tagset['TagSet']), True)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test exceed key size')
@attr(assertion='success')
@attr('tagging')
def test_put_excess_key_tags():
    """Tag keys of 129 chars (one over the 128 limit) are rejected with
    InvalidTag, leaving the object untagged."""
    key = 'testputexcesskeytags'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    input_tagset = {'TagSet': [
        {'Key': _make_random_string(129), 'Value': _make_random_string(256)}
        for _ in range(10)
    ]}
    err = assert_raises(ClientError, client.put_object_tagging,
                        Bucket=bucket_name, Key=key, Tagging=input_tagset)
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 400)
    eq(error_code, 'InvalidTag')

    # The failed put must not have left any tags behind.
    get_response = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(len(get_response['TagSet']), 0)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test exceed val size')
@attr(assertion='success')
@attr('tagging')
def test_put_excess_val_tags():
    """Tag values of 257 chars (one over the 256 limit) are rejected with
    InvalidTag, leaving the object untagged."""
    key = 'testputexcesskeytags'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    input_tagset = {'TagSet': [
        {'Key': _make_random_string(128), 'Value': _make_random_string(257)}
        for _ in range(10)
    ]}
    err = assert_raises(ClientError, client.put_object_tagging,
                        Bucket=bucket_name, Key=key, Tagging=input_tagset)
    status, error_code = _get_status_and_error_code(err.response)
    eq(status, 400)
    eq(error_code, 'InvalidTag')

    # The failed put must not have left any tags behind.
    get_response = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(len(get_response['TagSet']), 0)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test PUT modifies existing tags')
@attr(assertion='success')
@attr('tagging')
def test_put_modify_tags():
    """A second PutObjectTagging replaces the whole tag set, not merges it."""
    key = 'testputmodifytags'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    first_tags = {'TagSet': [
        {'Key': 'key', 'Value': 'val'},
        {'Key': 'key2', 'Value': 'val2'},
    ]}
    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=first_tags)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(response['TagSet'], first_tags['TagSet'])

    # Overwrite with a different, smaller tag set and verify full replacement.
    second_tags = {'TagSet': [{'Key': 'key3', 'Value': 'val3'}]}
    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=second_tags)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(response['TagSet'], second_tags['TagSet'])
@attr(resource='object')
@attr(method='get')
@attr(operation='Test Delete tags')
@attr(assertion='success')
@attr('tagging')
def test_put_delete_tags():
    """DeleteObjectTagging removes all tags from an object (204 response)."""
    key = 'testputmodifytags'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    tags = _create_simple_tagset(2)
    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=tags)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(response['TagSet'], tags['TagSet'])

    delete_response = client.delete_object_tagging(Bucket=bucket_name, Key=key)
    eq(delete_response['ResponseMetadata']['HTTPStatusCode'], 204)

    # All tags must be gone after the delete.
    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(len(response['TagSet']), 0)
@attr(resource='object')
@attr(method='post')
@attr(operation='anonymous browser based upload via POST request')
@attr('tagging')
@attr(assertion='succeeds and returns written data')
def test_post_object_tags_anonymous_request():
    """Anonymous browser-style POST upload with a 'tagging' form field; the
    uploaded object must carry both the body and the tags."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    url = _get_post_url(bucket_name)
    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)

    key_name = "foo.txt"
    input_tagset = _create_simple_tagset(2)
    # xml_input_tagset is the same as input_tagset in xml.
    # There is not a simple way to change input_tagset to xml like there is in the boto2 tests
    xml_input_tagset = "<Tagging><TagSet><Tag><Key>0</Key><Value>0</Value></Tag><Tag><Key>1</Key><Value>1</Value></Tag></TagSet></Tagging>"

    # Form fields for the multipart POST; 'file' stays last.
    form_fields = OrderedDict([
        ("key" , key_name),
        ("acl" , "public-read"),
        ("Content-Type" , "text/plain"),
        ("tagging", xml_input_tagset),
        ('file', ('bar')),
    ])

    post_response = requests.post(url, files = form_fields)
    eq(post_response.status_code, 204)

    get_response = client.get_object(Bucket=bucket_name, Key=key_name)
    eq(_get_body(get_response), 'bar')

    tagging_response = client.get_object_tagging(Bucket=bucket_name, Key=key_name)
    eq(tagging_response['TagSet'], input_tagset['TagSet'])
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr('tagging')
@attr(assertion='succeeds and returns written data')
def test_post_object_tags_authenticated_request():
    """Signed (SigV2 policy) browser-style POST upload that also sets object
    tags via the 'tagging' form field, then reads the object body back."""
    bucket_name = get_new_bucket()
    client = get_client()
    url = _get_post_url(bucket_name)
    utc = pytz.utc
    # Policy valid for 100 minutes from now.
    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)

    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
    "conditions": [
    {"bucket": bucket_name},
    ["starts-with", "$key", "foo"],
    {"acl": "private"},
    ["starts-with", "$Content-Type", "text/plain"],
    ["content-length-range", 0, 1024],
    ["starts-with", "$tagging", ""]
    ]}

    # xml_input_tagset is the same as `input_tagset = _create_simple_tagset(2)` in xml
    # There is not a simple way to change input_tagset to xml like there is in the boto2 tests
    xml_input_tagset = "<Tagging><TagSet><Tag><Key>0</Key><Value>0</Value></Tag><Tag><Key>1</Key><Value>1</Value></Tag></TagSet></Tagging>"

    json_policy_document = json.JSONEncoder().encode(policy_document)
    policy = base64.b64encode(json_policy_document)
    aws_secret_access_key = get_main_aws_secret_key()
    aws_access_key_id = get_main_aws_access_key()
    # NOTE(review): hmac.new/b64encode are fed str values — Python 2
    # semantics; under Python 3 these calls require bytes. Confirm the
    # suite's target interpreter before changing this.
    signature = base64.b64encode(hmac.new(aws_secret_access_key, policy, sha).digest())

    # Multipart form fields; 'file' is kept last, as S3 POST ignores fields
    # after the file part.
    payload = OrderedDict([
    ("key" , "foo.txt"),
    ("AWSAccessKeyId" , aws_access_key_id),\
    ("acl" , "private"),("signature" , signature),("policy" , policy),\
    ("tagging", xml_input_tagset),
    ("Content-Type" , "text/plain"),
    ('file', ('bar'))])

    r = requests.post(url, files = payload)
    eq(r.status_code, 204)
    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
    body = _get_body(response)
    eq(body, 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='Test PutObj with tagging headers')
@attr(assertion='success')
@attr('tagging')
def test_put_obj_with_tags():
    """PutObject with a raw x-amz-tagging header ('foo=bar&bar') sets the
    tags, including a valueless tag, alongside the body."""
    bucket_name = get_new_bucket()
    client = get_client()
    key = 'testtagobj1'
    data = 'A'*100

    # Expected parse of 'foo=bar&bar': 'bar' with empty value, 'foo'='bar'.
    expected_tagset = [
        {'Key': 'bar', 'Value': ''},
        {'Key': 'foo', 'Value': 'bar'},
    ]
    put_obj_tag_headers = {
        'x-amz-tagging' : 'foo=bar&bar'
    }
    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(put_obj_tag_headers))
    client.meta.events.register('before-call.s3.PutObject', add_headers)
    client.put_object(Bucket=bucket_name, Key=key, Body=data)

    response = client.get_object(Bucket=bucket_name, Key=key)
    eq(_get_body(response), data)

    tagging_response = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(tagging_response['TagSet'], expected_tagset)
def _make_arn_resource(path="*"):
return "arn:aws:s3:::{}".format(path)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test GetObjTagging public read')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_get_tags_acl_public():
    """A policy granting s3:GetObjectTagging lets the alternate user read tags."""
    key = 'testputtagsacl'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    resource = _make_arn_resource("{}/{}".format(bucket_name, key))
    policy_document = make_json_policy("s3:GetObjectTagging",
                                       resource)
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    tags = _create_simple_tagset(10)
    put_response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=tags)
    eq(put_response['ResponseMetadata']['HTTPStatusCode'], 200)

    # The policy, not the ACL, authorizes the alternate user's read.
    alt_client = get_alt_client()
    get_response = alt_client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(get_response['TagSet'], tags['TagSet'])
@attr(resource='object')
@attr(method='get')
@attr(operation='Test PutObjTagging public wrote')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_put_tags_acl_public():
    """A policy granting s3:PutObjectTagging lets the alternate user write tags."""
    key = 'testputtagsacl'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    resource = _make_arn_resource("{}/{}".format(bucket_name, key))
    policy_document = make_json_policy("s3:PutObjectTagging",
                                       resource)
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    # The alternate user writes the tags; the owner reads them back.
    tags = _create_simple_tagset(10)
    alt_client = get_alt_client()
    put_response = alt_client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=tags)
    eq(put_response['ResponseMetadata']['HTTPStatusCode'], 200)

    get_response = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(get_response['TagSet'], tags['TagSet'])
@attr(resource='object')
@attr(method='get')
@attr(operation='test deleteobjtagging public')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_delete_tags_obj_public():
    """A policy granting s3:DeleteObjectTagging lets the alternate user
    strip an object's tags."""
    key = 'testputtagsacl'
    bucket_name = _create_key_with_random_content(key)
    client = get_client()

    resource = _make_arn_resource("{}/{}".format(bucket_name, key))
    policy_document = make_json_policy("s3:DeleteObjectTagging",
                                       resource)
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    put_response = client.put_object_tagging(Bucket=bucket_name, Key=key,
                                             Tagging=_create_simple_tagset(10))
    eq(put_response['ResponseMetadata']['HTTPStatusCode'], 200)

    # The alternate user deletes the tags; the owner verifies they are gone.
    alt_client = get_alt_client()
    delete_response = alt_client.delete_object_tagging(Bucket=bucket_name, Key=key)
    eq(delete_response['ResponseMetadata']['HTTPStatusCode'], 204)

    get_response = client.get_object_tagging(Bucket=bucket_name, Key=key)
    eq(len(get_response['TagSet']), 0)
@attr(resource='object')
@attr(method='put')
@attr(operation='test whether a correct version-id returned')
@attr(assertion='version-id is same as bucket list')
@attr('versioning')
def test_versioning_bucket_atomic_upload_return_version_id():
    """PutObject returns a VersionId only on versioning-enabled buckets, and
    that id matches the bucket's version listing."""
    bucket_name = get_new_bucket()
    client = get_client()

    # Versioning enabled: a non-empty version-id must come back and agree
    # with ListObjectVersions.
    key = 'bar'
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    version_id = client.put_object(Bucket=bucket_name, Key=key)['VersionId']
    listing = client.list_object_versions(Bucket=bucket_name)
    for version in listing['Versions']:
        eq(version['VersionId'], version_id)

    # Never-versioned bucket: no version-id in the response.
    bucket_name = get_new_bucket()
    key = 'baz'
    response = client.put_object(Bucket=bucket_name, Key=key)
    eq(('VersionId' in response), False)

    # Versioning suspended: also no version-id.
    bucket_name = get_new_bucket()
    key = 'baz'
    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
    response = client.put_object(Bucket=bucket_name, Key=key)
    eq(('VersionId' in response), False)
@attr(resource='object')
@attr(method='put')
@attr(operation='test whether a correct version-id returned')
@attr(assertion='version-id is same as bucket list')
@attr('versioning')
def test_versioning_bucket_multipart_upload_return_version_id():
    """CompleteMultipartUpload returns a VersionId only on versioning-enabled
    buckets, and that id matches the bucket's version listing."""
    content_type='text/bla'
    objlen = 30 * 1024 * 1024
    bucket_name = get_new_bucket()
    client = get_client()
    metadata={'foo': 'baz'}

    # Versioning enabled: a non-empty version-id must come back and agree
    # with ListObjectVersions.
    key = 'bar'
    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client, content_type=content_type, metadata=metadata)
    complete_response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
    version_id = complete_response['VersionId']
    listing = client.list_object_versions(Bucket=bucket_name)
    for version in listing['Versions']:
        eq(version['VersionId'], version_id)

    # Never-versioned bucket: no version-id in the response.
    bucket_name = get_new_bucket()
    key = 'baz'
    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client, content_type=content_type, metadata=metadata)
    complete_response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
    eq(('VersionId' in complete_response), False)

    # Versioning suspended: also no version-id.
    bucket_name = get_new_bucket()
    key = 'foo'
    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client, content_type=content_type, metadata=metadata)
    complete_response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
    eq(('VersionId' in complete_response), False)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test ExistingObjectTag conditional on get object')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_get_obj_existing_tag():
    """s3:GetObject is allowed only on objects whose tag
    s3:ExistingObjectTag/security equals 'public'."""
    bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
    client = get_client()

    tag_conditional = {"StringEquals": {
        "s3:ExistingObjectTag/security" : "public"
    }}
    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
    policy_document = make_json_policy("s3:GetObject",
                                       resource,
                                       conditions=tag_conditional)
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    # Tag the three objects: one matching, one with the wrong value, one
    # with the wrong tag key.
    tags_by_key = [
        ('publictag', [{'Key': 'security', 'Value': 'public'},
                       {'Key': 'foo', 'Value': 'bar'}]),
        ('privatetag', [{'Key': 'security', 'Value': 'private'}]),
        ('invalidtag', [{'Key': 'security1', 'Value': 'public'}]),
    ]
    for obj_key, tags in tags_by_key:
        response = client.put_object_tagging(Bucket=bucket_name, Key=obj_key,
                                             Tagging={'TagSet': tags})
        eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    alt_client = get_alt_client()
    response = alt_client.get_object(Bucket=bucket_name, Key='publictag')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # Non-matching objects must be denied.
    for denied_key in ('privatetag', 'invalidtag'):
        e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key=denied_key)
        status, error_code = _get_status_and_error_code(e.response)
        eq(status, 403)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test ExistingObjectTag conditional on get object tagging')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_get_obj_tagging_existing_tag():
    """s3:GetObjectTagging is allowed only on objects whose tag
    s3:ExistingObjectTag/security equals 'public'; GetObject itself stays
    denied."""
    bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
    client = get_client()

    tag_conditional = {"StringEquals": {
        "s3:ExistingObjectTag/security" : "public"
    }}
    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
    policy_document = make_json_policy("s3:GetObjectTagging",
                                       resource,
                                       conditions=tag_conditional)
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    # Tag the three objects: one matching, one with the wrong value, one
    # with the wrong tag key.
    tags_by_key = [
        ('publictag', [{'Key': 'security', 'Value': 'public'},
                       {'Key': 'foo', 'Value': 'bar'}]),
        ('privatetag', [{'Key': 'security', 'Value': 'private'}]),
        ('invalidtag', [{'Key': 'security1', 'Value': 'public'}]),
    ]
    for obj_key, tags in tags_by_key:
        response = client.put_object_tagging(Bucket=bucket_name, Key=obj_key,
                                             Tagging={'TagSet': tags})
        eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    alt_client = get_alt_client()
    response = alt_client.get_object_tagging(Bucket=bucket_name, Key='publictag')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # A get object itself should fail since we allowed only GetObjectTagging
    e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='publictag')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)

    # Non-matching objects must be denied tag reads too.
    for denied_key in ('privatetag', 'invalidtag'):
        e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key=denied_key)
        status, error_code = _get_status_and_error_code(e.response)
        eq(status, 403)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test ExistingObjectTag conditional on put object tagging')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_put_obj_tagging_existing_tag():
    """s3:PutObjectTagging conditioned on s3:ExistingObjectTag/security ==
    'public': a put that drops the qualifying tag locks the caller out of
    subsequent puts."""
    bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
    client = get_client()

    tag_conditional = {"StringEquals": {
        "s3:ExistingObjectTag/security" : "public"
    }}
    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
    policy_document = make_json_policy("s3:PutObjectTagging",
                                       resource,
                                       conditions=tag_conditional)
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)

    # Seed one qualifying and one non-qualifying object.
    tags_by_key = [
        ('publictag', [{'Key': 'security', 'Value': 'public'},
                       {'Key': 'foo', 'Value': 'bar'}]),
        ('privatetag', [{'Key': 'security', 'Value': 'private'}]),
    ]
    for obj_key, tags in tags_by_key:
        response = client.put_object_tagging(Bucket=bucket_name, Key=obj_key,
                                             Tagging={'TagSet': tags})
        eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    alt_client = get_alt_client()
    # PUT requests with object tagging are a bit weird: if you forget to
    # include the tag that is supposed to keep existing, subsequent put
    # requests will fail.
    testtagset1 = [
        {'Key': 'security', 'Value': 'public'},
        {'Key': 'foo', 'Value': 'bar'},
    ]
    response = alt_client.put_object_tagging(Bucket=bucket_name, Key='publictag',
                                             Tagging={'TagSet': testtagset1})
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # The private object never qualified, so this put is denied.
    e = assert_raises(ClientError, alt_client.put_object_tagging,
                      Bucket=bucket_name, Key='privatetag', Tagging={'TagSet': testtagset1})
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)

    # Replace the qualifying tag with security=private...
    testtagset2 = [{'Key': 'security', 'Value': 'private'}]
    response = alt_client.put_object_tagging(Bucket=bucket_name, Key='publictag',
                                             Tagging={'TagSet': testtagset2})
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    # ...so putting the original tags back must now fail.
    e = assert_raises(ClientError, alt_client.put_object_tagging,
                      Bucket=bucket_name, Key='publictag', Tagging={'TagSet': testtagset1})
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test copy-source conditional on put obj')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_put_obj_copy_source():
    """s3:PutObject conditioned on s3:x-amz-copy-source: only copies whose
    source matches <src-bucket>/public/* may land in the destination."""
    bucket_name = _create_objects(keys=['public/foo', 'public/bar', 'private/foo'])
    client = get_client()

    # Source bucket: anyone may read the objects.
    src_resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
    src_policy = make_json_policy("s3:GetObject",
                                  src_resource)
    client.put_bucket_policy(Bucket=bucket_name, Policy=src_policy)

    # Destination bucket: writes allowed only when copying from public/*.
    bucket_name2 = get_new_bucket()
    copy_conditional = {"StringLike": {
        "s3:x-amz-copy-source" : bucket_name + "/public/*"
    }}
    resource = _make_arn_resource("{}/{}".format(bucket_name2, "*"))
    dest_policy = make_json_policy("s3:PutObject",
                                   resource,
                                   conditions=copy_conditional)
    client.put_bucket_policy(Bucket=bucket_name2, Policy=dest_policy)

    alt_client = get_alt_client()
    alt_client.copy_object(Bucket=bucket_name2,
                           CopySource={'Bucket': bucket_name, 'Key': 'public/foo'},
                           Key='new_foo')
    # This is possible because we are still the owner, see the grants with
    # policy on how to do this right
    response = alt_client.get_object(Bucket=bucket_name2, Key='new_foo')
    eq(_get_body(response), 'public/foo')

    alt_client.copy_object(Bucket=bucket_name2,
                           CopySource={'Bucket': bucket_name, 'Key': 'public/bar'},
                           Key='new_foo2')
    response = alt_client.get_object(Bucket=bucket_name2, Key='new_foo2')
    eq(_get_body(response), 'public/bar')

    # A copy sourced from private/ must be rejected by the condition.
    check_access_denied(alt_client.copy_object, Bucket=bucket_name2,
                        CopySource={'Bucket': bucket_name, 'Key': 'private/foo'},
                        Key='new_foo2')
@attr(resource='object')
@attr(method='put')
@attr(operation='Test copy-source conditional on put obj')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_put_obj_copy_source_meta():
    """Verify an s3:x-amz-metadata-directive StringEquals condition: copies
    succeed only while the directive header is present and equals COPY."""
    src_bucket = _create_objects(keys=['public/foo', 'public/bar'])
    client = get_client()
    src_resource = _make_arn_resource("{}/{}".format(src_bucket, "*"))
    client.put_bucket_policy(
        Bucket=src_bucket,
        Policy=make_json_policy("s3:GetObject", src_resource))
    dest_bucket = get_new_bucket()
    directive_condition = {"StringEquals": {
        "s3:x-amz-metadata-directive": "COPY"
    }}
    dest_resource = _make_arn_resource("{}/{}".format(dest_bucket, "*"))
    client.put_bucket_policy(
        Bucket=dest_bucket,
        Policy=make_json_policy("s3:PutObject", dest_resource,
                                conditions=directive_condition))
    alt_client = get_alt_client()
    # Force the directive header onto every copy request.
    add_directive = (lambda **kwargs: kwargs['params']['headers'].update(
        {"x-amz-metadata-directive": "COPY"}))
    alt_client.meta.events.register('before-call.s3.CopyObject', add_directive)
    alt_client.copy_object(Bucket=dest_bucket,
                           CopySource={'Bucket': src_bucket, 'Key': 'public/foo'},
                           Key='new_foo')
    # Readable because the copier is still the object owner.
    eq(_get_body(alt_client.get_object(Bucket=dest_bucket, Key='new_foo')),
       'public/foo')

    # Strip the directive again; without it the condition is not met.
    def remove_header(**kwargs):
        kwargs['params']['headers'].pop("x-amz-metadata-directive", None)
    alt_client.meta.events.register('before-call.s3.CopyObject', remove_header)
    check_access_denied(alt_client.copy_object, Bucket=dest_bucket,
                        CopySource={'Bucket': src_bucket, 'Key': 'public/bar'},
                        Key='new_foo2', Metadata={"foo": "bar"})
@attr(resource='object')
@attr(method='put')
@attr(operation='Test put obj with canned-acl not to be public')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_put_obj_acl():
    """Allow puts in general but Deny any put whose x-amz-acl header
    matches "public*" — an explicit Deny always overrides an Allow."""
    bucket = get_new_bucket()
    client = get_client()
    # The Allow statement grants the put; the Deny statement rejects any
    # request carrying a public-read/public-read-write canned ACL.
    deny_condition = {"StringLike": {
        "s3:x-amz-acl": "public*"
    }}
    resource = _make_arn_resource("{}/{}".format(bucket, "*"))
    policy = Policy()
    allow_stmt = Statement("s3:PutObject", resource)
    deny_stmt = Statement("s3:PutObject", resource, effect="Deny",
                          condition=deny_condition)
    client.put_bucket_policy(
        Bucket=bucket,
        Policy=policy.add_statement(allow_stmt).add_statement(deny_stmt).to_json())
    alt_client = get_alt_client()
    key1 = 'private-key'
    # With no ACL header only the Allow applies, so this succeeds.
    resp = alt_client.put_object(Bucket=bucket, Key=key1, Body=key1)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
    key2 = 'public-key'
    inject_acl = (lambda **kwargs: kwargs['params']['headers'].update(
        {"x-amz-acl": "public-read"}))
    alt_client.meta.events.register('before-call.s3.PutObject', inject_acl)
    err = assert_raises(ClientError, alt_client.put_object, Bucket=bucket,
                        Key=key2, Body=key2)
    http_status, _ = _get_status_and_error_code(err.response)
    eq(http_status, 403)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test put obj with amz-grant back to bucket-owner')
@attr(assertion='success')
@attr('bucket-policy')
def test_bucket_policy_put_obj_grant():
    """One bucket's policy requires x-amz-grant-full-control back to the
    bucket owner, so uploads there transfer control to the owner; a second
    bucket has no such condition, so the uploader keeps ownership."""
    bucket1 = get_new_bucket()
    bucket2 = get_new_bucket()
    client = get_client()
    main_user_id = get_main_user_id()
    alt_user_id = get_alt_user_id()
    owner_id_str = "id=" + main_user_id
    grant_condition = {"StringEquals": {
        "s3:x-amz-grant-full-control": owner_id_str
    }}
    resource1 = _make_arn_resource("{}/{}".format(bucket1, "*"))
    policy1 = make_json_policy("s3:PutObject", resource1,
                               conditions=grant_condition)
    resource2 = _make_arn_resource("{}/{}".format(bucket2, "*"))
    policy2 = make_json_policy("s3:PutObject", resource2)
    client.put_bucket_policy(Bucket=bucket1, Policy=policy1)
    client.put_bucket_policy(Bucket=bucket2, Policy=policy2)
    alt_client = get_alt_client()
    key1 = 'key1'
    # Upload to bucket1 with the grant header: full control goes to the
    # main (bucket-owning) user.
    inject_grant = (lambda **kwargs: kwargs['params']['headers'].update(
        {"x-amz-grant-full-control": owner_id_str}))
    alt_client.meta.events.register('before-call.s3.PutObject', inject_grant)
    resp = alt_client.put_object(Bucket=bucket1, Key=key1, Body=key1)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)

    # Drop the grant header again for the unconditioned bucket.
    def remove_header(**kwargs):
        kwargs['params']['headers'].pop("x-amz-grant-full-control", None)
    alt_client.meta.events.register('before-call.s3.PutObject', remove_header)
    key2 = 'key2'
    resp = alt_client.put_object(Bucket=bucket2, Key=key2, Body=key2)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
    acl1_response = client.get_object_acl(Bucket=bucket1, Key=key1)
    # The main user never received control of key2, so reading its ACL is
    # denied.
    check_access_denied(client.get_object_acl, Bucket=bucket2, Key=key2)
    acl2_response = alt_client.get_object_acl(Bucket=bucket2, Key=key2)
    eq(acl1_response['Grants'][0]['Grantee']['ID'], main_user_id)
    eq(acl2_response['Grants'][0]['Grantee']['ID'], alt_user_id)
@attr(resource='object')
@attr(method='put')
@attr(operation='Deny put obj requests without encryption')
@attr(assertion='success')
@attr('encryption')
@attr('bucket-policy')
# TODO: remove this 'fails_on_rgw' once I get the test passing
@attr('fails_on_rgw')
def test_bucket_policy_put_obj_enc():
    """Deny puts that use a non-AES256 SSE algorithm or carry no
    server-side-encryption header at all; a put with the right SSE headers
    should then be allowed.

    NOTE(review): currently in a debugging state (fails_on_rgw); see the
    TODO/DEBUGGING comments near the end about the unexpected 400.
    """
    bucket_name = get_new_bucket()
    client = get_v2_client()
    # Deny any put whose declared SSE algorithm is not AES256 ...
    deny_incorrect_algo = {
        "StringNotEquals": {
            "s3:x-amz-server-side-encryption": "AES256"
        }
    }
    # ... and deny puts with no SSE header at all ("Null" condition is true
    # when the key is absent from the request).
    deny_unencrypted_obj = {
        "Null": {
            "s3:x-amz-server-side-encryption": "true"
        }
    }
    p = Policy()
    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
    s1 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_incorrect_algo)
    s2 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_unencrypted_obj)
    policy_document = p.add_statement(s1).add_statement(s2).to_json()
    # NOTE(review): leftover debug logging — this enables botocore wire
    # logging globally for the rest of the test run.
    boto3.set_stream_logger(name='botocore')
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
    key1_str = 'testobj'
    #response = client.get_bucket_policy(Bucket=bucket_name)
    #print response
    # Without any SSE header the Null-condition Deny applies.
    check_access_denied(client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
    sse_client_headers = {
        'x-amz-server-side-encryption': 'AES256',
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
    }
    # Inject the SSE headers so neither Deny statement matches.
    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
    client.meta.events.register('before-call.s3.PutObject', lf)
    #TODO: why is this a 400 and not passing, it appears boto3 is not parsing the 200 response the rgw sends back properly
    # DEBUGGING: run the boto2 and compare the requests
    # DEBUGGING: try to run this with v2 auth (figure out why get_v2_client isn't working) to make the requests similar to what boto2 is doing
    # DEBUGGING: try to add other options to put_object to see if that makes the response better
    client.put_object(Bucket=bucket_name, Key=key1_str)
@attr(resource='object')
@attr(method='put')
@attr(operation='put obj with RequestObjectTag')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_policy_put_obj_request_obj_tag():
    """Allow s3:PutObject only when the request carries the tag
    security=public (s3:RequestObjectTag/security condition).

    A put without the x-amz-tagging header must be denied; the same put
    with the header must succeed.
    """
    bucket_name = get_new_bucket()
    client = get_client()
    tag_conditional = {"StringEquals": {
        "s3:RequestObjectTag/security": "public"
    }}
    p = Policy()
    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
    s1 = Statement("s3:PutObject", resource, effect="Allow", condition=tag_conditional)
    policy_document = p.add_statement(s1).to_json()
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
    alt_client = get_alt_client()
    key1_str = 'testobj'
    # Without the tagging header the condition is unmet, so access is denied.
    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
    headers = {"x-amz-tagging": "security=public"}
    lf = (lambda **kwargs: kwargs['params']['headers'].update(headers))
    # BUG FIX: the header must be injected into the requests made by
    # alt_client (which performs the put); it was previously registered on
    # client, so the tag never reached the request.
    alt_client.meta.events.register('before-call.s3.PutObject', lf)
    alt_client.put_object(Bucket=bucket_name, Key=key1_str, Body=key1_str)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test ExistingObjectTag conditional on get object acl')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_get_obj_acl_existing_tag():
    """Allow s3:GetObjectAcl only for objects tagged security=public
    (s3:ExistingObjectTag condition); any other operation must remain
    denied for the alt user, whatever the object's tags."""
    bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
    client = get_client()
    tag_conditional = {"StringEquals": {
        "s3:ExistingObjectTag/security": "public"
    }}
    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
    policy_document = make_json_policy("s3:GetObjectAcl",
                                       resource,
                                       conditions=tag_conditional)
    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
    # 'publictag' gets exactly the tag the policy condition matches.
    tagset = []
    tagset.append({'Key': 'security', 'Value': 'public'})
    tagset.append({'Key': 'foo', 'Value': 'bar'})
    input_tagset = {'TagSet': tagset}
    response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    # 'privatetag' has the right key but the wrong value.
    tagset2 = []
    tagset2.append({'Key': 'security', 'Value': 'private'})
    input_tagset = {'TagSet': tagset2}
    response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    # 'invalidtag' has the right value under a different key.
    tagset3 = []
    tagset3.append({'Key': 'security1', 'Value': 'public'})
    input_tagset = {'TagSet': tagset3}
    response = client.put_object_tagging(Bucket=bucket_name, Key='invalidtag', Tagging=input_tagset)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    alt_client = get_alt_client()
    response = alt_client.get_object_acl(Bucket=bucket_name, Key='publictag')
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    # A get object itself should fail since we allowed only GetObjectAcl
    e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='publictag')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
    # get_object_tagging is likewise outside the allowed action, so these
    # fail regardless of the objects' tags.
    e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='privatetag')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
    e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='invalidtag')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with default retention')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_lock():
    """Put a default-retention object-lock configuration (GOVERNANCE/Days,
    then COMPLIANCE/Years) on an object-lock-enabled bucket and verify the
    bucket reports versioning Enabled.

    Fix: corrected the typo 'defalut' -> 'default' in the operation attr.
    """
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    conf = {'ObjectLockEnabled': 'Enabled',
            'Rule': {
                'DefaultRetention': {
                    'Mode': 'GOVERNANCE',
                    'Days': 1
                }
            }}
    response = client.put_object_lock_configuration(
        Bucket=bucket_name,
        ObjectLockConfiguration=conf)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    # Replacing the configuration with a Years-based COMPLIANCE rule must
    # also succeed.
    conf = {'ObjectLockEnabled': 'Enabled',
            'Rule': {
                'DefaultRetention': {
                    'Mode': 'COMPLIANCE',
                    'Years': 1
                }
            }}
    response = client.put_object_lock_configuration(
        Bucket=bucket_name,
        ObjectLockConfiguration=conf)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    # Enabling object lock at bucket creation implicitly enables versioning.
    response = client.get_bucket_versioning(Bucket=bucket_name)
    eq(response['Status'], 'Enabled')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with bucket object lock not enabled')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_invalid_bucket():
    """Putting a lock configuration on a bucket created without object lock
    must fail with 409 InvalidBucketState."""
    bucket = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket)
    lock_config = {
        'ObjectLockEnabled': 'Enabled',
        'Rule': {'DefaultRetention': {'Mode': 'GOVERNANCE', 'Days': 1}},
    }
    err = assert_raises(ClientError, client.put_object_lock_configuration,
                        Bucket=bucket, ObjectLockConfiguration=lock_config)
    http_status, err_code = _get_status_and_error_code(err.response)
    eq(http_status, 409)
    eq(err_code, 'InvalidBucketState')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with days and years')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_with_days_and_years():
    """Days and Years are mutually exclusive in DefaultRetention; supplying
    both must fail with 400 MalformedXML."""
    bucket = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket, ObjectLockEnabledForBucket=True)
    lock_config = {
        'ObjectLockEnabled': 'Enabled',
        'Rule': {'DefaultRetention': {'Mode': 'GOVERNANCE',
                                      'Days': 1,
                                      'Years': 1}},
    }
    err = assert_raises(ClientError, client.put_object_lock_configuration,
                        Bucket=bucket, ObjectLockConfiguration=lock_config)
    http_status, err_code = _get_status_and_error_code(err.response)
    eq(http_status, 400)
    eq(err_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with invalid days')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_invalid_days():
    """Default-retention Days must be at least 1; 0 must fail with 400
    InvalidRetentionPeriod."""
    bucket = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket, ObjectLockEnabledForBucket=True)
    lock_config = {
        'ObjectLockEnabled': 'Enabled',
        'Rule': {'DefaultRetention': {'Mode': 'GOVERNANCE', 'Days': 0}},
    }
    err = assert_raises(ClientError, client.put_object_lock_configuration,
                        Bucket=bucket, ObjectLockConfiguration=lock_config)
    http_status, err_code = _get_status_and_error_code(err.response)
    eq(http_status, 400)
    eq(err_code, 'InvalidRetentionPeriod')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with invalid years')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_invalid_years():
    """Default-retention Years must be positive; -1 must fail with 400
    InvalidRetentionPeriod."""
    bucket = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket, ObjectLockEnabledForBucket=True)
    lock_config = {
        'ObjectLockEnabled': 'Enabled',
        'Rule': {'DefaultRetention': {'Mode': 'GOVERNANCE', 'Years': -1}},
    }
    err = assert_raises(ClientError, client.put_object_lock_configuration,
                        Bucket=bucket, ObjectLockConfiguration=lock_config)
    http_status, err_code = _get_status_and_error_code(err.response)
    eq(http_status, 400)
    eq(err_code, 'InvalidRetentionPeriod')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with invalid mode')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_invalid_mode():
    """Retention Mode must be exactly GOVERNANCE or COMPLIANCE (case
    sensitive); anything else must fail with 400 MalformedXML.

    Fix: this function was defined as a duplicate of
    ``test_object_lock_put_obj_lock_invalid_years``, silently shadowing the
    real invalid-years test; renamed per its operation attribute.
    """
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    # A completely bogus mode string is rejected.
    conf = {'ObjectLockEnabled': 'Enabled',
            'Rule': {
                'DefaultRetention': {
                    'Mode': 'abc',
                    'Years': 1
                }
            }}
    e = assert_raises(ClientError, client.put_object_lock_configuration,
                      Bucket=bucket_name, ObjectLockConfiguration=conf)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'MalformedXML')
    # The correct word in the wrong case is also rejected.
    conf = {'ObjectLockEnabled': 'Enabled',
            'Rule': {
                'DefaultRetention': {
                    'Mode': 'governance',
                    'Years': 1
                }
            }}
    e = assert_raises(ClientError, client.put_object_lock_configuration,
                      Bucket=bucket_name, ObjectLockConfiguration=conf)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with invalid status')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_invalid_status():
    """ObjectLockEnabled accepts only 'Enabled'; 'Disabled' must fail with
    400 MalformedXML.

    Fix: the first decorator was written as a bare ``attr(...)`` call (the
    ``@`` was missing), so the resource attribute was computed and then
    discarded instead of being applied.
    """
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    conf = {'ObjectLockEnabled': 'Disabled',
            'Rule': {
                'DefaultRetention': {
                    'Mode': 'GOVERNANCE',
                    'Years': 1
                }
            }}
    e = assert_raises(ClientError, client.put_object_lock_configuration,
                      Bucket=bucket_name, ObjectLockConfiguration=conf)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test suspend versioning when object lock enabled')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_suspend_versioning():
    """Versioning cannot be suspended on an object-lock-enabled bucket;
    expect 409 InvalidBucketState.

    Fix: the first decorator was written as a bare ``attr(...)`` call (the
    ``@`` was missing), so the resource attribute was never applied.
    """
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    e = assert_raises(ClientError, client.put_bucket_versioning,
                      Bucket=bucket_name,
                      VersioningConfiguration={'Status': 'Suspended'})
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 409)
    eq(error_code, 'InvalidBucketState')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get object lock')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_get_obj_lock():
    """get_object_lock_configuration must round-trip the configuration that
    was stored."""
    bucket = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket, ObjectLockEnabledForBucket=True)
    lock_config = {
        'ObjectLockEnabled': 'Enabled',
        'Rule': {'DefaultRetention': {'Mode': 'GOVERNANCE', 'Days': 1}},
    }
    client.put_object_lock_configuration(Bucket=bucket,
                                         ObjectLockConfiguration=lock_config)
    resp = client.get_object_lock_configuration(Bucket=bucket)
    eq(resp['ObjectLockConfiguration'], lock_config)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get object lock with bucket object lock not enabled')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_get_obj_lock_invalid_bucket():
    """Reading the lock configuration from a bucket without object lock
    must fail with 404 ObjectLockConfigurationNotFoundError."""
    bucket = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket)
    err = assert_raises(ClientError, client.get_object_lock_configuration,
                        Bucket=bucket)
    http_status, err_code = _get_status_and_error_code(err.response)
    eq(http_status, 404)
    eq(err_code, 'ObjectLockConfigurationNotFoundError')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_retention():
    """put_object_retention with a GOVERNANCE retain-until date must
    succeed on an object-lock-enabled bucket.

    Fix: the method attribute said 'get' although this test performs a put.
    """
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    key = 'file1'
    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    version_id = response['VersionId']
    retention = {'Mode': 'GOVERNANCE',
                 'RetainUntilDate': datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)}
    response = client.put_object_retention(Bucket=bucket_name, Key=key,
                                           Retention=retention)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    # Clean up: governance retention blocks deletes unless bypassed.
    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id,
                         BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention with bucket object lock not enabled')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_retention_invalid_bucket():
    """put_object_retention on a bucket without object lock must fail with
    400 InvalidRequest."""
    bucket = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket)
    obj_key = 'file1'
    client.put_object(Bucket=bucket, Body='abc', Key=obj_key)
    retention_cfg = {'Mode': 'GOVERNANCE',
                     'RetainUntilDate': datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)}
    err = assert_raises(ClientError, client.put_object_retention,
                        Bucket=bucket, Key=obj_key, Retention=retention_cfg)
    http_status, err_code = _get_status_and_error_code(err.response)
    eq(http_status, 400)
    eq(err_code, 'InvalidRequest')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention with invalid mode')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_retention_invalid_mode():
    """Retention Mode must be upper-case GOVERNANCE/COMPLIANCE; a
    lower-case and a bogus mode both fail with 400 MalformedXML."""
    bucket = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket, ObjectLockEnabledForBucket=True)
    obj_key = 'file1'
    client.put_object(Bucket=bucket, Body='abc', Key=obj_key)
    # Same requests as before, issued in the same order: wrong case first,
    # then an arbitrary invalid string.
    for bad_mode in ('governance', 'abc'):
        retention_cfg = {'Mode': bad_mode,
                         'RetainUntilDate': datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)}
        err = assert_raises(ClientError, client.put_object_retention,
                            Bucket=bucket, Key=obj_key, Retention=retention_cfg)
        http_status, err_code = _get_status_and_error_code(err.response)
        eq(http_status, 400)
        eq(err_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get object retention')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_get_obj_retention():
    """get_object_retention must round-trip the retention that was put."""
    bucket = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket, ObjectLockEnabledForBucket=True)
    obj_key = 'file1'
    put_resp = client.put_object(Bucket=bucket, Body='abc', Key=obj_key)
    vid = put_resp['VersionId']
    retention_cfg = {'Mode': 'GOVERNANCE',
                     'RetainUntilDate': datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)}
    client.put_object_retention(Bucket=bucket, Key=obj_key, Retention=retention_cfg)
    resp = client.get_object_retention(Bucket=bucket, Key=obj_key)
    eq(resp['Retention'], retention_cfg)
    # Clean up: governance retention blocks deletes unless bypassed.
    client.delete_object(Bucket=bucket, Key=obj_key, VersionId=vid,
                         BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get object retention with invalid bucket')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_get_obj_retention_invalid_bucket():
    """get_object_retention on a bucket without object lock must fail with
    400 InvalidRequest."""
    bucket = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket)
    obj_key = 'file1'
    client.put_object(Bucket=bucket, Body='abc', Key=obj_key)
    err = assert_raises(ClientError, client.get_object_retention,
                        Bucket=bucket, Key=obj_key)
    http_status, err_code = _get_status_and_error_code(err.response)
    eq(http_status, 400)
    eq(err_code, 'InvalidRequest')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention with version id')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_retention_versionid():
    """Retention applied with an explicit VersionId must attach to that
    version and round-trip through get_object_retention."""
    bucket = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket, ObjectLockEnabledForBucket=True)
    obj_key = 'file1'
    # Two puts so the key has an older version and a current one.
    client.put_object(Bucket=bucket, Body='abc', Key=obj_key)
    vid = client.put_object(Bucket=bucket, Body='abc', Key=obj_key)['VersionId']
    retention_cfg = {'Mode': 'GOVERNANCE',
                     'RetainUntilDate': datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)}
    client.put_object_retention(Bucket=bucket, Key=obj_key, VersionId=vid,
                                Retention=retention_cfg)
    resp = client.get_object_retention(Bucket=bucket, Key=obj_key, VersionId=vid)
    eq(resp['Retention'], retention_cfg)
    client.delete_object(Bucket=bucket, Key=obj_key, VersionId=vid,
                         BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention to override default retention')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_retention_override_default_retention():
    """An explicit per-object retention overrides the bucket's default
    retention rule."""
    bucket = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket, ObjectLockEnabledForBucket=True)
    lock_config = {
        'ObjectLockEnabled': 'Enabled',
        'Rule': {'DefaultRetention': {'Mode': 'GOVERNANCE', 'Days': 1}},
    }
    client.put_object_lock_configuration(Bucket=bucket,
                                         ObjectLockConfiguration=lock_config)
    obj_key = 'file1'
    vid = client.put_object(Bucket=bucket, Body='abc', Key=obj_key)['VersionId']
    retention_cfg = {'Mode': 'GOVERNANCE',
                     'RetainUntilDate': datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)}
    client.put_object_retention(Bucket=bucket, Key=obj_key, Retention=retention_cfg)
    resp = client.get_object_retention(Bucket=bucket, Key=obj_key)
    eq(resp['Retention'], retention_cfg)
    client.delete_object(Bucket=bucket, Key=obj_key, VersionId=vid,
                         BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention to increase retention period')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_retention_increase_period():
    """Extending an existing governance retention period is allowed; the
    later date must win."""
    bucket = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket, ObjectLockEnabledForBucket=True)
    obj_key = 'file1'
    vid = client.put_object(Bucket=bucket, Body='abc', Key=obj_key)['VersionId']
    shorter = {'Mode': 'GOVERNANCE',
               'RetainUntilDate': datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)}
    client.put_object_retention(Bucket=bucket, Key=obj_key, Retention=shorter)
    longer = {'Mode': 'GOVERNANCE',
              'RetainUntilDate': datetime.datetime(2030, 1, 3, tzinfo=pytz.UTC)}
    client.put_object_retention(Bucket=bucket, Key=obj_key, Retention=longer)
    resp = client.get_object_retention(Bucket=bucket, Key=obj_key)
    eq(resp['Retention'], longer)
    client.delete_object(Bucket=bucket, Key=obj_key, VersionId=vid,
                         BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention to shorten period')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_retention_shorten_period():
    """Shortening a governance retention period without the bypass header
    must be denied with 403 AccessDenied."""
    bucket = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket, ObjectLockEnabledForBucket=True)
    obj_key = 'file1'
    vid = client.put_object(Bucket=bucket, Body='abc', Key=obj_key)['VersionId']
    longer = {'Mode': 'GOVERNANCE',
              'RetainUntilDate': datetime.datetime(2030, 1, 3, tzinfo=pytz.UTC)}
    client.put_object_retention(Bucket=bucket, Key=obj_key, Retention=longer)
    shorter = {'Mode': 'GOVERNANCE',
               'RetainUntilDate': datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)}
    err = assert_raises(ClientError, client.put_object_retention,
                        Bucket=bucket, Key=obj_key, Retention=shorter)
    http_status, err_code = _get_status_and_error_code(err.response)
    eq(http_status, 403)
    eq(err_code, 'AccessDenied')
    client.delete_object(Bucket=bucket, Key=obj_key, VersionId=vid,
                         BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention to shorten period with bypass header')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_retention_shorten_period_bypass():
    """Shortening a governance retention period succeeds when the request
    sets BypassGovernanceRetention."""
    bucket = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket, ObjectLockEnabledForBucket=True)
    obj_key = 'file1'
    vid = client.put_object(Bucket=bucket, Body='abc', Key=obj_key)['VersionId']
    longer = {'Mode': 'GOVERNANCE',
              'RetainUntilDate': datetime.datetime(2030, 1, 3, tzinfo=pytz.UTC)}
    client.put_object_retention(Bucket=bucket, Key=obj_key, Retention=longer)
    shorter = {'Mode': 'GOVERNANCE',
               'RetainUntilDate': datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)}
    client.put_object_retention(Bucket=bucket, Key=obj_key, Retention=shorter,
                                BypassGovernanceRetention=True)
    resp = client.get_object_retention(Bucket=bucket, Key=obj_key)
    eq(resp['Retention'], shorter)
    client.delete_object(Bucket=bucket, Key=obj_key, VersionId=vid,
                         BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='Test delete object with retention')
@attr(assertion='retention period make effects')
@attr('object-lock')
def test_object_lock_delete_object_with_retention():
    """A version under governance retention cannot be deleted normally
    (403 AccessDenied) but can be deleted with BypassGovernanceRetention."""
    bucket = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket, ObjectLockEnabledForBucket=True)
    obj_key = 'file1'
    put_resp = client.put_object(Bucket=bucket, Body='abc', Key=obj_key)
    retention_cfg = {'Mode': 'GOVERNANCE',
                     'RetainUntilDate': datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)}
    client.put_object_retention(Bucket=bucket, Key=obj_key, Retention=retention_cfg)
    err = assert_raises(ClientError, client.delete_object, Bucket=bucket,
                        Key=obj_key, VersionId=put_resp['VersionId'])
    http_status, err_code = _get_status_and_error_code(err.response)
    eq(http_status, 403)
    eq(err_code, 'AccessDenied')
    del_resp = client.delete_object(Bucket=bucket, Key=obj_key,
                                    VersionId=put_resp['VersionId'],
                                    BypassGovernanceRetention=True)
    eq(del_resp['ResponseMetadata']['HTTPStatusCode'], 204)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put legal hold')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_legal_hold():
    """Turning legal hold ON and then OFF must both return 200."""
    bucket = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket, ObjectLockEnabledForBucket=True)
    obj_key = 'file1'
    client.put_object(Bucket=bucket, Body='abc', Key=obj_key)
    for hold_status in ('ON', 'OFF'):
        resp = client.put_object_legal_hold(Bucket=bucket, Key=obj_key,
                                            LegalHold={'Status': hold_status})
        eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put legal hold with invalid bucket')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_legal_hold_invalid_bucket():
    """put_object_legal_hold on a bucket without object lock must fail with
    400 InvalidRequest."""
    bucket = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket)
    obj_key = 'file1'
    client.put_object(Bucket=bucket, Body='abc', Key=obj_key)
    err = assert_raises(ClientError, client.put_object_legal_hold,
                        Bucket=bucket, Key=obj_key,
                        LegalHold={'Status': 'ON'})
    http_status, err_code = _get_status_and_error_code(err.response)
    eq(http_status, 400)
    eq(err_code, 'InvalidRequest')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put legal hold with invalid status')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_legal_hold_invalid_status():
    """LegalHold Status accepts only ON/OFF; anything else must fail with
    400 MalformedXML."""
    bucket = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket, ObjectLockEnabledForBucket=True)
    obj_key = 'file1'
    client.put_object(Bucket=bucket, Body='abc', Key=obj_key)
    err = assert_raises(ClientError, client.put_object_legal_hold,
                        Bucket=bucket, Key=obj_key,
                        LegalHold={'Status': 'abc'})
    http_status, err_code = _get_status_and_error_code(err.response)
    eq(http_status, 400)
    eq(err_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get legal hold')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_get_legal_hold():
    """get_object_legal_hold must round-trip the stored status, both when
    the hold is ON and after it is turned OFF."""
    bucket = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket, ObjectLockEnabledForBucket=True)
    obj_key = 'file1'
    client.put_object(Bucket=bucket, Body='abc', Key=obj_key)
    for hold_status in ('ON', 'OFF'):
        hold = {'Status': hold_status}
        client.put_object_legal_hold(Bucket=bucket, Key=obj_key, LegalHold=hold)
        resp = client.get_object_legal_hold(Bucket=bucket, Key=obj_key)
        eq(resp['LegalHold'], hold)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get legal hold with invalid bucket')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_get_legal_hold_invalid_bucket():
    """Reading a legal hold from a bucket without object lock fails 400."""
    client = get_client()
    bucket_name = get_new_bucket_name()
    # Object lock is NOT enabled here, so the hold query must be rejected.
    client.create_bucket(Bucket=bucket_name)
    key = 'file1'
    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    e = assert_raises(ClientError, client.get_object_legal_hold,
                      Bucket=bucket_name, Key=key)
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 400)
    eq(error_code, 'InvalidRequest')
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='Test delete object with legal hold on')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_delete_object_with_legal_hold_on():
    """Deleting an object version while its legal hold is ON is denied (403)."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    key = 'file1'
    put_response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status': 'ON'})
    e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name,
                      Key=key, VersionId=put_response['VersionId'])
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 403)
    eq(error_code, 'AccessDenied')
    # Release the hold so the bucket can be cleaned up afterwards.
    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status': 'OFF'})
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='Test delete object with legal hold off')
@attr(assertion='succeeds')
@attr('object-lock')
def test_object_lock_delete_object_with_legal_hold_off():
    """Deleting an object version succeeds (204) when its legal hold is OFF.

    The bucket has object lock enabled, but an explicit OFF hold (and no
    retention) must not block deletion of the version.

    Fix: the test asserts success, so the ``@attr(assertion=...)`` metadata
    above was corrected from 'fails' to 'succeeds'.
    """
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    key = 'file1'
    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status': 'OFF'})
    response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get object metadata')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_get_obj_metadata():
    """head_object must surface lock mode, retain-until date and hold status.

    Fix: removed a stray debug ``print response`` statement left over from
    development (Python-2-only syntax that also spammed test output).
    """
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    key = 'file1'
    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
    legal_hold = {'Status': 'ON'}
    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
    response = client.head_object(Bucket=bucket_name, Key=key)
    eq(response['ObjectLockMode'], retention['Mode'])
    eq(response['ObjectLockRetainUntilDate'], retention['RetainUntilDate'])
    eq(response['ObjectLockLegalHoldStatus'], legal_hold['Status'])
    # Clean up: drop the hold and bypass governance retention to delete.
    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
    client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put legal hold and retention when uploading object')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_uploading_obj():
    """put_object may set lock mode, retain date and legal hold in one call."""
    bucket_name = get_new_bucket_name()
    client = get_client()
    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
    key = 'file1'
    retain_until = datetime.datetime(2030,1,1,tzinfo=pytz.UTC)
    client.put_object(Bucket=bucket_name, Body='abc', Key=key, ObjectLockMode='GOVERNANCE',
                      ObjectLockRetainUntilDate=retain_until, ObjectLockLegalHoldStatus='ON')
    # All three lock attributes must be visible via head_object.
    response = client.head_object(Bucket=bucket_name, Key=key)
    eq(response['ObjectLockMode'], 'GOVERNANCE')
    eq(response['ObjectLockRetainUntilDate'], retain_until)
    eq(response['ObjectLockLegalHoldStatus'], 'ON')
    # Clean up: release the hold and bypass governance retention.
    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
    client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
@attr(resource='object')
@attr(method='copy')
@attr(operation='copy w/ x-amz-copy-source-if-match: the latest ETag')
@attr(assertion='succeeds')
def test_copy_object_ifmatch_good():
    """Copy with a matching x-amz-copy-source-if-match ETag succeeds."""
    client = get_client()
    bucket_name = get_new_bucket()
    put_resp = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    client.copy_object(Bucket=bucket_name, CopySource=bucket_name+'/foo',
                       CopySourceIfMatch=put_resp['ETag'], Key='bar')
    copied = client.get_object(Bucket=bucket_name, Key='bar')
    eq(copied['Body'].read(), 'bar')
@attr(resource='object')
@attr(method='copy')
@attr(operation='copy w/ x-amz-copy-source-if-match: bogus ETag')
@attr(assertion='fails 412')
def test_copy_object_ifmatch_failed():
    """Copy with a non-matching if-match ETag fails with 412."""
    client = get_client()
    bucket_name = get_new_bucket()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    e = assert_raises(ClientError, client.copy_object, Bucket=bucket_name,
                      CopySource=bucket_name+'/foo', CopySourceIfMatch='ABCORZ', Key='bar')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 412)
    eq(error_code, 'PreconditionFailed')
@attr(resource='object')
@attr(method='copy')
@attr(operation='copy w/ x-amz-copy-source-if-none-match: the latest ETag')
@attr(assertion='fails 412')
def test_copy_object_ifnonematch_good():
    """Copy with if-none-match equal to the current ETag fails with 412."""
    client = get_client()
    bucket_name = get_new_bucket()
    put_resp = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    e = assert_raises(ClientError, client.copy_object, Bucket=bucket_name,
                      CopySource=bucket_name+'/foo', CopySourceIfNoneMatch=put_resp['ETag'], Key='bar')
    status, error_code = _get_status_and_error_code(e.response)
    eq(status, 412)
    eq(error_code, 'PreconditionFailed')
@attr(resource='object')
@attr(method='copy')
@attr(operation='copy w/ x-amz-copy-source-if-none-match: bogus ETag')
@attr(assertion='succeeds')
def test_copy_object_ifnonematch_failed():
    """Copy with a non-matching if-none-match ETag succeeds."""
    client = get_client()
    bucket_name = get_new_bucket()
    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
    client.copy_object(Bucket=bucket_name, CopySource=bucket_name+'/foo',
                       CopySourceIfNoneMatch='ABCORZ', Key='bar')
    copied = client.get_object(Bucket=bucket_name, Key='bar')
    eq(copied['Body'].read(), 'bar')
|
test_state.py | # -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
import os
import shutil
import tempfile
import textwrap
import threading
import time
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
from tests.support.paths import TMP, FILES
from tests.support.mixins import SaltReturnAssertsMixin
# Import salt libs
import salt.utils
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
# Import 3rd-party libs
import salt.ext.six as six
# Default line terminator used by reline(): the platform-native line ending,
# as bytes (files are read/written in binary mode below).
DEFAULT_ENDING = salt.utils.to_bytes(os.linesep)
def trim_line_end(line):
    '''
    Strip a single trailing line terminator (CRLF or bare LF) from ``line``.

    ``line`` is a byte string.  Raises ``Exception`` when the line does not
    end with a recognized terminator.
    '''
    crlf = salt.utils.to_bytes('\r\n')
    lf = salt.utils.to_bytes('\n')
    if line[-2:] == crlf:
        return line[:-2]
    if line[-1:] == lf:
        return line[:-1]
    raise Exception("Invalid line ending")
def reline(source, dest, force=False, ending=DEFAULT_ENDING):
    '''
    Normalize the line endings of a file.

    Reads ``source``, rewrites every line into a temporary file with
    ``ending`` as the terminator, then renames the temp file onto ``dest``.

    source -- path of the file to read
    dest   -- destination path (may equal ``source`` for in-place rewrite)
    force  -- remove an existing ``dest`` before the rename (needed on
              platforms where rename does not overwrite)
    ending -- byte string appended to each stripped line
    '''
    # mkstemp returns an open OS-level fd; close it and reopen the path
    # through salt's fopen so writes go through Salt's file handling.
    fp, tmp = tempfile.mkstemp()
    os.close(fp)
    with salt.utils.fopen(tmp, 'wb') as tmp_fd:
        with salt.utils.fopen(source, 'rb') as fd:
            lines = fd.readlines()
            for line in lines:
                line_noend = trim_line_end(line)
                tmp_fd.write(line_noend + ending)
    if os.path.exists(dest) and force:
        os.remove(dest)
    os.rename(tmp, dest)
class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the state module
'''
maxDiff = None
def setUp(self):
super(StateModuleTest, self).setUp()
destpath = os.path.join(FILES, 'file', 'base', 'testappend', 'firstif')
reline(destpath, destpath, force=True)
destpath = os.path.join(FILES, 'file', 'base', 'testappend', 'secondif')
reline(destpath, destpath, force=True)
def test_show_highstate(self):
'''
state.show_highstate
'''
high = self.run_function('state.show_highstate')
destpath = os.path.join(TMP, 'testfile')
self.assertTrue(isinstance(high, dict))
self.assertTrue(destpath in high)
self.assertEqual(high[destpath]['__env__'], 'base')
def test_show_lowstate(self):
'''
state.show_lowstate
'''
low = self.run_function('state.show_lowstate')
self.assertTrue(isinstance(low, list))
self.assertTrue(isinstance(low[0], dict))
def test_catch_recurse(self):
'''
state.show_sls used to catch a recursive ref
'''
err = self.run_function('state.sls', mods='recurse_fail')
self.assertIn('recursive', err[0])
def test_no_recurse(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok')
self.assertIn('snmpd', sls)
def test_no_recurse_two(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok_two')
self.assertIn('/etc/nagios/nrpe.cfg', sls)
def test_running_dictionary_consistency(self):
'''
Test the structure of the running dictionary so we don't change it
without deprecating/documenting the change
'''
running_dict_fields = [
'__id__',
'__run_num__',
'__sls__',
'changes',
'comment',
'duration',
'name',
'result',
'start_time',
]
sls = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
for state, ret in sls.items():
for field in running_dict_fields:
self.assertIn(field, ret)
def test_running_dictionary_key_sls(self):
'''
Ensure the __sls__ key is either null or a string
'''
sls1 = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
sls2 = self.run_function('state.sls', mods='gndn')
for state, ret in sls1.items():
self.assertTrue(isinstance(ret['__sls__'], type(None)))
for state, ret in sls2.items():
self.assertTrue(isinstance(ret['__sls__'], str))
def _remove_request_cache_file(self):
'''
remove minion state request file
'''
cache_file = os.path.join(self.get_config('minion')['cachedir'], 'req_state.p')
if os.path.exists(cache_file):
os.remove(cache_file)
def test_request(self):
'''
verify sending a state request to the minion(s)
'''
self._remove_request_cache_file()
ret = self.run_function('state.request', mods='modules.state.requested')
result = ret['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_check_request(self):
'''
verify checking a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.check_request')
result = ret['default']['test_run']['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_clear_request(self):
'''
verify clearing a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.clear_request')
self.assertTrue(ret)
def test_run_request_succeeded(self):
'''
verify running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
if salt.utils.is_windows():
self.run_function('state.request', mods='modules.state.requested_win')
else:
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.run_request')
if salt.utils.is_windows():
key = 'cmd_|-count_root_dir_contents_|-Get-ChildItem C:\\\\ | Measure-Object | %{$_.Count}_|-run'
else:
key = 'cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run'
result = ret[key]['result']
self.assertTrue(result)
def test_run_request_failed_no_request_staged(self):
'''
verify not running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
self.run_function('state.clear_request')
ret = self.run_function('state.run_request')
self.assertEqual(ret, {})
    def test_issue_1896_file_append_source(self):
        '''
        Verify that we can append a file's contents

        Applies testappend, then step-1 and step-2 on top of it, and checks
        the resulting file against the expected bash snippet.  Re-applying
        the steps in the opposite order must not duplicate any text.
        '''
        testfile = os.path.join(TMP, 'test.append')
        # Start from a clean slate.
        if os.path.isfile(testfile):
            os.unlink(testfile)
        ret = self.run_function('state.sls', mods='testappend')
        self.assertSaltTrueReturn(ret)
        ret = self.run_function('state.sls', mods='testappend.step-1')
        self.assertSaltTrueReturn(ret)
        ret = self.run_function('state.sls', mods='testappend.step-2')
        self.assertSaltTrueReturn(ret)
        with salt.utils.fopen(testfile, 'r') as fp_:
            testfile_contents = fp_.read()
        contents = textwrap.dedent('''\
            # set variable identifying the chroot you work in (used in the prompt below)
            if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
                debian_chroot=$(cat /etc/debian_chroot)
            fi
            # enable bash completion in interactive shells
            if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
                . /etc/bash_completion
            fi
            ''')
        # On Windows the appended file uses CRLF; normalize the expectation.
        if salt.utils.is_windows():
            new_contents = contents.splitlines()
            contents = os.linesep.join(new_contents)
            contents += os.linesep
        self.assertMultiLineEqual(
            contents, testfile_contents)
        # Re-append switching order
        ret = self.run_function('state.sls', mods='testappend.step-2')
        self.assertSaltTrueReturn(ret)
        ret = self.run_function('state.sls', mods='testappend.step-1')
        self.assertSaltTrueReturn(ret)
        with salt.utils.fopen(testfile, 'r') as fp_:
            testfile_contents = fp_.read()
        self.assertMultiLineEqual(contents, testfile_contents)
def test_issue_1876_syntax_error(self):
'''
verify that we catch the following syntax error::
/tmp/salttest/issue-1876:
file:
- managed
- source: salt://testfile
file.append:
- text: foo
'''
testfile = os.path.join(TMP, 'issue-1876')
sls = self.run_function('state.sls', mods='issue-1876')
self.assertIn(
'ID \'{0}\' in SLS \'issue-1876\' contains multiple state '
'declarations of the same type'.format(testfile),
sls
)
    def test_issue_1879_too_simple_contains_check(self):
        '''
        Append step-1 and step-2 to the issue-1879 file, then re-apply both
        steps and verify the text is not duplicated by a too-simple
        "contains" check.
        '''
        expected = textwrap.dedent('''\
            # set variable identifying the chroot you work in (used in the prompt below)
            if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
                debian_chroot=$(cat /etc/debian_chroot)
            fi
            # enable bash completion in interactive shells
            if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
                . /etc/bash_completion
            fi
            ''')
        # Windows appends with CRLF; rewrite the expectation accordingly.
        if salt.utils.is_windows():
            new_contents = expected.splitlines()
            expected = os.linesep.join(new_contents)
            expected += os.linesep
        testfile = os.path.join(TMP, 'issue-1879')
        # Delete if existing
        if os.path.isfile(testfile):
            os.unlink(testfile)
        # Create the file
        ret = self.run_function('state.sls', mods='issue-1879', timeout=120)
        self.assertSaltTrueReturn(ret)
        # The first append
        ret = self.run_function(
            'state.sls', mods='issue-1879.step-1', timeout=120
        )
        self.assertSaltTrueReturn(ret)
        # The second append
        ret = self.run_function(
            'state.sls', mods='issue-1879.step-2', timeout=120
        )
        self.assertSaltTrueReturn(ret)
        # Does it match?
        try:
            with salt.utils.fopen(testfile, 'r') as fp_:
                contents = fp_.read()
            self.assertMultiLineEqual(expected, contents)
            # Make sure we don't re-append existing text
            ret = self.run_function(
                'state.sls', mods='issue-1879.step-1', timeout=120
            )
            self.assertSaltTrueReturn(ret)
            ret = self.run_function(
                'state.sls', mods='issue-1879.step-2', timeout=120
            )
            self.assertSaltTrueReturn(ret)
            with salt.utils.fopen(testfile, 'r') as fp_:
                contents = fp_.read()
            self.assertMultiLineEqual(expected, contents)
        except Exception:
            # Keep a copy of the offending file for post-mortem inspection.
            if os.path.exists(testfile):
                shutil.copy(testfile, testfile + '.bak')
            raise
        finally:
            if os.path.exists(testfile):
                os.unlink(testfile)
def test_include(self):
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'to-include-test', 'exclude-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='include-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['to-include-test']))
self.assertFalse(os.path.isfile(pillar['exclude-test']))
def test_exclude(self):
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'exclude-test', 'to-include-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='exclude-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['exclude-test']))
self.assertFalse(os.path.isfile(pillar['to-include-test']))
    @skipIf(salt.utils.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed')
    def test_issue_2068_template_str(self):
        '''
        Apply the issue-2068 SLS via state.sls, state.template_str and
        state.template, both without and with dots in the state IDs.
        '''
        venv_dir = os.path.join(
            TMP, 'issue-2068-template-str'
        )
        try:
            ret = self.run_function(
                'state.sls', mods='issue-2068-template-str-no-dot',
                timeout=120
            )
            self.assertSaltTrueReturn(ret)
        finally:
            # Always drop the virtualenv the state created.
            if os.path.isdir(venv_dir):
                shutil.rmtree(venv_dir)
        # Let's load the template from the filesystem. If running this state
        # with state.sls works, so should using state.template_str
        template_path = os.path.join(
            os.path.dirname(os.path.dirname(__file__)),
            'files', 'file', 'base', 'issue-2068-template-str-no-dot.sls'
        )
        with salt.utils.fopen(template_path, 'r') as fp_:
            template = fp_.read()
        ret = self.run_function(
            'state.template_str', [template], timeout=120
        )
        self.assertSaltTrueReturn(ret)
        # Now using state.template
        ret = self.run_function(
            'state.template', [template_path], timeout=120
        )
        self.assertSaltTrueReturn(ret)
        # Now the problematic #2068 including dot's
        ret = self.run_function(
            'state.sls', mods='issue-2068-template-str', timeout=120
        )
        self.assertSaltTrueReturn(ret)
        # Let's load the template from the filesystem. If running this state
        # with state.sls works, so should using state.template_str
        template_path = os.path.join(
            os.path.dirname(os.path.dirname(__file__)),
            'files', 'file', 'base', 'issue-2068-template-str.sls'
        )
        with salt.utils.fopen(template_path, 'r') as fp_:
            template = fp_.read()
        ret = self.run_function(
            'state.template_str', [template], timeout=120
        )
        self.assertSaltTrueReturn(ret)
        # Now using state.template
        ret = self.run_function(
            'state.template', [template_path], timeout=120
        )
        self.assertSaltTrueReturn(ret)
    def test_template_invalid_items(self):
        '''
        include/exclude/extends declarations must be rejected when rendering
        a single template string.
        '''
        TEMPLATE = textwrap.dedent('''\
            {0}:
              - issue-2068-template-str
            /tmp/test-template-invalid-items:
              file:
                - managed
                - source: salt://testfile
            ''')
        for item in ('include', 'exclude', 'extends'):
            ret = self.run_function(
                'state.template_str', [TEMPLATE.format(item)]
            )
            # Each invalid declaration must yield exactly one error string.
            self.assertTrue(isinstance(ret, list))
            self.assertNotEqual(ret, [])
            self.assertEqual(
                ['The \'{0}\' declaration found on \'<template-str>\' is '
                 'invalid when rendering single templates'.format(item)],
                ret
            )
def test_pydsl(self):
'''
Test the basics of the pydsl
'''
ret = self.run_function('state.sls', mods='pydsl-1')
self.assertSaltTrueReturn(ret)
def test_issues_7905_and_8174_sls_syntax_error(self):
'''
Call sls file with yaml syntax error.
Ensure theses errors are detected and presented to the user without
stack traces.
'''
ret = self.run_function('state.sls', mods='syntax.badlist')
self.assertEqual(ret, [
'State \'A\' in SLS \'syntax.badlist\' is not formed as a list'
])
ret = self.run_function('state.sls', mods='syntax.badlist2')
self.assertEqual(ret, [
'State \'C\' in SLS \'syntax.badlist2\' is not formed as a list'
])
    def test_requisites_mixed_require_prereq_use(self):
        '''
        Call sls file containing several requisites.

        Checks the execution order produced by mixing require, prereq and
        use requisites.  NOTE: ``expected_result`` and
        ``expected_req_use_result`` are only referenced by the scenarios
        commented out below (known-broken recursion cases, see #8785).
        '''
        expected_simple_result = {
            'cmd_|-A_|-echo A_|-run': {
                '__run_num__': 2,
                'comment': 'Command "echo A" run',
                'result': True,
                'changes': True},
            'cmd_|-B_|-echo B_|-run': {
                '__run_num__': 1,
                'comment': 'Command "echo B" run',
                'result': True,
                'changes': True},
            'cmd_|-C_|-echo C_|-run': {
                '__run_num__': 0,
                'comment': 'Command "echo C" run',
                'result': True,
                'changes': True}
        }
        expected_result = {
            'cmd_|-A_|-echo A fifth_|-run': {
                '__run_num__': 4,
                'comment': 'Command "echo A fifth" run',
                'result': True,
                'changes': True},
            'cmd_|-B_|-echo B third_|-run': {
                '__run_num__': 2,
                'comment': 'Command "echo B third" run',
                'result': True,
                'changes': True},
            'cmd_|-C_|-echo C second_|-run': {
                '__run_num__': 1,
                'comment': 'Command "echo C second" run',
                'result': True,
                'changes': True},
            'cmd_|-D_|-echo D first_|-run': {
                '__run_num__': 0,
                'comment': 'Command "echo D first" run',
                'result': True,
                'changes': True},
            'cmd_|-E_|-echo E fourth_|-run': {
                '__run_num__': 3,
                'comment': 'Command "echo E fourth" run',
                'result': True,
                'changes': True}
        }
        expected_req_use_result = {
            'cmd_|-A_|-echo A_|-run': {
                '__run_num__': 1,
                'comment': 'Command "echo A" run',
                'result': True,
                'changes': True},
            'cmd_|-B_|-echo B_|-run': {
                '__run_num__': 4,
                'comment': 'Command "echo B" run',
                'result': True,
                'changes': True},
            'cmd_|-C_|-echo C_|-run': {
                '__run_num__': 0,
                'comment': 'Command "echo C" run',
                'result': True,
                'changes': True},
            'cmd_|-D_|-echo D_|-run': {
                '__run_num__': 5,
                'comment': 'Command "echo D" run',
                'result': True,
                'changes': True},
            'cmd_|-E_|-echo E_|-run': {
                '__run_num__': 2,
                'comment': 'Command "echo E" run',
                'result': True,
                'changes': True},
            'cmd_|-F_|-echo F_|-run': {
                '__run_num__': 3,
                'comment': 'Command "echo F" run',
                'result': True,
                'changes': True}
        }
        ret = self.run_function('state.sls', mods='requisites.mixed_simple')
        result = self.normalize_ret(ret)
        self.assertReturnNonEmptySaltType(ret)
        self.assertEqual(expected_simple_result, result)
        # test Traceback recursion prereq+require #8785
        # TODO: this is actually failing badly
        #ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error2')
        #self.assertEqual(
        #    ret,
        #    ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
        #)
        # test Infinite recursion prereq+require #8785 v2
        # TODO: this is actually failing badly
        #ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error3')
        #self.assertEqual(
        #    ret,
        #    ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
        #)
        # test Infinite recursion prereq+require #8785 v3
        # TODO: this is actually failing badly, and expected result is maybe not a recursion
        #ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error4')
        #self.assertEqual(
        #    ret,
        #    ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
        #)
        # undetected infinite loopS prevents this test from running...
        # TODO: this is actually failing badly
        #ret = self.run_function('state.sls', mods='requisites.mixed_complex1')
        #result = self.normalize_ret(ret)
        #self.assertEqual(expected_result, result)
def test_watch_in(self):
'''
test watch_in requisite when there is a success
'''
ret = self.run_function('state.sls', mods='requisites.watch_in')
changes = 'test_|-return_changes_|-return_changes_|-succeed_with_changes'
watch = 'test_|-watch_states_|-watch_states_|-succeed_without_changes'
self.assertEqual(ret[changes]['__run_num__'], 0)
self.assertEqual(ret[watch]['__run_num__'], 2)
self.assertEqual('Watch statement fired.', ret[watch]['comment'])
self.assertEqual('Something pretended to change',
ret[changes]['changes']['testing']['new'])
def test_watch_in_failure(self):
'''
test watch_in requisite when there is a failure
'''
ret = self.run_function('state.sls', mods='requisites.watch_in_failure')
fail = 'test_|-return_changes_|-return_changes_|-fail_with_changes'
watch = 'test_|-watch_states_|-watch_states_|-succeed_without_changes'
self.assertEqual(False, ret[fail]['result'])
self.assertEqual('One or more requisite failed: requisites.watch_in_failure.return_changes',
ret[watch]['comment'])
def normalize_ret(self, ret):
'''
Normalize the return to the format that we'll use for result checking
'''
result = {}
for item, descr in six.iteritems(ret):
result[item] = {
'__run_num__': descr['__run_num__'],
'comment': descr['comment'],
'result': descr['result'],
'changes': descr['changes'] != {} # whether there where any changes
}
return result
    def test_requisites_require_ordering_and_errors(self):
        '''
        Call sls file containing several require_in and require.

        Ensure that some of them are failing and that the order is right.
        States F, G and H reference missing requisites and must fail with a
        "requisites were not found" comment while still being ordered last.
        '''
        expected_result = {
            'cmd_|-A_|-echo A fifth_|-run': {
                '__run_num__': 4,
                'comment': 'Command "echo A fifth" run',
                'result': True,
                'changes': True,
            },
            'cmd_|-B_|-echo B second_|-run': {
                '__run_num__': 1,
                'comment': 'Command "echo B second" run',
                'result': True,
                'changes': True,
            },
            'cmd_|-C_|-echo C third_|-run': {
                '__run_num__': 2,
                'comment': 'Command "echo C third" run',
                'result': True,
                'changes': True,
            },
            'cmd_|-D_|-echo D first_|-run': {
                '__run_num__': 0,
                'comment': 'Command "echo D first" run',
                'result': True,
                'changes': True,
            },
            'cmd_|-E_|-echo E fourth_|-run': {
                '__run_num__': 3,
                'comment': 'Command "echo E fourth" run',
                'result': True,
                'changes': True,
            },
            'cmd_|-F_|-echo F_|-run': {
                '__run_num__': 5,
                'comment': 'The following requisites were not found:\n'
                           + ' require:\n'
                           + ' foobar: A\n',
                'result': False,
                'changes': False,
            },
            'cmd_|-G_|-echo G_|-run': {
                '__run_num__': 6,
                'comment': 'The following requisites were not found:\n'
                           + ' require:\n'
                           + ' cmd: Z\n',
                'result': False,
                'changes': False,
            },
            'cmd_|-H_|-echo H_|-run': {
                '__run_num__': 7,
                'comment': 'The following requisites were not found:\n'
                           + ' require:\n'
                           + ' cmd: Z\n',
                'result': False,
                'changes': False,
            }
        }
        ret = self.run_function('state.sls', mods='requisites.require')
        result = self.normalize_ret(ret)
        self.assertReturnNonEmptySaltType(ret)
        self.assertEqual(expected_result, result)
        ret = self.run_function('state.sls', mods='requisites.require_error1')
        self.assertEqual(ret, [
            "Cannot extend ID 'W' in 'base:requisites.require_error1'. It is not part of the high state.\nThis is likely due to a missing include statement or an incorrectly typed ID.\nEnsure that a state with an ID of 'W' is available\nin environment 'base' and to SLS 'requisites.require_error1'"
        ])
        # issue #8235
        # FIXME: Why is require enforcing list syntax while require_in does not?
        # And why preventing it?
        # Currently this state fails, should return C/B/A
        result = {}
        ret = self.run_function('state.sls', mods='requisites.require_simple_nolist')
        self.assertEqual(ret, [
            'The require statement in state \'B\' in SLS '
            + '\'requisites.require_simple_nolist\' needs to be formed as a list'
        ])
        # commented until a fix is made for issue #8772
        # TODO: this test actually fails
        #ret = self.run_function('state.sls', mods='requisites.require_error2')
        #self.assertEqual(ret, [
        #    'Cannot extend state foobar for ID A in "base:requisites.require_error2".'
        #    + ' It is not part of the high state.'
        #])
        ret = self.run_function('state.sls', mods='requisites.require_recursion_error1')
        self.assertEqual(
            ret,
            ['A recursive requisite was found, SLS "requisites.require_recursion_error1" ID "B" ID "A"']
        )
    def test_requisites_full_sls(self):
        '''
        Test the sls special command in requisites
        '''
        expected_result = {
            'cmd_|-A_|-echo A_|-run': {
                '__run_num__': 2,
                'comment': 'Command "echo A" run',
                'result': True,
                'changes': True},
            'cmd_|-B_|-echo B_|-run': {
                '__run_num__': 0,
                'comment': 'Command "echo B" run',
                'result': True,
                'changes': True},
            'cmd_|-C_|-echo C_|-run': {
                '__run_num__': 1,
                'comment': 'Command "echo C" run',
                'result': True,
                'changes': True},
        }
        # Requiring a whole SLS must order A after everything that SLS runs.
        ret = self.run_function('state.sls', mods='requisites.fullsls_require')
        self.assertReturnNonEmptySaltType(ret)
        result = self.normalize_ret(ret)
        self.assertEqual(expected_result, result)
        # issue #8233: traceback on prereq sls
        # TODO: not done
        #ret = self.run_function('state.sls', mods='requisites.fullsls_prereq')
        #self.assertEqual(['sls command can only be used with require requisite'], ret)
    def test_requisites_prereq_simple_ordering_and_errors(self):
        '''
        Call sls file containing several prereq_in and prereq.

        Ensure that some of them are failing and that the order is right.
        States I and J reference missing prereq targets and must fail with a
        "requisites were not found" comment.
        '''
        expected_result_simple = {
            'cmd_|-A_|-echo A third_|-run': {
                '__run_num__': 2,
                'comment': 'Command "echo A third" run',
                'result': True,
                'changes': True},
            'cmd_|-B_|-echo B first_|-run': {
                '__run_num__': 0,
                'comment': 'Command "echo B first" run',
                'result': True,
                'changes': True},
            'cmd_|-C_|-echo C second_|-run': {
                '__run_num__': 1,
                'comment': 'Command "echo C second" run',
                'result': True,
                'changes': True},
            'cmd_|-I_|-echo I_|-run': {
                '__run_num__': 3,
                'comment': 'The following requisites were not found:\n'
                           + ' prereq:\n'
                           + ' cmd: Z\n',
                'result': False,
                'changes': False},
            'cmd_|-J_|-echo J_|-run': {
                '__run_num__': 4,
                'comment': 'The following requisites were not found:\n'
                           + ' prereq:\n'
                           + ' foobar: A\n',
                'result': False,
                'changes': False}
        }
        expected_result_simple2 = {
            'cmd_|-A_|-echo A_|-run': {
                '__run_num__': 1,
                'comment': 'Command "echo A" run',
                'result': True,
                'changes': True},
            'cmd_|-B_|-echo B_|-run': {
                '__run_num__': 2,
                'comment': 'Command "echo B" run',
                'result': True,
                'changes': True},
            'cmd_|-C_|-echo C_|-run': {
                '__run_num__': 0,
                'comment': 'Command "echo C" run',
                'result': True,
                'changes': True},
            'cmd_|-D_|-echo D_|-run': {
                '__run_num__': 3,
                'comment': 'Command "echo D" run',
                'result': True,
                'changes': True},
            'cmd_|-E_|-echo E_|-run': {
                '__run_num__': 4,
                'comment': 'Command "echo E" run',
                'result': True,
                'changes': True}
        }
        expected_result_simple3 = {
            'cmd_|-A_|-echo A first_|-run': {
                '__run_num__': 0,
                'comment': 'Command "echo A first" run',
                'result': True,
                'changes': True,
            },
            'cmd_|-B_|-echo B second_|-run': {
                '__run_num__': 1,
                'comment': 'Command "echo B second" run',
                'result': True,
                'changes': True,
            },
            'cmd_|-C_|-echo C third_|-wait': {
                '__run_num__': 2,
                'comment': '',
                'result': True,
                'changes': False,
            }
        }
        expected_result_complex = {
            'cmd_|-A_|-echo A fourth_|-run': {
                '__run_num__': 3,
                'comment': 'Command "echo A fourth" run',
                'result': True,
                'changes': True},
            'cmd_|-B_|-echo B first_|-run': {
                '__run_num__': 0,
                'comment': 'Command "echo B first" run',
                'result': True,
                'changes': True},
            'cmd_|-C_|-echo C second_|-run': {
                '__run_num__': 1,
                'comment': 'Command "echo C second" run',
                'result': True,
                'changes': True},
            'cmd_|-D_|-echo D third_|-run': {
                '__run_num__': 2,
                'comment': 'Command "echo D third" run',
                'result': True,
                'changes': True},
        }
        ret = self.run_function('state.sls', mods='requisites.prereq_simple')
        self.assertReturnNonEmptySaltType(ret)
        result = self.normalize_ret(ret)
        self.assertEqual(expected_result_simple, result)
        # same test, but not using lists in yaml syntax
        # TODO: issue #8235, prereq ignored when not used in list syntax
        # Currently fails badly with :
        # TypeError encountered executing state.sls: string indices must be integers, not str.
        #expected_result_simple.pop('cmd_|-I_|-echo I_|-run')
        #expected_result_simple.pop('cmd_|-J_|-echo J_|-run')
        #ret = self.run_function('state.sls', mods='requisites.prereq_simple_nolist')
        #result = self.normalize_ret(ret)
        #self.assertEqual(expected_result_simple, result)
        ret = self.run_function('state.sls', mods='requisites.prereq_simple2')
        result = self.normalize_ret(ret)
        self.assertReturnNonEmptySaltType(ret)
        self.assertEqual(expected_result_simple2, result)
        ret = self.run_function('state.sls', mods='requisites.prereq_simple3')
        result = self.normalize_ret(ret)
        self.assertReturnNonEmptySaltType(ret)
        self.assertEqual(expected_result_simple3, result)
        #ret = self.run_function('state.sls', mods='requisites.prereq_error_nolist')
        #self.assertEqual(
        #    ret,
        #    ['Cannot extend ID Z in "base:requisites.prereq_error_nolist".'
        #    + ' It is not part of the high state.']
        #)
        ret = self.run_function('state.sls', mods='requisites.prereq_compile_error1')
        self.assertReturnNonEmptySaltType(ret)
        self.assertEqual(
            ret['cmd_|-B_|-echo B_|-run']['comment'],
            'The following requisites were not found:\n'
            + ' prereq:\n'
            + ' foobar: A\n'
        )
        ret = self.run_function('state.sls', mods='requisites.prereq_compile_error2')
        self.assertReturnNonEmptySaltType(ret)
        self.assertEqual(
            ret['cmd_|-B_|-echo B_|-run']['comment'],
            'The following requisites were not found:\n'
            + ' prereq:\n'
            + ' foobar: C\n'
        )
        ret = self.run_function('state.sls', mods='requisites.prereq_complex')
        result = self.normalize_ret(ret)
        self.assertEqual(expected_result_complex, result)
        # issue #8210 : prereq recursion undetected
        # TODO: this test fails
        #ret = self.run_function('state.sls', mods='requisites.prereq_recursion_error')
        #self.assertEqual(
        #    ret,
        #    ['A recursive requisite was found, SLS "requisites.prereq_recursion_error" ID "B" ID "A"']
        #)
def test_infinite_recursion_sls_prereq(self):
ret = self.run_function('state.sls', mods='requisites.prereq_sls_infinite_recursion')
self.assertSaltTrueReturn(ret)
def test_requisites_use(self):
'''
Call sls file containing several use_in and use.
'''
# TODO issue #8235 & #8774 some examples are still commented in the test file
ret = self.run_function('state.sls', mods='requisites.use')
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr['comment'], 'onlyif execution failed')
# TODO: issue #8802 : use recursions undetected
# issue is closed as use does not actually inherit requisites
# if chain-use is added after #8774 resolution theses tests would maybe become useful
#ret = self.run_function('state.sls', mods='requisites.use_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "B" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_recursion2')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion2"'
# + ' ID "C" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_auto_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "A" ID "A"'
#])
def test_get_file_from_env_in_top_match(self):
tgt = os.path.join(TMP, 'prod-cheese-file')
try:
ret = self.run_function(
'state.highstate', minion_tgt='sub_minion'
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(tgt))
with salt.utils.fopen(tgt, 'r') as cheese:
data = cheese.read()
self.assertIn('Gromit', data)
self.assertIn('Comte', data)
finally:
if os.path.islink(tgt):
os.unlink(tgt)
# onchanges tests
def test_onchanges_requisite(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# First, test the result of the state run when changes are expected to happen
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_multiple(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls',
mods='requisites.onchanges_multiple')
# First, test the result of the state run when two changes are expected to happen
test_data = state_run['cmd_|-test_two_changing_states_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when two changes are not expected to happen
test_data = state_run['cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
# Finally, test the result of the state run when only one of the onchanges requisites changes.
test_data = state_run['cmd_|-test_one_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_in_requisite(self):
'''
Tests a simple state using the onchanges_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_in_simple')
# First, test the result of the state run of when changes are expected to happen
test_data = state_run['cmd_|-test_changes_expected_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_changes_not_expected_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_with_duration(self):
'''
Tests a simple state using the onchanges requisite
the state will not run but results will include duration
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# Then, test the result of the state run when changes are not expected to happen
# and ensure duration is included in the results
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
# onfail tests
def test_onfail_requisite(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_multiple_onfail_requisite(self):
'''
test to ensure state is run even if only one
of the onfails fails. This is a test for the issue:
https://github.com/saltstack/salt/issues/22370
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple')
retcode = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['stdout']
self.assertEqual(stdout, 'itworked')
def test_onfail_in_requisite(self):
'''
Tests a simple state using the onfail_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_in_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_onfail_requisite_with_duration(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
def test_multiple_onfail_requisite_with_required(self):
'''
test to ensure multiple states are run
when specified as onfails for a single state.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple_required')
retcode = state_run['cmd_|-b_|-echo b_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
retcode = state_run['cmd_|-c_|-echo c_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
retcode = state_run['cmd_|-d_|-echo d_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-b_|-echo b_|-run']['changes']['stdout']
self.assertEqual(stdout, 'b')
stdout = state_run['cmd_|-c_|-echo c_|-run']['changes']['stdout']
self.assertEqual(stdout, 'c')
stdout = state_run['cmd_|-d_|-echo d_|-run']['changes']['stdout']
self.assertEqual(stdout, 'd')
def test_multiple_onfail_requisite_with_required_no_run(self):
'''
test to ensure multiple states are not run
when specified as onfails for a single state
which fails.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple_required_no_run')
expected = 'State was not run because onfail req did not change'
stdout = state_run['cmd_|-b_|-echo b_|-run']['comment']
self.assertEqual(stdout, expected)
stdout = state_run['cmd_|-c_|-echo c_|-run']['comment']
self.assertEqual(stdout, expected)
stdout = state_run['cmd_|-d_|-echo d_|-run']['comment']
self.assertEqual(stdout, expected)
# listen tests
def test_listen_requisite(self):
'''
Tests a simple state using the listen requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite(self):
'''
Tests a simple state using the listen_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite_resolution(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# Test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listen_in_resolution_|-echo "Successful listen_in resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_requisite_resolution(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# Both listeners are expected to trigger
listener_state = 'cmd_|-listener_test_listening_resolution_one_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
listener_state = 'cmd_|-listener_test_listening_resolution_two_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_in_requisite_resolution_names(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
and resolves magic names state variable
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_names')
self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run)
self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run)
def test_listen_requisite_resolution_names(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
and resolves magic names state variable
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_names')
self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run)
self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run)
def test_issue_30820_requisite_in_match_by_name(self):
'''
This tests the case where a requisite_in matches by name instead of ID
See https://github.com/saltstack/salt/issues/30820 for more info
'''
state_run = self.run_function(
'state.sls',
mods='requisites.requisite_in_match_by_name'
)
bar_state = 'cmd_|-bar state_|-echo bar_|-wait'
self.assertIn(bar_state, state_run)
self.assertEqual(state_run[bar_state]['comment'],
'Command "echo bar" run')
def test_retry_option_defaults(self):
'''
test the retry option on a simple state with defaults
ensure comment is as expected
ensure state duration is greater than default retry_interval (30 seconds)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_defaults'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Specified path /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 30)
self.assertEqual(state_run[retry_state]['result'], False)
def test_retry_option_custom(self):
'''
test the retry option on a simple state with custom retry values
ensure comment is as expected
ensure state duration is greater than custom defined interval * (retries - 1)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_custom'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Attempt 2: Returned a result of "False", with the following comment: "Specified'
' path /path/to/a/non-existent/file.txt does not exist"\nAttempt 3: Returned'
' a result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nAttempt 4: Returned a'
' result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nSpecified path'
' /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 40)
self.assertEqual(state_run[retry_state]['result'], False)
def test_retry_option_success(self):
'''
test a state with the retry option that should return True immedietly (i.e. no retries)
'''
testfile = os.path.join(TMP, 'retry_file')
state_run = self.run_function(
'state.sls',
mods='retry.retry_success'
)
os.unlink(testfile)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertNotIn('Attempt', state_run[retry_state]['comment'])
def run_create(self):
'''
helper function to wait 30 seconds and then create the temp retry file
'''
testfile = os.path.join(TMP, 'retry_file')
time.sleep(30)
open(testfile, 'a').close() # pylint: disable=resource-leakage
def test_retry_option_eventual_success(self):
'''
test a state with the retry option that should return True after at least 4 retry attmempt
but never run 15 attempts
'''
testfile = os.path.join(TMP, 'retry_file')
create_thread = threading.Thread(target=self.run_create)
create_thread.start()
state_run = self.run_function(
'state.sls',
mods='retry.retry_success2'
)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertIn('Attempt 1:', state_run[retry_state]['comment'])
self.assertIn('Attempt 2:', state_run[retry_state]['comment'])
self.assertIn('Attempt 3:', state_run[retry_state]['comment'])
self.assertIn('Attempt 4:', state_run[retry_state]['comment'])
self.assertNotIn('Attempt 15:', state_run[retry_state]['comment'])
self.assertEqual(state_run[retry_state]['result'], True)
def test_issue_38683_require_order_failhard_combination(self):
'''
This tests the case where require, order, and failhard are all used together in a state definition.
Previously, the order option, which used in tandem with require and failhard, would cause the state
compiler to stacktrace. This exposed a logic error in the ``check_failhard`` function of the state
compiler. With the logic error resolved, this test should now pass.
See https://github.com/saltstack/salt/issues/38683 for more information.
'''
state_run = self.run_function(
'state.sls',
mods='requisites.require_order_failhard_combo'
)
state_id = 'test_|-b_|-b_|-fail_with_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'], 'Failure!')
self.assertFalse(state_run[state_id]['result'])
def test_issue_46762_prereqs_on_a_state_with_unfulfilled_requirements(self):
'''
This tests the case where state C requires state A, which fails.
State C is a pre-required state for State B.
Since state A fails, state C will not run because the requisite failed,
therefore state B will not run because state C failed to run.
See https://github.com/saltstack/salt/issues/46762 for
more information.
'''
state_run = self.run_function(
'state.sls',
mods='issue-46762'
)
state_id = 'test_|-a_|-a_|-fail_without_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'Failure!')
self.assertFalse(state_run[state_id]['result'])
state_id = 'test_|-b_|-b_|-nop'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'One or more requisite failed: issue-46762.c')
self.assertFalse(state_run[state_id]['result'])
state_id = 'test_|-c_|-c_|-nop'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'One or more requisite failed: issue-46762.a')
self.assertFalse(state_run[state_id]['result'])
def test_state_nonbase_environment(self):
'''
test state.sls with saltenv using a nonbase environment
with a salt source
'''
file_name = os.path.join(TMP, 'nonbase_env')
state_run = self.run_function(
'state.sls',
mods='non-base-env',
saltenv='prod'
)
state_id = 'file_|-test_file_|-{0}_|-managed'.format(file_name)
self.assertEqual(state_run[state_id]['comment'],
'File {0} updated'.format(file_name))
self.assertTrue(
state_run['file_|-test_file_|-{0}_|-managed'.format(file_name)]['result'])
self.assertTrue(os.path.isfile(file_name))
def tearDown(self):
nonbase_file = os.path.join(TMP, 'nonbase_env')
if os.path.isfile(nonbase_file):
os.remove(nonbase_file)
|
api_handler.py | import cgi
import json
import logging
from functools import partial
from http.server import BaseHTTPRequestHandler
from typing import Optional
from src.notifier.notify_manager import NotifyManager
from src.notifier import Event, EventType, EventPriority, EventService
import http.server
import socketserver
import threading
PORT = 8925
class RequestHandler(BaseHTTPRequestHandler):
def __init__(self, notify_manager, *args, **kwargs):
self.notify_manager = notify_manager
# BaseHTTPRequestHandler calls do_GET **inside** __init__ !!!
# So we have to call super().__init__ after setting attributes.
super().__init__(*args, **kwargs)
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
def do_HEAD(self):
self._set_headers()
# GET sends back a Hello world message
def do_GET(self):
self._set_headers()
self.wfile.write(json.dumps({'hello': 'world', 'received': 'ok'}).encode('utf-8'))
# POST echoes the message adding a JSON field
def do_POST(self):
ctype, pdict = cgi.parse_header(self.headers['content-type'])
# refuse to receive non-json content
if ctype != 'application/json':
self.send_response(400)
self.end_headers()
return
# read the message and convert it into a python dictionary
length = int(self.headers['content-length'])
message = json.loads(self.rfile.read(length))
if 'type' in message:
event_type = EventType[message['type'].upper()]
else:
self.send_response(400)
self.end_headers()
return
if 'priority' in message:
event_priority = EventPriority[message['priority'].upper()]
else:
self.send_response(400)
self.end_headers()
return
if 'service' in message:
event_service = EventService[message['service'].upper()]
else:
self.send_response(400)
self.end_headers()
return
if not 'message' in message:
self.send_response(400)
self.end_headers()
return
event = Event(type=event_type, priority=event_priority, service=event_service, message=message['message'])
self.notify_manager.process_events([event])
# send the message back
self._set_headers()
self.wfile.write("Event received and notifications sent.".encode('utf-8'))
class ApiHandler():
def __init__(self, notify_manager: NotifyManager):
self._notify_manager = notify_manager
handler = partial(RequestHandler, notify_manager)
self.httpd = socketserver.TCPServer(("", PORT), handler)
self.thread = threading.Thread(target=self.start_server)
self.thread.start()
def start_server(self):
logging.info("Starting API event receiver on port 8925 within the container only.")
try:
self.httpd.serve_forever()
except KeyboardInterrupt:
pass
finally:
# Clean-up server (close socket, etc.)
self.httpd.server_close()
def stop_server(self):
logging.info("Stopping API event receiver on port 8925 within the container only.")
self.httpd.shutdown()
self.thread.join()
|
fifo_queue_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.FIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class FIFOQueueTest(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 10 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
shared_name="foo",
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
tensor_shape.TensorShape([5, 8])),
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testEnqueue(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueHalf(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float16)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=(3, 2))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testEnqueueManyWithShape(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(
10, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
@test_util.run_in_graph_and_eager_modes
def testMultipleDequeues(self):
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue_many([[1, 2, 3]]))
a, b, c = self.evaluate([q.dequeue(), q.dequeue(), q.dequeue()])
self.assertAllEqual(set([1, 2, 3]), set([a, b, c]))
@test_util.run_in_graph_and_eager_modes
def testQueuesDontShare(self):
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue(1))
q2 = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q2.enqueue(2))
self.assertAllEqual(self.evaluate(q2.dequeue()), 2)
self.assertAllEqual(self.evaluate(q.dequeue()), 1)
def testEnqueueDictWithoutNames(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
with self.assertRaisesRegexp(ValueError, "must have names"):
q.enqueue({"a": 12.0})
with self.assertRaisesRegexp(ValueError, "must have names"):
q.enqueue_many({"a": [12.0, 13.0]})
def testParallelEnqueue(self):
with self.test_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = dequeued_t.eval()
self.assertEqual([elems[i]], vals)
def testDequeueHalf(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float16)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = dequeued_t.eval()
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.FIFOQueue(3, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
sess.run(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(sess.run(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
x_val, y_val = sess.run(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, size.eval())
dequeued_t.op.run()
self.assertEqual(0, size.eval())
def testEnqueueMany(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
vals = dequeued_t.eval()
self.assertEqual([elems[i % 4]], vals)
def testEmptyEnqueueMany(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual([0], size_t.eval())
enqueue_op.run()
self.assertEqual([0], size_t.eval())
def testEmptyDequeueMany(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueUpTo(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError("specified shapes"):
q.dequeue_many(0).eval()
def testMultiEnqueueMany(self):
with self.test_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.float32, dtypes_lib.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = sess.run(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testDequeueMany(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], dequeued_t.eval())
self.assertAllEqual(elems[4:8], dequeued_t.eval())
def testDequeueUpToNoBlocking(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], dequeued_t.eval())
self.assertAllEqual(elems[4:8], dequeued_t.eval())
  def testMultiDequeueMany(self):
    """Tests dequeue_many on a two-component queue, including static shapes."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(
          10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
      float_elems = [
          10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
      ]
      int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
                   [15, 16], [17, 18], [19, 20]]
      enqueue_op = q.enqueue_many((float_elems, int_elems))
      dequeued_t = q.dequeue_many(4)
      dequeued_single_t = q.dequeue()

      enqueue_op.run()

      float_val, int_val = sess.run(dequeued_t)
      self.assertAllEqual(float_elems[0:4], float_val)
      self.assertAllEqual(int_elems[0:4], int_val)
      # With a constant batch size, the static shapes are fully defined.
      self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
      self.assertEqual(int_val.shape, dequeued_t[1].get_shape())

      float_val, int_val = sess.run(dequeued_t)
      self.assertAllEqual(float_elems[4:8], float_val)
      self.assertAllEqual(int_elems[4:8], int_val)

      # A plain dequeue after the batched dequeues yields the ninth tuple.
      float_val, int_val = sess.run(dequeued_single_t)
      self.assertAllEqual(float_elems[8], float_val)
      self.assertAllEqual(int_elems[8], int_val)
      self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
      self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
  def testMultiDequeueUpToNoBlocking(self):
    """dequeue_up_to on a two-component queue leaves batch dim unknown."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(
          10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
      float_elems = [
          10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
      ]
      int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
                   [15, 16], [17, 18], [19, 20]]
      enqueue_op = q.enqueue_many((float_elems, int_elems))
      dequeued_t = q.dequeue_up_to(4)

      enqueue_op.run()

      float_val, int_val = sess.run(dequeued_t)
      self.assertAllEqual(float_elems[0:4], float_val)
      self.assertAllEqual(int_elems[0:4], int_val)
      # dequeue_up_to may return fewer elements, so the leading (batch)
      # dimension of the static shape is unknown.
      self.assertEqual([None], dequeued_t[0].get_shape().as_list())
      self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())

      float_val, int_val = sess.run(dequeued_t)
      self.assertAllEqual(float_elems[4:8], float_val)
      self.assertAllEqual(int_elems[4:8], int_val)
def testHighDimension(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, (4, 4, 4, 4))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
  def testEnqueueWrongShape(self):
    """Enqueueing values that mismatch the queue shapes fails at graph time."""
    q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), ((),
                                                                           (2)))

    with self.assertRaises(ValueError):
      q.enqueue(([1, 2], [2, 2]))

    with self.assertRaises(ValueError):
      q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
  def testBatchSizeMismatch(self):
    """enqueue_many rejects components with differing leading batch sizes."""
    q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32,
                                     dtypes_lib.int32), ((), (), ()))

    with self.assertRaises(ValueError):
      q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))

    # An unknown-shape placeholder does not excuse the mismatch between the
    # two statically known components.
    with self.assertRaises(ValueError):
      q.enqueue_many(
          ([1, 2, 3], [1, 2], array_ops.placeholder(dtypes_lib.int32)))

    with self.assertRaises(ValueError):
      q.enqueue_many(
          (array_ops.placeholder(dtypes_lib.int32), [1, 2], [1, 2, 3]))
  def testEnqueueManyEmptyTypeConversion(self):
    """Empty lists passed to enqueue_many are converted to the queue dtypes."""
    q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32), (
        (), ()))
    enq = q.enqueue_many(([], []))
    # Inputs 1 and 2 of the enqueue op are the two component tensors
    # (input 0 is the queue handle).
    self.assertEqual(dtypes_lib.int32, enq.inputs[1].dtype)
    self.assertEqual(dtypes_lib.float32, enq.inputs[2].dtype)
  def testEnqueueWrongType(self):
    """Enqueueing tensors of the wrong dtype fails at graph-construction time."""
    q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32), (
        (), ()))

    # Second component must be float32, not int32.
    with self.assertRaises(ValueError):
      q.enqueue((array_ops.placeholder(dtypes_lib.int32),
                 array_ops.placeholder(dtypes_lib.int32)))

    with self.assertRaises(ValueError):
      q.enqueue_many((array_ops.placeholder(dtypes_lib.int32),
                      array_ops.placeholder(dtypes_lib.int32)))
  def testEnqueueWrongShapeAtRuntime(self):
    """A fed value with the wrong shape raises InvalidArgumentError at run time."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), (
          (2, 2), (3, 3)))
      elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
      # Unknown static shape, so the check can only happen at run time.
      elems_bad = array_ops.placeholder(dtypes_lib.int32)
      enqueue_op = q.enqueue((elems_ok, elems_bad))
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   r"Expected \[3,3\], got \[3,4\]"):
        sess.run([enqueue_op],
                 feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
  def testEnqueueDequeueManyWrongShape(self):
    """enqueue_many with a badly shaped fed component fails at run time."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), (
          (2, 2), (3, 3)))
      elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
      # Unknown static shape, so the check can only happen at run time.
      elems_bad = array_ops.placeholder(dtypes_lib.int32)
      enqueue_op = q.enqueue_many((elems_ok, elems_bad))
      dequeued_t = q.dequeue_many(2)
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "Shape mismatch in tuple component 1. "
                                   r"Expected \[2,3,3\], got \[2,3,4\]"):
        sess.run([enqueue_op],
                 feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
        dequeued_t.eval()
  def testParallelEnqueueMany(self):
    """Runs the same enqueue_many op concurrently from ten threads."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
      elems = [10.0 * x for x in range(100)]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(1000)

      # Enqueue 100 items in parallel on 10 threads.
      def enqueue():
        sess.run(enqueue_op)

      threads = [self.checkedThread(target=enqueue) for _ in range(10)]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()

      # Order across threads is unspecified, so compare as multisets.
      self.assertItemsEqual(dequeued_t.eval(), elems * 10)
  def testParallelDequeueMany(self):
    """Ten threads concurrently dequeue batches and together drain the queue."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
      elems = [10.0 * x for x in range(1000)]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(100)

      enqueue_op.run()

      # Dequeue 100 items in parallel on 10 threads.
      dequeued_elems = []

      def dequeue():
        dequeued_elems.extend(sess.run(dequeued_t))

      threads = [self.checkedThread(target=dequeue) for _ in range(10)]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      # Order across threads is unspecified, so compare as multisets.
      self.assertItemsEqual(elems, dequeued_elems)
  def testParallelDequeueUpTo(self):
    """Parallel dequeue_up_to calls on a closed queue return partial batches."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
      elems = [10.0 * x for x in range(1000)]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      # 101 does not divide 1000, so at least one batch must be short;
      # dequeue_up_to allows that once the queue is closed.
      dequeued_t = q.dequeue_up_to(101)

      enqueue_op.run()
      close_op.run()

      # Dequeue up to 101 items in parallel on 10 threads, from closed queue.
      dequeued_elems = []

      def dequeue():
        dequeued_elems.extend(sess.run(dequeued_t))

      threads = [self.checkedThread(target=dequeue) for _ in range(10)]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      self.assertItemsEqual(elems, dequeued_elems)
  def testParallelEnqueueAndDequeue(self):
    """Interleaves concurrent enqueues and dequeues on a nearly full queue."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(50, dtypes_lib.float32, shapes=())
      # Pre-fill to capacity - 1 so enqueuers and dequeuers contend.
      initial_elements = [10.0] * 49
      q.enqueue_many((initial_elements,)).run()

      enqueue_op = q.enqueue((20.0,))
      dequeued_t = q.dequeue()

      def enqueue():
        for _ in xrange(100):
          sess.run(enqueue_op)

      def dequeue():
        for _ in xrange(100):
          self.assertTrue(sess.run(dequeued_t) in (10.0, 20.0))

      enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
      dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
      for enqueue_thread in enqueue_threads:
        enqueue_thread.start()
      for dequeue_thread in dequeue_threads:
        dequeue_thread.start()
      for enqueue_thread in enqueue_threads:
        enqueue_thread.join()
      for dequeue_thread in dequeue_threads:
        dequeue_thread.join()

      # Dequeue the initial count of elements to clean up.
      cleanup_elems = q.dequeue_many(49).eval()
      for elem in cleanup_elems:
        self.assertTrue(elem in (10.0, 20.0))
  def testMixtureOfEnqueueAndEnqueueMany(self):
    """Randomly mixes enqueue and enqueue_many against a single dequeuer.

    The dequeue thread expects the values 0..249 in order regardless of how
    they were enqueued; close() lets the thread finish cleanly.
    """
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shapes=())
      enqueue_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
      enqueue_op = q.enqueue((enqueue_placeholder,))
      enqueuemany_placeholder = array_ops.placeholder(
          dtypes_lib.int32, shape=(None,))
      enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))

      dequeued_t = q.dequeue()
      close_op = q.close()

      def dequeue():
        for i in xrange(250):
          self.assertEqual(i, sess.run(dequeued_t))

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()

      elements_enqueued = 0
      while elements_enqueued < 250:
        # With equal probability, run Enqueue or enqueue_many.
        if random.random() > 0.5:
          enqueue_op.run({enqueue_placeholder: elements_enqueued})
          elements_enqueued += 1
        else:
          count = random.randint(0, min(20, 250 - elements_enqueued))
          range_to_enqueue = np.arange(
              elements_enqueued, elements_enqueued + count, dtype=np.int32)
          enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
          elements_enqueued += count

      close_op.run()
      dequeue_thread.join()
      self.assertEqual(0, q.size().eval())
  def testMixtureOfDequeueAndDequeueMany(self):
    """Randomly mixes dequeue and dequeue_many against a single enqueuer.

    The main thread consumes the values 0..249 in order regardless of the
    random batch sizes; the enqueue runs on a separate thread because the
    queue capacity (10) is much smaller than the batch (250).
    """
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shapes=())
      enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
      dequeued_t = q.dequeue()
      count_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
      dequeuemany_t = q.dequeue_many(count_placeholder)

      def enqueue():
        sess.run(enqueue_op)

      enqueue_thread = self.checkedThread(target=enqueue)
      enqueue_thread.start()

      elements_dequeued = 0
      while elements_dequeued < 250:
        # With equal probability, run Dequeue or dequeue_many.
        if random.random() > 0.5:
          self.assertEqual(elements_dequeued, dequeued_t.eval())
          elements_dequeued += 1
        else:
          count = random.randint(0, min(20, 250 - elements_dequeued))
          expected_range = np.arange(
              elements_dequeued, elements_dequeued + count, dtype=np.int32)
          self.assertAllEqual(expected_range,
                              dequeuemany_t.eval({
                                  count_placeholder: count
                              }))
          elements_dequeued += count

      q.close().run()
      enqueue_thread.join()
      self.assertEqual(0, q.size().eval())
  def testBlockingDequeueMany(self):
    """dequeue_many blocks until a later enqueue_many supplies the batch."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(4)

      dequeued_elems = []

      def enqueue():
        # The enqueue_op should run after the dequeue op has blocked.
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        sess.run(enqueue_op)

      def dequeue():
        dequeued_elems.extend(sess.run(dequeued_t).tolist())

      enqueue_thread = self.checkedThread(target=enqueue)
      dequeue_thread = self.checkedThread(target=dequeue)
      enqueue_thread.start()
      dequeue_thread.start()
      enqueue_thread.join()
      dequeue_thread.join()

      self.assertAllEqual(elems, dequeued_elems)
  def testBlockingDequeueUpTo(self):
    """dequeue_up_to blocks until a later enqueue_many supplies the batch."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_up_to(4)

      dequeued_elems = []

      def enqueue():
        # The enqueue_op should run after the dequeue op has blocked.
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        sess.run(enqueue_op)

      def dequeue():
        dequeued_elems.extend(sess.run(dequeued_t).tolist())

      enqueue_thread = self.checkedThread(target=enqueue)
      dequeue_thread = self.checkedThread(target=dequeue)
      enqueue_thread.start()
      dequeue_thread.start()
      enqueue_thread.join()
      dequeue_thread.join()

      self.assertAllEqual(elems, dequeued_elems)
  def testDequeueManyWithTensorParameter(self):
    """The batch size of dequeue_many may itself be a dequeued tensor."""
    with self.test_session():
      # Define a first queue that contains integer counts.
      dequeue_counts = [random.randint(1, 10) for _ in range(100)]
      count_q = data_flow_ops.FIFOQueue(100, dtypes_lib.int32, ())
      enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
      total_count = sum(dequeue_counts)

      # Define a second queue that contains total_count elements.
      elems = [random.randint(0, 100) for _ in range(total_count)]
      q = data_flow_ops.FIFOQueue(total_count, dtypes_lib.int32, ())
      enqueue_elems_op = q.enqueue_many((elems,))

      # Define a subgraph that first dequeues a count, then DequeuesMany
      # that number of elements.
      dequeued_t = q.dequeue_many(count_q.dequeue())

      enqueue_counts_op.run()
      enqueue_elems_op.run()

      # Consuming all counts must reproduce the elements in FIFO order.
      dequeued_elems = []
      for _ in dequeue_counts:
        dequeued_elems.extend(dequeued_t.eval())
      self.assertEqual(elems, dequeued_elems)
  def testDequeueFromClosedQueue(self):
    """Remaining elements dequeue after close(); then OutOfRange is raised."""
    with self.test_session():
      q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue()

      enqueue_op.run()
      close_op.run()
      for elem in elems:
        self.assertEqual([elem], dequeued_t.eval())

      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        dequeued_t.eval()
  def testBlockingDequeueFromClosedQueue(self):
    """A blocked dequeue drains existing elements, then fails once closed."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue()

      enqueue_op.run()

      def dequeue():
        for elem in elems:
          self.assertEqual([elem], sess.run(dequeued_t))
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testBlockingDequeueFromClosedEmptyQueue(self):
    """A dequeue blocked on an empty queue fails when the queue is closed."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
      close_op = q.close()
      dequeued_t = q.dequeue()

      def dequeue():
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testBlockingDequeueManyFromClosedQueue(self):
    """A blocked dequeue_many gets a full batch, then fails once closed."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_many(4)

      enqueue_op.run()

      def dequeue():
        self.assertAllEqual(elems, sess.run(dequeued_t))
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testBlockingDequeueManyButNotAllFromClosedQueue(self):
    """dequeue_many(3) of 4 elements succeeds once, then fails after close.

    Unlike dequeue_up_to, dequeue_many never returns a partial batch, so the
    single leftover element causes OutOfRangeError after the queue closes.
    """
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_many(3)

      enqueue_op.run()

      def dequeue():
        self.assertAllEqual(elems[:3], sess.run(dequeued_t))
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testDequeueUpToFromClosedQueueReturnsRemainder(self):
    """After close, dequeue_up_to returns the short remainder batch."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_up_to(3)

      enqueue_op.run()

      def dequeue():
        self.assertAllEqual(elems[:3], sess.run(dequeued_t))
        # The second call returns only the single remaining element.
        self.assertAllEqual(elems[3:], sess.run(dequeued_t))

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
    """Concurrent enqueue_many at capacity, dequeue_many, and close interact.

    The dequeue of 3 unblocks the enqueue of 4 into a capacity-4 queue; after
    close the pending dequeue_many fails and a plain dequeue drains the last
    element.
    """
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_many(3)
      cleanup_dequeue_t = q.dequeue()

      def enqueue():
        sess.run(enqueue_op)

      def dequeue():
        self.assertAllEqual(elems[0:3], sess.run(dequeued_t))
        with self.assertRaises(errors_impl.OutOfRangeError):
          sess.run(dequeued_t)
        self.assertEqual(elems[3], sess.run(cleanup_dequeue_t))

      def close():
        sess.run(close_op)

      enqueue_thread = self.checkedThread(target=enqueue)
      enqueue_thread.start()

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)

      close_thread = self.checkedThread(target=close)
      close_thread.start()

      enqueue_thread.join()
      dequeue_thread.join()
      close_thread.join()
  def testClosedBlockingDequeueManyRestoresPartialBatch(self):
    """A cancelled dequeue_many puts its partially-taken elements back.

    dequeue_many(4) on a queue holding 3 elements blocks; when the queue is
    closed the op fails, and the elements it had claimed must be restored in
    their original FIFO order.
    """
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(4, (dtypes_lib.float32, dtypes_lib.float32), (
          (), ()))
      elems_a = [1.0, 2.0, 3.0]
      elems_b = [10.0, 20.0, 30.0]
      enqueue_op = q.enqueue_many((elems_a, elems_b))
      dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
      cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
      close_op = q.close()

      enqueue_op.run()

      def dequeue():
        with self.assertRaises(errors_impl.OutOfRangeError):
          sess.run([dequeued_a_t, dequeued_b_t])

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)

      close_op.run()
      dequeue_thread.join()
      # Test that the elements in the partially-dequeued batch are
      # restored in the correct order.
      for elem_a, elem_b in zip(elems_a, elems_b):
        val_a, val_b = sess.run([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
        self.assertEqual(elem_a, val_a)
        self.assertEqual(elem_b, val_b)
      self.assertEqual(0, q.size().eval())
  def testBlockingDequeueManyFromClosedEmptyQueue(self):
    """A dequeue_many blocked on an empty queue fails once the queue closes."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
      close_op = q.close()
      dequeued_t = q.dequeue_many(4)

      def dequeue():
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testBlockingDequeueUpToFromClosedEmptyQueue(self):
    """A dequeue_up_to blocked on an empty queue fails once the queue closes."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
      close_op = q.close()
      dequeued_t = q.dequeue_up_to(4)

      def dequeue():
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testEnqueueToClosedQueue(self):
    """Enqueueing to a closed queue raises CancelledError."""
    with self.test_session():
      q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
      enqueue_op = q.enqueue((10.0,))
      close_op = q.close()

      enqueue_op.run()
      close_op.run()

      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
        enqueue_op.run()
  def testEnqueueManyToClosedQueue(self):
    """enqueue_many to a closed queue raises CancelledError."""
    with self.test_session():
      q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()

      enqueue_op.run()
      close_op.run()

      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
        enqueue_op.run()
  def testBlockingEnqueueToFullQueue(self):
    """An enqueue on a full queue blocks until a dequeue makes room."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue((50.0,))
      dequeued_t = q.dequeue()

      enqueue_op.run()

      def blocking_enqueue():
        sess.run(blocking_enqueue_op)

      thread = self.checkedThread(target=blocking_enqueue)
      thread.start()
      # The dequeue ops should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      for elem in elems:
        self.assertEqual([elem], dequeued_t.eval())
      # The blocked enqueue's element comes out last, in FIFO order.
      self.assertEqual([50.0], dequeued_t.eval())
      thread.join()
  def testBlockingEnqueueManyToFullQueue(self):
    """An enqueue_many on a full queue blocks until dequeues make room."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
      dequeued_t = q.dequeue()

      enqueue_op.run()

      def blocking_enqueue():
        sess.run(blocking_enqueue_op)

      thread = self.checkedThread(target=blocking_enqueue)
      thread.start()
      # The dequeue ops should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      for elem in elems:
        self.assertEqual([elem], dequeued_t.eval())
        # Brief pause to give the pending enqueue_many a chance to proceed.
        time.sleep(0.01)
      self.assertEqual([50.0], dequeued_t.eval())
      self.assertEqual([60.0], dequeued_t.eval())

      # Make sure the thread finishes before exiting.
      thread.join()
  def testBlockingEnqueueBeforeClose(self):
    """A blocked enqueue issued before close() still completes successfully."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue((50.0,))
      close_op = q.close()
      dequeued_t = q.dequeue()

      enqueue_op.run()

      def blocking_enqueue():
        # Expect the operation to succeed once the dequeue op runs.
        sess.run(blocking_enqueue_op)

      enqueue_thread = self.checkedThread(target=blocking_enqueue)
      enqueue_thread.start()

      # The close_op should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)

      def close():
        sess.run(close_op)

      close_thread = self.checkedThread(target=close)
      close_thread.start()

      # The dequeue will unblock both threads.
      self.assertEqual(10.0, dequeued_t.eval())
      enqueue_thread.join()
      close_thread.join()

      # All five elements, including the late one, drain in FIFO order.
      for elem in [20.0, 30.0, 40.0, 50.0]:
        self.assertEqual(elem, dequeued_t.eval())
      self.assertEqual(0, q.size().eval())
  def testBlockingEnqueueManyBeforeClose(self):
    """A blocked enqueue_many issued before close() still completes."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
      elems = [10.0, 20.0, 30.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
      close_op = q.close()
      dequeued_t = q.dequeue()

      enqueue_op.run()

      def blocking_enqueue():
        sess.run(blocking_enqueue_op)

      enqueue_thread = self.checkedThread(target=blocking_enqueue)
      enqueue_thread.start()

      # The close_op should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)

      def close():
        sess.run(close_op)

      close_thread = self.checkedThread(target=close)
      close_thread.start()

      # The dequeue will unblock both threads.
      self.assertEqual(10.0, dequeued_t.eval())
      enqueue_thread.join()
      close_thread.join()
      for elem in [20.0, 30.0, 50.0, 60.0]:
        self.assertEqual(elem, dequeued_t.eval())
def testDoesNotLoseValue(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(1, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
  def testSharedQueueSameSession(self):
    """Two FIFOQueues with the same shared_name share underlying state."""
    with self.test_session():
      q1 = data_flow_ops.FIFOQueue(
          1, dtypes_lib.float32, shared_name="shared_queue")
      q1.enqueue((10.0,)).run()

      q2 = data_flow_ops.FIFOQueue(
          1, dtypes_lib.float32, shared_name="shared_queue")

      q1_size_t = q1.size()
      q2_size_t = q2.size()

      # q2 sees the element enqueued through q1.
      self.assertEqual(q1_size_t.eval(), [1])
      self.assertEqual(q2_size_t.eval(), [1])

      self.assertEqual(q2.dequeue().eval(), [10.0])

      self.assertEqual(q1_size_t.eval(), [0])
      self.assertEqual(q2_size_t.eval(), [0])

      # And q1 sees the element enqueued through q2.
      q2.enqueue((20.0,)).run()

      self.assertEqual(q1_size_t.eval(), [1])
      self.assertEqual(q2_size_t.eval(), [1])

      self.assertEqual(q1.dequeue().eval(), [20.0])

      self.assertEqual(q1_size_t.eval(), [0])
      self.assertEqual(q2_size_t.eval(), [0])
  def testIncompatibleSharedQueueErrors(self):
    """Opening a shared queue with mismatched attributes fails at run time."""
    with self.test_session():
      # Mismatched capacity.
      q_a_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_a")
      q_a_2 = data_flow_ops.FIFOQueue(15, dtypes_lib.float32, shared_name="q_a")
      q_a_1.queue_ref.op.run()
      with self.assertRaisesOpError("capacity"):
        q_a_2.queue_ref.op.run()

      # Mismatched component types.
      q_b_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_b")
      q_b_2 = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shared_name="q_b")
      q_b_1.queue_ref.op.run()
      with self.assertRaisesOpError("component types"):
        q_b_2.queue_ref.op.run()

      # Unshaped first vs. shaped second.
      q_c_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_c")
      q_c_2 = data_flow_ops.FIFOQueue(
          10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
      q_c_1.queue_ref.op.run()
      with self.assertRaisesOpError("component shapes"):
        q_c_2.queue_ref.op.run()

      # Shaped first vs. unshaped second.
      q_d_1 = data_flow_ops.FIFOQueue(
          10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
      q_d_2 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_d")
      q_d_1.queue_ref.op.run()
      with self.assertRaisesOpError("component shapes"):
        q_d_2.queue_ref.op.run()

      # Mismatched component shapes.
      q_e_1 = data_flow_ops.FIFOQueue(
          10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
      q_e_2 = data_flow_ops.FIFOQueue(
          10, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
      q_e_1.queue_ref.op.run()
      with self.assertRaisesOpError("component shapes"):
        q_e_2.queue_ref.op.run()

      # Mismatched number of components.
      q_f_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_f")
      q_f_2 = data_flow_ops.FIFOQueue(
          10, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_f")
      q_f_1.queue_ref.op.run()
      with self.assertRaisesOpError("component types"):
        q_f_2.queue_ref.op.run()
def testSelectQueue(self):
with self.test_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(data_flow_ops.FIFOQueue(10, dtypes_lib.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.FIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
q1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
q2 = data_flow_ops.FIFOQueue(15, dtypes_lib.float32)
enq_q = data_flow_ops.FIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
  def _blockingDequeue(self, sess, dequeue_op):
    """Runs `dequeue_op`, expecting it to be cancelled (e.g. session close)."""
    with self.assertRaisesOpError("was cancelled"):
      sess.run(dequeue_op)
  def _blockingDequeueMany(self, sess, dequeue_many_op):
    """Runs `dequeue_many_op`, expecting it to be cancelled."""
    with self.assertRaisesOpError("was cancelled"):
      sess.run(dequeue_many_op)
  def _blockingEnqueue(self, sess, enqueue_op):
    """Runs `enqueue_op`, expecting it to be cancelled."""
    with self.assertRaisesOpError("was cancelled"):
      sess.run(enqueue_op)
  def _blockingEnqueueMany(self, sess, enqueue_many_op):
    """Runs `enqueue_many_op`, expecting it to be cancelled."""
    with self.assertRaisesOpError("was cancelled"):
      sess.run(enqueue_many_op)
  def testResetOfBlockingOperation(self):
    """Closing the session cancels all four kinds of blocked queue ops."""
    with self.test_session() as sess:
      # An empty queue: dequeue and dequeue_many will block.
      q_empty = data_flow_ops.FIFOQueue(5, dtypes_lib.float32, ())
      dequeue_op = q_empty.dequeue()
      dequeue_many_op = q_empty.dequeue_many(1)

      # A full queue: enqueue and enqueue_many will block.
      q_full = data_flow_ops.FIFOQueue(5, dtypes_lib.float32)
      sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
      enqueue_op = q_full.enqueue((6.0,))
      enqueue_many_op = q_full.enqueue_many(([6.0],))

      threads = [
          self.checkedThread(
              self._blockingDequeue, args=(sess, dequeue_op)),
          self.checkedThread(
              self._blockingDequeueMany, args=(sess, dequeue_many_op)),
          self.checkedThread(
              self._blockingEnqueue, args=(sess, enqueue_op)),
          self.checkedThread(
              self._blockingEnqueueMany, args=(sess, enqueue_many_op))
      ]
      for t in threads:
        t.start()
      time.sleep(0.1)
      sess.close()  # Will cancel the blocked operations.
      for t in threads:
        t.join()
  def testBigEnqueueMany(self):
    """An enqueue_many twice the queue capacity completes incrementally.

    The 10-element batch fills the 5-slot queue, blocks, and finishes only
    after enough single dequeues have made room; `enq_done` records whether
    the blocked enqueue has returned yet.
    """
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(5, dtypes_lib.int32, ((),))
      elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
      enq = q.enqueue_many((elem,))
      deq = q.dequeue()
      size_op = q.size()

      enq_done = []

      def blocking_enqueue():
        enq_done.append(False)
        # This will fill the queue and then block until enough dequeues happen.
        sess.run(enq)
        enq_done.append(True)

      thread = self.checkedThread(target=blocking_enqueue)
      thread.start()

      # The enqueue should start and then block.
      results = []
      results.append(deq.eval())  # Will only complete after the enqueue starts.
      self.assertEqual(len(enq_done), 1)
      self.assertEqual(sess.run(size_op), 5)

      for _ in range(3):
        results.append(deq.eval())

      time.sleep(0.1)
      self.assertEqual(len(enq_done), 1)
      self.assertEqual(sess.run(size_op), 5)

      # This dequeue will unblock the thread.
      results.append(deq.eval())
      time.sleep(0.1)
      self.assertEqual(len(enq_done), 2)
      thread.join()

      # Drain the rest, checking that the size shrinks by one each time.
      for i in range(5):
        self.assertEqual(size_op.eval(), 5 - i)
        results.append(deq.eval())
        self.assertEqual(size_op.eval(), 5 - i - 1)

      self.assertAllEqual(elem, results)
  def testBigDequeueMany(self):
    """A dequeue_many larger than capacity completes once enough is enqueued."""
    with self.test_session() as sess:
      q = data_flow_ops.FIFOQueue(2, dtypes_lib.int32, ((),))
      elem = np.arange(4, dtype=np.int32)
      enq_list = [q.enqueue((e,)) for e in elem]
      deq = q.dequeue_many(4)

      results = []

      def blocking_dequeue():
        # Will only complete after 4 enqueues complete.
        results.extend(sess.run(deq))

      thread = self.checkedThread(target=blocking_dequeue)
      thread.start()
      # The dequeue should start and then block.
      for enq in enq_list:
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        # The dequeue must not have produced anything yet.
        self.assertEqual(len(results), 0)
        sess.run(enq)

      # Enough enqueued to unblock the dequeue
      thread.join()
      self.assertAllEqual(elem, results)
def testDtypes(self):
    """Round-trips every supported dtype through enqueue_many/dequeue_many."""
    with self.test_session() as sess:
        dtypes = [
            dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
            dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.int64,
            dtypes_lib.uint16, dtypes_lib.bool, dtypes_lib.complex64,
            dtypes_lib.complex128
        ]
        shape = (32, 4, 128)
        # One component per dtype; each component has per-element shape (4, 128).
        q = data_flow_ops.FIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))

        input_tuple = []
        for dtype in dtypes:
            np_dtype = dtype.as_numpy_dtype
            np_array = np.random.randint(-10, 10, shape)
            if dtype == dtypes_lib.bool:
                np_array = np_array > 0
            elif dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
                # sqrt of negative values produces non-trivial imaginary parts.
                np_array = np.sqrt(np_array.astype(np_dtype))
            else:
                np_array = np_array.astype(np_dtype)
            input_tuple.append(np_array)

        q.enqueue_many(input_tuple).run()

        output_tuple_t = q.dequeue_many(32)
        output_tuple = sess.run(output_tuple_t)

        # Every component must come back exactly as it went in.
        for (input_elem, output_elem) in zip(input_tuple, output_tuple):
            self.assertAllEqual(input_elem, output_elem)
def testDequeueEnqueueFail(self):
    """A failing Assert dependency must abort the step and surface its message."""
    with self.test_session() as session:
        q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
        a = q.dequeue()
        # Assert(False, ...) always fails; the enqueue depends on it, so the
        # whole session.run of [a, c] must raise with the assert's message.
        b = control_flow_ops.Assert(False, ["Before enqueue"])
        with ops.control_dependencies([b]):
            c = q.enqueue(33)
        with self.assertRaisesWithPredicateMatch(
            errors_impl.InvalidArgumentError,
            lambda e: "Before enqueue" in str(e)):
            session.run([a, c])
class FIFOQueueDictTest(test.TestCase):
    """Tests FIFOQueue when components are addressed by name (dict-style API).

    Fix: the multiple-component test used ``assertTrue(expected, actual)`` in
    four places, which only checks that ``expected`` (a non-empty list) is
    truthy and never compares the dequeued values. Those are now
    ``assertEqual`` so the values are actually verified.
    """

    def testConstructor(self):
        """Named two-component queue produces the expected NodeDef and names."""
        with ops.Graph().as_default():
            q = data_flow_ops.FIFOQueue(
                5, (dtypes_lib.int32, dtypes_lib.float32),
                names=("i", "j"),
                shared_name="foo",
                name="Q")
            self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
            self.assertProtoEquals("""
        name:'Q' op:'FIFOQueueV2'
        attr { key: 'component_types' value { list {
          type: DT_INT32 type : DT_FLOAT
        } } }
        attr { key: 'shapes' value { list {} } }
        attr { key: 'capacity' value { i: 5 } }
        attr { key: 'container' value { s: '' } }
        attr { key: 'shared_name' value { s: 'foo' } }
        """, q.queue_ref.op.node_def)
            self.assertEqual(["i", "j"], q.names)

    def testConstructorWithShapes(self):
        """Named queue with explicit shapes serializes those shapes."""
        with ops.Graph().as_default():
            q = data_flow_ops.FIFOQueue(
                5, (dtypes_lib.int32, dtypes_lib.float32),
                names=("i", "f"),
                shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
                        tensor_shape.TensorShape([5, 8])),
                name="Q")
            self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
            self.assertProtoEquals("""
        name:'Q' op:'FIFOQueueV2'
        attr { key: 'component_types' value { list {
          type: DT_INT32 type : DT_FLOAT
        } } }
        attr { key: 'shapes' value { list {
          shape { dim { size: 1 }
                  dim { size: 1 }
                  dim { size: 2 }
                  dim { size: 3 } }
          shape { dim { size: 5 }
                  dim { size: 8 } }
        } } }
        attr { key: 'capacity' value { i: 5 } }
        attr { key: 'container' value { s: '' } }
        attr { key: 'shared_name' value { s: '' } }
        """, q.queue_ref.op.node_def)
            self.assertEqual(["i", "f"], q.names)

    def testEnqueueDequeueOneComponent(self):
        """Single named component: validation errors, then a full round trip."""
        with self.test_session() as sess:
            q = data_flow_ops.FIFOQueue(
                10, dtypes_lib.float32, shapes=((),), names="f")
            # Verify that enqueue() checks that when using names we must
            # enqueue a dictionary.
            with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
                enqueue_op = q.enqueue(10.0)
            with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
                enqueue_op = q.enqueue((10.0,))
            # The dictionary keys must match the queue component names.
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op = q.enqueue({})
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op = q.enqueue({"x": 12})
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op = q.enqueue({"f": 10.0, "s": "aa"})
            enqueue_op = q.enqueue({"f": 10.0})
            enqueue_op2 = q.enqueue({"f": 20.0})
            enqueue_op3 = q.enqueue({"f": 30.0})
            # Verify that enqueue_many() checks that when using names we must
            # enqueue a dictionary.
            with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
                enqueue_op4 = q.enqueue_many([40.0, 50.0])
            # The dictionary keys must match the queue component names.
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op4 = q.enqueue_many({})
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op4 = q.enqueue_many({"x": 12})
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "s": ["aa", "bb"]})
            enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0]})
            dequeue = q.dequeue()
            dequeue_2 = q.dequeue_many(2)
            sess.run(enqueue_op)
            sess.run(enqueue_op2)
            sess.run(enqueue_op3)
            sess.run(enqueue_op4)
            f = sess.run(dequeue["f"])
            self.assertEqual(10.0, f)
            f = sess.run(dequeue_2["f"])
            self.assertEqual([20.0, 30.0], list(f))
            f = sess.run(dequeue_2["f"])
            self.assertEqual([40.0, 50.0], list(f))

    def testEnqueueDequeueMultipleComponent(self):
        """Three named components: validation errors, then a full round trip."""
        with self.test_session() as sess:
            q = data_flow_ops.FIFOQueue(
                10, (dtypes_lib.float32, dtypes_lib.int32, dtypes_lib.string),
                shapes=((), (), ()),
                names=("f", "i", "s"))
            # Verify that enqueue() checks that when using names we must
            # enqueue a dictionary.
            with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
                enqueue_op = q.enqueue((10.0, 123, "aa"))
            # The dictionary keys must match the queue component names.
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op = q.enqueue({})
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op = q.enqueue({"x": 10.0})
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op = q.enqueue({"i": 12, "s": "aa"})
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0, "x": 10.0})
            enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0})
            enqueue_op2 = q.enqueue({"i": 124, "s": "bb", "f": 20.0})
            enqueue_op3 = q.enqueue({"i": 125, "s": "cc", "f": 30.0})
            # Verify that enqueue_many() checks that when using names we must
            # enqueue a dictionary.
            with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
                enqueue_op4 = q.enqueue_many(([40.0, 50.0], [126, 127], ["dd", "ee"]))
            # The dictionary keys must match the queue component names.
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op4 = q.enqueue_many({})
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op4 = q.enqueue_many({"x": [10.0, 20.0]})
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op4 = q.enqueue_many({"i": [12, 12], "s": ["aa", "bb"]})
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op4 = q.enqueue_many({
                    "f": [40.0, 50.0],
                    "i": [126, 127],
                    "s": ["dd", "ee"],
                    "x": [1, 2]
                })
            enqueue_op4 = q.enqueue_many({
                "f": [40.0, 50.0],
                "i": [126, 127],
                "s": ["dd", "ee"]
            })
            dequeue = q.dequeue()
            dequeue_2 = q.dequeue_many(2)
            sess.run(enqueue_op)
            sess.run(enqueue_op2)
            sess.run(enqueue_op3)
            sess.run(enqueue_op4)
            i, f, s = sess.run([dequeue["i"], dequeue["f"], dequeue["s"]])
            self.assertEqual(123, i)
            self.assertEqual(10.0, f)
            self.assertEqual(compat.as_bytes("aa"), s)
            i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
            self.assertEqual([124, 125], list(i))
            # Fixed: these four assertions previously used assertTrue(a, b),
            # which never compares the dequeued values.
            self.assertEqual([20.0, 30.0], list(f))
            self.assertEqual([compat.as_bytes("bb"), compat.as_bytes("cc")], list(s))
            i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
            self.assertEqual([126, 127], list(i))
            self.assertEqual([40.0, 50.0], list(f))
            self.assertEqual([compat.as_bytes("dd"), compat.as_bytes("ee")], list(s))
class FIFOQueueWithTimeoutTest(test.TestCase):
    """Tests blocking queue ops interacting with session/run timeouts."""

    def testDequeueWithTimeout(self):
        """A dequeue on an empty queue hits operation_timeout_in_ms."""
        with self.test_session(
                config=config_pb2.ConfigProto(operation_timeout_in_ms=20)) as sess:
            q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
            self.assertEqual(
                compat.as_bytes(""), q.queue_ref.op.get_attr("container"))
            dequeued_t = q.dequeue()
            # Intentionally do not run any enqueue_ops so that dequeue will block
            # until operation_timeout_in_ms.
            with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
                                         "Timed out waiting for notification"):
                sess.run(dequeued_t)

    def testReusableAfterTimeout(self):
        """A queue remains usable after per-run timeouts have fired."""
        with self.test_session() as sess:
            q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
            dequeued_t = q.dequeue()
            enqueue_op = q.enqueue(37)
            # Two timed-out attempts must not corrupt the queue state.
            with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
                                         "Timed out waiting for notification"):
                sess.run(dequeued_t, options=config_pb2.RunOptions(timeout_in_ms=10))
            with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
                                         "Timed out waiting for notification"):
                sess.run(dequeued_t, options=config_pb2.RunOptions(timeout_in_ms=10))
            sess.run(enqueue_op)
            self.assertEqual(37, sess.run(dequeued_t))
class QueueContainerTest(test.TestCase):
    """Verifies that ops.container() sets the queue op's 'container' attr."""

    def testContainer(self):
        with ops.Graph().as_default():
            with ops.container("test"):
                q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
            self.assertEqual(
                compat.as_bytes("test"), q.queue_ref.op.get_attr("container"))
class FIFOQueueBenchmark(test.Benchmark):
    """Benchmark FIFOQueue operations."""

    def _build_graph(self):
        """Builds a graph that enqueues and dequeues a single float.

        Returns:
          A tuple with the graph init tensor and graph output tensor.
        """
        q = data_flow_ops.FIFOQueue(1, "float")
        init = q.enqueue(1.0)
        # Each run of q_inc dequeues the current value and enqueues value + 1.
        x = q.dequeue()
        q_inc = q.enqueue(x + 1)
        return init, q_inc

    # TODO(suharshs): Add benchmarks for:
    #   - different capacities of the queue
    #   - various sizes of tensors
    #   - enqueue_many, dequeue_many
    def _run(self, num_iters):
        """Benchmarks enqueueing and dequeueing from a FIFOQueue.

        Args:
          num_iters: The number of iterations to run.

        Returns:
          The duration of the run in seconds.
        """
        graph = ops.Graph()
        with graph.as_default():
            init, output = self._build_graph()
        with session_lib.Session(graph=graph) as session:
            init.run()
            _ = session.run(output)  # warm up.
            start_time = time.time()
            for _ in range(num_iters):
                _ = session.run(output)
            duration = time.time() - start_time
        print("%f secs per enqueue-dequeue" % (duration / num_iters))
        self.report_benchmark(
            name="fifo_queue", iters=num_iters, wall_time=duration / num_iters)
        return duration
# Standard TensorFlow test entry point.
if __name__ == "__main__":
    test.main()
|
spideronlinecheck.py | """
精简代码
check账号是否在线
create by judy 2019/01/24
"""
import threading
import traceback
from datacontract import Task, ETokenType, ECommandStatus
from datacontract.apps.appbase import AppConfig
from idownclient.spidermanagent.spidermanagebase import SpiderManagebase
from idownclient.spider.spiderbase import SpiderBase
class SpiderOnlineCheck(SpiderManagebase):
    """Dispatches spider plugins that check whether an account is online.

    For telegram, an already-logged-in built-in account is needed to query
    the target account's online status (per the original module docstring).

    Fixes over the original:
      * The Cookie-token branch now returns after reporting failure instead
        of falling through and launching a spider anyway.
      * The exception handler used ``self._logger.log(msg)``; the standard
        ``logging.Logger.log`` requires a level as its first argument, so
        ``error()`` is used instead.
    """

    def __init__(self):
        SpiderManagebase.__init__(self)

    def online_check(self, tsk: Task):
        """Resolve the spider for *tsk* and run its online check on a thread.

        Tasks already being processed, tasks with no matching spider, and
        Cookie-token tasks are rejected without starting a thread.
        """
        tokentype = tsk.tokentype
        if tokentype is not None:
            if tokentype == ETokenType.Cookie:
                self._logger.error("Cookie cannot inquiry account online")
                self._write_tgback(tsk, ECommandStatus.Failed, "Cookie不能查询账号是否在线")
                # A cookie token cannot perform the check; stop here instead of
                # falling through and launching a spider anyway.
                return
        try:
            with self._spider_threads_locker:
                # Skip tasks that are already being processed.
                if tsk in self._spider_dealing_dict:
                    self._logger.info("{} is processing {}.".format(
                        tsk.batchid, self._spider_dealing_dict[tsk].name))
                    return
            appcfg: AppConfig = self._spideradapter.adapter(tsk)[0]
            if not isinstance(appcfg, AppConfig):
                self._logger.info("No spider match:\nbatchid:{}\ntasktpe:{}\napptype:{}"
                                  .format(tsk.batchid, tsk.tasktype.name, tsk.apptype))
                return
            spider: SpiderBase = appcfg._appclass(tsk, self._get_appcfg(appcfg), self._clientid)
            t = threading.Thread(target=self._execute_online_check, daemon=True, args=(spider,))
            t.start()
            with self._spider_threads_locker:
                # Track the running plugin object, keyed by its task.
                self._spider_dealing_dict[tsk] = spider
        except Exception:
            # Logger.log() requires an explicit level; report failures via error().
            self._logger.error("Task downloading error: {}".format(traceback.format_exc()))
            self._write_tgback(tsk, ECommandStatus.Failed, "执行爬虫插件出错,请检查client环境重试")
        return

    def _execute_online_check(self, spider: SpiderBase):
        """Worker-thread body: run the spider's check, then clean up tracking."""
        try:
            res = spider.online_check()
            if res:
                self._logger.info("Online_check is over,this mission is complete.")
        except Exception:
            self._logger.error("Execute task error:\nbatchid:{}\nerror:{}"
                               .format(spider.task.batchid, traceback.format_exc()))
            self._write_tgback(spider.task, ECommandStatus.Failed, "执行任务出现不可知错误")
        finally:
            with self._spider_threads_locker:
                # Drop the bookkeeping entry whether or not the check succeeded.
                self._spider_dealing_dict.pop(spider.task, None)
            # Fire the task's completion hook if one was supplied.
            if spider.task is not None and callable(spider.task.on_complete):
                spider.task.on_complete(spider.task)
|
new_base.py | # Core
import time
import logging
import threading
from abc import ABC
# External frameworks
import websocket
from blinker import Signal
# Our repo
from utilities import json
from deribit.messages import session
from utilities.id import generate_id
# ####################################################################
# LOGGING
# ####################################################################
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# ####################################################################
# CONSTANTS
# ####################################################################
MAX_STARTUP_TIME = 2.0
REQUEST_MAX_RETRIES = 10
CHECK_INTERVAL = 0.00001
STARTUP_SCALING = 10
REQUEST_TIMEOUT = 5.0
# ####################################################################
# WEBSOCKET CLIENT (persistent, authenticated JSON-RPC connection)
# ####################################################################
class WebsocketClient(ABC):
    """Maintains an authenticated Deribit JSON-RPC websocket session.

    Responsibilities visible here: lazily open the socket, log in with the
    supplied credentials, answer server heartbeats, track sent messages and
    their per-id callbacks, and reconnect with exponential back-off after an
    unexpected close. Actual request dispatch and answer routing are left to
    subclasses via send_multiple_requests()/acknowledge().
    """

    # ##################################################################
    # INIT
    # ##################################################################
    def __init__(self, url, key, secret, name):
        # Counter used only by the temporary just_print() test callback below.
        self._msg_counter = 0
        # Base uri
        self.__url = url
        # Credentials
        self.__key = key
        self.__secret = secret
        # Name for events
        self._name = name or generate_id(4).upper()
        # Time to establish the initial connection
        self._startup_delay = 10  # in milliseconds
        # Properties
        self._is_closing = False
        # Messages: information about SENT messages, keyed by request id
        self._sent_messages = {}
        self._sent_messages_callbacks = {}
        # Messages: information about RECEIVED messages
        self._answers_received = {}
        # Startup precautions: set True only after a successful login
        self._secured_connection = False
        # Last unexpected disconnect (seconds; doubled on each retry)
        self._disconnect_back_off = 0.25
        # Events
        self._event_on_pong = Signal(f"DELTA-PONG-RECEIVED-{self._name}")

        # websocket-client invokes plain callables; each wrapper below closes
        # over self and forwards to the matching private handler method.
        # Handle: on message
        def on_message(ws, message):
            return self._on_message(ws, message)
        self.on_message = on_message

        # Handle: on error
        def on_error(ws, message):
            return self._on_error(ws, message)
        self.on_error = on_error

        # Handle: on open
        def on_open(ws):
            return self._on_open(ws)
        self.on_open = on_open

        # Handle: on close
        def on_close(ws):
            return self._on_close(ws)
        self.on_close = on_close

        # Handle: on ping
        def on_ping(ws, *args, **kwargs):
            return self._on_ping(ws, *args, **kwargs)
        self.on_ping = on_ping

        # Handle: on pong
        def on_pong(ws, *args, **kwargs):
            return self._on_pong(ws, *args, **kwargs)
        self.on_pong = on_pong

        # Websocket (created lazily by maybe_reconnect())
        self.__ws = None

    def __del__(self):
        # Mark the session as intentionally closing so _on_close() does not
        # attempt a reconnect during teardown.
        self._is_closing = True

    # ##################################################################
    # EVENT HANDLERS
    # ##################################################################
    def _on_message(self, ws, message):
        """Decode an incoming frame and route it (heartbeat vs. real answer)."""
        # Parse the message to python data
        message = json.loads(message)
        # NOTE(review): debug prints (the message is printed twice) — likely
        # leftovers; candidates for logger.debug() or removal.
        print("**********************************************")
        print(message)
        print(message)
        # This is a heartbeat message, NOT to be propagated
        # to the callback provided by the user
        if self.is_heartbeat(message):
            return self.on_heartbeat(message)
        # This is a real message:
        # propagate to the user's callback
        return self.acknowledge(message)

    def _on_error(self, ws, message):
        """Log transport-level errors reported by the websocket library."""
        logger.error(message)

    def _on_open(self, ws):
        """Send the login request as soon as the socket is open."""
        logger.info("Opening web socket connection.")
        credentials = session.login_message(key=self.__key, secret=self.__secret)
        id_ = credentials["id"]
        # Register the login answer callback under the request id.
        self._sent_messages[id_] = credentials
        self._sent_messages_callbacks[id_] = self._on_login
        ws.send(json.dumps(credentials))

    def _on_login(self, message):
        """Callback for the login answer: enable heartbeats on success."""
        if not "error" in message:
            self._secured_connection = True
            print("Web socket opened.")
            # Reset the back-off after a successful (re)connect.
            self._disconnect_back_off = 0.25
            # Enable heartbeat
            enable_heartbeat_msg = session.set_heartbeat_message()
            id_ = enable_heartbeat_msg["id"]
            self._sent_messages[id_] = enable_heartbeat_msg
            self._sent_messages_callbacks[id_] = self.on_heartbeat
            self.ws.send(json.dumps(enable_heartbeat_msg))
        else:
            # NOTE(review): login failures are silently ignored here — confirm
            # whether they should be logged or raised.
            pass

    def _on_close(self, ws):
        """Handle socket closure: clean up if expected, else back off and reconnect."""
        print("Socket closed.")
        # Was supposed to happen
        if self._is_closing:
            try:
                # NOTE(review): websocket-client's WebSocketApp does not
                # document a retire() method — confirm this attribute exists.
                ws.retire()
            except:
                pass
        # Not expected
        else:
            logger.warning("Connection ended unexpectedly. Reconnecting to web socket server.")
            self.__ws = None
            # Back off: double the delay, capped at 10s, before reconnecting.
            self._disconnect_back_off *= 2
            time.sleep(min(10.0, self._disconnect_back_off))
            self.maybe_reconnect()

    def _on_ping(self, ws, message, *args, **kwargs):
        print(f"On Ping: {message}")

    def _on_pong(self, *args, **kwargs):
        # Notify subscribers (e.g. liveness monitors) that a pong arrived.
        print("sending Pong event")
        self._event_on_pong.send()

    def on_heartbeat(self, message, *arg, **kwargs):
        """React to server heartbeat frames.

        Frames of type 'heartbeat' are ignored; 'test_request' frames must be
        answered via the test endpoint to keep the session alive.
        """
        params = message.get("params")
        type_ = params.get("type")
        # External provider sending us a heartbeat
        # Ignore for now
        if type_ == "heartbeat":
            return
        # Exchange asking us for a reply
        # to their heartbeat, must hit the 'test' api endpoint
        if type_ == "test_request":
            hb_resp_msg = session.test_heartbeat_request_message()
            # return self.ws.send(json.dumps(hb_resp_msg))
            self.ws.send(json.dumps(hb_resp_msg))
            # FIXME THIS IS TEST, REMOVE THIS
            test_sub_msg = session.subscription_message(channels="quote.BTC-25DEC20")
            self.send_request(test_sub_msg, callback=self.just_print)

    def just_print(self, msg):
        # FIXME: temporary test callback driving a scripted subscribe /
        # currencies-request / unsubscribe sequence; remove with the
        # test_request block above.
        self._msg_counter += 1
        if self._msg_counter == 10:
            from deribit.messages import mkt_data
            test_sub_msg = mkt_data.request_currencies()
            self.send_request(test_sub_msg, callback=self.just_print)
        elif self._msg_counter > 15:
            test_unsub_msg = session.unsubscription_message(channels="quote.BTC-25DEC20")
            self.send_request(test_unsub_msg, callback=self.just_print)
        print(msg)

    # ##################################################################
    # RUN
    # ##################################################################
    def run_forever_on_thread(self, ws, startup_delay):
        """Run the websocket event loop on a thread, then wait briefly.

        startup_delay is in milliseconds; a floor of 25 ms is applied so the
        connection has a chance to come up before the caller proceeds.
        """
        # Trying to implement heartbeat
        kwargs = {"ping_interval": 10, "ping_timeout": 3}
        th = threading.Thread(target=ws.run_forever, kwargs=kwargs)
        # This is the true version
        # kwargs = {"ping_interval": 30, "ping_timeout": 15}
        # th = threading.Thread(target=ws.run_forever, kwargs=kwargs)
        th.start()
        time.sleep(max(0.025, startup_delay / 1000.0))

    def maybe_reconnect(self):
        """Create and start a new WebSocketApp if none is currently alive."""
        if not self.__ws:
            ws = websocket.WebSocketApp(self.__url,
                                        on_open=self.on_open,
                                        on_message=self.on_message,
                                        on_error=self.on_error,
                                        on_close=self.on_close,
                                        on_ping=self.on_ping,
                                        on_pong=self.on_pong)
            # Set locally
            self.__ws = ws
            self.run_forever_on_thread(ws, startup_delay=self._startup_delay)

    # ##################################################################
    # WEBSOCKET BASIC OPERATIONS
    # ##################################################################
    @property
    def ws(self):
        # Lazily (re)connect on first access.
        if not self.__ws:
            self.maybe_reconnect()
        return self.__ws

    def send_request(self, message, callback=None):
        """Send a single request; delegates to send_multiple_requests()."""
        msg_cb = [(message, callback)]
        return self.send_multiple_requests(msg_cb)

    # ##################################################################
    # ABSTRACT METHODS
    # ##################################################################
    def send_multiple_requests(self, *args, **kwargs):
        # Subclasses must implement the actual dispatch of (message, callback)
        # pairs over the socket.
        raise NotImplementedError()

    def acknowledge(self, *args, **kwargs):
        # Subclasses must implement routing of non-heartbeat answers to the
        # registered callbacks.
        raise NotImplementedError()

    # ##################################################################
    # HEARTBEAT MESSAGE HANDLING
    # ##################################################################
    def is_heartbeat(self, message):
        """Classify a decoded frame: True if it looks like a heartbeat.

        A frame carrying a "method" key is a regular notification/answer; a
        frame with only "params" is treated as a heartbeat.
        """
        if message.get("method", False):
            return False
        params = message.get("params", False)
        if params:
            return True
        return False
|
test_functools.py | import abc
import cosmo
import builtins
import collections
import copy
from itertools import permutations
import pickle
from random import choice
import sys
from test import support
import time
import unittest
from weakref import proxy
import contextlib
try:
import _thread
import threading
except ImportError:
threading = None
import functools
if __name__ == 'PYOBJ.COM':
import decimal
py_functools = support.import_fresh_module('functools', blocked=['_functools'])
c_functools = support.import_fresh_module('functools', fresh=['_functools'])
decimal = support.import_fresh_module('decimal', fresh=['_decimal'])
@contextlib.contextmanager
def replaced_module(name, replacement):
    """Temporarily install *replacement* as ``sys.modules[name]``."""
    saved = sys.modules[name]
    sys.modules[name] = replacement
    try:
        yield
    finally:
        # Always restore the previously installed module object.
        sys.modules[name] = saved
def capture(*args, **kw):
    """capture all positional and keyword arguments"""
    captured = (args, kw)
    return captured
def signature(part):
    """Return (func, args, keywords, __dict__) describing a partial object."""
    return (
        part.func,
        part.args,
        part.keywords,
        part.__dict__,
    )
class MyTuple(tuple):
    # Tuple subclass; test_setstate_subclasses checks that partial.__setstate__
    # converts it back to a plain tuple.
    pass
class BadTuple(tuple):
    """Tuple subclass whose ``+`` misbehaves by returning a list."""

    def __add__(self, other):
        combined = list(self)
        combined.extend(other)
        return combined
class MyDict(dict):
    # Dict subclass; test_setstate_subclasses checks that partial.__setstate__
    # converts it back to a plain dict.
    pass
class TestPartial:
    """Behavioral tests shared by the C and pure-Python partial implementations.

    Concrete subclasses supply the implementation under test via the
    ``partial`` class attribute and an ``AllowPickle`` context manager.
    """

    def test_basic_examples(self):
        p = self.partial(capture, 1, 2, a=10, b=20)
        self.assertTrue(callable(p))
        self.assertEqual(p(3, 4, b=30, c=40),
                         ((1, 2, 3, 4), dict(a=10, b=30, c=40)))
        p = self.partial(map, lambda x: x*10)
        self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])

    def test_attributes(self):
        p = self.partial(capture, 1, 2, a=10, b=20)
        # attributes should be readable
        self.assertEqual(p.func, capture)
        self.assertEqual(p.args, (1, 2))
        self.assertEqual(p.keywords, dict(a=10, b=20))

    def test_argument_checking(self):
        self.assertRaises(TypeError, self.partial)  # need at least a func arg
        try:
            self.partial(2)()
        except TypeError:
            pass
        else:
            self.fail('First arg not checked for callability')

    def test_protection_of_callers_dict_argument(self):
        # a caller's dictionary should not be altered by partial
        def func(a=10, b=20):
            return a
        d = {'a':3}
        p = self.partial(func, a=5)
        self.assertEqual(p(**d), 3)
        self.assertEqual(d, {'a':3})
        p(b=7)
        self.assertEqual(d, {'a':3})

    def test_kwargs_copy(self):
        # Issue #29532: Altering a kwarg dictionary passed to a constructor
        # should not affect a partial object after creation
        d = {'a': 3}
        p = self.partial(capture, **d)
        self.assertEqual(p(), ((), {'a': 3}))
        d['a'] = 5
        self.assertEqual(p(), ((), {'a': 3}))

    def test_arg_combinations(self):
        # exercise special code paths for zero args in either partial
        # object or the caller
        p = self.partial(capture)
        self.assertEqual(p(), ((), {}))
        self.assertEqual(p(1,2), ((1,2), {}))
        p = self.partial(capture, 1, 2)
        self.assertEqual(p(), ((1,2), {}))
        self.assertEqual(p(3,4), ((1,2,3,4), {}))

    def test_kw_combinations(self):
        # exercise special code paths for no keyword args in
        # either the partial object or the caller
        p = self.partial(capture)
        self.assertEqual(p.keywords, {})
        self.assertEqual(p(), ((), {}))
        self.assertEqual(p(a=1), ((), {'a':1}))
        p = self.partial(capture, a=1)
        self.assertEqual(p.keywords, {'a':1})
        self.assertEqual(p(), ((), {'a':1}))
        self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
        # keyword args in the call override those in the partial object
        self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))

    def test_positional(self):
        # make sure positional arguments are captured correctly
        for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
            p = self.partial(capture, *args)
            expected = args + ('x',)
            got, empty = p('x')
            self.assertTrue(expected == got and empty == {})

    def test_keyword(self):
        # make sure keyword arguments are captured correctly
        for a in ['a', 0, None, 3.5]:
            p = self.partial(capture, a=a)
            expected = {'a':a,'x':None}
            empty, got = p(x=None)
            self.assertTrue(expected == got and empty == ())

    def test_no_side_effects(self):
        # make sure there are no side effects that affect subsequent calls
        p = self.partial(capture, 0, a=1)
        args1, kw1 = p(1, b=2)
        self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
        args2, kw2 = p()
        self.assertTrue(args2 == (0,) and kw2 == {'a':1})

    def test_error_propagation(self):
        def f(x, y):
            x / y
        self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
        self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
        self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
        self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)

    def test_weakref(self):
        # a partial must be weak-referenceable and die when dereferenced
        f = self.partial(int, base=16)
        p = proxy(f)
        self.assertEqual(f.func, p.func)
        f = None
        self.assertRaises(ReferenceError, getattr, p, 'func')

    def test_with_bound_and_unbound_methods(self):
        data = list(map(str, range(10)))
        join = self.partial(str.join, '')
        self.assertEqual(join(data), '0123456789')
        join = self.partial(''.join)
        self.assertEqual(join(data), '0123456789')

    def test_nested_optimization(self):
        # partial-of-partial is flattened into a single level
        partial = self.partial
        inner = partial(signature, 'asdf')
        nested = partial(inner, bar=True)
        flat = partial(signature, 'asdf', bar=True)
        self.assertEqual(signature(nested), signature(flat))

    def test_nested_partial_with_attribute(self):
        # see issue 25137
        partial = self.partial
        def foo(bar):
            return bar
        p = partial(foo, 'first')
        p2 = partial(p, 'second')
        p2.new_attr = 'spam'
        self.assertEqual(p2.new_attr, 'spam')

    def test_repr(self):
        args = (object(), object())
        args_repr = ', '.join(repr(a) for a in args)
        kwargs = {'a': object(), 'b': object()}
        # dict ordering is not assumed: accept both keyword orders
        kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
                        'b={b!r}, a={a!r}'.format_map(kwargs)]
        if self.partial in (c_functools.partial, py_functools.partial):
            name = 'functools.partial'
        else:
            name = self.partial.__name__
        f = self.partial(capture)
        self.assertEqual(f'{name}({capture!r})', repr(f))
        f = self.partial(capture, *args)
        self.assertEqual(f'{name}({capture!r}, {args_repr})', repr(f))
        f = self.partial(capture, **kwargs)
        self.assertIn(repr(f),
                      [f'{name}({capture!r}, {kwargs_repr})'
                       for kwargs_repr in kwargs_reprs])
        f = self.partial(capture, *args, **kwargs)
        self.assertIn(repr(f),
                      [f'{name}({capture!r}, {args_repr}, {kwargs_repr})'
                       for kwargs_repr in kwargs_reprs])

    def test_recursive_repr(self):
        # self-referential partials must repr() as '...' instead of recursing
        if self.partial in (c_functools.partial, py_functools.partial):
            name = 'functools.partial'
        else:
            name = self.partial.__name__
        f = self.partial(capture)
        f.__setstate__((f, (), {}, {}))
        try:
            self.assertEqual(repr(f), '%s(...)' % (name,))
        finally:
            f.__setstate__((capture, (), {}, {}))
        f = self.partial(capture)
        f.__setstate__((capture, (f,), {}, {}))
        try:
            self.assertEqual(repr(f), '%s(%r, ...)' % (name, capture,))
        finally:
            f.__setstate__((capture, (), {}, {}))
        f = self.partial(capture)
        f.__setstate__((capture, (), {'a': f}, {}))
        try:
            self.assertEqual(repr(f), '%s(%r, a=...)' % (name, capture,))
        finally:
            f.__setstate__((capture, (), {}, {}))

    def test_pickle(self):
        with self.AllowPickle():
            f = self.partial(signature, ['asdf'], bar=[True])
            f.attr = []
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                f_copy = pickle.loads(pickle.dumps(f, proto))
                self.assertEqual(signature(f_copy), signature(f))

    def test_copy(self):
        f = self.partial(signature, ['asdf'], bar=[True])
        f.attr = []
        f_copy = copy.copy(f)
        self.assertEqual(signature(f_copy), signature(f))
        # shallow copy shares the attribute, args and keywords objects
        self.assertIs(f_copy.attr, f.attr)
        self.assertIs(f_copy.args, f.args)
        self.assertIs(f_copy.keywords, f.keywords)

    def test_deepcopy(self):
        f = self.partial(signature, ['asdf'], bar=[True])
        f.attr = []
        f_copy = copy.deepcopy(f)
        self.assertEqual(signature(f_copy), signature(f))
        # deep copy duplicates everything, including nested containers
        self.assertIsNot(f_copy.attr, f.attr)
        self.assertIsNot(f_copy.args, f.args)
        self.assertIsNot(f_copy.args[0], f.args[0])
        self.assertIsNot(f_copy.keywords, f.keywords)
        self.assertIsNot(f_copy.keywords['bar'], f.keywords['bar'])

    def test_setstate(self):
        f = self.partial(signature)
        f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
        self.assertEqual(signature(f),
                         (capture, (1,), dict(a=10), dict(attr=[])))
        self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
        f.__setstate__((capture, (1,), dict(a=10), None))
        self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
        self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
        f.__setstate__((capture, (1,), None, None))
        #self.assertEqual(signature(f), (capture, (1,), {}, {}))
        self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
        self.assertEqual(f(2), ((1, 2), {}))
        self.assertEqual(f(), ((1,), {}))
        f.__setstate__((capture, (), {}, None))
        self.assertEqual(signature(f), (capture, (), {}, {}))
        self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
        self.assertEqual(f(2), ((2,), {}))
        self.assertEqual(f(), ((), {}))

    def test_setstate_errors(self):
        # malformed state tuples must raise TypeError, not crash
        f = self.partial(signature)
        self.assertRaises(TypeError, f.__setstate__, (capture, (), {}))
        self.assertRaises(TypeError, f.__setstate__, (capture, (), {}, {}, None))
        self.assertRaises(TypeError, f.__setstate__, [capture, (), {}, None])
        self.assertRaises(TypeError, f.__setstate__, (None, (), {}, None))
        self.assertRaises(TypeError, f.__setstate__, (capture, None, {}, None))
        self.assertRaises(TypeError, f.__setstate__, (capture, [], {}, None))
        self.assertRaises(TypeError, f.__setstate__, (capture, (), [], None))

    def test_setstate_subclasses(self):
        # tuple/dict subclasses in the state are converted to the base types
        f = self.partial(signature)
        f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
        s = signature(f)
        self.assertEqual(s, (capture, (1,), dict(a=10), {}))
        self.assertIs(type(s[1]), tuple)
        self.assertIs(type(s[2]), dict)
        r = f()
        self.assertEqual(r, ((1,), {'a': 10}))
        self.assertIs(type(r[0]), tuple)
        self.assertIs(type(r[1]), dict)
        f.__setstate__((capture, BadTuple((1,)), {}, None))
        s = signature(f)
        self.assertEqual(s, (capture, (1,), {}, {}))
        self.assertIs(type(s[1]), tuple)
        r = f(2)
        self.assertEqual(r, ((1, 2), {}))
        self.assertIs(type(r[0]), tuple)

    def test_recursive_pickle(self):
        with self.AllowPickle():
            # The self-referential func case only raises cleanly in dbg mode
            # in this build (cosmo.MODE guard).
            if cosmo.MODE == "dbg":
                f = self.partial(capture)
                f.__setstate__((f, (), {}, {}))
                try:
                    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                        with self.assertRaises(RecursionError):
                            pickle.dumps(f, proto)
                finally:
                    f.__setstate__((capture, (), {}, {}))
            f = self.partial(capture)
            f.__setstate__((capture, (f,), {}, {}))
            try:
                for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                    f_copy = pickle.loads(pickle.dumps(f, proto))
                    try:
                        self.assertIs(f_copy.args[0], f_copy)
                    finally:
                        f_copy.__setstate__((capture, (), {}, {}))
            finally:
                f.__setstate__((capture, (), {}, {}))
            f = self.partial(capture)
            f.__setstate__((capture, (), {'a': f}, {}))
            try:
                for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                    f_copy = pickle.loads(pickle.dumps(f, proto))
                    try:
                        self.assertIs(f_copy.keywords['a'], f_copy)
                    finally:
                        f_copy.__setstate__((capture, (), {}, {}))
            finally:
                f.__setstate__((capture, (), {}, {}))

    # Issue 6083: Reference counting bug
    def test_setstate_refcount(self):
        class BadSequence:
            def __len__(self):
                return 4
            def __getitem__(self, key):
                if key == 0:
                    return max
                elif key == 1:
                    return tuple(range(1000000))
                elif key in (2, 3):
                    return {}
                raise IndexError
        f = self.partial(object)
        self.assertRaises(TypeError, f.__setstate__, BadSequence())
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
    """Runs the shared TestPartial suite against the C implementation,
    plus checks specific to the C type (read-only attributes, repr safety)."""
    if c_functools:
        partial = c_functools.partial

    class AllowPickle:
        # Pickling the C partial needs no module swapping; this is a no-op
        # context manager matching TestPartialPy.AllowPickle's interface.
        def __enter__(self):
            return self
        def __exit__(self, type, value, tb):
            return False

    def test_attributes_unwritable(self):
        # attributes should not be writable
        p = self.partial(capture, 1, 2, a=10, b=20)
        self.assertRaises(AttributeError, setattr, p, 'func', map)
        self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
        self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))
        p = self.partial(hex)
        try:
            del p.__dict__
        except TypeError:
            pass
        else:
            self.fail('partial object allowed __dict__ to be deleted')

    def test_manually_adding_non_string_keyword(self):
        p = self.partial(capture)
        # Adding a non-string/unicode keyword to partial kwargs
        p.keywords[1234] = 'value'
        r = repr(p)
        self.assertIn('1234', r)
        self.assertIn("'value'", r)
        with self.assertRaises(TypeError):
            p()

    def test_keystr_replaces_value(self):
        p = self.partial(capture)
        class MutatesYourDict(object):
            def __str__(self):
                p.keywords[self] = ['sth2']
                return 'astr'
        # Replacing the value during key formatting should keep the original
        # value alive (at least long enough).
        p.keywords[MutatesYourDict()] = ['sth']
        r = repr(p)
        self.assertIn('astr', r)
        self.assertIn("['sth']", r)
@unittest.skipIf(c_functools, "skip pure-python test if C impl is present")
class TestPartialPy(TestPartial, unittest.TestCase):
    """Run the shared TestPartial suite against the pure-Python partial."""

    partial = py_functools.partial

    class AllowPickle:
        """Context manager that temporarily installs the pure-Python
        functools as the real ``functools`` module, so that its partial
        objects can round-trip through pickle."""

        def __init__(self):
            self._swap = replaced_module("functools", py_functools)

        def __enter__(self):
            return self._swap.__enter__()

        def __exit__(self, exc_type, exc_value, traceback):
            return self._swap.__exit__(exc_type, exc_value, traceback)
if c_functools:
    # Bare subclass used to exercise subclassing of the C partial.
    class CPartialSubclass(c_functools.partial):
        pass

# Bare subclass used to exercise subclassing of the pure-Python partial.
class PyPartialSubclass(py_functools.partial):
    pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
    """Re-run the C partial tests against a subclass of the C partial."""
    if c_functools:
        partial = CPartialSubclass

    # partial subclasses are not optimized for nested calls
    test_nested_optimization = None
@unittest.skipIf(c_functools, "skip pure-python test if C impl is present")
class TestPartialPySubclass(TestPartialPy):
    """Re-run the pure-Python partial tests against a subclass."""
    partial = PyPartialSubclass
class TestPartialMethod(unittest.TestCase):
    """Tests for functools.partialmethod.

    ``capture`` returns ``(args, kwargs)``, so every assertion below
    spells out exactly what a bound partialmethod forwards, including
    the implicitly bound ``self``/``cls`` argument.
    """

    class A(object):
        # One partialmethod per calling convention under test.
        nothing = functools.partialmethod(capture)
        positional = functools.partialmethod(capture, 1)
        keywords = functools.partialmethod(capture, a=2)
        both = functools.partialmethod(capture, 3, b=4)

        nested = functools.partialmethod(positional, 5)

        over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)

        static = functools.partialmethod(staticmethod(capture), 8)
        cls = functools.partialmethod(classmethod(capture), d=9)

    a = A()

    def test_arg_combinations(self):
        # self is always bound first, then pre-supplied args, then call args.
        self.assertEqual(self.a.nothing(), ((self.a,), {}))
        self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
        self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
        self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))

        self.assertEqual(self.a.positional(), ((self.a, 1), {}))
        self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
        self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
        self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))

        self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
        self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
        self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
        self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))

        self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
        self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
        self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
        self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))

        # Calling through the class requires self to be passed explicitly.
        self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))

    def test_nested(self):
        # A partialmethod built on another partialmethod stacks its args.
        self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
        self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
        self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
        self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))

        self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))

    def test_over_partial(self):
        # A partialmethod wrapping a functools.partial merges both layers.
        self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
        self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
        self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
        self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))

        self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))

    def test_bound_method_introspection(self):
        obj = self.a
        self.assertIs(obj.both.__self__, obj)
        self.assertIs(obj.nested.__self__, obj)
        self.assertIs(obj.over_partial.__self__, obj)
        # classmethod-style partialmethods bind to the class.
        self.assertIs(obj.cls.__self__, self.A)
        self.assertIs(self.A.cls.__self__, self.A)

    def test_unbound_method_retrieval(self):
        obj = self.A
        self.assertFalse(hasattr(obj.both, "__self__"))
        self.assertFalse(hasattr(obj.nested, "__self__"))
        self.assertFalse(hasattr(obj.over_partial, "__self__"))
        self.assertFalse(hasattr(obj.static, "__self__"))
        self.assertFalse(hasattr(self.a.static, "__self__"))

    def test_descriptors(self):
        # static/cls behave the same whether accessed via class or instance.
        for obj in [self.A, self.a]:
            with self.subTest(obj=obj):
                self.assertEqual(obj.static(), ((8,), {}))
                self.assertEqual(obj.static(5), ((8, 5), {}))
                self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
                self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))

                self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
                self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
                self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
                self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))

    def test_overriding_keywords(self):
        # Call-site keywords win over the pre-supplied ones.
        self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
        self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))

    def test_invalid_args(self):
        # First argument must be callable.
        with self.assertRaises(TypeError):
            class B(object):
                method = functools.partialmethod(None, 1)

    def test_repr(self):
        self.assertEqual(repr(vars(self.A)['both']),
                         'functools.partialmethod({}, 3, b=4)'.format(capture))

    def test_abstract(self):
        # NOTE(review): Abstract subclasses abc.ABCMeta itself rather than
        # using metaclass=ABCMeta — this matches upstream CPython; the
        # __isabstractmethod__ checks hold either way.
        class Abstract(abc.ABCMeta):

            @abc.abstractmethod
            def add(self, x, y):
                pass

            add5 = functools.partialmethod(add, 5)

        # Abstractness must propagate through partialmethod...
        self.assertTrue(Abstract.add.__isabstractmethod__)
        self.assertTrue(Abstract.add5.__isabstractmethod__)

        # ...but never be invented for concrete targets.
        for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
            self.assertFalse(getattr(func, '__isabstractmethod__', False))
class TestUpdateWrapper(unittest.TestCase):
    """Tests for functools.update_wrapper.

    Fix applied: ``cosmo.MODE .startswith`` had a stray space before the
    attribute access (inconsistent with the identical guard used earlier
    in this class); normalized to ``cosmo.MODE.startswith``.
    """

    def check_wrapper(self, wrapper, wrapped,
                      assigned=functools.WRAPPER_ASSIGNMENTS,
                      updated=functools.WRAPPER_UPDATES):
        """Assert that *wrapper* carries *wrapped*'s metadata.

        ``assigned`` attributes must be the very same objects;
        ``updated`` (dict-like) attributes must contain the wrapped
        function's entries; ``__wrapped__`` must point at *wrapped*.
        """
        # Check attributes were assigned
        for name in assigned:
            self.assertIs(getattr(wrapper, name), getattr(wrapped, name))
        # Check attributes were updated
        for name in updated:
            wrapper_attr = getattr(wrapper, name)
            wrapped_attr = getattr(wrapped, name)
            for key in wrapped_attr:
                if name == "__dict__" and key == "__wrapped__":
                    # __wrapped__ is overwritten by the update code
                    continue
                self.assertIs(wrapped_attr[key], wrapper_attr[key])
        # Check __wrapped__
        self.assertIs(wrapper.__wrapped__, wrapped)

    def _default_update(self):
        # Builds a (wrapper, wrapped) pair using update_wrapper defaults.
        # f carries an annotation, an extra attribute, and a bogus
        # __wrapped__ that update_wrapper must overwrite.
        def f(a:'This is a new annotation'):
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        f.__wrapped__ = "This is a bald faced lie"
        def wrapper(b:'This is the prior annotation'):
            pass
        functools.update_wrapper(wrapper, f)
        return wrapper, f

    def test_default_update(self):
        wrapper, f = self._default_update()
        self.check_wrapper(wrapper, f)
        self.assertIs(wrapper.__wrapped__, f)
        self.assertEqual(wrapper.__name__, 'f')
        self.assertEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.attr, 'This is also a test')
        # Annotations are replaced wholesale, not merged.
        self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
        self.assertNotIn('b', wrapper.__annotations__)

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    @unittest.skipIf(cosmo.MODE.startswith("tiny"),
                     "No .py files available in Cosmo MODE=tiny")
    def test_default_update_doc(self):
        wrapper, f = self._default_update()
        self.assertEqual(wrapper.__doc__, 'This is a test')

    def test_no_update(self):
        # With empty assigned/updated tuples nothing is copied over.
        def f():
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        def wrapper():
            pass
        functools.update_wrapper(wrapper, f, (), ())
        self.check_wrapper(wrapper, f, (), ())
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.__doc__, None)
        self.assertEqual(wrapper.__annotations__, {})
        self.assertFalse(hasattr(wrapper, 'attr'))

    def test_selective_update(self):
        # Explicit assigned/updated tuples copy exactly what is listed.
        def f():
            pass
        f.attr = 'This is a different test'
        f.dict_attr = dict(a=1, b=2, c=3)
        def wrapper():
            pass
        wrapper.dict_attr = {}
        assign = ('attr',)
        update = ('dict_attr',)
        functools.update_wrapper(wrapper, f, assign, update)
        self.check_wrapper(wrapper, f, assign, update)
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.__doc__, None)
        self.assertEqual(wrapper.attr, 'This is a different test')
        self.assertEqual(wrapper.dict_attr, f.dict_attr)

    def test_missing_attributes(self):
        def f():
            pass
        def wrapper():
            pass
        wrapper.dict_attr = {}
        assign = ('attr',)
        update = ('dict_attr',)
        # Missing attributes on wrapped object are ignored
        functools.update_wrapper(wrapper, f, assign, update)
        self.assertNotIn('attr', wrapper.__dict__)
        self.assertEqual(wrapper.dict_attr, {})
        # Wrapper must have expected attributes for updating
        del wrapper.dict_attr
        with self.assertRaises(AttributeError):
            functools.update_wrapper(wrapper, f, assign, update)
        wrapper.dict_attr = 1
        with self.assertRaises(AttributeError):
            functools.update_wrapper(wrapper, f, assign, update)

    @support.requires_docstrings
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    @unittest.skipIf(cosmo.MODE.startswith("tiny"),
                     "No .py files available in Cosmo MODE=tiny")
    def test_builtin_update(self):
        # Test for bug #1576241
        def wrapper():
            pass
        functools.update_wrapper(wrapper, max)
        self.assertEqual(wrapper.__name__, 'max')
        self.assertTrue(wrapper.__doc__.startswith('max('))
        self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
    """Re-run the update_wrapper checks through the @functools.wraps
    decorator form.

    Fix applied: ``cosmo.MODE .startswith`` had a stray space before the
    attribute access; normalized to ``cosmo.MODE.startswith`` for
    consistency with the rest of the file.
    """

    def _default_update(self):
        # Same fixture as the base class, but wrapping via @wraps.
        def f():
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        f.__wrapped__ = "This is still a bald faced lie"
        @functools.wraps(f)
        def wrapper():
            pass
        return wrapper, f

    def test_default_update(self):
        wrapper, f = self._default_update()
        self.check_wrapper(wrapper, f)
        self.assertEqual(wrapper.__name__, 'f')
        self.assertEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.attr, 'This is also a test')

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    @unittest.skipIf(cosmo.MODE.startswith("tiny"),
                     "No .py files available in Cosmo MODE=tiny")
    def test_default_update_doc(self):
        wrapper, _ = self._default_update()
        self.assertEqual(wrapper.__doc__, 'This is a test')

    def test_no_update(self):
        # wraps(f, (), ()) copies nothing.
        def f():
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        @functools.wraps(f, (), ())
        def wrapper():
            pass
        self.check_wrapper(wrapper, f, (), ())
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.__doc__, None)
        self.assertFalse(hasattr(wrapper, 'attr'))

    def test_selective_update(self):
        def f():
            pass
        f.attr = 'This is a different test'
        f.dict_attr = dict(a=1, b=2, c=3)
        def add_dict_attr(f):
            f.dict_attr = {}
            return f
        assign = ('attr',)
        update = ('dict_attr',)
        @functools.wraps(f, assign, update)
        @add_dict_attr
        def wrapper():
            pass
        self.check_wrapper(wrapper, f, assign, update)
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.__doc__, None)
        self.assertEqual(wrapper.attr, 'This is a different test')
        self.assertEqual(wrapper.dict_attr, f.dict_attr)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestReduce(unittest.TestCase):
    """Tests for functools.reduce (C implementation)."""
    if c_functools:
        func = c_functools.reduce

    def test_reduce(self):
        class Squares:
            # Lazy sequence of the first `max` squares, exercising the
            # old __getitem__/IndexError iteration protocol.
            def __init__(self, max):
                self.max = max
                self.sofar = []

            def __len__(self):
                return len(self.sofar)

            def __getitem__(self, i):
                if not 0 <= i < self.max: raise IndexError
                n = len(self.sofar)
                while n <= i:
                    self.sofar.append(n*n)
                    n += 1
                return self.sofar[i]
        def add(x, y):
            return x + y
        self.assertEqual(self.func(add, ['a', 'b', 'c'], ''), 'abc')
        self.assertEqual(
            self.func(add, [['a', 'c'], [], ['d', 'w']], []),
            ['a','c','d','w']
        )
        self.assertEqual(self.func(lambda x, y: x*y, range(2,8), 1), 5040)
        self.assertEqual(
            self.func(lambda x, y: x*y, range(2,21), 1),
            2432902008176640000
        )
        # sum of squares 0..9 = 285
        self.assertEqual(self.func(add, Squares(10)), 285)
        self.assertEqual(self.func(add, Squares(10), 0), 285)
        self.assertEqual(self.func(add, Squares(0), 0), 0)
        self.assertRaises(TypeError, self.func)
        self.assertRaises(TypeError, self.func, 42, 42)
        self.assertRaises(TypeError, self.func, 42, 42, 42)
        self.assertEqual(self.func(42, "1"), "1") # func is never called with one item
        self.assertEqual(self.func(42, "", "1"), "1") # func is never called with one item
        self.assertRaises(TypeError, self.func, 42, (42, 42))
        self.assertRaises(TypeError, self.func, add, []) # arg 2 must not be empty sequence with no initial value
        self.assertRaises(TypeError, self.func, add, "")
        self.assertRaises(TypeError, self.func, add, ())
        self.assertRaises(TypeError, self.func, add, object())

        class TestFailingIter:
            # __iter__ errors must propagate unchanged.
            def __iter__(self):
                raise RuntimeError
        self.assertRaises(RuntimeError, self.func, add, TestFailingIter())

        # Empty iterable + initial value returns the initial value untouched.
        self.assertEqual(self.func(add, [], None), None)
        self.assertEqual(self.func(add, [], 42), 42)

        class BadSeq:
            def __getitem__(self, index):
                raise ValueError
        self.assertRaises(ValueError, self.func, 42, BadSeq())

    # Test reduce()'s use of iterators.
    def test_iterator_usage(self):
        class SequenceClass:
            # 0..n-1 via the sequence protocol.
            def __init__(self, n):
                self.n = n
            def __getitem__(self, i):
                if 0 <= i < self.n:
                    return i
                else:
                    raise IndexError

        from operator import add
        self.assertEqual(self.func(add, SequenceClass(5)), 10)
        self.assertEqual(self.func(add, SequenceClass(5), 42), 52)
        # Empty sequence with no initial value is an error...
        self.assertRaises(TypeError, self.func, add, SequenceClass(0))
        self.assertEqual(self.func(add, SequenceClass(0), 42), 42)
        # ...and a single item is returned without calling add.
        self.assertEqual(self.func(add, SequenceClass(1)), 0)
        self.assertEqual(self.func(add, SequenceClass(1), 42), 42)

        d = {"one": 1, "two": 2, "three": 3}
        # Iterating a dict yields its keys.
        self.assertEqual(self.func(add, d), "".join(d.keys()))
class TestCmpToKey:
    """Shared tests for functools.cmp_to_key (mixed into C/Py variants)."""

    def test_cmp_to_key(self):
        def cmp1(x, y):
            return (x > y) - (x < y)
        key = self.cmp_to_key(cmp1)
        self.assertEqual(key(3), key(3))
        self.assertGreater(key(3), key(1))
        self.assertGreaterEqual(key(3), key(3))

        # Comparison is defined by mycmp, not by the wrapped objects.
        def cmp2(x, y):
            return int(x) - int(y)
        key = self.cmp_to_key(cmp2)
        self.assertEqual(key(4.0), key('4'))
        self.assertLess(key(2), key('35'))
        self.assertLessEqual(key(2), key('35'))
        self.assertNotEqual(key(2), key('35'))

    def test_cmp_to_key_arguments(self):
        def cmp1(x, y):
            return (x > y) - (x < y)
        key = self.cmp_to_key(mycmp=cmp1)
        self.assertEqual(key(obj=3), key(obj=3))
        self.assertGreater(key(obj=3), key(obj=1))
        with self.assertRaises((TypeError, AttributeError)):
            key(3) > 1    # rhs is not a K object
        with self.assertRaises((TypeError, AttributeError)):
            1 < key(3)    # lhs is not a K object
        with self.assertRaises(TypeError):
            key = self.cmp_to_key()             # too few args
        with self.assertRaises(TypeError):
            key = self.cmp_to_key(cmp1, None)   # too many args
        key = self.cmp_to_key(cmp1)
        with self.assertRaises(TypeError):
            key()                                    # too few args
        with self.assertRaises(TypeError):
            key(None, None)                          # too many args

    def test_bad_cmp(self):
        def cmp1(x, y):
            raise ZeroDivisionError
        key = self.cmp_to_key(cmp1)
        with self.assertRaises(ZeroDivisionError):
            key(3) > key(1)

        # NOTE(review): `key` is never rebuilt from the redefined cmp1
        # below, so BadCmp and the second cmp1 are effectively dead code
        # and the second assertRaises re-tests the first cmp1.  This
        # matches upstream CPython — confirm before "fixing".
        class BadCmp:
            def __lt__(self, other):
                raise ZeroDivisionError
        def cmp1(x, y):
            return BadCmp()
        with self.assertRaises(ZeroDivisionError):
            key(3) > key(1)

    def test_obj_field(self):
        def cmp1(x, y):
            return (x > y) - (x < y)
        key = self.cmp_to_key(mycmp=cmp1)
        # The wrapped value stays reachable via .obj.
        self.assertEqual(key(50).obj, 50)

    def test_sort_int(self):
        def mycmp(x, y):
            return y - x
        self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
                         [4, 3, 2, 1, 0])

    def test_sort_int_str(self):
        def mycmp(x, y):
            x, y = int(x), int(y)
            return (x > y) - (x < y)
        values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
        values = sorted(values, key=self.cmp_to_key(mycmp))
        self.assertEqual([int(value) for value in values],
                         [0, 1, 1, 2, 3, 4, 5, 7, 10])

    def test_hash(self):
        def mycmp(x, y):
            return y - x
        key = self.cmp_to_key(mycmp)
        k = key(10)
        self.assertRaises(TypeError, hash, k)
        # NOTE(review): collections.Hashable is the deprecated alias of
        # collections.abc.Hashable (removed in Python 3.10); kept as-is
        # for this interpreter vintage.
        self.assertNotIsInstance(k, collections.Hashable)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
    """Run the cmp_to_key suite against the C implementation."""
    if c_functools:
        cmp_to_key = c_functools.cmp_to_key
@unittest.skipIf(c_functools, "skip pure-python test if C impl is present")
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
    """Run the cmp_to_key suite against the pure-Python implementation."""
    # staticmethod so the plain function is not bound as a method.
    cmp_to_key = staticmethod(py_functools.cmp_to_key)
class TestTotalOrdering(unittest.TestCase):
    """Tests for functools.total_ordering.

    The four test_total_ordering_* methods verify that each single root
    comparison (__lt__/__le__/__gt__/__ge__) plus __eq__ is enough to
    derive the full set of comparison operators.
    """

    def test_total_ordering_lt(self):
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __lt__(self, other):
                return self.value < other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
        self.assertFalse(A(1) > A(2))

    def test_total_ordering_le(self):
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __le__(self, other):
                return self.value <= other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
        self.assertFalse(A(1) >= A(2))

    def test_total_ordering_gt(self):
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __gt__(self, other):
                return self.value > other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
        self.assertFalse(A(2) < A(1))

    def test_total_ordering_ge(self):
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __ge__(self, other):
                return self.value >= other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
        self.assertFalse(A(2) <= A(1))

    def test_total_ordering_no_overwrite(self):
        # new methods should not overwrite existing
        @functools.total_ordering
        class A(int):
            pass
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))

    def test_no_operations_defined(self):
        # A class with no root comparison operator is rejected.
        with self.assertRaises(ValueError):
            @functools.total_ordering
            class A:
                pass

    def test_type_error_when_not_implemented(self):
        # bug 10042; ensure stack overflow does not occur
        # when decorated types return NotImplemented
        @functools.total_ordering
        class ImplementsLessThan:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ImplementsLessThan):
                    return self.value == other.value
                return False
            def __lt__(self, other):
                if isinstance(other, ImplementsLessThan):
                    return self.value < other.value
                return NotImplemented

        @functools.total_ordering
        class ImplementsGreaterThan:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ImplementsGreaterThan):
                    return self.value == other.value
                return False
            def __gt__(self, other):
                if isinstance(other, ImplementsGreaterThan):
                    return self.value > other.value
                return NotImplemented

        @functools.total_ordering
        class ImplementsLessThanEqualTo:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ImplementsLessThanEqualTo):
                    return self.value == other.value
                return False
            def __le__(self, other):
                if isinstance(other, ImplementsLessThanEqualTo):
                    return self.value <= other.value
                return NotImplemented

        @functools.total_ordering
        class ImplementsGreaterThanEqualTo:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ImplementsGreaterThanEqualTo):
                    return self.value == other.value
                return False
            def __ge__(self, other):
                if isinstance(other, ImplementsGreaterThanEqualTo):
                    return self.value >= other.value
                return NotImplemented

        @functools.total_ordering
        class ComparatorNotImplemented:
            # Root operator always bails out with NotImplemented.
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ComparatorNotImplemented):
                    return self.value == other.value
                return False
            def __lt__(self, other):
                return NotImplemented

        # Mixed-type comparisons must fall through to TypeError instead
        # of recursing between the derived operators.
        with self.subTest("LT < 1"), self.assertRaises(TypeError):
            ImplementsLessThan(-1) < 1

        with self.subTest("LT < LE"), self.assertRaises(TypeError):
            ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)

        with self.subTest("LT < GT"), self.assertRaises(TypeError):
            ImplementsLessThan(1) < ImplementsGreaterThan(1)

        with self.subTest("LE <= LT"), self.assertRaises(TypeError):
            ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)

        with self.subTest("LE <= GE"), self.assertRaises(TypeError):
            ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)

        with self.subTest("GT > GE"), self.assertRaises(TypeError):
            ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)

        with self.subTest("GT > LT"), self.assertRaises(TypeError):
            ImplementsGreaterThan(5) > ImplementsLessThan(5)

        with self.subTest("GE >= GT"), self.assertRaises(TypeError):
            ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)

        with self.subTest("GE >= LE"), self.assertRaises(TypeError):
            ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)

        with self.subTest("GE when equal"):
            a = ComparatorNotImplemented(8)
            b = ComparatorNotImplemented(8)
            self.assertEqual(a, b)
            with self.assertRaises(TypeError):
                a >= b

        with self.subTest("LE when equal"):
            a = ComparatorNotImplemented(9)
            b = ComparatorNotImplemented(9)
            self.assertEqual(a, b)
            with self.assertRaises(TypeError):
                a <= b

    def test_pickle(self):
        # Derived comparison methods must pickle by reference.
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            for name in '__lt__', '__gt__', '__le__', '__ge__':
                with self.subTest(method=name, proto=proto):
                    method = getattr(Orderable_LT, name)
                    method_copy = pickle.loads(pickle.dumps(method, proto))
                    self.assertIs(method_copy, method)
@functools.total_ordering
class Orderable_LT:
    """Module-level helper ordered by ``value``.

    Only ``__eq__`` and ``__lt__`` are defined explicitly; the remaining
    comparison operators are filled in by ``functools.total_ordering``
    (TestTotalOrdering.test_pickle relies on this being a module-level,
    hence picklable-by-reference, class).
    """

    def __init__(self, value):
        self.value = value

    def __eq__(self, other):
        return self.value == other.value

    def __lt__(self, other):
        return self.value < other.value
class TestLRU:
def test_lru(self):
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(maxsize, 20)
self.assertEqual(currsize, 0)
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
domain = range(5)
for i in range(1000):
x, y = choice(domain), choice(domain)
actual = f(x, y)
expected = orig(x, y)
self.assertEqual(actual, expected)
hits, misses, maxsize, currsize = f.cache_info()
self.assertTrue(hits > misses)
self.assertEqual(hits + misses, 1000)
self.assertEqual(currsize, 20)
f.cache_clear() # test clearing
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
self.assertEqual(currsize, 0)
f(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# Test bypassing the cache
self.assertIs(f.__wrapped__, orig)
f.__wrapped__(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size zero (which means "never-cache")
@self.module.lru_cache(0)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 0)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 5)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 5)
self.assertEqual(currsize, 0)
# test size one
@self.module.lru_cache(1)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 1)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 1)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 4)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size two
@self.module.lru_cache(2)
def f(x):
nonlocal f_cnt
f_cnt += 1
return x*10
self.assertEqual(f.cache_info().maxsize, 2)
f_cnt = 0
for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
# * * * *
self.assertEqual(f(x), x*10)
self.assertEqual(f_cnt, 4)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 12)
self.assertEqual(misses, 4)
self.assertEqual(currsize, 2)
def test_lru_reentrancy_with_len(self):
# Test to make sure the LRU cache code isn't thrown-off by
# caching the built-in len() function. Since len() can be
# cached, we shouldn't use it inside the lru code itself.
old_len = builtins.len
try:
builtins.len = self.module.lru_cache(4)(len)
for i in [0, 0, 1, 2, 3, 3, 4, 5, 6, 1, 7, 2, 1]:
self.assertEqual(len('abcdefghijklmn'[:i]), i)
finally:
builtins.len = old_len
def test_lru_type_error(self):
# Regression test for issue #28653.
# lru_cache was leaking when one of the arguments
# wasn't cacheable.
@functools.lru_cache(maxsize=None)
def infinite_cache(o):
pass
@functools.lru_cache(maxsize=10)
def limited_cache(o):
pass
with self.assertRaises(TypeError):
infinite_cache([])
with self.assertRaises(TypeError):
limited_cache([])
def test_lru_with_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_maxsize_negative(self):
@self.module.lru_cache(maxsize=-10)
def eq(n):
return n
for i in (0, 1):
self.assertEqual([eq(n) for n in range(150)], list(range(150)))
self.assertEqual(eq.cache_info(),
self.module._CacheInfo(hits=0, misses=300, maxsize=-10, currsize=1))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
@self.module.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
with self.assertRaises(IndexError) as cm:
func(15)
self.assertIsNone(cm.exception.__context__)
# Verify that the previous exception did not result in a cached entry
with self.assertRaises(IndexError):
func(15)
def test_lru_with_types(self):
for maxsize in (None, 128):
@self.module.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
self.assertEqual(type(square(3)), type(9))
self.assertEqual(square(3.0), 9.0)
self.assertEqual(type(square(3.0)), type(9.0))
self.assertEqual(square(x=3), 9)
self.assertEqual(type(square(x=3)), type(9))
self.assertEqual(square(x=3.0), 9.0)
self.assertEqual(type(square(x=3.0)), type(9.0))
self.assertEqual(square.cache_info().hits, 4)
self.assertEqual(square.cache_info().misses, 4)
def test_lru_with_keyword_args(self):
@self.module.lru_cache()
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual(
[fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_kwargs_order(self):
# PEP 468: Preserving Keyword Argument Order
@self.module.lru_cache(maxsize=10)
def f(**kwargs):
return list(kwargs.items())
self.assertEqual(f(a=1, b=2), [('a', 1), ('b', 2)])
self.assertEqual(f(b=2, a=1), [('b', 2), ('a', 1)])
self.assertEqual(f.cache_info(),
self.module._CacheInfo(hits=0, misses=2, maxsize=10, currsize=2))
def test_lru_cache_decoration(self):
def f(zomg: 'zomg_annotation'):
"""f doc string"""
return 42
g = self.module.lru_cache()(f)
for attr in self.module.WRAPPER_ASSIGNMENTS:
self.assertEqual(getattr(g, attr), getattr(f, attr))
@unittest.skipUnless(threading, 'This test requires threading.')
def test_lru_cache_threaded(self):
n, m = 5, 11
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=n*m)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(currsize, 0)
start = threading.Event()
def full(k):
start.wait(10)
for _ in range(m):
self.assertEqual(f(k, 0), orig(k, 0))
def clear():
start.wait(10)
for _ in range(2*m):
f.cache_clear()
orig_si = sys.getswitchinterval()
support.setswitchinterval(1e-6)
try:
# create n threads in order to fill cache
threads = [threading.Thread(target=full, args=[k])
for k in range(n)]
with support.start_threads(threads):
start.set()
hits, misses, maxsize, currsize = f.cache_info()
if self.module is py_functools:
# XXX: Why can be not equal?
self.assertLessEqual(misses, n)
self.assertLessEqual(hits, m*n - misses)
else:
self.assertEqual(misses, n)
self.assertEqual(hits, m*n - misses)
self.assertEqual(currsize, n)
# create n threads in order to fill cache and 1 to clear it
threads = [threading.Thread(target=clear)]
threads += [threading.Thread(target=full, args=[k])
for k in range(n)]
start.clear()
with support.start_threads(threads):
start.set()
finally:
sys.setswitchinterval(orig_si)
@unittest.skipUnless(threading, 'This test requires threading.')
def test_lru_cache_threaded2(self):
# Simultaneous call with the same arguments
n, m = 5, 7
start = threading.Barrier(n+1)
pause = threading.Barrier(n+1)
stop = threading.Barrier(n+1)
@self.module.lru_cache(maxsize=m*n)
def f(x):
pause.wait(10)
return 3 * x
self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
def test():
for i in range(m):
start.wait(10)
self.assertEqual(f(i), 3 * i)
stop.wait(10)
threads = [threading.Thread(target=test) for k in range(n)]
with support.start_threads(threads):
for i in range(m):
start.wait(10)
stop.reset()
pause.wait(10)
start.reset()
stop.wait(10)
pause.reset()
self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
@unittest.skipUnless(threading, 'This test requires threading.')
def test_lru_cache_threaded3(self):
@self.module.lru_cache(maxsize=2)
def f(x):
time.sleep(.01)
return 3 * x
def test(i, x):
with self.subTest(thread=i):
self.assertEqual(f(x), 3 * x, i)
threads = [threading.Thread(target=test, args=(i, v))
for i, v in enumerate([1, 2, 2, 3, 2])]
with support.start_threads(threads):
pass
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
@self.module.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
class DoubleEq:
'Demonstrate a reentrant lru_cache call within a single thread'
def __init__(self, x):
self.x = x
def __hash__(self):
return self.x
def __eq__(self, other):
if self.x == 2:
test_func(DoubleEq(1))
return self.x == other.x
test_func(DoubleEq(1)) # Load the cache
test_func(DoubleEq(2)) # Load the cache
self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call
DoubleEq(2)) # Verify the correct return value
def test_early_detection_of_bad_call(self):
# Issue #22184
with self.assertRaises(TypeError):
@functools.lru_cache
def f():
pass
def test_lru_method(self):
class X(int):
f_cnt = 0
@self.module.lru_cache(2)
def f(self, x):
self.f_cnt += 1
return x*10+self
a = X(5)
b = X(5)
c = X(7)
self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))
for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
self.assertEqual(a.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))
for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
self.assertEqual(b.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))
for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
self.assertEqual(c.f(x), x*10 + 7)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))
self.assertEqual(a.f.cache_info(), X.f.cache_info())
self.assertEqual(b.f.cache_info(), X.f.cache_info())
self.assertEqual(c.f.cache_info(), X.f.cache_info())
def test_pickle(self):
    """Pickling a cached callable round-trips to the very same object."""
    cls = type(self)
    cached = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth)
    for func in cached:
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            with self.subTest(proto=proto, func=func):
                restored = pickle.loads(pickle.dumps(func, proto))
                self.assertIs(restored, func)
def test_copy(self):
    """copy.copy() of an lru_cache-wrapped callable returns it unchanged."""
    cls = type(self)
    def orig(x, y):
        return 3 * x + y
    partial_cached = self.module.lru_cache(2)(self.module.partial(orig, 2))
    candidates = (cls.cached_func[0], cls.cached_meth,
                  cls.cached_staticmeth, partial_cached)
    for func in candidates:
        with self.subTest(func=func):
            self.assertIs(copy.copy(func), func)
def test_deepcopy(self):
    """copy.deepcopy() of an lru_cache-wrapped callable returns it unchanged."""
    cls = type(self)
    def orig(x, y):
        return 3 * x + y
    partial_cached = self.module.lru_cache(2)(self.module.partial(orig, 2))
    candidates = (cls.cached_func[0], cls.cached_meth,
                  cls.cached_staticmeth, partial_cached)
    for func in candidates:
        with self.subTest(func=func):
            self.assertIs(copy.deepcopy(func), func)
@py_functools.lru_cache()
def py_cached_func(x, y):
    # Module-level cached function; referenced as TestLRUPy.cached_func.
    return 3 * x + y
@c_functools.lru_cache()
def c_cached_func(x, y):
    # Module-level cached function; referenced as TestLRUC.cached_func.
    return 3 * x + y
@unittest.skipIf(c_functools, "skip pure-python test if C impl is present")
class TestLRUPy(TestLRU, unittest.TestCase):
    # Run the shared TestLRU suite against the pure-Python implementation.
    module = py_functools
    # 1-tuple wrapper: tests access cls.cached_func[0], which keeps the
    # plain function from being turned into a bound method on attribute
    # access through the class.
    cached_func = py_cached_func,

    @module.lru_cache()
    def cached_meth(self, x, y):
        return 3 * x + y

    @staticmethod
    @module.lru_cache()
    def cached_staticmeth(x, y):
        return 3 * x + y
class TestLRUC(TestLRU, unittest.TestCase):
    # Run the shared TestLRU suite against the C implementation.
    module = c_functools
    # 1-tuple wrapper: tests access cls.cached_func[0] (avoids descriptor
    # binding of the plain function).
    cached_func = c_cached_func,

    @module.lru_cache()
    def cached_meth(self, x, y):
        return 3 * x + y

    @staticmethod
    @module.lru_cache()
    def cached_staticmeth(x, y):
        return 3 * x + y
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
    """register(type, func) routes matching arguments to the overload."""
    @functools.singledispatch
    def g(obj):
        return "base"
    def handle_int(i):
        return "integer"
    g.register(int, handle_int)
    for arg, expected in (("str", "base"), (1, "integer"), ([1, 2, 3], "base")):
        self.assertEqual(g(arg), expected)
def test_mro(self):
@functools.singledispatch
def g(obj):
return "base"
class A:
pass
class C(A):
pass
class B(A):
pass
class D(C, B):
pass
def g_A(a):
return "A"
def g_B(b):
return "B"
g.register(A, g_A)
g.register(B, g_B)
self.assertEqual(g(A()), "A")
self.assertEqual(g(B()), "B")
self.assertEqual(g(C()), "A")
self.assertEqual(g(D()), "B")
def test_register_decorator(self):
    # register() used as a decorator factory with an explicit type.
    @functools.singledispatch
    def g(obj):
        return "base"
    @g.register(int)
    def g_int(i):
        return "int %s" % (i,)
    self.assertEqual(g(""), "base")
    self.assertEqual(g(12), "int 12")
    # dispatch() exposes the implementation chosen for a given class.
    self.assertIs(g.dispatch(int), g_int)
    self.assertIs(g.dispatch(object), g.dispatch(str))
    # Note: in the assert above this is not g.
    # @singledispatch returns the wrapper.
@unittest.skipIf(cosmo.MODE in ('tiny', 'rel'),
"no pydocs in rel mode")
def test_wrapping_attributes(self):
@functools.singledispatch
def g(obj):
"Simple test"
return "Test"
self.assertEqual(g.__name__, "g")
if sys.flags.optimize < 2:
self.assertEqual(g.__doc__, "Simple test")
@unittest.skipUnless(decimal, 'requires _decimal')
@support.cpython_only
def test_c_classes(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(decimal.DecimalException)
def _(obj):
return obj.args
subn = decimal.Subnormal("Exponent < Emin")
rnd = decimal.Rounded("Number got rounded")
self.assertEqual(g(subn), ("Exponent < Emin",))
self.assertEqual(g(rnd), ("Number got rounded",))
@g.register(decimal.Subnormal)
def _(obj):
return "Too small to care."
self.assertEqual(g(subn), "Too small to care.")
self.assertEqual(g(rnd), ("Number got rounded",))
def test_compose_mro(self):
# None of the examples in this test depend on haystack ordering.
c = collections
mro = functools._compose_mro
bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
for haystack in permutations(bases):
m = mro(dict, haystack)
self.assertEqual(m, [dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
bases = [c.Container, c.Mapping, c.MutableMapping, c.OrderedDict]
for haystack in permutations(bases):
m = mro(c.ChainMap, haystack)
self.assertEqual(m, [c.ChainMap, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
# If there's a generic function with implementations registered for
# both Sized and Container, passing a defaultdict to it results in an
# ambiguous dispatch which will cause a RuntimeError (see
# test_mro_conflicts).
bases = [c.Container, c.Sized, str]
for haystack in permutations(bases):
m = mro(c.defaultdict, [c.Sized, c.Container, str])
self.assertEqual(m, [c.defaultdict, dict, c.Sized, c.Container,
object])
# MutableSequence below is registered directly on D. In other words, it
# precedes MutableMapping which means single dispatch will always
# choose MutableSequence here.
class D(c.defaultdict):
pass
c.MutableSequence.register(D)
bases = [c.MutableSequence, c.MutableMapping]
for haystack in permutations(bases):
m = mro(D, bases)
self.assertEqual(m, [D, c.MutableSequence, c.Sequence, c.Reversible,
c.defaultdict, dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable, c.Container,
object])
# Container and Callable are registered on different base classes and
# a generic function supporting both should always pick the Callable
# implementation if a C instance is passed.
class C(c.defaultdict):
def __call__(self):
pass
bases = [c.Sized, c.Callable, c.Container, c.Mapping]
for haystack in permutations(bases):
m = mro(C, haystack)
self.assertEqual(m, [C, c.Callable, c.defaultdict, dict, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
def test_register_abc(self):
c = collections
d = {"a": "b"}
l = [1, 2, 3]
s = {object(), None}
f = frozenset(s)
t = (1, 2, 3)
@functools.singledispatch
def g(obj):
return "base"
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "base")
self.assertEqual(g(s), "base")
self.assertEqual(g(f), "base")
self.assertEqual(g(t), "base")
g.register(c.Sized, lambda obj: "sized")
self.assertEqual(g(d), "sized")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableMapping, lambda obj: "mutablemapping")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.ChainMap, lambda obj: "chainmap")
self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSequence, lambda obj: "mutablesequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSet, lambda obj: "mutableset")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Mapping, lambda obj: "mapping")
self.assertEqual(g(d), "mutablemapping") # not specific enough
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Sequence, lambda obj: "sequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sequence")
g.register(c.Set, lambda obj: "set")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(dict, lambda obj: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(list, lambda obj: "list")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(set, lambda obj: "concrete-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(frozenset, lambda obj: "frozen-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "sequence")
g.register(tuple, lambda obj: "tuple")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "tuple")
def test_c3_abc(self):
c = collections
mro = functools._c3_mro
class A(object):
pass
class B(A):
def __len__(self):
return 0 # implies Sized
@c.Container.register
class C(object):
pass
class D(object):
pass # unrelated
class X(D, C, B):
def __call__(self):
pass # implies Callable
expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
for abcs in permutations([c.Sized, c.Callable, c.Container]):
self.assertEqual(mro(X, abcs=abcs), expected)
# unrelated ABCs don't appear in the resulting MRO
many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
self.assertEqual(mro(X, abcs=many_abcs), expected)
def test_false_meta(self):
# see issue23572
class MetaA(type):
def __len__(self):
return 0
class A(metaclass=MetaA):
pass
class AA(A):
pass
@functools.singledispatch
def fun(a):
return 'base A'
@fun.register(A)
def _(a):
return 'fun A'
aa = AA()
self.assertEqual(fun(aa), 'fun A')
def test_mro_conflicts(self):
c = collections
@functools.singledispatch
def g(arg):
return "base"
class O(c.Sized):
def __len__(self):
return 0
o = O()
self.assertEqual(g(o), "base")
g.register(c.Iterable, lambda arg: "iterable")
g.register(c.Container, lambda arg: "container")
g.register(c.Sized, lambda arg: "sized")
g.register(c.Set, lambda arg: "set")
self.assertEqual(g(o), "sized")
c.Iterable.register(O)
self.assertEqual(g(o), "sized") # because it's explicitly in __mro__
c.Container.register(O)
self.assertEqual(g(o), "sized") # see above: Sized is in __mro__
c.Set.register(O)
self.assertEqual(g(o), "set") # because c.Set is a subclass of
# c.Sized and c.Container
class P:
pass
p = P()
self.assertEqual(g(p), "base")
c.Iterable.register(P)
self.assertEqual(g(p), "iterable")
c.Container.register(P)
with self.assertRaises(RuntimeError) as re_one:
g(p)
self.assertIn(
str(re_one.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Iterable'>"),
("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
"or <class 'collections.abc.Container'>")),
)
class Q(c.Sized):
def __len__(self):
return 0
q = Q()
self.assertEqual(g(q), "sized")
c.Iterable.register(Q)
self.assertEqual(g(q), "sized") # because it's explicitly in __mro__
c.Set.register(Q)
self.assertEqual(g(q), "set") # because c.Set is a subclass of
# c.Sized and c.Iterable
@functools.singledispatch
def h(arg):
return "base"
@h.register(c.Sized)
def _(arg):
return "sized"
@h.register(c.Container)
def _(arg):
return "container"
# Even though Sized and Container are explicit bases of MutableMapping,
# this ABC is implicitly registered on defaultdict which makes all of
# MutableMapping's bases implicit as well from defaultdict's
# perspective.
with self.assertRaises(RuntimeError) as re_two:
h(c.defaultdict(lambda: 0))
self.assertIn(
str(re_two.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class R(c.defaultdict):
pass
c.MutableSequence.register(R)
@functools.singledispatch
def i(arg):
return "base"
@i.register(c.MutableMapping)
def _(arg):
return "mapping"
@i.register(c.MutableSequence)
def _(arg):
return "sequence"
r = R()
self.assertEqual(i(r), "sequence")
class S:
pass
class T(S, c.Sized):
def __len__(self):
return 0
t = T()
self.assertEqual(h(t), "sized")
c.Container.register(T)
self.assertEqual(h(t), "sized") # because it's explicitly in the MRO
class U:
def __len__(self):
return 0
u = U()
self.assertEqual(h(u), "sized") # implicit Sized subclass inferred
# from the existence of __len__()
c.Container.register(U)
# There is no preference for registered versus inferred ABCs.
with self.assertRaises(RuntimeError) as re_three:
h(u)
self.assertIn(
str(re_three.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class V(c.Sized, S):
def __len__(self):
return 0
@functools.singledispatch
def j(arg):
return "base"
@j.register(S)
def _(arg):
return "s"
@j.register(c.Container)
def _(arg):
return "container"
v = V()
self.assertEqual(j(v), "s")
c.Container.register(V)
self.assertEqual(j(v), "container") # because it ends up right after
# Sized in the MRO
def test_cache_invalidation(self):
from collections import UserDict
class TracingDict(UserDict):
def __init__(self, *args, **kwargs):
super(TracingDict, self).__init__(*args, **kwargs)
self.set_ops = []
self.get_ops = []
def __getitem__(self, key):
result = self.data[key]
self.get_ops.append(key)
return result
def __setitem__(self, key, value):
self.set_ops.append(key)
self.data[key] = value
def clear(self):
self.data.clear()
_orig_wkd = functools.WeakKeyDictionary
td = TracingDict()
functools.WeakKeyDictionary = lambda: td
c = collections
@functools.singledispatch
def g(arg):
return "base"
d = {}
l = []
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(g(l), "base")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict, list])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(td.data[list], g.registry[object])
self.assertEqual(td.data[dict], td.data[list])
self.assertEqual(g(l), "base")
self.assertEqual(g(d), "base")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list])
g.register(list, lambda arg: "list")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict])
self.assertEqual(td.data[dict],
functools._find_impl(dict, g.registry))
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list])
self.assertEqual(td.data[list],
functools._find_impl(list, g.registry))
class X:
pass
c.MutableMapping.register(X) # Will not invalidate the cache,
# not using ABCs yet.
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "list")
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list])
g.register(c.Sized, lambda arg: "sized")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "sized")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
self.assertEqual(g(l), "list")
self.assertEqual(g(d), "sized")
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
g.dispatch(list)
g.dispatch(dict)
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
c.MutableSet.register(X) # Will invalidate the cache.
self.assertEqual(len(td), 2) # Stale cache.
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 1)
g.register(c.MutableMapping, lambda arg: "mutablemapping")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(len(td), 1)
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
g.register(dict, lambda arg: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
g._clear_cache()
self.assertEqual(len(td), 0)
functools.WeakKeyDictionary = _orig_wkd
def test_invalid_positional_argument(self):
    """Calling a singledispatch function without arguments is a TypeError."""
    @functools.singledispatch
    def f(*args):
        pass
    with self.assertRaisesRegex(
            TypeError, 'f requires at least 1 positional argument'):
        f()
if __name__ == '__main__':
unittest.main()
|
utils.py | # Copyright (c) 2021. Universidad de Pinar del Rio
# This file is part of SCEIBA (sceiba.cu).
# SCEIBA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
from __future__ import absolute_import, print_function
import enum
import json
import re
import traceback
from datetime import datetime, timedelta
from threading import Thread
from uuid import UUID
import requests
from flask import current_app, jsonify, render_template, request
from flask_mail import Message
from invenio_accounts.models import User
from invenio_cache import current_cache
from invenio_i18n.selectors import get_locale
# def get_sources_by_terms(tids):
# """sources by a list of terms"""
# termsources = TermSources.query.filter(TermSources.term_id in tids).group_by(
# TermSources.sources_id).all()
# result[]
# for ts in termsources:
class IrokoResponseStatus(enum.Enum):
    """Canonical status labels embedded in every iroko JSON response."""
    SUCCESS = "success"
    FAIL = "fail"
    ERROR = "error"
    NOT_FOUND = "not found"
class IrokoVocabularyIdentifiers(enum.Enum):
    """Well-known vocabulary identifiers.

    Fix: every member after COUNTRIES originally ended with a stray
    trailing comma, which silently made those ``.value`` attributes
    1-tuples such as ``('LICENCES',)`` instead of plain strings.  All
    member values are now strings, consistent with COUNTRIES.
    """
    COUNTRIES = 'COUNTRIES'
    CUBAN_PROVINCES = 'CUBAN_PROVINCES'
    LICENCES = 'LICENCES'
    # (sic) spelling kept: values are stored identifiers, renaming them
    # would break existing data.
    CUBAN_INTITUTIONS = 'CUBAN_INTITUTIONS'
    EXTRA_INSTITUTIONS = 'EXTRA_INSTITUTIONS'
    SUBJECTS = 'SUBJECTS'
    INDEXES = 'INDEXES'
    INDEXES_CLASIFICATION = 'INDEXES_CLASIFICATION'
    RECOD_SETS = 'RECOD_SETS'
    RECORD_TYPES = 'RECORD_TYPES'
def iroko_json_response(status: IrokoResponseStatus, message, data_type, data):
    """Build the standard iroko JSON envelope (status/message/data)."""
    payload = {
        'status': status.value,
        'message': message,
        'data': {data_type: data},
    }
    return jsonify(payload)
# @babel.localeselector
# def get_locale():
# # if a user is logged in, use the locale from the user settings
# user = getattr(g, 'user', None)
# if user is not None:
# return user.locale
# # otherwise try to guess the language from the user accept
# # header the browser transmits. We support de/fr/en in this
# # example. The best match wins.
# return request.accept_languages.best_match(['de', 'fr', 'en'])
def validate_uuid4(uuid_string):
    """
    Return True when *uuid_string* is a textually valid version-4 UUID.

    The uuid module does the actual checking.  Passing ``version=4`` to
    UUID() is essential: without it any 32-character hex string is
    accepted.  Because UUID() silently *coerces* a well-formed hex string
    into a valid uuid4, we additionally require the parsed value to render
    back to the exact input (either hex or canonical dashed form).
    """
    try:
        parsed = UUID(uuid_string, version=4)
    except ValueError:
        # Not even a well-formed hex/UUID string.
        return False
    return uuid_string in (parsed.hex, str(parsed))
def validate_integer(int_string):
    """Return True when *int_string* can be parsed as an int.

    Improvements: the unused intermediate variable is gone, and TypeError
    is now guarded too (e.g. ``None`` or a list), which the original let
    propagate to the caller.
    """
    try:
        int(int_string)
    except (ValueError, TypeError):
        return False
    return True
def string_as_identifier(value: str):
    """Lower-case *value* and make it identifier-safe.

    Runs of non-word characters become single underscores, and a leading
    digit gets an underscore prefix.  Fix: the pattern is now a raw
    string -- ``'\\W'`` in a plain string literal is an invalid escape
    sequence (DeprecationWarning today, an error in future Pythons).
    """
    return re.sub(r'\W+|^(?=\d)', '_', value.lower())
def send_async_email(app, msg):
    # Runs in a worker thread: an application context must be pushed
    # because the mail extension reads configuration from the app.
    with app.app_context():
        app.extensions['mail'].send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
    """Compose a Flask-Mail message and send it from a background thread.

    Fix: the worker thread now receives the real application object.  The
    ``current_app`` proxy is bound to the calling thread's context, so
    dereferencing it from the new thread raises RuntimeError (see Flask's
    application-context docs on ``_get_current_object``).
    """
    msg = Message(subject, sender=sender, recipients=recipients)
    msg.body = text_body
    msg.html = html_body
    app = current_app._get_current_object()
    # Fire-and-forget: the actual send happens asynchronously.
    Thread(target=send_async_email, args=(app, msg)).start()
def send_contact_email(name, email, user_message):
    """Email the contents of the public contact form.

    Fix: Flask-Mail expects ``recipients`` to be a list of addresses; the
    original passed the bare address string.  A string is now wrapped in a
    one-element list; list input keeps working unchanged.
    """
    language = get_locale().upper()
    # Prefer the proxy-supplied real client IP (nginx X-Real-IP header).
    client_ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
    recipients = [email] if isinstance(email, str) else email
    send_email(
        'message from sceiba',
        sender=current_app.config['SECURITY_EMAIL_SENDER'],
        # recipients=current_app.config['ADMINS'],
        recipients=recipients,
        text_body=render_template(
            'iroko_theme/email/contact_email.txt', name=name, email=email,
            user_message=user_message, language=language, ip=client_ip
        ),
        html_body=render_template(
            'iroko_theme/email/contact_email.html', name=name, email=email,
            user_message=user_message, language=language, ip=client_ip
        )
    )
def get_default_user():
    """Return the fallback account used when no explicit user applies.

    Tries the primary address first, then the shared mailbox; may return
    None when neither account exists.
    """
    primary = User.query.filter_by(email='rafael.martinez@upr.edu.cu').first()
    if primary:
        return primary
    return User.query.filter_by(email='sceiba.cu@gmail.com').first()
class CuorHelper:
    """Helpers for querying the CUOR organizations service and for
    inspecting the ``relationships`` metadata of an organization dict.

    Remote query results are cached via invenio's ``current_cache`` for
    one day.
    """
    # TODO: investigar como hacer esto mas eficientemente, con redis quizas
    # (kept from the original; this per-process dict is currently unused)
    org_simple_cache = dict()

    @classmethod
    def query_cuor_by_pid(cls, pid):
        """Request an Organization by Persistent Identifier (not the CUOR
        UUID).  Returns the organization dict, or None on 404 / failure.
        """
        try:
            cache_key = "query_cuor_by_pid:{0}".format(pid)
            cache = current_cache.get(cache_key) or {}
            if "date" not in cache:
                cache["date"] = datetime.now()
            if datetime.now() - cache["date"] < timedelta(days=1) and "org" in cache:
                print("USING CACHE ORGANIZATION")
                # A cached 404 payload means the organization is known missing.
                if 'status' in cache["org"] and cache["org"]['status'] == '404':
                    cache["org"] = None
                    return None
                return cache["org"]
            api_endpoint = current_app.config['CUOR_API_ENDPOINT']
            session = requests.Session()
            url = api_endpoint + '/pid?value=' + pid
            # NOTE(review): verify=False disables TLS verification -- confirm
            # this is intentional for the internal endpoint.
            response = session.get(url, verify=False)
            result = json.loads(response.text)
            if 'status' in result and result['status'] == '404':
                return None
            cache["org"] = result
            cache["date"] = datetime.now()
            current_cache.set(cache_key, cache, timeout=-1)
            return result
        except Exception:
            # Best-effort lookup: any failure is reported as "not found".
            return None

    @classmethod
    def query_cuor_by_uuid(cls, uuid):
        """Request an Organization by its CUOR UUID (cached for one day)."""
        try:
            # NOTE(review): the cache key reuses the "query_cuor_by_pid:"
            # namespace (as in the original); kept for compatibility with
            # already-cached entries, but it could collide with pid keys.
            cache_key = "query_cuor_by_pid:{0}".format(uuid)
            cache = current_cache.get(cache_key) or {}
            if "date" not in cache:
                cache["date"] = datetime.now()
            if datetime.now() - cache["date"] < timedelta(days=1) and "org" in cache:
                return cache["org"]
            api_endpoint = current_app.config['CUOR_API_ENDPOINT']
            session = requests.Session()
            url = api_endpoint + '/' + uuid
            response = session.get(url, verify=False)
            result = json.loads(response.text)
            cache["org"] = result
            cache["date"] = datetime.now()
            current_cache.set(cache_key, cache, timeout=-1)
            return result
        except Exception:
            print(traceback.format_exc())
            return None

    @classmethod
    def query_cuor_by_label(cls, text, country='', state='', types=''):
        """Return the first organization whose ``labels.label`` matches
        *text*; optional filters narrow the search.  None on miss/error.
        """
        try:
            api_endpoint = current_app.config['CUOR_API_ENDPOINT']
            session = requests.Session()
            url = api_endpoint + '?q=labels.label:' + text
            if country != '':
                url += '&country=' + country
            if state != '':
                url += '&state=' + state
            if types != '':
                url += '&types=' + types
            response = session.get(url, verify=False)
            result = json.loads(response.text)
            if 'hits' in result and 'total' in result['hits'] and result['hits']['total'] > 0:
                return result['hits']['hits'][0]
            # Explicit: no hits found (the original fell through implicitly).
            return None
        except Exception:
            return None

    @classmethod
    def _typed_relationships(cls, org):
        """Yield every relationship entry of *org* that carries both an
        'id' and a 'type' key (shared scan used by the getters below)."""
        if 'metadata' in org and 'relationships' in org['metadata']:
            for rel in org['metadata']['relationships']:
                if 'id' in rel and 'type' in rel:
                    yield rel

    @classmethod
    def get_if_child(cls, org, uuid):
        """Return the relationship dict if *uuid* appears in *org* as a
        child, else None.
        """
        for rel in cls._typed_relationships(org):
            if uuid == rel['id'] and rel['type'] == 'child':
                return rel
        return None

    @classmethod
    def get_if_parent(cls, org, uuid):
        """Return the relationship dict if *uuid* appears in *org* as a
        parent, else None.
        """
        for rel in cls._typed_relationships(org):
            if uuid == rel['id'] and rel['type'] == 'parent':
                return rel
        return None

    @classmethod
    def get_relationship(cls, org, uuid):
        """Return the relationship of *org* with id *uuid* (any type),
        else None.
        """
        for rel in cls._typed_relationships(org):
            if uuid == rel['id']:
                return rel
        return None

    @classmethod
    def get_relationships(cls, org, rel_type):
        """Return all relationships of *org* whose type equals *rel_type*."""
        return [rel for rel in cls._typed_relationships(org)
                if rel['type'] == rel_type]

    @classmethod
    def get_relationships_parent(cls, org):
        """All 'parent' relationships of *org*."""
        return cls.get_relationships(org, 'parent')

    @classmethod
    def get_relationships_child(cls, org):
        """All 'child' relationships of *org*."""
        return cls.get_relationships(org, 'child')

    @classmethod
    def append_key_value_to_relationship(cls, org, child_id, relation_type, key, value):
        """Set ``rel[key] = value`` on the relationship whose id matches
        *child_id* (mutates *org* in place).

        Fix: the original compared ``id == rel['id']`` -- i.e. the
        *builtin* ``id`` function -- so the condition was never true and
        the method never modified anything.  It now matches on *child_id*.

        NOTE(review): *relation_type* was ignored by the original and is
        still ignored here for compatibility -- confirm whether matching
        should also filter on it.
        """
        for rel in cls._typed_relationships(org):
            if child_id == rel['id']:
                rel[key] = value
|
log_plotter.py | """
Plotting class to be used by Log.
"""
import time
import numpy as nm
from sfepy.base.base import Output, Struct
def draw_data(ax, xdata, ydata, label, plot_kwargs, swap_axes=False):
    """
    Draw log data to a given axes, obeying `swap_axes`.

    Real-valued data are drawn as one labelled line.  Complex data are
    drawn as two lines ('Re <label>', 'Im <label>'); the imaginary part
    reuses the colour matplotlib assigned to the real part, at half its
    alpha (mutating `plot_kwargs` in place, as the original did).
    """
    def _plot(ys, lab):
        # Honour swap_axes by exchanging the coordinate order.
        if swap_axes:
            return ax.plot(ys, xdata, label=lab, **plot_kwargs)
        return ax.plot(xdata, ys, label=lab, **plot_kwargs)

    if nm.isrealobj(ydata):
        _plot(ydata, label)
    else:
        lines = _plot(ydata.real, 'Re ' + label)
        # Propagate the auto-assigned colour and fade the imaginary part.
        plot_kwargs['color'] = lines[0].get_color()
        alpha = lines[0].get_alpha()
        plot_kwargs['alpha'] = 0.5 if alpha is None else 0.5 * alpha
        _plot(ydata.imag, 'Im ' + label)
class LogPlotter(Struct):
    """
    LogPlotter to be used by :class:`sfepy.base.log.Log`.

    Runs in the plotting (child) process: __call__() sets up the figure,
    then a polling thread drains drawing commands from a pipe and renders
    them with matplotlib.
    """
    output = Output('plotter:')
    output = staticmethod(output)

    def __init__(self, aggregate=100, sleep=1.0):
        # aggregate: upper bound on commands handled before a redraw.
        # sleep: pause [s] between pipe polling rounds.
        # ig/ip: current graph (axes) index and current line index.
        Struct.__init__(self, aggregate=aggregate, sleep=sleep,
                        ig=0, ip=0)

    def process_command(self, command):
        """Execute one command tuple (name, *args) received from the pipe."""
        # Imported lazily: matplotlib must only be touched in this process.
        from matplotlib.ticker import LogLocator, AutoLocator

        self.output(command[0])

        if command[0] == 'ig':
            # Select the current graph (axes).
            self.ig = command[1]

        if command[0] == 'ip':
            # Select the current line within the current graph.
            self.ip = command[1]

        elif command[0] == 'plot':
            xdata, ydata, plot_kwargs = command[1:]

            ig, ip = self.ig, self.ip
            ax = self.ax[ig]
            ax.set_yscale(self.yscales[ig])
            ax.yaxis.grid(True)

            draw_data(ax, xdata, ydata, self.data_names[ig][ip], plot_kwargs)

            if self.yscales[ig] == 'log':
                ymajor_formatter = ax.yaxis.get_major_formatter()
                # NOTE(review): label_minor() may be unavailable on newer
                # matplotlib formatters -- confirm the supported version.
                ymajor_formatter.label_minor(True)
                yminor_locator = LogLocator()
            else:
                yminor_locator = AutoLocator()
            self.ax[ig].yaxis.set_minor_locator(yminor_locator)

        elif command[0] == 'vline':
            # Remember a vertical line; it is drawn by the 'legends' command.
            x, kwargs = command[1:]
            self.vlines[self.ig].append((x, kwargs))

        elif command[0] == 'clear':
            self.ax[self.ig].cla()

        elif command[0] == 'legends':
            # Finalize all axes: legends, labels and the deferred vlines.
            for ig, ax in enumerate(self.ax):
                try:
                    ax.legend()
                except:
                    pass

                if self.xlabels[ig]:
                    ax.set_xlabel(self.xlabels[ig])
                if self.ylabels[ig]:
                    ax.set_ylabel(self.ylabels[ig])

                for x, kwargs in self.vlines[ig]:
                    ax.axvline(x, **kwargs)

            try:
                self.plt.tight_layout(pad=0.5)
            except:
                pass

        elif command[0] == 'add_axis':
            ig, names, yscale, xlabel, ylabel = command[1:]
            self.data_names[ig] = names
            self.yscales[ig] = yscale
            self.xlabels[ig] = xlabel
            self.ylabels[ig] = ylabel
            self.n_gr = len(self.data_names)
            self.make_axes()

        elif command[0] == 'save':
            self.fig.savefig(command[1])
            self.pipe.send(True)  # Acknowledge save.

    def terminate(self):
        # Report the count of the interrupted round and close all figures.
        if self.ii:
            self.output('processed %d commands' % self.ii)
        self.output('ended.')
        self.plt.close('all')

    def poll_draw(self):
        """Polling loop: drain commands, redraw, sleep; False on shutdown."""
        while 1:
            self.ii = 0

            while 1:
                if not self.pipe.poll():
                    break

                command = self.pipe.recv()
                can_break = False

                if command is None:
                    # None is the shutdown sentinel sent by the parent.
                    self.terminate()
                    return False
                elif command[0] == 'continue':
                    can_break = True
                else:
                    self.process_command(command)

                # Redraw at the latest after `aggregate` commands, but only
                # at a 'continue' boundary.
                if (self.ii >= self.aggregate) and can_break:
                    break

                self.ii += 1

            if self.ii:
                self.fig.canvas.draw()
                self.output('processed %d commands' % self.ii)

            time.sleep(self.sleep)

        # Unreachable: the loop only exits via the `return False` above.
        return True

    def make_axes(self):
        """(Re)create the subplot grid for the current number of graphs."""
        from sfepy.linalg import cycle

        self.fig.clf()
        self.ax = []

        # At most 5 columns; rows grow as needed.
        n_col = min(5.0, nm.fix(nm.sqrt(self.n_gr)))
        if int(n_col) == 0:
            n_row = 0
        else:
            n_row = int(nm.ceil(self.n_gr / n_col))
        n_col = int(n_col)

        for ii, (ir, ic) in enumerate(cycle((n_col, n_row))):
            if ii == self.n_gr: break
            self.ax.append(self.fig.add_subplot(n_row, n_col, ii + 1))
            self.vlines.setdefault(ii, [])

    def __call__(self, pipe, log_file, data_names, yscales, xlabels, ylabels):
        """
        Sets-up the plotting window, starts a thread calling self.poll_draw()
        that does the actual plotting, taking commands out of `pipe`.

        Note that pyplot _must_ be imported here and not in this module so that
        the import occurs _after_ the plotting process is started in that
        process.
        """
        import matplotlib.pyplot as plt
        self.plt = plt

        self.output.set_output(filename=log_file)
        self.output('starting plotter...')

        self.pipe = pipe
        self.data_names = data_names
        self.yscales = yscales
        self.xlabels = xlabels
        self.ylabels = ylabels
        self.n_gr = len(data_names)
        self.vlines = {}

        self.fig = self.plt.figure()
        self.make_axes()

        import threading
        draw_thread = threading.Thread(target=self.poll_draw)
        draw_thread.start()

        self.output('...done')
        self.plt.show()
        draw_thread.join()
|
flood.py | from threading import Thread, Event
from _bots import *
from sys import argv
from contextlib import suppress
import queue
q = queue.Queue()
e = Event()
e.set()
def check_code(pin):
    """Return True when *pin* is a live kahoot.it game code.

    The epoch query parameter is a cache-buster mirroring what the kahoot
    web client sends; the reserve endpoint answers 200 only for an active
    session.  Simplified: the status comparison is returned directly
    instead of the if/return-False/return-True ladder.
    """
    epoch = int(datetime.datetime.now().timestamp())
    r = requests.get(f'https://kahoot.it/reserve/session/{pin}/?{epoch}')
    return r.status_code == 200
#### Command-arguments section ####
pin = None
count = None
interactive = False
prefix = "bot"
style = False
glitch_name = False
gui = True
verbose = False
args = argv[1:]
for i in range(len(args)):
arg = args[i - 1].lower()
if "-" in arg:
if "h" in arg or '--help' in arg:
print(r''' _ __ _ _ ___
| | / / | | | | / _ \
| |/ / __ _| |__ ___ ___ | |_ / /_\ \_ __ _ __ ___ _ _ ___ _ __
| \ / _` | '_ \ / _ \ / _ \| __| | _ | '_ \| '_ \ / _ \| | | |/ _ | '__|
| |\ | (_| | | | | (_) | (_) | |_ | | | | | | | | | | (_) | |_| | __| |
\_| \_/\__,_|_| |_|\___/ \___/ \__| \_| |_|_| |_|_| |_|\___/ \__, |\___|_|
__/ |
|___/ ''')
print("Created by michaelshumshum\nBased on msemple's kahoot-hack and theusaf's kahootPY\nPress ctrl+c to exit or ctr+z to immediately abort. You may need to reset the screen if the terminal gets messed up.\n")
print('usage: python3 flood.py -p <PIN> -b <# of bots> [optional arguments]\n')
print('required arguments:\n-b: # of bots. depending on hardware, performance may be significantly affected at higher values.\n-p: code for kahoot game.\n')
print('optional arguments:\n-h / --help: shows this help information.\n-i: input arguments in an "interactive" fashion.\n-t: disable terminal output.\n-n <name>: set a custom name. by default, name is "bot".\n-s: styled names.\n-g: glitched names (selecting glitched names will override custom name and styled name options).\n-v: verbose.\n')
exit()
if "v" in arg:
print('INFO: Verbose mode ON')
verbose = True
if "i" in arg:
print('INFO: Interactive mode ON')
interactive = True
if "g" in arg:
glitch_name = True
print("INFO: Glitched names ON")
if "t" in arg:
print("INFO: Output to terminal DISABLED.")
gui = False
if "s" in arg:
if not glitch_name:
style = True
print("INFO: Styled names ON")
else:
print("WARN: -s and -g are conflicting arguments. Ignoring -s option.")
if "-b" in arg:
try:
count = int(args[i])
print(f"INFO: Using {count} bots.")
except ValueError:
print('ERR: Number of bots must be an integer.')
exit()
if "-n" in arg:
if not glitch_name:
prefix = args[i]
print(f"INFO: The bots will be named a derivative of {prefix}.")
else:
print("WARN: -n and -g are conflicting arguments. Ignoring -n option.")
if "-p" in arg:
try:
pin = int(args[i])
if not check_code(pin):
raise ValueError
else:
print(f"INFO: Using {pin} as the code.")
except ValueError:
print('ERR: Code is invalid!')
exit()
if (pin and count) or interactive:
pass
else:
print('ERR: Missing arguments, use -h for help')
exit()
if interactive:
while True:
pin = input('PIN:')
while True:
try:
count = int(input('How many:'))
break
except:
print('Please put a valid number')
prefix = input('Custom name (leave blank if no):')
if prefix == '':
prefix = 'bot'
style = input('Add style to the names (y/n):')
if style == 'y':
style = True
glitch_name = False
else:
style = False
glitch_name = input('Glitched names (y/n):')
if glitch_name == 'y':
glitch_name = True
else:
glitch_name = False
if not check_code(pin):
print('ERR: Missing arguments, use -h for help')
exit()
break
names = gen_names(prefix, count, style, glitch_name)
# One sequential ack id per bot (was an explicit append loop).
ids = list(range(count))
if gui:
    from _ui import *
    def guifunc(*_):
        # Build the npyscreen form that renders bot/queue status.
        f = Form(name='kahoot-annoyer', FIX_MINIMUM_SIZE_WHEN_CREATED=False)
        f.update_values(q, e)
    def wrapper(_, e):
        # Run the GUI loop, then signal every worker to stop.
        npyscreen.wrapper_basic(guifunc)
        e.clear()
        # NOTE(review): raising inside suppress() is a no-op; presumably a
        # placeholder for interrupt handling -- confirm it can be removed.
        with suppress(KeyboardInterrupt):
            raise KeyboardInterrupt
quiz_url = ''
def main_thread(queue):
    """Wait on the shared queue for a ('main', url) message and store the url.
    Messages addressed to other consumers are put back on the queue so they
    are not lost; loops only while the global event `e` is set.
    """
    global quiz_url
    while e.is_set():
        get = queue.get()
        if get[0] != 'main':
            queue.put(get)
        else:
            quiz_url = get[1]
            break
threads = []
quizid = ''
# Collector thread that waits for the quiz URL message.
thread = Thread(target=main_thread, args=(q,), name='main')
threads.append(thread)
# NOTE(review): rebinding `manager` to the instance shadows the `manager`
# class/factory itself -- confirm the name is not needed again later.
manager = manager(queue=q, bot_names=names, event=e)
thread = Thread(target=manager.run, name='bot-manager')
threads.append(thread)
for i in range(count):
    # One worker thread per bot, each with its own name and ack id.
    f_bot = bot(name=names[i], pin=pin, ackId=ids[i], queue=q, event=e)
    thread = Thread(target=f_bot.run, name=names[i])
    threads.append(thread)
    time.sleep(0.01)
if gui:
    q.put(['gui', count, 'init', pin])
    thread = Thread(target=wrapper, args=(q, e,), name='gui')
    threads.append(thread)
for thread in threads:
    thread.start()
    print(f'INFO: Thread[{thread.name} - {thread.ident}] started.') if verbose else None
# Main loop: spin until the shared event is cleared by a worker or Ctrl-C.
while e.is_set():
    try:
        time.sleep(0.0001)
        # NOTE(review): this branch is unreachable -- the while condition just
        # proved e.is_set() is True, so this clear() is dead code.
        if not e.is_set():
            e.clear()
    except KeyboardInterrupt:
        e.clear()
        print('done')
        break
for thread in threads:
    thread.join()
    print(f'INFO: Thread[{thread.name} - {thread.ident}] killed.') if verbose else None
print(quiz_url)
|
federated_learning_keras_PS_MNIST_crossentropy.py | from DataSets import MnistData
from DataSets_task import MnistData_task
from consensus.consensus_v2 import CFA_process
from consensus.parameter_server import Parameter_Server
# best use with PS active
# from ReplayMemory import ReplayMemory
import numpy as np
import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import models
import argparse
import warnings
import glob
import datetime
import scipy.io as sio
# import multiprocessing
import threading
import math
from matplotlib.pyplot import pause
import time
warnings.filterwarnings("ignore")
# Command-line configuration for the federated-learning simulation.
parser = argparse.ArgumentParser()
parser.add_argument('-resume', default=0, help="set 1 to resume from a previous simulation, 0 to start from the beginning", type=float)
parser.add_argument('-PS', default=1, help="set 1 to enable PS server and FedAvg, set 0 to disable PS", type=float)
parser.add_argument('-consensus', default=0, help="set 1 to enable consensus, set 0 to disable", type=float)
parser.add_argument('-mu', default=0.01, help="sets the learning rate for all setups", type=float)
parser.add_argument('-eps', default=1, help="sets the mixing parameters for model averaging (CFA)", type=float)
parser.add_argument('-target', default=0.2, help="sets the target crossentropy loss to stop federation", type=float)
parser.add_argument('-K', default=30, help="sets the number of network devices", type=int)
parser.add_argument('-Ka', default=20, help="sets the number of active devices per round in FA (<= K)", type=int)
parser.add_argument('-N', default=1, help="sets the max. number of neighbors per device per round in CFA", type=int)
parser.add_argument('-samp', default=500, help="sets the number samples per device", type=int)
parser.add_argument('-noniid_assignment', default=1, help=" set 0 for iid assignment, 1 for non-iid random", type=int)
parser.add_argument('-run', default=0, help=" set the run id", type=int)
parser.add_argument('-random_data_distribution', default=0, help=" set 0 for fixed distribution, 1 for time-varying", type=int)
parser.add_argument('-batches', default=5, help="sets the number of batches per learning round", type=int)
parser.add_argument('-batch_size', default=100, help="sets the batch size per learning round", type=int)
parser.add_argument('-graph', default=6, help="sets the input graph: 0 for default graph, >0 uses the input graph in vGraph.mat, and choose one graph from the available adjacency matrices", type=int)
args = parser.parse_args()
devices = args.K # NUMBER OF DEVICES
active_devices_per_round = args.Ka
# Learning mode: consensus (CFA) takes precedence over the parameter server
# (FedAvg); with both disabled, fall back to centralized learning on device 0.
if args.consensus == 1:
    federated = True
    parameter_server = False
elif args.PS == 1:
    federated = False
    parameter_server = True
else: # CL: CENTRALIZED LEARNING ON DEVICE 0 (DATA CENTER)
    federated = False
    parameter_server = False
if active_devices_per_round > devices:
    active_devices_per_round = devices
target_loss = args.target
# Configuration parameters for the whole setup
seed = 42
# batch_size = 5 # Size of batch taken from replay buffer
batch_size = args.batch_size
number_of_batches = args.batches
training_set_per_device = args.samp # NUMBER OF TRAINING SAMPLES PER DEVICE
validation_train = 60000 # VALIDATION and training DATASET size
validation_test = 10000
# Cap the per-device training set so all K devices fit in the train split.
if (training_set_per_device > validation_train/args.K):
    training_set_per_device = math.floor(validation_train/args.K)
print(training_set_per_device)
if batch_size > training_set_per_device:
    batch_size = training_set_per_device
# if batch_size*number_of_batches > training_set_per_device:
#     number_of_batches = math.floor(training_set_per_device/batch_size)
# number_of_batches = int(training_set_per_device/batch_size)
# number_of_batches = args.batches
number_of_batches_for_validation = int(validation_test/batch_size)
print("Number of batches for learning {}".format(number_of_batches))
max_lag = number_of_batches*2 # consensus max delay 2= 2 epochs max
refresh_server = 1 # refresh server updates (in sec)
n_outputs = 10 # 10 MNIST classes
max_epochs = 200
validation_start = 1 # start validation in epochs
# Using huber loss for stability
# NOTE(review): loss_function is assigned but the training code below uses a
# hand-written crossentropy -- confirm whether this is still needed.
loss_function = keras.losses.Huber
def get_noniid_data(total_training_size, devices, batch_size):
    """Randomly split `total_training_size` samples across `devices`.
    Draws a random share per device, scales the shares to sum to the total,
    rounds them, and then corrects the rounding remainder one sample at a
    time so the returned counts sum exactly to `total_training_size`.
    Args:
        total_training_size (int): total number of samples to distribute.
        devices (int): number of devices to split across.
        batch_size (int): minimum initial share per device.
    Returns:
        np.ndarray: per-device sample counts (whole-number floats).
    """
    # np.random.random_integers was removed from NumPy; randint's upper bound
    # is exclusive, so add 1 to keep the original inclusive range.
    samples = np.random.randint(batch_size, total_training_size - batch_size * (devices - 1) + 1,
                                devices)  # create random numbers
    samples = samples / np.sum(samples, axis=0) * total_training_size  # force them to sum to totals
    samples = np.round(samples)  # transform them into integers
    remainings = total_training_size - np.sum(samples, axis=0)  # check if there are corrections to be done
    step = 1 if remainings > 0 else -1
    while remainings != 0:
        i = np.random.randint(devices)
        if samples[i] + step >= 0:
            samples[i] += step
            remainings -= step
    return samples
####
def preprocess_observation(obs, batch_size):
    """Reshape a flat batch of MNIST pixels into a (batch, 28, 28, 1) float array.
    Args:
        obs: array-like of raw pixel values for `batch_size` 28x28 images.
        batch_size (int): number of images in the batch.
    Returns:
        np.ndarray: float64 array of shape (batch_size, 28, 28, 1).
    """
    img = obs  # crop and downsize
    # np.float was removed from NumPy (it was just an alias of builtin float).
    img = (img).astype(float)
    return img.reshape(batch_size, 28, 28, 1)
def create_q_model():
    """Build the MNIST classifier: two conv+max-pool stages, flatten, softmax."""
    inputs = layers.Input(shape=(28, 28, 1,))
    x = layers.Conv2D(32, kernel_size=(3, 3), activation="relu")(inputs)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)
    x = layers.Conv2D(64, kernel_size=(3, 3), activation="relu")(x)
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)
    x = layers.Flatten()(x)
    # n_outputs classes, softmax probabilities.
    outputs = layers.Dense(n_outputs, activation="softmax")(x)
    return keras.Model(inputs=inputs, outputs=outputs)
def processParameterServer(devices, active_devices_per_round, federated, refresh_server=1):
    """Run the FedAvg parameter-server loop in its own thread.
    Periodically aggregates device weights into results/model_global.npy and
    stops once every device has written its final .mat result file.
    Args:
        devices (int): number of participating devices.
        active_devices_per_round (int): devices aggregated per round.
        federated (bool): unused here; kept for a uniform thread signature.
        refresh_server (int): seconds between global-model refreshes.
    """
    model_global = create_q_model()
    model_parameters_initial = np.asarray(model_global.get_weights())
    # NOTE: this local name shadows the module-level `parameter_server` flag.
    parameter_server = Parameter_Server(devices, model_parameters_initial, active_devices_per_round)
    global_target_model = 'results/model_global.npy'
    np.save(global_target_model, model_parameters_initial)
    pause(5) # wait for neighbors
    while True:
        pause(refresh_server) # refresh global model on every xx seconds
        np.save(global_target_model, parameter_server.federated_target_weights_aggregation(epoch=0, aggregation_type=0))
        # One .mat file per device signals that everyone finished training.
        fileList = glob.glob('*.mat', recursive=False)
        if len(fileList) == devices:
            # stop the server
            break
# execute for each deployed device
def processData(device_index, start_samples, samples, federated, full_data_size, number_of_batches, parameter_server, sample_distribution):
    """Per-device training loop (one thread per device).
    Runs local SGD on MNIST mini-batches and, depending on the global flags,
    merges models either via CFA consensus with random neighbors or by
    pulling the parameter-server aggregate from disk; periodically validates
    and dumps checkpoints and .mat results under results/.
    Args:
        device_index (int): id of this device (0-based).
        start_samples (int): start offset of this device's data slice.
        samples (int): number of training samples for this device.
        federated (bool): True enables CFA consensus rounds.
        full_data_size (int): size of the full training dataset.
        number_of_batches (int): batches per local learning round.
        parameter_server (bool): True pulls the FedAvg global model.
        sample_distribution: per-device sample counts (unused in the body).
    """
    pause(5) # PS server (if any) starts first
    checkpointpath1 = 'results/model{}.h5'.format(device_index)
    outfile = 'results/dump_train_variables{}.npz'.format(device_index)
    outfile_models = 'results/dump_train_model{}.npy'.format(device_index)
    global_model = 'results/model_global.npy'
    np.random.seed(1)
    tf.random.set_seed(1) # common initialization
    learning_rate = args.mu
    learning_rate_local = learning_rate
    # Uniform neighbor-selection probabilities excluding self (unused below).
    B = np.ones((devices, devices)) - tf.one_hot(np.arange(devices), devices)
    Probabilities = B[device_index, :]/(devices - 1)
    training_signal = False
    # check for backup variables on start
    if os.path.isfile(checkpointpath1):
        train_start = False
        # backup the model and the model target
        model = models.load_model(checkpointpath1)
        data_history = []
        label_history = []
        local_model_parameters = np.load(outfile_models, allow_pickle=True)
        model.set_weights(local_model_parameters.tolist())
        dump_vars = np.load(outfile, allow_pickle=True)
        frame_count = dump_vars['frame_count']
        epoch_loss_history = dump_vars['epoch_loss_history'].tolist()
        running_loss = np.mean(epoch_loss_history[-5:])
        epoch_count = dump_vars['epoch_count']
    else:
        train_start = True
        model = create_q_model()
        data_history = []
        label_history = []
        frame_count = 0
        # Experience replay buffers
        epoch_loss_history = []
        epoch_count = 0
        running_loss = math.inf
    training_end = False
    # set an arbitrary optimizer, here Adam is used
    optimizer = keras.optimizers.Adam(learning_rate=args.mu, clipnorm=1.0)
    # create a data object (here radar data)
    # data_handle = MnistData(device_index, start_samples, samples, full_data_size, args.random_data_distribution)
    if args.noniid_assignment == 1:
        data_handle = MnistData_task(device_index, start_samples, samples, full_data_size,
                                     args.random_data_distribution)
    else:
        data_handle = MnistData(device_index, start_samples, samples, full_data_size, args.random_data_distribution)
    # create a consensus object
    cfa_consensus = CFA_process(devices, device_index, args.N)
    while True: # Run until solved
        # collect 1 batch
        frame_count += 1
        obs, labels = data_handle.getTrainingData(batch_size)
        data_batch = preprocess_observation(obs, batch_size)
        # Save data and labels in the current learning session
        data_history.append(data_batch)
        label_history.append(labels)
        # Local learning update every "number of batches" batches
        if frame_count % number_of_batches == 0 and not training_signal:
            epoch_count += 1
            for i in range(number_of_batches):
                data_sample = np.array(data_history[i])
                label_sample = np.array(label_history[i])
                # Create a mask to calculate loss
                masks = tf.one_hot(label_sample, n_outputs).numpy()
                # class_v = np.zeros(batch_size, dtype=int)
                with tf.GradientTape() as tape:
                    # Train the model on data samples
                    classes = model(data_sample, training=False)
                    # Apply the masks
                    # for k in range(batch_size):
                    #     class_v[k] = tf.argmax(classes[k])
                    # class_v = tf.reduce_sum(tf.multiply(classes, masks), axis=1)
                    # Take best action
                    # Calculate loss (hand-written categorical crossentropy)
                    loss = tf.reduce_mean(-tf.reduce_sum(masks * tf.math.log(classes), axis=1))
                # Backpropagation
                grads = tape.gradient(loss, model.trainable_variables)
                optimizer.apply_gradients(zip(grads, model.trainable_variables))
            del data_history
            del label_history
            data_history = []
            label_history = []
            # Checkpoint model + training state so a run can be resumed.
            model_weights = np.asarray(model.get_weights())
            model.save(checkpointpath1, include_optimizer=True, save_format='h5')
            np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
                     training_end=training_end, epoch_count=epoch_count, loss=running_loss)
            np.save(outfile_models, model_weights)
            # Consensus round
            # update local model
            cfa_consensus.update_local_model(model_weights)
            # neighbor = cfa_consensus.get_connectivity(device_index, args.N, devices) # fixed neighbor
            # neighbor = np.random.choice(np.arange(devices), args.N, p=Probabilities, replace=False) # choose neighbor
            neighbor = np.random.choice(np.arange(devices), args.N, replace=False) # choose neighbor
            # NOTE(review): `neighbor` is an array; this comparison is only
            # well-defined for args.N == 1 -- confirm for N > 1.
            while neighbor == device_index:
                neighbor = np.random.choice(np.arange(devices), args.N, replace=False)
            if not train_start:
                if federated and not training_signal:
                    eps_c = 1 / (args.N + 1)
                    # apply consensus for model parameter
                    print("Consensus from neighbor {} for device {}, local loss {:.2f}".format(neighbor, device_index,
                                                                                               loss.numpy()))
                    model.set_weights(cfa_consensus.federated_weights_computing(neighbor, args.N, frame_count, eps_c, max_lag))
                    if cfa_consensus.getTrainingStatusFromNeightbor():
                        # a neighbor completed the training, with loss < target, transfer learning is thus applied (the device will copy and reuse the same model)
                        training_signal = True # stop local learning, just do validation
            else:
                print("Warm up")
                train_start = False
            # check if parameter server is enabled
            stop_aggregation = False
            if parameter_server:
                pause(refresh_server)
                while not os.path.isfile(global_model):
                    # implementing consensus
                    print("waiting")
                    pause(1)
                # NOTE(review): bare excepts below swallow all errors while the
                # server may be mid-write; consider narrowing the exception type.
                try:
                    model_global = np.load(global_model, allow_pickle=True)
                except:
                    pause(5)
                    print("retrying opening global model")
                    try:
                        model_global = np.load(global_model, allow_pickle=True)
                    except:
                        print("halting aggregation")
                        stop_aggregation = True
                if not stop_aggregation:
                    # print("updating from global model inside the parmeter server")
                    for k in range(cfa_consensus.layers):
                        # model_weights[k] = model_weights[k]+ 0.5*(model_global[k]-model_weights[k])
                        model_weights[k] = model_global[k]
                    model.set_weights(model_weights.tolist())
                del model_weights
        # validation tool for device 'device_index'
        if epoch_count > validation_start and frame_count % number_of_batches == 0:
            avg_cost = 0.
            for i in range(number_of_batches_for_validation):
                obs_valid, labels_valid = data_handle.getTestData(batch_size, i)
                # obs_valid, labels_valid = data_handle.getRandomTestData(batch_size)
                data_valid = preprocess_observation(np.squeeze(obs_valid), batch_size)
                data_sample = np.array(data_valid)
                label_sample = np.array(labels_valid)
                # Create a mask to calculate loss
                masks = tf.one_hot(label_sample, n_outputs).numpy()
                classes = model(data_sample, training=False)
                # Apply the masks
                # class_v = tf.reduce_sum(tf.multiply(classes, masks), axis=1)
                # class_v = np.zeros(batch_size, dtype=int)
                # for k in range(batch_size):
                #     class_v[k] = tf.argmax(classes[k]).numpy()
                # Calculate loss
                # loss = loss_function(label_sample, classes)
                loss = tf.reduce_mean(-tf.reduce_sum(masks * tf.math.log(classes), axis=1)).numpy()
                avg_cost += loss / number_of_batches_for_validation # Training loss
            epoch_loss_history.append(avg_cost)
            print("Device {} epoch count {}, validation loss {:.2f}".format(device_index, epoch_count,
                                                                            avg_cost))
            # mean loss for last 5 epochs
            running_loss = np.mean(epoch_loss_history[-5:])
            if running_loss < target_loss or training_signal: # Condition to consider the task solved
                print("Solved for device {} at epoch {} with average loss {:.2f} !".format(device_index, epoch_count, running_loss))
                training_end = True
                model_weights = np.asarray(model.get_weights())
                model.save(checkpointpath1, include_optimizer=True, save_format='h5')
                # model_target.save(checkpointpath2, include_optimizer=True, save_format='h5')
                np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
                         training_end=training_end, epoch_count=epoch_count, loss=running_loss)
                np.save(outfile_models, model_weights)
                # NOTE(review): args.Ka_consensus is never defined by argparse
                # above -- the federated branches below would raise
                # AttributeError. Confirm the intended argument name.
                if federated:
                    dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
                              "parameter_server": parameter_server, "devices": devices, "neighbors": args.N,
                              "active_devices": args.Ka_consensus,
                              "batches": number_of_batches, "batch_size": batch_size, "samples": samples,
                              "noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
                elif parameter_server:
                    dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
                              "parameter_server": parameter_server, "devices": devices,
                              "active_devices": active_devices_per_round,
                              "batches": number_of_batches, "batch_size": batch_size, "samples": samples,
                              "noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
                else:
                    dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
                              "parameter_server": parameter_server, "devices": devices,
                              "batches": number_of_batches, "batch_size": batch_size, "samples": samples,
                              "noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
                if federated:
                    sio.savemat(
                        "results/matlab/CFA_device_{}_samples_{}_devices_{}_active_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
                            device_index, samples, devices, args.Ka_consensus, args.N, number_of_batches, batch_size,
                            args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
                    sio.savemat(
                        "CFA_device_{}_samples_{}_devices_{}_neighbors_{}_batches_{}_size{}.mat".format(
                            device_index, samples, devices, args.N, number_of_batches, batch_size), dict_1)
                elif parameter_server:
                    sio.savemat(
                        "results/matlab/FA2_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
                            device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size,
                            args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
                    sio.savemat(
                        "FA2_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}.mat".format(
                            device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size),
                        dict_1)
                else: # CL
                    sio.savemat(
                        "results/matlab/CL_samples_{}_devices_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
                            samples, devices, number_of_batches, batch_size,args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
                break
            if epoch_count > max_epochs: # stop simulation
                print("Unsolved for device {} at epoch {}!".format(device_index, epoch_count))
                training_end = True
                model_weights = np.asarray(model.get_weights())
                model.save(checkpointpath1, include_optimizer=True, save_format='h5')
                # model_target.save(checkpointpath2, include_optimizer=True, save_format='h5')
                np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
                         training_end=training_end, epoch_count=epoch_count, loss=running_loss)
                np.save(outfile_models, model_weights)
                if federated:
                    dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
                              "parameter_server": parameter_server, "devices": devices, "neighbors": args.N,
                              "active_devices": args.Ka_consensus,
                              "batches": number_of_batches, "batch_size": batch_size, "samples": samples,
                              "noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
                elif parameter_server:
                    dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
                              "parameter_server": parameter_server, "devices": devices,
                              "active_devices": active_devices_per_round,
                              "batches": number_of_batches, "batch_size": batch_size, "samples": samples,
                              "noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
                else:
                    dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
                              "parameter_server": parameter_server, "devices": devices,
                              "batches": number_of_batches, "batch_size": batch_size, "samples": samples,
                              "noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
                if federated:
                    sio.savemat(
                        "results/matlab/CFA_device_{}_samples_{}_devices_{}_active_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
                            device_index, samples, devices, args.Ka_consensus, args.N, number_of_batches, batch_size,
                            args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
                    sio.savemat(
                        "CFA_device_{}_samples_{}_devices_{}_neighbors_{}_batches_{}_size{}.mat".format(
                            device_index, samples, devices, args.N, number_of_batches, batch_size), dict_1)
                elif parameter_server:
                    sio.savemat(
                        "results/matlab/FA2_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
                            device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size,
                            args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
                    sio.savemat(
                        "FA2_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}.mat".format(
                            device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size),
                        dict_1)
                else: # CL
                    sio.savemat(
                        "results/matlab/CL_samples_{}_devices_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
                            samples, devices, number_of_batches, batch_size,
                            args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
                break
if __name__ == "__main__":
    def _delete_files(pattern):
        # Remove every file matching `pattern`, ignoring per-file OS failures.
        fileList = glob.glob(pattern, recursive=False)
        print(fileList)
        for filePath in fileList:
            try:
                os.remove(filePath)
            except OSError:
                print("Error while deleting file")
    if args.resume == 0: # clear all files
        # DELETE TEMPORARY CACHE FILES left over by a previous simulation
        # (was four copy-pasted glob/remove loops).
        for pattern in ('results/*.npy', 'results/*.h5', 'results/*.npz', '*.mat'):
            _delete_files(pattern)
    # main loop for multiprocessing
    t = []
    server_index = devices
    samples = np.zeros(devices) # training samples per device
    # `dev` was previously named `id`, which shadowed the builtin.
    for dev in range(devices):
        samples[dev] = training_set_per_device
    print(samples)
    if federated or parameter_server:
        for ii in range(devices):
            # position start: each device's slice begins where the previous ends
            if ii == 0:
                start_index = 0
            else:
                start_index = start_index + int(samples[ii - 1])
            t.append(threading.Thread(target=processData, args=(ii, start_index, int(samples[ii]), federated, validation_train, number_of_batches, parameter_server, samples)))
            t[ii].start()
        # last process is for the target server
        if parameter_server:
            print("Target server starting with active devices {}".format(active_devices_per_round))
            t.append(threading.Thread(target=processParameterServer, args=(devices, active_devices_per_round, federated, refresh_server)))
            t[devices].start()
    else: # run centralized learning on device 0 (data center)
        processData(0, 0, training_set_per_device*devices, federated, validation_train, number_of_batches, parameter_server, samples)
    exit(0)
|
pod.py | """
Pod related functionalities and context info
Each pod in the openshift cluster will have a corresponding pod object
"""
import logging
import os
import re
import yaml
import tempfile
import time
import calendar
from threading import Thread
import base64
from ocs_ci.ocs.bucket_utils import craft_s3_command
from ocs_ci.ocs.ocp import OCP, verify_images_upgraded
from ocs_ci.helpers import helpers
from ocs_ci.ocs import constants, defaults, node, workload, ocp
from ocs_ci.framework import config
from ocs_ci.ocs.exceptions import (
CommandFailed,
NonUpgradedImagesFoundError,
ResourceWrongStatusException,
TimeoutExpiredError,
UnavailableResourceException,
)
from ocs_ci.ocs.utils import setup_ceph_toolbox, get_pod_name_by_pattern
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.utility import templating
from ocs_ci.utility.utils import run_cmd, check_timeout_reached, TimeoutSampler
from ocs_ci.utility.utils import check_if_executable_in_path
from ocs_ci.utility.retry import retry
logger = logging.getLogger(__name__)
# Maximum seconds to wait for FIO results from a pod.
FIO_TIMEOUT = 600
# Filler payload used when writing test files to pods.
TEXT_CONTENT = (
    "Lorem ipsum dolor sit amet, consectetur adipiscing elit, "
    "sed do eiusmod tempor incididunt ut labore et dolore magna "
    "aliqua. Ut enim ad minim veniam, quis nostrud exercitation "
    "ullamco laboris nisi ut aliquip ex ea commodo consequat. "
    "Duis aute irure dolor in reprehenderit in voluptate velit "
    "esse cillum dolore eu fugiat nulla pariatur. Excepteur sint "
    "occaecat cupidatat non proident, sunt in culpa qui officia "
    "deserunt mollit anim id est laborum."
)
# Default test-file locations inside regular and Fedora-based pods.
TEST_FILE = "/var/lib/www/html/test"
FEDORA_TEST_FILE = "/mnt/test"
class Pod(OCS):
"""
Handles per pod related context
"""
    def __init__(self, **kwargs):
        """
        Initializer function
        kwargs:
            Copy of ocs/defaults.py::<some pod> dictionary
        """
        self.pod_data = kwargs
        super(Pod, self).__init__(**kwargs)
        # Reserve a temp file name for dumping this pod's yaml; delete=False
        # so the path outlives the context manager.
        with tempfile.NamedTemporaryFile(
            mode="w+", prefix="POD_", delete=False
        ) as temp_info:
            self.temp_yaml = temp_info.name
        self._name = self.pod_data.get("metadata").get("name")
        self._labels = self.get_labels()
        self._roles = []
        # OCP client scoped to Pod resources in this pod's namespace.
        self.ocp = OCP(
            api_version=defaults.API_VERSION,
            kind=constants.POD,
            namespace=self.namespace,
        )
        self.fio_thread = None
        # TODO: get backend config !!
        self.wl_obj = None
        self.wl_setup_done = False
    @property
    def name(self):
        """str: The pod's metadata name."""
        return self._name
    @property
    def namespace(self):
        # str: pod namespace; _namespace is presumably set by the OCS base
        # initializer -- TODO confirm.
        return self._namespace
    @property
    def roles(self):
        """list: Roles assigned to this pod via add_role()."""
        return self._roles
    @property
    def labels(self):
        """dict: Openshift labels captured from the pod data at init time."""
        return self._labels
    @property
    def restart_count(self):
        """int: Restart count of the first container in the live pod status."""
        return self.get().get("status").get("containerStatuses")[0].get("restartCount")
    def __setattr__(self, key, val):
        # Plain dict-based attribute writes; presumably bypasses custom
        # __setattr__ behavior in the OCS base class -- TODO confirm.
        self.__dict__[key] = val
    def add_role(self, role: str) -> None:
        """
        Adds a new role for this pod
        Args:
            role (str): New role to be assigned for this pod
        """
        self._roles.append(role)
    def get_fio_results(self, timeout=FIO_TIMEOUT):
        """
        Get FIO execution results
        Args:
            timeout (int): Seconds to wait for the FIO thread to finish
        Returns:
            dict: Dictionary represents the FIO execution results
        Raises:
            CommandFailed: If FIO finished but produced empty results
            Exception: In case of exception from FIO
        """
        logger.info(f"Waiting for FIO results from pod {self.name}")
        try:
            result = self.fio_thread.result(timeout)
            if result:
                return yaml.safe_load(result)
            # An empty result means FIO ran but produced no output.
            raise CommandFailed(f"FIO execution results: {result}.")
        except CommandFailed as ex:
            logger.exception(f"FIO failed: {ex}")
            raise
        except Exception as ex:
            logger.exception(f"Found Exception: {ex}")
            raise
def exec_cmd_on_pod(
self, command, out_yaml_format=True, secrets=None, timeout=600, **kwargs
):
"""
Execute a command on a pod (e.g. oc rsh)
Args:
command (str): The command to execute on the given pod
out_yaml_format (bool): whether to return yaml loaded python
object OR to return raw output
secrets (list): A list of secrets to be masked with asterisks
This kwarg is popped in order to not interfere with
subprocess.run(``**kwargs``)
timeout (int): timeout for the exec_oc_cmd, defaults to 600 seconds
Returns:
Munch Obj: This object represents a returned yaml file
"""
rsh_cmd = f"rsh {self.name} "
rsh_cmd += command
return self.ocp.exec_oc_cmd(
rsh_cmd, out_yaml_format, secrets=secrets, timeout=timeout, **kwargs
)
    def exec_s3_cmd_on_pod(self, command, mcg_obj=None):
        """
        Execute an S3 command on a pod
        Args:
            mcg_obj (MCG): An MCG object containing the MCG S3 connection credentials
            command (str): The command to execute on the given pod
        Returns:
            Munch Obj: This object represents a returned yaml file
        """
        # Credentials are masked in logs only when an MCG object is supplied.
        return self.exec_cmd_on_pod(
            craft_s3_command(command, mcg_obj),
            out_yaml_format=False,
            secrets=[mcg_obj.access_key_id, mcg_obj.access_key, mcg_obj.s3_endpoint]
            if mcg_obj
            else None,
        )
def exec_sh_cmd_on_pod(self, command, sh="bash"):
"""
Execute a pure bash command on a pod via oc exec where you can use
bash syntaxt like &&, ||, ;, for loop and so on.
Args:
command (str): The command to execute on the given pod
Returns:
str: stdout of the command
"""
cmd = f'exec {self.name} -- {sh} -c "{command}"'
return self.ocp.exec_oc_cmd(cmd, out_yaml_format=False)
    def get_labels(self):
        """
        Get labels from pod
        Returns:
            dict: All the openshift labels on a given pod, or None when the
            pod data carries no labels (plain dict lookup; nothing is raised)
        """
        return self.pod_data.get("metadata").get("labels")
def exec_ceph_cmd(self, ceph_cmd, format="json-pretty"):
"""
Execute a Ceph command on the Ceph tools pod
Args:
ceph_cmd (str): The Ceph command to execute on the Ceph tools pod
format (str): The returning output format of the Ceph command
Returns:
dict: Ceph command output
Raises:
CommandFailed: In case the pod is not a toolbox pod
"""
if "rook-ceph-tools" not in self.labels.values():
raise CommandFailed("Ceph commands can be executed only on toolbox pod")
ceph_cmd = ceph_cmd
if format:
ceph_cmd += f" --format {format}"
out = self.exec_cmd_on_pod(ceph_cmd)
# For some commands, like "ceph fs ls", the returned output is a list
if isinstance(out, list):
return [item for item in out if item]
return out
def get_storage_path(self, storage_type="fs"):
"""
Get the pod volume mount path or device path
Returns:
str: The mount path of the volume on the pod (e.g. /var/lib/www/html/) if storage_type is fs
else device path of raw block pv
"""
# TODO: Allow returning a path of a specified volume of a specified
# container
if storage_type == "block":
return (
self.pod_data.get("spec")
.get("containers")[0]
.get("volumeDevices")[0]
.get("devicePath")
)
return (
self.pod_data.get("spec")
.get("containers")[0]
.get("volumeMounts")[0]
.get("mountPath")
)
    def workload_setup(self, storage_type, jobs=1):
        """
        Do setup on pod for running FIO
        Args:
            storage_type (str): 'fs' or 'block'
            jobs (int): Number of jobs to execute FIO
        """
        work_load = "fio"
        name = f"test_workload_{work_load}"
        path = self.get_storage_path(storage_type)
        # few io parameters for Fio
        self.wl_obj = workload.WorkLoad(name, path, work_load, storage_type, self, jobs)
        assert self.wl_obj.setup(), f"Setup for FIO failed on pod {self.name}"
        # Remembered so run_io()/fillup_fs() set up only once per pod.
        self.wl_setup_done = True
    def run_io(
        self,
        storage_type,
        size,
        io_direction="rw",
        rw_ratio=75,
        jobs=1,
        runtime=60,
        depth=4,
        rate="1m",
        rate_process="poisson",
        fio_filename=None,
        bs="4K",
        end_fsync=0,
    ):
        """
        Execute FIO on a pod
        This operation will run in background and will store the results in
        'self.thread.result()'.
        In order to wait for the output and not continue with the test until
        FIO is done, call self.thread.result() right after calling run_io.
        See tests/manage/test_pvc_deletion_during_io.py::test_run_io
        for usage of FIO
        Args:
            storage_type (str): 'fs' or 'block'
            size (str): Size in MB, e.g. '200M'; a bare number is treated as GiB
            io_direction (str): Determines the operation:
                'ro', 'wo', 'rw' (default: 'rw')
            rw_ratio (int): Determines the reads and writes using a
                <rw_ratio>%/100-<rw_ratio>%
                (e.g. the default is 75 which means it is 75%/25% which
                equivalent to 3 reads are performed for every 1 write)
            jobs (int): Number of jobs to execute FIO
            runtime (int): Number of seconds IO should run for
            depth (int): IO depth
            rate (str): rate of IO default 1m, e.g. 16k
            rate_process (str): kind of rate process default poisson, e.g. poisson
            fio_filename(str): Name of fio file created on app pod's mount point
            bs (str): Block size, e.g. 4K
            end_fsync (int): If 1, fio will sync file contents when a write
                stage has completed. Fio default is 0
        """
        # Lazy one-time FIO setup on the pod.
        if not self.wl_setup_done:
            self.workload_setup(storage_type=storage_type, jobs=jobs)
        if io_direction == "rw":
            self.io_params = templating.load_yaml(constants.FIO_IO_RW_PARAMS_YAML)
            self.io_params["rwmixread"] = rw_ratio
        else:
            self.io_params = templating.load_yaml(constants.FIO_IO_PARAMS_YAML)
        self.io_params["runtime"] = runtime
        # Non-string sizes are interpreted as GiB.
        size = size if isinstance(size, str) else f"{size}G"
        self.io_params["size"] = size
        if fio_filename:
            self.io_params["filename"] = fio_filename
        self.io_params["iodepth"] = depth
        self.io_params["rate"] = rate
        self.io_params["rate_process"] = rate_process
        self.io_params["bs"] = bs
        if end_fsync:
            self.io_params["end_fsync"] = end_fsync
        # Runs in the background; results come back via get_fio_results().
        self.fio_thread = self.wl_obj.run(**self.io_params)
def fillup_fs(self, size, fio_filename=None):
"""
Execute FIO on a pod to fillup a file
This will run sequantial IO of 1MB block size to fill up the fill with data
This operation will run in background and will store the results in
'self.thread.result()'.
In order to wait for the output and not continue with the test until
FIO is done, call self.thread.result() right after calling run_io.
See tests/manage/test_pvc_deletion_during_io.py::test_run_io
for usage of FIO
Args:
size (str): Size in MB, e.g. '200M'
fio_filename(str): Name of fio file created on app pod's mount point
"""
if not self.wl_setup_done:
self.workload_setup(storage_type="fs", jobs=1)
self.io_params = templating.load_yaml(constants.FIO_IO_FILLUP_PARAMS_YAML)
size = size if isinstance(size, str) else f"{size}M"
self.io_params["size"] = size
if fio_filename:
self.io_params["filename"] = fio_filename
self.fio_thread = self.wl_obj.run(**self.io_params)
def run_git_clone(self):
"""
Execute git clone on a pod to simulate a Jenkins user
"""
name = "test_workload"
work_load = "jenkins"
wl = workload.WorkLoad(
name=name, work_load=work_load, pod=self, path=self.get_storage_path()
)
assert wl.setup(), "Setup up for git failed"
wl.run()
def install_packages(self, packages):
"""
Install packages in a Pod
Args:
packages (list): List of packages to install
"""
if isinstance(packages, list):
packages = " ".join(packages)
cmd = f"yum install {packages} -y"
self.exec_cmd_on_pod(cmd, out_yaml_format=False)
def copy_to_server(self, server, authkey, localpath, remotepath, user=None):
"""
Upload a file from pod to server
Args:
server (str): Name of the server to upload
authkey (str): Authentication file (.pem file)
localpath (str): Local file/dir in pod to upload
remotepath (str): Target path on the remote server
user (str): User name to connect to server
"""
if not user:
user = "root"
cmd = (
f'scp -i {authkey} -o "StrictHostKeyChecking no"'
f" -r {localpath} {user}@{server}:{remotepath}"
)
self.exec_cmd_on_pod(cmd, out_yaml_format=False)
def exec_cmd_on_node(self, server, authkey, cmd, user=None):
"""
Run command on a remote server from pod
Args:
server (str): Name of the server to run the command
authkey (str): Authentication file (.pem file)
cmd (str): command to run on server from pod
user (str): User name to connect to server
"""
if not user:
user = "root"
cmd = f'ssh -i {authkey} -o "StrictHostKeyChecking no" {user}@{server} {cmd}'
self.exec_cmd_on_pod(cmd, out_yaml_format=False)
def get_memory(self):
"""
Get the pod memory size
Returns:
dict: The names of the pod's containers (str) as keys and their memory
size (str) as values
"""
containers = self.pod_data.get("spec").get("containers")
container_names_and_memory = {
container.get("name"): container.get("resources")
.get("limits")
.get("memory")
for container in containers
}
return container_names_and_memory
def get_node(self):
"""
Gets the node name
Returns:
str: Node name
"""
return self.pod_data["spec"]["nodeName"]
# Helper functions for Pods
def get_all_pods(
    namespace=None,
    selector=None,
    selector_label="app",
    exclude_selector=False,
    wait=False,
):
    """
    Get all pods in a namespace.

    Args:
        namespace (str): Name of the namespace.
            If namespace is None - get all pods
        selector (list): List of label values to filter with.
            Example: ['alertmanager','prometheus']
        selector_label (str): Label key the selector values are matched
            against (default: app)
        exclude_selector (bool): If True, return pods whose label value is
            NOT in ``selector``
        wait (bool): If True, sleep before listing so pods failing over to
            other nodes have time to come up

    Returns:
        list: List of Pod objects
    """
    ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
    if wait:
        # With >4 worker nodes, a node failure triggers automatic failover
        # of pods to other nodes - give them time to stabilize first.
        wait_time = 180
        logger.info(f"Waiting for {wait_time}s for the pods to stabilize")
        time.sleep(wait_time)
    pods = ocp_pod_obj.get()["items"]
    if selector:
        def _label_value(pod_info):
            # Value of the selector label on this pod, or None if unset
            return pod_info["metadata"].get("labels", {}).get(selector_label)

        if exclude_selector:
            pods = [pod for pod in pods if _label_value(pod) not in selector]
        else:
            pods = [pod for pod in pods if _label_value(pod) in selector]
    return [Pod(**pod) for pod in pods]
def get_ceph_tools_pod():
    """
    Get the Ceph tools pod.

    Returns:
        Pod object: The Ceph tools pod object
    """
    ocp_pod_obj = OCP(
        kind=constants.POD, namespace=config.ENV_DATA["cluster_namespace"]
    )
    ct_pod_items = ocp_pod_obj.get(selector="app=rook-ceph-tools")["items"]
    if not ct_pod_items:
        # The cluster may have been set up by some other CI without a
        # toolbox pod - create it ourselves and re-query.
        setup_ceph_toolbox()
        ct_pod_items = ocp_pod_obj.get(selector="app=rook-ceph-tools")["items"]
    assert ct_pod_items, "No Ceph tools pod found"
    # After a node failure the tools pod is recreated while the old one may
    # linger in Terminated state, so keep only the Running ones.
    running_ct_pods = [
        pod_info
        for pod_info in ct_pod_items
        if ocp_pod_obj.get_resource_status(pod_info.get("metadata").get("name"))
        == constants.STATUS_RUNNING
    ]
    assert running_ct_pods, "No running Ceph tools pod found"
    return Pod(**running_ct_pods[0])
def get_csi_provisioner_pod(interface):
    """
    Get the provisioner pod names based on interface.

    Args:
        interface (str): Interface type, e.g. constants.CEPHBLOCKPOOL

    Returns:
        tuple: Names (str) of the first two provisioner pods for the
            given interface
    """
    ocp_pod_obj = OCP(
        kind=constants.POD, namespace=config.ENV_DATA["cluster_namespace"]
    )
    if interface == constants.CEPHBLOCKPOOL:
        selector = "app=csi-rbdplugin-provisioner"
    else:
        selector = "app=csi-cephfsplugin-provisioner"
    provision_pod_items = ocp_pod_obj.get(selector=selector)["items"]
    assert provision_pod_items, f"No {interface} provisioner pod found"
    return (
        Pod(**provision_pod_items[0]).name,
        Pod(**provision_pod_items[1]).name,
    )
def get_csi_snapshoter_pod():
    """
    Get the name of the csi snapshot controller pod.

    Returns:
        str: Name of the csi snapshot controller pod
    """
    ocp_pod_obj = OCP(
        kind=constants.POD, namespace="openshift-cluster-storage-operator"
    )
    items = ocp_pod_obj.get(selector="app=csi-snapshot-controller")["items"]
    return Pod(**items[0]).name
def get_rgw_pods(rgw_label=constants.RGW_APP_LABEL, namespace=None):
    """
    Fetches info about rgw pods in the cluster.

    Args:
        rgw_label (str): label associated with rgw pods
            (default: constants.RGW_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: configured cluster namespace)

    Returns:
        list: Pod objects of rgw pods
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**info) for info in get_pods_having_label(rgw_label, namespace)]
def get_ocs_operator_pod(ocs_label=constants.OCS_OPERATOR_LABEL, namespace=None):
    """
    Fetches info about the ocs-operator pod in the cluster.

    Args:
        ocs_label (str): label associated with the ocs_operator pod
            (default: constants.OCS_OPERATOR_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: configured cluster namespace)

    Returns:
        Pod object: ocs_operator pod object
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    candidates = get_pods_having_label(ocs_label, namespace)
    return Pod(**candidates[0])
def list_ceph_images(pool_name="rbd"):
    """
    List the RBD images present in a Ceph pool.

    Args:
        pool_name (str): Name of the pool to get the ceph images

    Returns:
        list: List of RBD images in the pool
    """
    tools_pod = get_ceph_tools_pod()
    return tools_pod.exec_ceph_cmd(ceph_cmd=f"rbd ls {pool_name}", format="json")
@retry(TypeError, tries=5, delay=2, backoff=1)
def check_file_existence(pod_obj, file_path):
    """
    Check if file exists inside the pod.

    Args:
        pod_obj (Pod): The object of the pod
        file_path (str): The full path of the file to look for inside
            the pod

    Returns:
        bool: True if the file exists, False otherwise
    """
    try:
        check_if_executable_in_path(pod_obj.exec_cmd_on_pod("which find"))
    except CommandFailed:
        # Minimal images may lack 'find'; install it on demand
        pod_obj.install_packages("findutils")
    ret = pod_obj.exec_cmd_on_pod(f'bash -c "find {file_path}"')
    # Escape the path before matching: a raw path used as a regex pattern
    # can mis-match or raise re.error when it contains metacharacters
    # such as '.', '+' or '('.
    if re.search(re.escape(file_path), ret):
        return True
    return False
def get_file_path(pod_obj, file_name):
    """
    Get the full path of a file on the pod's first mount point.

    Args:
        pod_obj (Pod): The object of the pod
        file_name (str): The name of the file for which path to get

    Returns:
        str: The full path of the file
    """
    pod_spec = pod_obj.get().get("spec")
    mount_path = (
        pod_spec.get("containers")[0].get("volumeMounts")[0].get("mountPath")
    )
    return os.path.join(mount_path, file_name)
def cal_md5sum(pod_obj, file_name, block=False):
    """
    Calculate the md5sum of a file on the pod.

    Args:
        pod_obj (Pod): The object of the pod
        file_name (str): The name of the file for which md5sum to be calculated
        block (bool): True if the volume mode of PVC used on pod is 'Block'.
            file_name will be the devicePath in this case.

    Returns:
        str: The md5sum of the file
    """
    if block:
        file_path = file_name
    else:
        file_path = get_file_path(pod_obj, file_name)
    output = pod_obj.exec_cmd_on_pod(
        command=f'bash -c "md5sum {file_path}"', out_yaml_format=False
    )
    md5sum = output.split()[0]
    logger.info(f"md5sum of file {file_name}: {md5sum}")
    return md5sum
def verify_data_integrity(pod_obj, file_name, original_md5sum, block=False):
    """
    Verify existence and md5sum of a file created from the first pod.

    Args:
        pod_obj (Pod): The object of the pod
        file_name (str): The name of the file for which md5sum to be calculated
        original_md5sum (str): The original md5sum of the file
        block (bool): True if the volume mode of PVC used on pod is 'Block'.
            file_name will be the devicePath in this case.

    Returns:
        bool: True if the file exists and md5sum matches

    Raises:
        AssertionError: If file doesn't exist or md5sum mismatch
    """
    if block:
        file_path = file_name
    else:
        file_path = get_file_path(pod_obj, file_name)
    assert check_file_existence(pod_obj, file_path), f"File {file_name} doesn't exists"
    current_md5sum = cal_md5sum(pod_obj, file_name, block)
    logger.info(f"Original md5sum of file: {original_md5sum}")
    logger.info(f"Current md5sum of file: {current_md5sum}")
    assert current_md5sum == original_md5sum, "Data corruption found"
    logger.info(f"File {file_name} exists and md5sum matches")
    return True
def get_fio_rw_iops(pod_obj):
    """
    Fetch FIO results from a pod and log the read/write IOPs.

    Args:
        pod_obj (Pod): The object of the pod
    """
    fio_result = pod_obj.get_fio_results()
    # Use the module-level logger for consistency with the rest of this
    # module, instead of the root logger via the logging module.
    logger.info(f"FIO output: {fio_result}")
    logger.info("IOPs after FIO:")
    logger.info(f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}")
    logger.info(f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}")
def run_io_in_bg(pod_obj, expect_to_fail=False, fedora_dc=False):
    """
    Run I/O in the background

    Args:
        pod_obj (Pod): The object of the pod
        expect_to_fail (bool): True for the command to be expected to fail
            (disruptive operations), False otherwise
        fedora_dc (bool): set to False by default. If set to True, it runs IO in
            background on a fedora dc pod.

    Returns:
        Thread: A thread of the I/O execution
    """
    logger.info(f"Running I/O on pod {pod_obj.name}")

    def exec_run_io_cmd(pod_obj, expect_to_fail, fedora_dc):
        """
        Execute the endless I/O shell loop on the pod (runs in a thread).
        """
        try:
            # Writing content to a new file every 0.01 seconds.
            # Without sleep, the device will run out of space very quickly -
            # 5-10 seconds for a 5GB device
            if fedora_dc:
                FILE = FEDORA_TEST_FILE
            else:
                FILE = TEST_FILE
            pod_obj.exec_cmd_on_pod(
                command=f'bash -c "let i=0; while true; do echo '
                f'{TEXT_CONTENT} >> {FILE}$i; let i++; sleep 0.01; done"',
                timeout=2400,
            )
            # Once the pod gets deleted, the I/O execution will get terminated.
            # Hence, catching this exception
        except CommandFailed as ex:
            if expect_to_fail:
                # Exit codes 137 (SIGKILL) and 143 (SIGTERM) indicate the
                # command was terminated, which is the expected outcome here
                if re.search("code 137", str(ex)) or (re.search("code 143", str(ex))):
                    logger.info("I/O command got terminated as expected")
                    return
            raise ex

    thread = Thread(target=exec_run_io_cmd, args=(pod_obj, expect_to_fail, fedora_dc))
    thread.start()
    # Give the shell loop a moment to create its first file before probing
    time.sleep(2)

    # Checking file existence
    if fedora_dc:
        FILE = FEDORA_TEST_FILE
    else:
        FILE = TEST_FILE
    # The shell loop writes FILE$i starting at i=0; after ~2s file "1"
    # should already exist if I/O actually started
    test_file = FILE + "1"

    # Check I/O started
    try:
        for sample in TimeoutSampler(
            timeout=20,
            sleep=1,
            func=check_file_existence,
            pod_obj=pod_obj,
            file_path=test_file,
        ):
            if sample:
                break
            logger.info(f"Waiting for I/O to start inside {pod_obj.name}")
    except TimeoutExpiredError:
        logger.error(
            f"Wait timeout: I/O failed to start inside {pod_obj.name}. "
            "Collect file list."
        )
        # Dump the directory listing to the logs to aid debugging
        parent_dir = os.path.join(TEST_FILE, os.pardir)
        pod_obj.exec_cmd_on_pod(
            command=f"ls -l {os.path.abspath(parent_dir)}", out_yaml_format=False
        )
        raise TimeoutExpiredError(f"I/O failed to start inside {pod_obj.name}")
    return thread
def get_admin_key_from_ceph_tools():
    """
    Fetch the admin key secret from ceph via the tools pod.

    Returns:
        str: admin keyring encoded with base64
    """
    tools_pod = get_ceph_tools_pod()
    out = tools_pod.exec_ceph_cmd(ceph_cmd="ceph auth get-key client.admin")
    return base64.b64encode(out["key"].encode()).decode()
def run_io_and_verify_mount_point(pod_obj, bs="10M", count="950"):
    """
    Run I/O on the pod's mount point and report its usage.

    Args:
        pod_obj (Pod): The object of the pod
        bs (str): Read and write up to bytes at a time
        count (str): Copy only N input blocks

    Returns:
        str: Used percentage on the mount point
    """
    dd_cmd = f"dd if=/dev/urandom of=/var/lib/www/html/dd_a bs={bs} count={count}"
    pod_obj.exec_cmd_on_pod(command=dd_cmd)
    # Confirm the data landed on the mount point: the usage column precedes
    # the mount path in the whitespace-split 'df -kh' output
    df_fields = pod_obj.exec_cmd_on_pod(command="df -kh").split()
    return df_fields[df_fields.index("/var/lib/www/html") - 1]
def get_pods_having_label(label, namespace):
    """
    Fetch pod resources with a given label in a given namespace.

    Args:
        label (str): label which pods might have
        namespace (str): Namespace in which to be looked up

    Return:
        list: of pods info
    """
    ocp_pod = OCP(kind=constants.POD, namespace=namespace)
    return ocp_pod.get(selector=label).get("items")
def get_deployments_having_label(label, namespace):
    """
    Fetch deployment resources with a given label in a given namespace.

    Args:
        label (str): label which deployments might have
        namespace (str): Namespace in which to be looked up

    Return:
        list: deployment OCP instances
    """
    ocp_deployment = OCP(kind=constants.DEPLOYMENT, namespace=namespace)
    return ocp_deployment.get(selector=label).get("items")
def get_mds_pods(mds_label=constants.MDS_APP_LABEL, namespace=None):
    """
    Fetches info about mds pods in the cluster.

    Args:
        mds_label (str): label associated with mds pods
            (default: constants.MDS_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: configured cluster namespace)

    Returns:
        list: of mds pod objects
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**info) for info in get_pods_having_label(mds_label, namespace)]
def get_mon_pods(mon_label=constants.MON_APP_LABEL, namespace=None):
    """
    Fetches info about mon pods in the cluster.

    Args:
        mon_label (str): label associated with mon pods
            (default: constants.MON_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: configured cluster namespace)

    Returns:
        list: of mon pod objects
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**info) for info in get_pods_having_label(mon_label, namespace)]
def get_mgr_pods(mgr_label=constants.MGR_APP_LABEL, namespace=None):
    """
    Fetches info about mgr pods in the cluster.

    Args:
        mgr_label (str): label associated with mgr pods
            (default: constants.MGR_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: configured cluster namespace)

    Returns:
        list: of mgr pod objects
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**info) for info in get_pods_having_label(mgr_label, namespace)]
def get_osd_pods(osd_label=constants.OSD_APP_LABEL, namespace=None):
    """
    Fetches info about osd pods in the cluster.

    Args:
        osd_label (str): label associated with osd pods
            (default: constants.OSD_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: configured cluster namespace)

    Returns:
        list: of osd pod objects
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**info) for info in get_pods_having_label(osd_label, namespace)]
def get_osd_prepare_pods(
    osd_prepare_label=constants.OSD_PREPARE_APP_LABEL,
    namespace=defaults.ROOK_CLUSTER_NAMESPACE,
):
    """
    Fetches info about osd prepare pods in the cluster.

    Args:
        osd_prepare_label (str): label associated with osd prepare pods
            (default: constants.OSD_PREPARE_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: defaults.ROOK_CLUSTER_NAMESPACE)

    Returns:
        list: OSD prepare pod objects
    """
    # Falls back to the configured cluster namespace only when the caller
    # explicitly passes a falsy namespace
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return [
        Pod(**info) for info in get_pods_having_label(osd_prepare_label, namespace)
    ]
def get_osd_deployments(osd_label=constants.OSD_APP_LABEL, namespace=None):
    """
    Fetches info about osd deployments in the cluster.

    Args:
        osd_label (str): label associated with osd deployments
            (default: constants.OSD_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: configured cluster namespace)

    Returns:
        list: OSD deployment OCS instances
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return [
        OCS(**info) for info in get_deployments_having_label(osd_label, namespace)
    ]
def get_pod_count(label, namespace=None):
    """
    Count the pods carrying a given label.

    Args:
        label (str): Label selector to match pods against
        namespace (str): Namespace to look in
            (default: configured cluster namespace)

    Returns:
        int: Number of matching pods
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return len(get_pods_having_label(label=label, namespace=namespace))
def get_cephfsplugin_provisioner_pods(
    cephfsplugin_provisioner_label=constants.CSI_CEPHFSPLUGIN_PROVISIONER_LABEL,
    namespace=None,
):
    """
    Fetches info about CSI Cephfs plugin provisioner pods in the cluster.

    Args:
        cephfsplugin_provisioner_label (str): label associated with cephfs
            provisioner pods
            (default: constants.CSI_CEPHFSPLUGIN_PROVISIONER_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: configured cluster namespace)

    Returns:
        list: csi-cephfsplugin-provisioner Pod objects
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return [
        Pod(**info)
        for info in get_pods_having_label(cephfsplugin_provisioner_label, namespace)
    ]
def get_rbdfsplugin_provisioner_pods(
    rbdplugin_provisioner_label=constants.CSI_RBDPLUGIN_PROVISIONER_LABEL,
    namespace=None,
):
    """
    Fetches info about CSI RBD plugin provisioner pods in the cluster.

    Args:
        rbdplugin_provisioner_label (str): label associated with RBD
            provisioner pods
            (default: constants.CSI_RBDPLUGIN_PROVISIONER_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: configured cluster namespace)

    Returns:
        list: csi-rbdplugin-provisioner Pod objects
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return [
        Pod(**info)
        for info in get_pods_having_label(rbdplugin_provisioner_label, namespace)
    ]
def get_pod_obj(name, namespace=None):
    """
    Return the pod object for a given pod name.

    Args:
        name (str): Name of the resource
        namespace (str): Namespace to look the resource up in

    Returns:
        Pod: A pod object
    """
    ocp_obj = OCP(api_version="v1", kind=constants.POD, namespace=namespace)
    return Pod(**ocp_obj.get(resource_name=name))
def get_pod_logs(
    pod_name, container=None, namespace=defaults.ROOK_CLUSTER_NAMESPACE, previous=False
):
    """
    Get logs from a given pod.

    Args:
        pod_name (str): Name of the pod
        container (str): Name of the container
        namespace (str): Namespace of the pod
        previous (bool): True, if pod previous log required. False otherwise.

    Returns:
        str: Output from 'oc get logs <pod_name>' command
    """
    cmd_parts = [f"logs {pod_name}"]
    if container:
        cmd_parts.append(f"-c {container}")
    if previous:
        cmd_parts.append("--previous")
    pod = OCP(kind=constants.POD, namespace=namespace)
    return pod.exec_oc_cmd(" ".join(cmd_parts), out_yaml_format=False)
def get_pod_node(pod_obj):
    """
    Get the node that the pod is running on.

    Args:
        pod_obj (OCS): The pod object

    Returns:
        ocs_ci.ocs.ocp.OCP: The node object
    """
    pod_spec = pod_obj.get().get("spec")
    return node.get_node_objs(node_names=pod_spec.get("nodeName"))[0]
def delete_pods(pod_objs, wait=True):
    """
    Delete a list of pod objects.

    Args:
        pod_objs (list): List of the pod objects to be deleted
        wait (bool): Determines if the delete command should wait for
            completion
    """
    for pod_obj in pod_objs:
        pod_obj.delete(wait=wait)
def validate_pods_are_respinned_and_running_state(pod_objs_list):
    """
    Verify the given pods were respinned and are in Running state.

    Args:
        pod_objs_list (list): List of the pods obj

    Returns:
        bool: True if the pods are respinned and running, False otherwise

    Raises:
        ResourceWrongStatusException: In case the resources hasn't
            reached the Running state
    """
    for pod_obj in pod_objs_list:
        helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING, timeout=180)
    for pod_obj in pod_objs_list:
        start_time = pod_obj.get()["status"]["startTime"]
        started_at = calendar.timegm(time.strptime(start_time, "%Y-%m-%dT%H:%M:%SZ"))
        age_seconds = time.time() - started_at
        # A pod that is an hour old or more was clearly not respun recently
        if age_seconds / 3600 >= 1:
            logger.error(
                f"Pod {pod_obj.name} is not respinned, the age of the pod is {start_time}"
            )
            return False
    return True
def verify_node_name(pod_obj, node_name):
    """
    Verify that the pod is running on a particular node.

    Args:
        pod_obj (Pod): The pod object
        node_name (str): The name of node to check

    Returns:
        bool: True if the pod is running on a particular node, False otherwise
    """
    logger.info(
        f"Checking whether the pod {pod_obj.name} is running on " f"node {node_name}"
    )
    actual_node = pod_obj.get().get("spec").get("nodeName")
    if actual_node != node_name:
        logger.info(
            f"The pod {pod_obj.name} is not running on the specified node "
            f"specified node: {node_name}, actual node: {actual_node}"
        )
        return False
    logger.info(
        f"The pod {pod_obj.name} is running on the specified node " f"{actual_node}"
    )
    return True
def get_pvc_name(pod_obj):
    """
    Get the name of the PVC backing the pod's first volume.

    Args:
        pod_obj (Pod): The pod object

    Returns:
        str: The pvc name of a given pod_obj

    Raises:
        UnavailableResourceException: If no pvc attached
    """
    first_volume = pod_obj.get().get("spec").get("volumes")[0]
    pvc = first_volume.get("persistentVolumeClaim")
    if not pvc:
        raise UnavailableResourceException
    return pvc.get("claimName")
def get_used_space_on_mount_point(pod_obj):
    """
    Get the used space on a mount point.

    Args:
        pod_obj (POD): The pod object

    Returns:
        str: Percentage representing the used space on the mount point
    """
    # The usage column precedes the mount path in whitespace-split
    # 'df -kh' output
    df_fields = pod_obj.exec_cmd_on_pod(command="df -kh").split()
    return df_fields[df_fields.index(constants.MOUNT_POINT) - 1]
def get_plugin_pods(interface, namespace=None):
    """
    Fetches info of csi-cephfsplugin pods or csi-rbdplugin pods.

    Args:
        interface (str): Interface type. eg: CephBlockPool, CephFileSystem
        namespace (str): Name of cluster namespace

    Returns:
        list: csi-cephfsplugin pod objects or csi-rbdplugin pod objects

    Raises:
        ValueError: If the interface is not a supported one
    """
    if interface == constants.CEPHFILESYSTEM:
        plugin_label = constants.CSI_CEPHFSPLUGIN_LABEL
    elif interface == constants.CEPHBLOCKPOOL:
        plugin_label = constants.CSI_RBDPLUGIN_LABEL
    else:
        # Previously an unsupported interface crashed later with an
        # unhelpful NameError on 'plugin_label'; fail fast instead.
        raise ValueError(f"Unsupported interface: {interface}")
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    plugins_info = get_pods_having_label(plugin_label, namespace)
    return [Pod(**plugin) for plugin in plugins_info]
def get_plugin_provisioner_leader(interface, namespace=None, leader_type="provisioner"):
    """
    Get csi-cephfsplugin-provisioner or csi-rbdplugin-provisioner leader pod

    Args:
        interface (str): Interface type. eg: CephBlockPool, CephFileSystem
        namespace (str): Name of cluster namespace
        leader_type (str): Parameter to check the lease. eg: 'snapshotter' to
            select external-snapshotter leader holder

    Returns:
        Pod: csi-cephfsplugin-provisioner or csi-rbdplugin-provisioner leader
            pod
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    # Each sidecar type uses a differently named Lease object; the map gives
    # the name prefix (the provisioner's lease is named after the namespace)
    leader_types = {
        "provisioner": namespace,
        "snapshotter": f"external-snapshotter-leader-{namespace}",
        "resizer": f"external-resizer-{namespace}",
        "attacher": f"external-attacher-{namespace}",
    }
    # NOTE(review): there is no else branch - an interface value other than
    # CephBlockPool/CephFileSystem leaves 'lease_cmd' unbound and raises
    # NameError below.
    if interface == constants.CEPHBLOCKPOOL:
        lease_cmd = f"get leases {leader_types[leader_type]}-rbd-csi-ceph-com -o yaml"
    elif interface == constants.CEPHFILESYSTEM:
        lease_cmd = (
            f"get leases {leader_types[leader_type]}-cephfs-csi-ceph-com " "-o yaml"
        )
    ocp_obj = ocp.OCP(kind=constants.POD, namespace=namespace)
    lease = ocp_obj.exec_oc_cmd(command=lease_cmd)
    # The Lease's holderIdentity holds the name of the current leader pod
    leader = lease.get("spec").get("holderIdentity").strip()
    assert leader, "Couldn't identify plugin provisioner leader pod."
    logger.info(f"Plugin provisioner leader pod is {leader}")
    # Point the OCP object at the leader pod so get() fetches its resource
    ocp_obj._resource_name = leader
    leader_pod = Pod(**ocp_obj.get())
    return leader_pod
def get_operator_pods(operator_label=constants.OPERATOR_LABEL, namespace=None):
    """
    Fetches info about rook-ceph-operator pods in the cluster.

    Args:
        operator_label (str): Label associated with rook-ceph-operator pod
        namespace (str): Namespace in which ceph cluster lives

    Returns:
        list: of rook-ceph-operator pod objects
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**info) for info in get_pods_having_label(operator_label, namespace)]
def upload(pod_name, localpath, remotepath, namespace=None):
    """
    Upload a file to a pod.

    Args:
        pod_name (str): Name of the pod
        localpath (str): Local file to upload
        remotepath (str): Target path on the pod
        namespace (str): Namespace of the pod
            (default: constants.DEFAULT_NAMESPACE)
    """
    namespace = namespace or constants.DEFAULT_NAMESPACE
    run_cmd(
        f"oc -n {namespace} cp {os.path.expanduser(localpath)} {pod_name}:{remotepath}"
    )
def download_file_from_pod(pod_name, remotepath, localpath, namespace=None):
    """
    Download a file from a pod.

    Args:
        pod_name (str): Name of the pod
        remotepath (str): Source path on the pod
        localpath (str): Local path to download to
        namespace (str): The namespace of the pod
            (default: constants.DEFAULT_NAMESPACE)
    """
    namespace = namespace or constants.DEFAULT_NAMESPACE
    run_cmd(
        f"oc -n {namespace} cp {pod_name}:{remotepath} {os.path.expanduser(localpath)}"
    )
def wait_for_storage_pods(timeout=200):
    """
    Check all OCS pods status, they should be in Running or Completed state

    Args:
        timeout (int): Number of seconds to wait for pods to get into correct
            state
    """
    all_pod_obj = get_all_pods(namespace=defaults.ROOK_CLUSTER_NAMESPACE)
    # Ignoring pods with "app=rook-ceph-detect-version" app label
    all_pod_obj = [
        pod
        for pod in all_pod_obj
        if pod.get_labels()
        and constants.ROOK_CEPH_DETECT_VERSION_LABEL not in pod.get_labels()
    ]
    for pod_obj in all_pod_obj:
        state = constants.STATUS_RUNNING
        # Deploy and deviceset (OSD prepare) pods are one-shot jobs and are
        # expected to be Completed rather than Running
        if any(i in pod_obj.name for i in ["-1-deploy", "ocs-deviceset"]):
            state = constants.STATUS_COMPLETED
        try:
            helpers.wait_for_resource_state(
                resource=pod_obj, state=state, timeout=timeout
            )
        except ResourceWrongStatusException:
            # 'rook-ceph-crashcollector' on the failed node stucks at
            # pending state. BZ 1810014 tracks it.
            # Ignoring 'rook-ceph-crashcollector' pod health check as
            # WA and deleting its deployment so that the pod
            # disappears. Will revert this WA once the BZ is fixed
            if "rook-ceph-crashcollector" in pod_obj.name:
                ocp_obj = ocp.OCP(namespace=defaults.ROOK_CLUSTER_NAMESPACE)
                pod_name = pod_obj.name
                # The deployment name is the pod name without the two
                # generated suffix components (replicaset hash + pod hash)
                deployment_name = "-".join(pod_name.split("-")[:-2])
                command = f"delete deployment {deployment_name}"
                ocp_obj.exec_oc_cmd(command=command)
                logger.info(f"Deleted deployment for pod {pod_obj.name}")
            else:
                raise
def verify_pods_upgraded(old_images, selector, count=1, timeout=720):
    """
    Verify that all pods do not have old image.

    Args:
        old_images (set): Set with old images.
        selector (str): Selector (e.g. app=ocs-osd)
        count (int): Number of resources for selector.
        timeout (int): Timeout in seconds to wait for pods to be upgraded.

    Raises:
        TimeoutException: If the pods didn't get upgraded till the timeout.
    """
    namespace = config.ENV_DATA["cluster_namespace"]
    info_message = (
        f"Waiting for {count} pods with selector: {selector} to be running "
        f"and upgraded."
    )
    logger.info(info_message)
    start_time = time.time()
    selector_label, selector_value = selector.split("=")
    while True:
        pod_count = 0
        # Initialize before the try block: if get_all_pods raises,
        # 'pods_len' would otherwise be unbound when compared below.
        pods_len = 0
        try:
            pods = get_all_pods(namespace, [selector_value], selector_label)
            pods_len = len(pods)
            logger.info(f"Found {pods_len} pod(s) for selector: {selector}")
            if pods_len != count:
                logger.warning(
                    f"Number of found pods {pods_len} is not as expected: " f"{count}"
                )
            for pod in pods:
                verify_images_upgraded(old_images, pod.get())
                pod_count += 1
        except CommandFailed as ex:
            logger.warning(
                f"Failed when getting pods with selector {selector}." f"Error: {ex}"
            )
        except NonUpgradedImagesFoundError as ex:
            logger.warning(ex)
        check_timeout_reached(start_time, timeout, info_message)
        if pods_len != count:
            logger.error(f"Found pods: {pods_len} but expected: {count}!")
        elif pod_count == count:
            return
def get_noobaa_pods(noobaa_label=constants.NOOBAA_APP_LABEL, namespace=None):
    """
    Fetches info about noobaa pods in the cluster.

    Args:
        noobaa_label (str): label associated with noobaa pods
            (default: constants.NOOBAA_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: configured cluster namespace)

    Returns:
        list: of noobaa pod objects
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    return [Pod(**info) for info in get_pods_having_label(noobaa_label, namespace)]
def wait_for_dc_app_pods_to_reach_running_state(
    dc_pod_obj, timeout=120, exclude_state=None
):
    """
    Wait for DC app pods to reach running state.

    Args:
        dc_pod_obj (list): list of dc app pod objects
        timeout (int): Timeout in seconds to wait for pods to be in Running
            state.
        exclude_state (str): A resource state to ignore
    """
    for pod_obj in dc_pod_obj:
        app_name = pod_obj.get_labels().get("name")
        for dpod in get_all_pods(selector_label=f"name={app_name}", wait=True):
            # Skip the one-shot deploy pod and any pod in the excluded state
            if "-1-deploy" in dpod.name or dpod.status == exclude_state:
                continue
            helpers.wait_for_resource_state(
                dpod, constants.STATUS_RUNNING, timeout=timeout
            )
def delete_deploymentconfig_pods(pod_obj):
    """
    Delete a DeploymentConfig pod and all the pods that are controlled by it.

    Args:
        pod_obj (Pod): Pod object
    """
    dc_ocp_obj = ocp.OCP(kind=constants.DEPLOYMENTCONFIG, namespace=pod_obj.namespace)
    dc_name = pod_obj.get_labels().get("name")
    for dc_info in dc_ocp_obj.get().get("items") or []:
        # Match the DC whose name equals the pod's 'name' label
        if dc_info.get("metadata").get("name") == dc_name:
            dc_ocp_obj.delete(resource_name=dc_name)
            dc_ocp_obj.wait_for_delete(resource_name=dc_name)
def wait_for_new_osd_pods_to_come_up(number_of_osd_pods_before):
    """
    Wait until at least one newly added OSD pod starts to come up.

    Args:
        number_of_osd_pods_before (int): Number of OSD pods before the new
            ones were added; pods past this index are treated as new.
    """
    status_options = ["Init:1/4", "Init:2/4", "Init:3/4", "PodInitializing", "Running"]
    try:
        for osd_pods in TimeoutSampler(timeout=180, sleep=3, func=get_osd_pods):
            # Pods appended after the original count are the new ones
            new_osd_pods = osd_pods[number_of_osd_pods_before:]
            new_pod_status_flags = [
                pod.status() in status_options for pod in new_osd_pods
            ]
            if any(new_pod_status_flags):
                logging.info("One or more of the new osd pods has started to come up")
                break
    except TimeoutExpiredError:
        logging.warning("None of the new osd pods reached the desired status")
def get_pod_restarts_count(namespace=defaults.ROOK_CLUSTER_NAMESPACE):
    """
    Get the restart count of every pod in a given namespace.

    Args:
        namespace (str): Namespace to inspect
            (default: defaults.ROOK_CLUSTER_NAMESPACE)

    Returns:
        dict: dictionary of pod name and its corresponding restart count
    """
    ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
    restart_dict = {}
    for pod_obj in get_all_pods(namespace):
        # osd-prepare and drain-canary pods get created freshly whenever an
        # osd needs to be added, so their restart counts are not meaningful
        if "rook-ceph-osd-prepare" in pod_obj.name:
            continue
        if "rook-ceph-drain-canary" in pod_obj.name:
            continue
        restart_dict[pod_obj.name] = int(
            ocp_pod_obj.get_resource(pod_obj.name, "RESTARTS")
        )
    logging.info(f"get_pod_restarts_count: restarts dict = {restart_dict}")
    return restart_dict
def check_pods_in_running_state(namespace=defaults.ROOK_CLUSTER_NAMESPACE):
    """
    Checks whether all the pods in a given namespace are in Running state or not

    Returns:
        Boolean: True, if all pods in Running state. False, otherwise

    """
    ret_val = True
    list_of_pods = get_all_pods(namespace)
    ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
    for p in list_of_pods:
        # Skip osd-prepare and drain-canary pods (created freshly when an osd
        # needs to be added) and debug pods - they are not expected to stay
        # Running. The original code had a redundant nested duplicate of this
        # check and fetched the STATUS twice per pod.
        if (
            "rook-ceph-osd-prepare" not in p.name
            and "rook-ceph-drain-canary" not in p.name
            and "debug" not in p.name
        ):
            status = ocp_pod_obj.get_resource(p.name, "STATUS")
            # Bugfix: the original used 'status not in "Running"', which is a
            # substring test - any status that happens to be a substring of
            # "Running" (e.g. "Run") would wrongly pass. Compare for equality.
            if status != "Running":
                logging.error(
                    f"The pod {p.name} is in {status} state. Expected = Running"
                )
                ret_val = False
    return ret_val
def get_running_state_pods(namespace=defaults.ROOK_CLUSTER_NAMESPACE):
    """
    Checks the running state pods in a given namespace.

    Returns:
        List: all the pod objects that are in running state only

    """
    ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
    all_pods = get_all_pods(namespace)
    # Keep only the pods whose reported STATUS contains "Running"
    return [
        pod
        for pod in all_pods
        if "Running" in ocp_pod_obj.get_resource(pod.name, "STATUS")
    ]
def wait_for_pods_to_be_running(timeout=200, namespace=defaults.ROOK_CLUSTER_NAMESPACE):
    """
    Wait for all the pods in a specific namespace to be running.

    Args:
        timeout (int): time to wait for pods to be running
        namespace (str): the namespace of the pods

    Returns:
        bool: True, if all pods in Running state. False, otherwise

    """
    sampler = TimeoutSampler(
        timeout=timeout,
        sleep=10,
        func=check_pods_in_running_state,
        namespace=namespace,
    )
    try:
        # Re-check periodically until every pod reports Running
        for all_running in sampler:
            if all_running:
                logging.info("All the pods reached status running!")
                return True
    except TimeoutExpiredError:
        logging.warning(
            f"Not all the pods reached status running " f"after {timeout} seconds"
        )
        return False
def list_of_nodes_running_pods(selector, namespace=defaults.ROOK_CLUSTER_NAMESPACE):
    """
    The function returns the list of nodes for the given selector

    Args:
        selector (str): The resource selector to search with

    Returns:
        list: a list of nodes that runs the given selector pods

    """
    matching_pods = get_all_pods(namespace=namespace, selector=[selector])
    pods_running_nodes = []
    for matching_pod in matching_pods:
        pods_running_nodes.append(get_pod_node(matching_pod))
    logger.info(f"{selector} running on nodes {pods_running_nodes}")
    # Deduplicate - several pods may share the same node
    return list(set(pods_running_nodes))
def get_osd_removal_pod_name(osd_id, timeout=60):
    """
    Get the osd removal pod name

    Args:
        osd_id (int): The osd's id to get the osd removal pod name
        timeout (int): The time to wait for getting the osd removal pod name

    Returns:
        str: The osd removal pod name

    """
    sampler = TimeoutSampler(
        timeout=timeout,
        sleep=5,
        func=get_pod_name_by_pattern,
        pattern=f"ocs-osd-removal-{osd_id}",
    )
    try:
        for candidate_names in sampler:
            if not candidate_names:
                continue
            osd_removal_pod_name = candidate_names[0]
            logging.info(f"Found pod {osd_removal_pod_name}")
            return osd_removal_pod_name
    except TimeoutExpiredError:
        logger.warning(f"Failed to get pod ocs-osd-removal-{osd_id}")
        return None
|
__init__.py | """
Python Geographic Visualizer (GeoVis)
**Version: 0.2.0**
**Date: April 15, 2014**
**Author: [Karim Bahgat](https://uit.academia.edu/KarimBahgat)**
**Contact: karim.bahgat.norway@gmail.com**
**Homepage: https://github.com/karimbahgat/geovis**
## Table of Contents
- [About](#about)
- [System Compatibility](#system-compatibility)
- [Dependencies](#dependencies)
- [License](#license)
- [How GeoVis Works](#how-geovis-works)
- [Usage Philosophy](#usage-philosophy)
- [Screen Coordinate System](#screen-coordinate-system)
- [Stylizing Options](#stylizing-options)
- [Text Options](#text-options)
- [Available Text Fonts](#available-text-fonts)
- [Functions and Classes](#functions-and-classes)
- [geovis.AskColor](#geovisaskcolor)
- [geovis.AskFieldName](#geovisaskfieldname)
- [geovis.AskNumber](#geovisasknumber)
- [geovis.AskShapefilePath](#geovisaskshapefilepath)
- [geovis.AskString](#geovisaskstring)
- [geovis.Color](#geoviscolor)
- [geovis.Layer](#geovislayer----class-object)
- [.AddClassification](#addclassification)
- [geovis.NewMap](#geovisnewmap----class-object)
- [.AddLegend](#addlegend)
- [.AddShape](#addshape)
- [.AddText](#addtext)
- [.AddToMap](#addtomap)
- [.DrawCircle](#drawcircle)
- [.DrawLine](#drawline)
- [.DrawRectangle](#drawrectangle)
- [.SaveMap](#savemap)
- [.ViewMap](#viewmap)
- [geovis.SaveShapefileImage](#geovissaveshapefileimage)
- [geovis.SetMapBackground](#geovissetmapbackground)
- [geovis.SetMapDimensions](#geovissetmapdimensions)
- [geovis.SetMapZoom](#geovissetmapzoom)
- [geovis.SetRenderingOptions](#geovissetrenderingoptions)
- [geovis.Shapefile](#geovisshapefile----class-object)
- [.ClearSelection](#clearselection)
- [.InvertSelection](#invertselection)
- [.SelectByQuery](#selectbyquery)
- [geovis.ShapefileFolder](#geovisshapefilefolder)
- [geovis.ViewShapefile](#geovisviewshapefile)
## About
Python Geographic Visualizer (GeoVis) is a standalone geographic visualization
module for the Python programming language intended for easy everyday-use by
novices and power-programmers alike. It has one-liners for quickly visualizing
a shapefile, building and styling basic maps with multiple shapefile layers,
and/or saving to imagefiles. Uses the built-in Tkinter or other third-party
rendering modules to do its main work. The current version is functional, but
should be considered a work in progress with potential bugs, so use with care.
For now, only visualizes shapefiles that are in lat/long unprojected coordinate
system.
### System Compatibility
Should work on Python version 2.x and Windows. Has not yet been tested on
Python 3.x or other OS systems.
### Dependencies
Technically speaking, GeoVis has no external dependencies, but it is highly
recommended that you install the [Aggdraw](http://effbot.org/zone/aggdraw-index.htm),
[PIL](http://www.pythonware.com/products/pil/) or [PyCairo](http://cairographics.org/pycairo/)
renderer libraries to do the rendering. GeoVis automatically detects which
renderer module you have and uses the first it finds in the following order
(aggdraw, PIL, pycairo). If you wish to manually choose a different renderer
this has to be specified for each session. If none of these are available then
GeoVis will default to using the built-in Tkinter Canvas as its renderer, but
due to major limitations this is not recommended for viewing larger shapefiles.
### License
Contributors are wanted and needed, so this code is free to share, use, reuse,
and modify according to the MIT license, see license.txt
## How GeoVis works
The following section describes some general info and options about how
GeoVis works.
### Usage Philosophy
The general philosophy of GeoVis is that it should be easy to both learn
and use for end-users, particularly for people who are new to programming.
More specifically:
- It should be logical and intuitive what commands to use.
- Making a simple map should require relatively few lines of code.
- The user should only have to learn and deal with a few basic commands.
- All command names use full wording and first-letter uppercasing
of each word for easy identification, ala the Arcpy syntax.
The precise commands and arguments to use can be looked up in the
documentation. Using these the general steps to follow are:
1. Create a new map
2. Create and symbolize layers of geographical data
3. Add the layers to the map
4. View or save the map
### Screen Coordinate system
Many of the rendering methods let the user specify one or more
locations in relative screen coordinates. These screen coordinates
are given as x and y values with a float between 0 and 1. The relative
coordinates (0,0) places something in the upper left corner of the
screen, while (1,1) places it in the bottom right corner.
### Stylizing Options
Styling a map layer is done by setting one or more keyword arguments
during the creation of the Layer class. The same styling keywords can
also be used when manually drawing shapes and figures on a map (the ones
offering the "customoptions" argument option).
| __option__ | __description__
| --- | ---
| fillsize | the size of a circle, square, pyramid, or the thickness of a line. Has no effect on polygon shapes. Given as proportion of the map size, so that a circle of size 0.10 will cover about 10 percent of the map. A float between 0 and 1
| fillwidth | currently only used for the width of a pyramid when using the pyramid symbolizer. Given as proportion of the map size. A float between 0 and 1
| fillheight | currently has no effect
| fillcolor | the hex color of the fill
| outlinewidth | the width of the outline if any, given as proportion of the fillsize. A float between 0 and 1
| outlinecolor | the hex color of the outline
### Text Options
When adding text to a map one can use one or more of the following
keyword arguments:
| __option__ | __description__
| --- | ---
| textfont | the name of the textfont to use; available textfonts vary depending on the renderer being used, see list below.
| textsize | the size of the text, given as percent pixelheight of the map dimensions (eg 0.20 being a really large text with a size of about 20 percent of the map)
| textcolor | the hex color string of the text
| textopacity | currently not being used
| texteffect | currently not being used
| textanchor | what area of the text to use as the anchor point when placing it, given as one of the following compass direction strings: center, n, ne, e, se, s, sw, w, nw
| textboxfillcolor | the fillcolor of the text's bounding box, if any (default is None, meaning no bounding box)
| textboxoutlinecolor | the outlinecolor of the bounding box, if any (default is None, meaning no bounding box outline)
| textboxfillsize | proportional size of the text's bounding box relative to the textsize (eg 1.10 gives the bounding box about a 10 percent padding around the text, default is 1.10)
| textboxoutlinewidth | width of the textbox outline as percent of the textboxfilling (eg 1 gives a 1 percent outline width)
| textboxopacity | currently not being used
### Available Text Fonts
Only a few basic text fonts are currently supported by each renderer.
They are:
- Tkinter
- times new roman
- courier
- helvetica
- PIL
- times new roman
- arial
- Aggdraw
- times new roman
- arial
- PyCairo
- serif
- sans-serif
- cursive
- fantasy
- monospace
"""
# IMPORTS
#builtins
import sys, os, itertools, array, random, math, datetime, platform, operator
import threading, Queue, multiprocessing
import Tkinter as tk
import tkFileDialog, tkColorChooser
#customized
import messages, listy, guihelper
#third party modules
import shapefile_fork as pyshp
import colour
# GLOBAL VARS
OSSYSTEM = platform.system().lower()

# PyShp shapetype constants (the shapeType integer codes from the ESRI
# shapefile specification)
NULL = 0
POINT = 1
POLYLINE = 3
POLYGON = 5
MULTIPOINT = 8
POINTZ = 11
POLYLINEZ = 13
POLYGONZ = 15
MULTIPOINTZ = 18
POINTM = 21
POLYLINEM = 23
POLYGONM = 25
MULTIPOINTM = 28
MULTIPATCH = 31

# PyShp shapetypes as text
PYSHPTYPE_AS_TEXT = {
    NULL: "Null",
    POINT: "Point",
    POINTZ: "PointZ",
    POINTM: "PointM",
    POLYLINE: "PolyLine",
    POLYLINEZ: "PolyLineZ",
    POLYLINEM: "PolyLineM",
    POLYGON: "Polygon",
    POLYGONZ: "PolygonZ",
    POLYGONM: "PolygonM",
    MULTIPOINT: "MultiPoint",
    MULTIPOINTZ: "MultiPointZ",
    MULTIPOINTM: "MultiPointM",
    MULTIPATCH: "MultiPatch"}

# default rendering options
# Bugfix: catch only ImportError instead of a bare except, so genuine
# failures inside an installed module are not silently swallowed
try:
    import numpy
    NUMPYSPEED = True
except ImportError:
    NUMPYSPEED = False
REDUCEVECTORS = False
SHOWPROGRESS = True

# some map stuff
MAPBACKGROUND = None

# Pick the first available renderer in order of preference:
# aggdraw, then PIL, then pycairo, falling back to the built-in Tkinter canvas
try:
    import aggdraw
    RENDERER = "aggdraw"
except ImportError:
    try:
        import PIL
        RENDERER = "PIL"
    except ImportError:
        try:
            import cairo
            RENDERER = "pycairo"
        except ImportError:
            RENDERER = "tkinter"

# setup coordinate system (this can be done by user too, see SetMapZoom
# function towards the bottom)
PROJ_XYRATIO = 2.0
XMIN, XMAX = (-180, 180)
YMIN, YMAX = (-90, 90)
x2x = (XMIN, XMAX)
y2y = (YMIN, YMAX)
nw = (-1 * min(x2x), max(y2y))  # northwest corner of zoomextent
XWIDTH = x2x[1] - x2x[0]
YHEIGHT = y2y[1] - y2y[0]
XOFFSET = nw[0]
YOFFSET = nw[1]

# set mapdims to window size: a throwaway Tk window is created only to probe
# the screen dimensions, then destroyed
mapdimstest = tk.Tk()
width = int(mapdimstest.winfo_screenwidth())
height = int(mapdimstest.winfo_screenheight())
if width / float(height) < PROJ_XYRATIO:
    # snap to world ratio in case screenratio is different
    height = width / PROJ_XYRATIO
MAPWIDTH = width
MAPHEIGHT = height
mapdimstest.destroy()
del mapdimstest
# update mapdims: recompute the transform globals whenever map size or zoom changes
def _UpdateMapDims():
    """
    Refresh the module-level coordinate-transform globals.

    Only the numpy fast path precomputes vector constants (used by
    _PyShpShape._MapCoords); the plain-Python path reads the scalar globals
    (XOFFSET, XWIDTH, MAPWIDTH, ...) directly on each call, so there is
    nothing to do for it here.
    """
    if NUMPYSPEED:
        global ZOOMDIM, OFFSET, TRANSLATION, RENDERAREA, SCALING
        ZOOMDIM = numpy.array([XWIDTH,YHEIGHT])
        OFFSET = numpy.array([0.0,0.0])*-1 #move x or y by normal +- coordinates (not compatible with zoom yet)
        TRANSLATION = numpy.array([XOFFSET, -YOFFSET]) + OFFSET
        # y component is negative because screen y grows downward
        RENDERAREA = numpy.array([MAPWIDTH, -MAPWIDTH/PROJ_XYRATIO])
        SCALING = RENDERAREA / ZOOMDIM
    else:
        # non-numpy mode: everything is handled per-point in _MapCoords
        pass
# initialize the transform globals once at import time
_UpdateMapDims()
# Preset color-style parameters: each named style maps to an intensity
# (saturation) and a brightness value between 0 and 1.
COLORSTYLES = {
    "strong": {"intensity": 1, "brightness": 0.5},
    "dark": {"intensity": 0.8, "brightness": 0.2},
    "matte": {"intensity": 0.4, "brightness": 0.2},
    "bright": {"intensity": 0.8, "brightness": 0.7},
    "weak": {"intensity": 0.3, "brightness": 0.5},
    "pastelle": {"intensity": 0.5, "brightness": 0.6},
}
# INTERNAL CLASSES
class _PyShpShape:
    """
    Internal wrapper around a single shapefile feature.

    Holds the feature's (multi-part) coordinates in map units and converts
    them on demand to renderer pixel coordinates via the to_*() methods.
    Coordinates are always stored as a sequence of parts ("multis"), even
    for single-part shapes.
    """
    def __init__(self, shapefile, fieldnames, uniqid, coords, shapetype, bbox=None):
        """
        every shapetype is always multi (upon entry) so have to be looped through when retrieved.

        shapefile is the underlying pyshp reader (used for record lookups),
        uniqid is the feature's record index, coords is a nested sequence of
        coordinate parts, shapetype one of "point"/"line"/"polygon" variants.
        """
        self._shapefile = shapefile
        self.fieldnames = fieldnames
        self.id = uniqid
        self.coords = coords
        #print coords
        if not bbox:
            #this is only needed for single points
            x,y = coords[0][0]
            bbox = [x,y,x,y]
        self.bbox = bbox
        self.type = shapetype
    def to_tkinter(self):
        # tkinter takes flat coordinate lists as-is
        convertedcoords = (self._MapCoords(eachmulti) for eachmulti in self.coords)
        formattedcoords = convertedcoords
        return (eachmulti for eachmulti in formattedcoords)
    def to_PIL(self):
        # PIL accepts flat float arrays; array("f") avoids per-item boxing
        convertedcoords = (self._MapCoords(eachmulti) for eachmulti in self.coords)
        formattedcoords = convertedcoords
        return (array.array("f",eachmulti) for eachmulti in formattedcoords)
    def to_aggdraw(self):
        # same flat float-array format as PIL
        convertedcoords = (self._MapCoords(eachmulti) for eachmulti in self.coords)
        formattedcoords = convertedcoords
        return (array.array("f",eachmulti) for eachmulti in formattedcoords)
    def to_pydraw(self):
        # pydraw wants (x, y) pairs rather than a flat list
        convertedcoords = (self._MapCoords(eachmulti) for eachmulti in self.coords)
        formattedcoords = (self.__pairwise(eachmulti) for eachmulti in convertedcoords)
        return (eachmulti for eachmulti in formattedcoords)
    def to_pycairo(self):
        # pycairo draws a path one xy point at a time, so pair up the coords
        convertedcoords = (self._MapCoords(eachmulti) for eachmulti in self.coords)
        formattedcoords = (self.__pairwise(eachmulti) for eachmulti in convertedcoords)
        return (eachmulti for eachmulti in formattedcoords)
    def GetAttributes(self, fieldname=None):
        """
        Return the value of one field for this feature, or the entire
        attribute row when no fieldname is given.
        """
        if fieldname:
            rowdict = dict(zip(self.fieldnames, self._shapefile.record(self.id)))
            fieldvalue = rowdict[fieldname]
            return fieldvalue
        else:
            entirerow = self._shapefile.record(self.id)
            return entirerow
    def GetTime(self):
        """
        Build a datetime for this feature from the time fields assigned via
        Shapefile.AssignTime. Each timefields entry is either a fieldname
        (value is read from the record) or a fixed fallback value.
        """
        shapetime = dict()
        for timeunit,timeunitfield in self._shapefile.timefields.iteritems():
            if timeunitfield:
                value = self.GetAttributes(fieldname=timeunitfield)
            else:
                value = timeunitfield
            # clamp missing/invalid values to datetime's minimums
            if timeunit in ("year","month","day"):
                if value < 1:
                    value = 1 #missing value for data, so set to 1
            else:
                if value < 0:
                    value = 0 #missing value for data, so set to 0
            shapetime.update([(timeunit,value)])
        try:
            return datetime.datetime(**shapetime)
        except:
            # NOTE(review): invalid dates are printed and None is returned
            print(shapetime)
    def GetAvgCenter(self):
        """
        so far only simple nonnumpy

        Returns the feature's center in pixel coordinates: the mean of the
        points for point shapes, otherwise the bbox midpoint.
        """
        if "point" in self.type:
            xs,ys = itertools.izip(*self.coords)
            avgx = sum(xs)/float(len(xs))
            avgy = sum(ys)/float(len(ys))
        else:
            x1,y1,x2,y2 = self.bbox
            avgx = (x1+x2)/2.0
            avgy = (y1+y2)/2.0
        avgcenter = [(avgx,avgy)]
        avgcenter = self._MapCoords(avgcenter)
        return avgcenter
    def GetMultiCenters(self):
        """
        so far only simple nonnumpy

        Yields the midpoint of each part of a multi-part shape.
        NOTE(review): _MapCoords is called here with scalar values, but it
        iterates (x, y) points - this looks inconsistent with _MapCoords'
        contract; verify against callers.
        """
        for single in self.coords:
            xs = [xy[0] for xy in single]
            xmid = sum(xs)/float(len(xs))
            xmid = self._MapCoords(xmid)
            ys = [xy[1] for xy in single]
            ymid = sum(ys)/float(len(ys))
            ymid = self._MapCoords(ymid)
            yield (xmid,ymid)
    #internal use only
    def __pairwise(self, coords, batchsize=2):
        """
        only used when sending coordinates to pycairo, bc can only draw as a path one xy point at a time
        """
        # group the flat list [x1, y1, x2, y2, ...] into [(x1, y1), (x2, y2), ...]
        return [pair for pair in itertools.izip(*[iter(coords)] * batchsize)]
    def _MapCoords(self, incoords):
        """
        takes single set of coords, not multicoords

        Converts one part's lat/long coordinates to pixel coordinates,
        returning them as a flat [x1, y1, x2, y2, ...] sequence. With
        REDUCEVECTORS on, consecutive points that map to the same pixel
        are collapsed.
        """
        if NUMPYSPEED:
            # vectorized affine transform using globals set by _UpdateMapDims
            converted = (incoords + TRANSLATION) * SCALING
            #for smoother drawings comment out the rint and vstack commands below
            if REDUCEVECTORS:
                converted = numpy.rint(converted).astype(int)
                # drop consecutive duplicate pixels
                converted = numpy.vstack((converted[0], converted[1:][numpy.any(converted[1:]!=converted[:-1], axis=1)]))
            aslist = converted.flatten()
            return aslist
        else:
            outcoords = []
            previous = None
            for point in incoords:
                inx, iny = point
                newx = (XOFFSET+inx)/XWIDTH*MAPWIDTH
                # y axis is flipped: screen y grows downward
                newy = MAPHEIGHT-(YOFFSET+iny)/YHEIGHT*MAPHEIGHT
                if REDUCEVECTORS:
                    newpoint = (int(newx),int(newy))
                    if newpoint != previous:
                        outcoords.extend(newpoint)
                        previous = newpoint
                else:
                    newpoint = [newx,newy]
                    outcoords.extend(newpoint)
            return outcoords
class _GeojShape(_PyShpShape):
    """
    Wraps a GeoJSON geometry dict so it can be rendered like a _PyShpShape.

    Unfinished: only Polygon, MultiPolygon and Point geometries are handled,
    and the pyshp-specific attributes (_shapefile, fieldnames, id, bbox for
    polygons) are never initialized, so record lookups are unavailable.
    """
    def __init__(self, geojson):
        #not finished, only barely works with polygons and multipolygons
        self.geojson = geojson
        _type = geojson["type"]
        if _type == "Polygon":
            # keep only the exterior ring
            self.coords = [geojson["coordinates"][0]]
            self.type = "polygon"
        elif _type == "MultiPolygon":
            # exterior ring of each member polygon
            self.coords = [eachmulti[0] for eachmulti in geojson["coordinates"]]
            self.type = "polygon"
        elif _type == "Point":
            # Bugfix: the original read self.coords before ever assigning it
            # (AttributeError) and never set self.type. Build the nested
            # coords structure first, then derive the degenerate bbox.
            self.coords = [[geojson["coordinates"]]]
            self.type = "point"
            x,y = self.coords[0][0]
            bbox = [x,y,x,y]
            self.bbox = bbox
class Shapefile:
    #builtins
    """
    Opens and reads a shapefile. Supports looping through it to extract one PyShpShape instance at a time. Using it with a print() function passes the filename, and measuring its len() returns the number of rows.

    | __options__ | __description__
    | --- | ---
    | shapefilepath | the filepath of the shapefile, including the .shp extension
    | showprogress | True if wanting to display a progressbar while looping through the shapefile (default), otherwise False (default)
    | progresstext | a textstring to print alongside the progressbar to help identify why it is being looped
    """
    def __init__(self, shapefilepath=None, showprogress="not specified", progresstext="looping shapefile"):
        # "not specified" is a sentinel meaning: defer to the module-level
        # SHOWPROGRESS flag at iteration time (see __iter__)
        self.showprogress = showprogress
        self.progresstext = progresstext
        # "all" is a sentinel meaning no selection filter is active
        self.selection = "all"
        if shapefilepath:
            self.shapefile = pyshp.Reader(shapefilepath)
            # strip extension and leading folders to get the bare name
            name = ".".join(shapefilepath.split(".")[:-1])
            name = name.split("\\")[-1]
            self.filename = name
            # fields[0] is pyshp's internal deletion-flag field, so skip it
            self.fieldnames = [fieldinfo[0] for fieldinfo in self.shapefile.fields[1:]]
        else:
            # no path given: start an empty shapefile to be built via a writer
            self.shapefile = None
            self.filename = "empty_shapefile"
            self.fieldnames = []
            self.writer = pyshp.Writer()
    def __len__(self):
        # number of records (rows) in the shapefile
        self._UpdateShapefile()
        return self.shapefile.numRecords
    def __str__(self):
        return self.filename
    def __iter__(self):
        """
        Yield one _PyShpShape per feature, honoring the current selection
        and skipping shapes whose bbox lies entirely outside the map extent.
        """
        self._UpdateShapefile()
        #prepare progressreporting
        if self.showprogress == "not specified":
            # fall back to the module-level default
            if SHOWPROGRESS:
                shellreport = "progressbar"
            else:
                shellreport = None
        else:
            shellreport = self.showprogress
        SHAPEFILELOOP = messages.ProgressReport(self.shapefile.iterShapes(numpyspeed=NUMPYSPEED), text=self.progresstext+" "+self.filename, shellreport=shellreport, countmethod="manual", genlength=self.shapefile.numRecords)
        if NUMPYSPEED:
            #loop
            for shapeindex, shape in enumerate(SHAPEFILELOOP):
                SHAPEFILELOOP.Increment()
                if self.selection != "all":
                    # only yield selected shapes that overlap the zoom extent
                    if shapeindex in self.selection:
                        pyshpshape = self._PrepShape(shapeindex, shape)
                        xmin,ymin,xmax,ymax = pyshpshape.bbox
                        if (xmin < XMAX and xmax > XMIN) or (ymin < YMAX and ymax > YMIN):
                            yield pyshpshape
                else:
                    pyshpshape = self._PrepShape(shapeindex, shape)
                    xmin,ymin,xmax,ymax = pyshpshape.bbox
                    if (xmin < XMAX and xmax > XMIN) or (ymin < YMAX and ymax > YMIN):
                        yield pyshpshape
        else:
            # same loop as the numpy branch; _PrepShape handles the difference
            for shapeindex, shape in enumerate(SHAPEFILELOOP):
                SHAPEFILELOOP.Increment()
                if self.selection != "all":
                    if shapeindex in self.selection:
                        pyshpshape = self._PrepShape(shapeindex, shape)
                        xmin,ymin,xmax,ymax = pyshpshape.bbox
                        if (xmin < XMAX and xmax > XMIN) or (ymin < YMAX and ymax > YMIN):
                            yield pyshpshape
                else:
                    pyshpshape = self._PrepShape(shapeindex, shape)
                    xmin,ymin,xmax,ymax = pyshpshape.bbox
                    if (xmin < XMAX and xmax > XMIN) or (ymin < YMAX and ymax > YMIN):
                        yield pyshpshape
    #BASICS
##    def SetFields(self, fieldnames):
##        self.fieldnames = fieldnames
##    def GetFeature(self, shapeindex):
##        self._UpdateShapefile()
##        shape = self.shapefile.shape(shapeindex, numpyspeed=NUMPYSPEED)
##        return self._PrepShape(shapeindex, shape)
##    def AddFeature(self, shape, record, shapetype=None):
##        #first process inshape somehow
##        #...
##        self.writer._shapes.append(shapeinput)
##        self.writer.records.append(attributes)
##    def ChangeFeature(self, shapeid=None, shape=None, recordid=None, record=None):
##        #first process inshape somehow
##        #...
##        if shapeid and shape:
##            self.writer._shapes[shapeid] = shape
##        if recordid and record:
##            self.writer.records[recordid] = attributes
    #USEFUL
##    def Save(self, savepath):
##        "work in progress..."
##        # create writer
##        shapewriter = pyshp.Writer()
##        shapewriter.autoBalance = 1
##        # add fields in correct fieldtype
##        self.progresstext = "checking fieldtypes for"
##        for fieldname in self.fieldnames:
##            if not fieldname.startswith(tuple(shapetable.invisiblefields)):
##                # write field
##                fieldinfo = self._ShapefileFieldInfo(shapetable, fieldname)
##                shapewriter.field(*fieldinfo)
##        self.progresstext = "saving"
##        for shape in self:
##            attributes = dict(zip(self.fieldnames, shape.GetAttributes()))
##        self.writer.save(savepath)
##
##        """shapefilename: shapefile to be saved, name given to shapefile when loaded into shapeholder, type str; savepath: path of where to save, type str, use double backslash"""
##        # create writer
##        shapewriter = shapefile.Writer()
##        shapewriter.autoBalance = 1
##        # add fields in correct fieldtype
##        shapetable = self.filenames[shapefilename].table
##        for fieldname in shapetable.fields:
##            if not fieldname.startswith(tuple(shapetable.invisiblefields)):
##                # write field
##                fieldinfo = self._ShapefileFieldInfo(shapetable, fieldname)
##                shapewriter.field(*fieldinfo)
##        # convert shapely shapetypes to shapefile points/parts format, and then match shapeid rows to each shape and write to output
##        origshapes = self.filenames[shapefilename].shapes
##        for eachshapeid in origshapes:
##            points, shapetype = self._ShapelyToShapeparts(origshapes[eachshapeid]["shape"])
##            shapewriter.poly(parts=points, shapeType=shapetype)
##            rowinfo = [encode(eachcell, strlen=250, floatlen=16, floatprec=6) for index, eachcell in enumerate(shapetable.FetchEntireRow(eachshapeid)) if not shapetable.fields[index].startswith(tuple(shapetable.invisiblefields))]
##            shapewriter.record(*rowinfo)
##        # save
##        shapewriter.save(savepath)
##        # finally copy prj file from original if exists
##        self._SaveProjection(shapefilename, savepath)
    def SelectByQuery(self, query, inverted=False):
        """
        Make a query selection on the shapefile so that only those features where the query evaluates to True are returned.

        | __option__ | __description__
        | --- | ---
        | query | a string containing Python-like syntax (required). Feature values for fieldnames can be grabbed by specifying the fieldname as if it were a variable (case-sensitive). Note that evaluating string expressions is currently case-sensitive, which becomes particularly unintuitive for less-than/more-than alphabetic queries.
        | *inverted | a boolean specifying whether to invert the selection (default is False).
        """
        # SECURITY NOTE(review): the query string and the field values are fed
        # to exec/eval below, so arbitrary code in either will execute - only
        # use with trusted shapefiles and trusted query strings
        self._UpdateShapefile()
        self.ClearSelection()
        self.progresstext = "making selection for"
        tempselection = []
        for shape in self:
            attributes = dict(zip(self.fieldnames, shape.GetAttributes()))
            #first make temp variables out of all fieldnames
            for field in self.fieldnames:
                value = attributes[field]
                if isinstance(value, basestring):
                    # triple-quote strings so they survive exec as literals
                    value = '"""'+str(value)+'"""'
                elif isinstance(value, (int,float)):
                    value = str(value)
                code = str(field)+" = "+value
                exec(code)
            #then run query
            queryresult = eval(query)
            if queryresult:
                tempselection.append(shape.id)
        self.selection = tempselection
        if inverted:
            self.InvertSelection()
        return self.selection
    def InvertSelection(self):
        """
        Inverts the current selection
        """
        self.progresstext = "inverting selection for"
        oldselection = self.selection
        # clear first so the loop below iterates over every shape
        self.ClearSelection()
        tempselection = [shape.id for shape in self if shape.id not in oldselection]
        self.selection = tempselection
    def ClearSelection(self):
        """
        Clears the current selection so that all shapes will be looped
        """
        self.selection = "all"
##    def SplitByAttribute(self, fieldname):
##        self._UpdateShapefile()
##        pass
    def AssignTime(self, yearfield=0, monthfield=1, dayfield=1, hourfield=0, minutefield=0, secondfield=0):
        """
        Assigns a field to contain the time dimension of a shapefile. Used by the NewMap SaveTimeSequence method to determine the time of multiple shapefiles simultaneously.

        Each argument is either a fieldname to read per feature, or a fixed
        fallback value - see _PyShpShape.GetTime for how they are consumed.
        """
        self.shapefile.timefields = dict(year=yearfield,month=monthfield,day=dayfield,hour=hourfield,minute=minutefield,second=secondfield)
    #
    #INTERNAL USE ONLY
    def _UpdateShapefile(self):
        #first check that a few essentials have been set...
        #...
        #then activate
        #self.shapefile = pyshp.Reader(self.writer)
        if self.filename == "empty_shapefile":
            #only do this first time done on a previously empty shapefile
            self.filename = "custom_shapefile"
            self.fieldnames = [fieldinfo[0] for fieldinfo in self.shapefile.fields[1:]]
    def _PrepShape(self, shapeindex, shape):
        """
        Wrap a raw pyshp shape in a _PyShpShape, normalizing its shapetype
        name and splitting multi-part geometries into nested coordinate lists.
        """
        if NUMPYSPEED:
            shapetype = PYSHPTYPE_AS_TEXT[shape.shapeType].lower()
            if "polygon" in shapetype:
                if not numpy.any(shape.parts):
                    # single-part polygon: wrap the points in one nested list
                    nestedcoords = [shape.points]
                    return _PyShpShape(self.shapefile, self.fieldnames, shapeindex, nestedcoords, shapetype, bbox=shape.bbox)
                else:
                    # multi-part: split the flat point array at each part offset
                    coords = numpy.split(shape.points, shape.parts[1:])
                    return _PyShpShape(self.shapefile, self.fieldnames, shapeindex, coords, "polygon", bbox=shape.bbox)
            elif "line" in shapetype:
                if not numpy.any(shape.parts):
                    nestedcoords = [shape.points]
                    return _PyShpShape(self.shapefile, self.fieldnames, shapeindex, nestedcoords, shapetype, bbox=shape.bbox)
                else:
                    coords = numpy.split(shape.points, shape.parts[1:])
                    return _PyShpShape(self.shapefile, self.fieldnames, shapeindex, coords, "line", bbox=shape.bbox)
            elif "point" in shapetype:
                if "multi" in shapetype:
                    return _PyShpShape(self.shapefile, self.fieldnames, shapeindex, shape.points, "point", bbox=shape.bbox)
                else:
                    # single point: omit bbox so _PyShpShape derives it
                    nestedcoords = [shape.points]
                    return _PyShpShape(self.shapefile, self.fieldnames, shapeindex, nestedcoords, "point")
        else:
            #first set new shapetype to pass on
            shapetype = PYSHPTYPE_AS_TEXT[shape.shapeType].lower()
            if "polygon" in shapetype:
                newshapetype = "polygon"
            if "line" in shapetype:
                newshapetype = "line"
            if "point" in shapetype:
                newshapetype = "point"
            #then serve up points universal for all shapetypes
            if "point" in shapetype:
                nestedcoords = [shape.points]
                return _PyShpShape(self.shapefile, self.fieldnames, shapeindex, nestedcoords, newshapetype)
            elif len(shape.parts) == 1:
                nestedcoords = [shape.points]
                return _PyShpShape(self.shapefile, self.fieldnames, shapeindex, nestedcoords, newshapetype)
            else:
                # multi-part geometry: slice the flat point list at each part
                # offset (parts holds start indices; append the end sentinel)
                nestedcoords = []
                shapeparts = list(shape.parts)
                shapeparts.append(len(shape.points))
                startindex = shapeparts[0]
                for endindex in shapeparts[1:]:
                    eachmulti = shape.points[startindex:endindex]
                    nestedcoords.append(eachmulti)
                    startindex = endindex
                return _PyShpShape(self.shapefile, self.fieldnames, shapeindex, nestedcoords, newshapetype)
class _TkCanvas_Renderer:
def __init__(self):
global tkFont
import tkFont
self.fontnames = dict([("default", "Times"),
("times new roman", "Times"),
("courier", "Courier"),
("helvetica","Helvetica") ])
def NewImage(self):
"""
this must be called before doing any rendering.
Note: this replaces any previous image drawn on so be sure to
retrieve the old image before calling it again to avoid losing work
"""
width = MAPWIDTH
height = MAPHEIGHT
background = MAPBACKGROUND
self.img = None
self.window = tk.Tk()
self.window_frame = tk.Frame(self.window)
self.window_frame.pack()
screenwidth = self.window.winfo_screenwidth()
if MAPWIDTH >= screenwidth:
self.window.wm_state('zoomed')
self.drawer = tk.Canvas(self.window_frame, width=width, height=height, bg="white")
self.drawer.pack()
#place the shadow
if background:
x0,y0,x1,y1 = ( -int(width/50.0), int(width/50.0), width-int(width/50.0), height+int(width/50.0) )
self.drawer.create_rectangle(x0,y0,x1,y1, fill="Gray80", outline="")
#place background
x0,y0,x1,y1 = ( 0, 0, width, height )
self.drawer.create_rectangle(x0,y0,x1,y1, fill=background, outline="")
#make image pannable
def mouseovermap(event):
global mouseovermapvar
self.window.config(cursor="fleur") #draft_large
mouseovermapvar = True
def mouseoutofmap(event):
global mouseovermapvar
self.window.config(cursor="")
mouseovermapvar = False
def activatedrag(event):
global mouseclicked
if mouseovermapvar == True:
mouseclicked = True
def deactivatedrag(event):
global mouseclicked
mouseclicked = False
def mark(event):
self.drawer.scan_mark(event.x, event.y)
def dragto(event):
try:
if mouseclicked == True:
self.drawer.scan_dragto(event.x, event.y, 1)
except:
pass
self.drawer.bind("<Enter>", mouseovermap, "+")
self.drawer.bind("<Leave>", mouseoutofmap, "+")
self.window.bind("<Button-1>", mark, "+")
self.window.bind("<Motion>", dragto, "+")
self.window.bind("<Button-1>", activatedrag, "+")
self.window.bind("<ButtonRelease-1>", deactivatedrag, "+")
def RenderText(self, relx, rely, text, options):
if not options.get("texteffect"):
self._BasicText(relx, rely, text, options)
def RenderRectangle(self, upperleft, bottomright, customoptions):
self.__FixHollowPolyError(customoptions)
leftrelx, uprely = upperleft
leftx,upy = (int(MAPWIDTH*leftrelx), int(MAPHEIGHT*uprely))
rightrelx, downrely = bottomright
rightx,downy = (int(MAPWIDTH*rightrelx), int(MAPHEIGHT*downrely))
rectanglecoords = [leftx,upy, rightx,upy, rightx,downy, leftx,downy, leftx,upy]
self._BasicPolygon(rectanglecoords, customoptions)
def RenderCircle(self, relx, rely, fillsize, customoptions):
customoptions["fillsize"] = fillsize
x = int(MAPWIDTH*relx)
y = int(MAPHEIGHT*rely)
self._BasicCircle((x,y), customoptions)
def RenderLine(self, startpos, stoppos, customoptions):
startrelx, startrely = startpos
startxy = [int(MAPWIDTH*startrelx), int(MAPHEIGHT*startrely)]
stoprelx, stoprely = stoppos
stopxy = [int(MAPWIDTH*stoprelx), int(MAPHEIGHT*stoprely)]
linecoords = startxy
linecoords.extend(stopxy)
self._BasicLine(linecoords, customoptions)
def RenderShape(self, shapeobj, options):
"""
looks at instructions in options to decide which draw method to use
"""
self.__FixHollowPolyError(options)
multishapes = shapeobj.to_tkinter()
symbolizer = options.get("symbolizer")
if shapeobj.type == "polygon":
if symbolizer:
if symbolizer == "circle":
coords = shapeobj.GetAvgCenter()
self._BasicCircle(coords, options)
elif symbolizer == "square":
coords = shapeobj.GetAvgCenter()
self._BasicSquare(coords, options)
elif symbolizer == "pyramid":
coords = shapeobj.GetAvgCenter()
self._Pyramid(coords, options)
else:
for coords in multishapes:
self._BasicPolygon(coords, options)
elif shapeobj.type == "line":
if symbolizer:
if symbolizer == "circle":
coords = shapeobj.GetAvgCenter()
self._BasicCircle(coords, options)
elif symbolizer == "square":
coords = shapeobj.GetAvgCenter()
self._BasicSquare(coords, options)
elif symbolizer == "pyramid":
coords = shapeobj.GetAvgCenter()
self._Pyramid(coords, options)
else:
for coords in multishapes:
self._BasicLine(coords, options)
elif shapeobj.type == "point":
if symbolizer:
if symbolizer == "circle":
for coords in multishapes:
self._BasicCircle(coords, options)
elif symbolizer == "square":
for coords in multishapes:
self._BasicSquare(coords, options)
elif symbolizer == "pyramid":
for coords in multishapes:
self._Pyramid(coords, options)
else:
for coords in multishapes:
self._BasicCircle(coords, options)
    def RunTk(self):
        """
        draw the map's outline frame, then hand control to the Tkinter
        event loop (blocks until the window is closed).
        """
        self.drawer.create_rectangle(0,0,MAPWIDTH,MAPHEIGHT, fill="", outline=Color("black")) #this is the map outline edge
        self.window.mainloop()
#Internal use only
def __FixHollowPolyError(self, options):
if not options.get("fillcolor"):
options["fillcolor"] = ""
if not options.get("outlinecolor"):
options["outlinecolor"] = ""
def _BasicText(self, relx, rely, text, options):
"""
draws basic text, no effects
"""
font = tkFont.Font(family=self.fontnames[options["textfont"]], size=options["textsize"])
fontwidth, fontheight = (font.measure(text), font.metrics("ascent"))
textanchor = options.get("textanchor")
if textanchor:
textanchor = textanchor.lower()
if textanchor == "center":
x = int(MAPWIDTH*relx) - int(fontwidth/2.0)
y = int(MAPHEIGHT*rely) - int(fontheight/2.0)
else:
x = int(MAPWIDTH*relx) - int(fontwidth/2.0)
y = int(MAPHEIGHT*rely) - int(fontheight/2.0)
if "n" in textanchor:
y = int(MAPHEIGHT*rely)
elif "s" in textanchor:
y = int(MAPHEIGHT*rely) - int(fontheight)
if "e" in textanchor:
x = int(MAPWIDTH*relx) - int(fontwidth)
elif "w" in textanchor:
x = int(MAPWIDTH*relx)
if options.get("textboxfillcolor") or options.get("textboxoutlinecolor"):
relfontwidth, relfontheight = (fontwidth/float(MAPWIDTH), fontheight/float(MAPHEIGHT))
relxmid,relymid = (x/float(MAPWIDTH)+relfontwidth/2.0,y/float(MAPHEIGHT)+relfontheight/2.0)
relupperleft = (relxmid-relfontwidth*options["textboxfillsize"]/2.0, relymid-relfontheight*options["textboxfillsize"]/2.0)
relbottomright = (relxmid+relfontwidth*options["textboxfillsize"]/2.0, relymid+relfontheight*options["textboxfillsize"]/2.0)
options["fillcolor"] = options["textboxfillcolor"]
options["outlinecolor"] = options["textboxoutlinecolor"]
options["outlinewidth"] = options["textboxoutlinewidth"]
self.RenderRectangle(relupperleft, relbottomright, options)
self.drawer.create_text((x,y), text=text, font=font, fill=options["textcolor"], anchor="nw")
def _BasicLine(self, coords, options):
"""
draw basic lines with outline, but nothing at start and end
"""
if len(coords) < 4:
return
#first draw outline line
if options["outlinecolor"]:
self.drawer.create_line(*coords, fill=options.get("outlinecolor"), width=int(options.get("fillsize")+(options.get("outlinewidth")*2)))
#then draw fill line which is thinner
self.drawer.create_line(*coords, fill=options.get("fillcolor"), width=int(options.get("fillsize")))
def _BasicPolygon(self, coords, options):
"""
draw polygon with color fill
"""
if len(coords) > 6:
self.drawer.create_polygon(*coords, fill=options["fillcolor"], outline=options["outlinecolor"])
def _BasicCircle(self, coords, options):
"""
draw points with a symbol path representing a circle
"""
size = int(options["fillsize"]/2.0)
x,y = coords
circlecoords = (x-size, y-size, x+size, y+size)
self.drawer.create_oval(circlecoords, fill=options["fillcolor"], outline=options["outlinecolor"])
def _BasicSquare(self, coords, options):
"""
draw points with a symbol path representing a square
"""
#build circle
size = int(options["fillsize"]/2.0)
x,y = coords
squarecoords = [x-size,y-size, x+size,y-size, x+size,y+size, x-size,y+size, x-size,y-size]
#draw
self.drawer.create_polygon(*squarecoords, fill=options["fillcolor"], outline=options["outlinecolor"])
def _Pyramid(self, coords, options):
"""
draw basic lines with outline, but nothing at start and end
"""
size = int(options["fillsize"])
width = int(options["fillwidth"]) #pxls
#calculate three pyramid coords
x,y = coords
leftbase = [x-int(width/2.0), y]
peak = [x, y-size]
rightbase = [x+int(width/2.0), y]
#first draw left line
leftlinecoords = list(leftbase)
leftlinecoords.extend(peak)
self.drawer.create_line(*leftlinecoords, fill=options["outlinecolor"], width=options["outlinewidth"])
#then draw right line
rightlinecoords = list(rightbase)
rightlinecoords.extend(peak)
self.drawer.create_line(*rightlinecoords, fill=options["outlinecolor"], width=options["outlinewidth"])
class _PIL_Renderer:
    """
    this class can be called on to draw each feature with PIL as long as
    it is given instructions via a color/size/options dictionary
    """
    #NEED TO RECEIVE GENERATOR OF TRANSFORMED COORDS FROM MAPCANVAS
    #ALSO NEEDS THE Aggdraw.Draw(img) OBJECT
    def __init__(self):
        # PIL is imported lazily and published as a module-level global so
        # the rest of this class can reference it after construction
        global PIL
        import PIL, PIL.Image, PIL.ImageDraw, PIL.ImageTk, PIL.ImageFont
        # upscaled tracks whether the module-level MAPWIDTH/MAPHEIGHT are
        # currently doubled for supersampled rendering (see NewImage)
        self.upscaled = False
        self.sysfontfolders = dict([("windows","C:/Windows/Fonts/"),
                                    ("darwin", "/Library/Fonts/"),
                                    ("linux", "/usr/share/fonts/truetype/") ])
        self.fontfilenames = dict([("default", "TIMES.TTF"),
                                   ("times new roman","TIMES.TTF"),
                                   ("arial","ARIAL.TTF")])
    def NewImage(self):
        """
        this must be called before doing any rendering.
        Note: this replaces any previous image drawn on so be sure to
        retrieve the old image before calling it again to avoid losing work
        """
        #first mode
        mode = "RGBA"
        #then other specs
        if not self.upscaled:
            # draw on a 2x larger canvas; GetImage/SaveImage shrink it back
            # down with antialiasing (supersampling)
            global MAPWIDTH, MAPHEIGHT
            MAPWIDTH = MAPWIDTH*2
            MAPHEIGHT = MAPHEIGHT*2
            _UpdateMapDims()
            self.upscaled = True
        width = int(MAPWIDTH)
        height = int(MAPHEIGHT)
        background = MAPBACKGROUND
        dimensions = (width, height)
        self.img = PIL.Image.new(mode, dimensions, background)
        self.drawer = PIL.ImageDraw.Draw(self.img)
    def RenderText(self, relx, rely, text, options):
        # textsize is doubled to compensate for the 2x upscaled canvas;
        # a copy is used so the caller's options dict is not mutated here
        options = options.copy()
        options["textsize"] = options["textsize"]*2
        if not options.get("texteffect"):
            self._BasicText(relx, rely, text, options)
    def RenderRectangle(self, upperleft, bottomright, customoptions):
        # convert the two relative corner positions to pixels and draw the
        # rectangle as a closed polygon ring
        leftrelx, uprely = upperleft
        leftx,upy = (int(MAPWIDTH*leftrelx), int(MAPHEIGHT*uprely))
        rightrelx, downrely = bottomright
        rightx,downy = (int(MAPWIDTH*rightrelx), int(MAPHEIGHT*downrely))
        rectanglecoords = [leftx,upy, rightx,upy, rightx,downy, leftx,downy, leftx,upy]
        self._BasicPolygon(rectanglecoords, customoptions)
    def RenderCircle(self, relx, rely, fillsize, customoptions):
        # note: mutates customoptions in place by storing "fillsize"
        customoptions["fillsize"] = fillsize
        x = int(MAPWIDTH*relx)
        y = int(MAPHEIGHT*rely)
        self._BasicCircle((x,y), customoptions)
    def RenderLine(self, startpos, stoppos, customoptions):
        # convert relative start/stop positions to a flat [x1,y1,x2,y2] list
        startrelx, startrely = startpos
        startxy = [int(MAPWIDTH*startrelx), int(MAPHEIGHT*startrely)]
        stoprelx, stoprely = stoppos
        stopxy = [int(MAPWIDTH*stoprelx), int(MAPHEIGHT*stoprely)]
        linecoords = startxy
        linecoords.extend(stopxy)
        self._BasicLine(linecoords, customoptions)
    def RenderShape(self, shapeobj, options):
        """
        looks at instructions in options to decide which draw method to use
        """
        #possibly use an options filterer here to enure all needed options
        #are given, otherwise snap to default
        #............
        multishapes = shapeobj.to_PIL()
        symbolizer = options.get("symbolizer")
        if shapeobj.type == "polygon":
            if symbolizer:
                # a symbolizer collapses the shape to one symbol at its
                # average center; unknown symbolizer names draw nothing
                if symbolizer == "circle":
                    coords = shapeobj.GetAvgCenter()
                    self._BasicCircle(coords, options)
                elif symbolizer == "square":
                    coords = shapeobj.GetAvgCenter()
                    self._BasicSquare(coords, options)
                elif symbolizer == "pyramid":
                    coords = shapeobj.GetAvgCenter()
                    self._Pyramid(coords, options)
            else:
                for coords in multishapes:
                    self._BasicPolygon(coords, options)
        elif shapeobj.type == "line":
            if symbolizer:
                if symbolizer == "circle":
                    coords = shapeobj.GetAvgCenter()
                    self._BasicCircle(coords, options)
                elif symbolizer == "square":
                    coords = shapeobj.GetAvgCenter()
                    self._BasicSquare(coords, options)
                elif symbolizer == "pyramid":
                    coords = shapeobj.GetAvgCenter()
                    self._Pyramid(coords, options)
            else:
                for coords in multishapes:
                    self._BasicLine(coords, options)
        elif shapeobj.type == "point":
            if symbolizer:
                if symbolizer == "circle":
                    for coords in multishapes:
                        self._BasicCircle(coords, options)
                elif symbolizer == "square":
                    for coords in multishapes:
                        self._BasicSquare(coords, options)
                elif symbolizer == "pyramid":
                    for coords in multishapes:
                        self._Pyramid(coords, options)
            else:
                # points default to circle symbols when no symbolizer is set
                for coords in multishapes:
                    self._BasicCircle(coords, options)
    def GetImage(self):
        if self.upscaled:
            # restore the real map dimensions and shrink the supersampled
            # image back to half size with antialiasing
            global MAPWIDTH, MAPHEIGHT
            MAPWIDTH = int(round(MAPWIDTH/2.0))
            MAPHEIGHT = int(round(MAPHEIGHT/2.0))
            _UpdateMapDims()
            width,height = self.img.size
            # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
            # Image.LANCZOS there) -- confirm the targeted Pillow version
            self.img = self.img.resize((int(round(width/2.0)),int(round(height/2.0))), PIL.Image.ANTIALIAS)
            self.upscaled = False
        return PIL.ImageTk.PhotoImage(self.img)
    def SaveImage(self, savepath):
        if self.upscaled:
            # same downscale-and-restore dance as GetImage
            global MAPWIDTH, MAPHEIGHT
            MAPWIDTH = int(round(MAPWIDTH/2.0))
            MAPHEIGHT = int(round(MAPHEIGHT/2.0))
            _UpdateMapDims()
            width,height = self.img.size
            # NOTE(review): Image.ANTIALIAS removed in Pillow 10 -- see GetImage
            self.img = self.img.resize((int(round(width/2.0)),int(round(height/2.0))), PIL.Image.ANTIALIAS)
            self.upscaled = False
        self.img.save(savepath)
    #Internal use only
    def __DoubleSizeOptions(self, options):
        """
        NO LONGER USED BC SIZE VALUES WERE CHANGED TO PERCENTAGES INSTEAD OF ACTUAL PIXELS. ORIGINAL DESCRIPTION: doubles and returns all size related options, since PIL draws on a 2x larger image so that it can be reduced to normal size using Antialias later.
        """
        #remove soon...
        options = options.copy()
        options["fillsize"] = options["fillsize"]*2
        options["outlinewidth"] = options["outlinewidth"]*2
        options["fillwidth"] = options["fillwidth"]*2
        options["fillheight"] = options["fillheight"]*2
        return options
    def _BasicText(self, relx, rely, text, options):
        """
        draws basic text, no effects
        """
        fontlocation = self.sysfontfolders[OSSYSTEM]+self.fontfilenames[options["textfont"]]
        font = PIL.ImageFont.truetype(fontlocation, size=options["textsize"])
        fontwidth, fontheight = self.drawer.textsize(text, font)
        textanchor = options.get("textanchor")
        # NOTE(review): if no "textanchor" option is given, x and y are never
        # assigned and the drawer.text call below raises NameError -- confirm
        # whether callers always supply a textanchor
        if textanchor:
            textanchor = textanchor.lower()
            if textanchor == "center":
                x = int(MAPWIDTH*relx) - int(fontwidth/2.0)
                y = int(MAPHEIGHT*rely) - int(fontheight/2.0)
            else:
                # compass-letter anchors shift from the centered default
                x = int(MAPWIDTH*relx) - int(fontwidth/2.0)
                y = int(MAPHEIGHT*rely) - int(fontheight/2.0)
                if "n" in textanchor:
                    y = int(MAPHEIGHT*rely)
                elif "s" in textanchor:
                    y = int(MAPHEIGHT*rely) - int(fontheight)
                if "e" in textanchor:
                    x = int(MAPWIDTH*relx) - int(fontwidth)
                elif "w" in textanchor:
                    x = int(MAPWIDTH*relx)
        if options.get("textboxfillcolor") or options.get("textboxoutlinecolor"):
            relfontwidth, relfontheight = (fontwidth/float(MAPWIDTH), fontheight/float(MAPHEIGHT))
            relxmid,relymid = (x/float(MAPWIDTH)+relfontwidth/2.0,y/float(MAPHEIGHT)+relfontheight/2.0)
            relupperleft = (relxmid-relfontwidth*options["textboxfillsize"]/2.0, relymid-relfontheight*options["textboxfillsize"]/2.0)
            relbottomright = (relxmid+relfontwidth*options["textboxfillsize"]/2.0, relymid+relfontheight*options["textboxfillsize"]/2.0)
            # reuse the generic fill/outline keys so the textbox can be
            # drawn via RenderRectangle (overwrites the caller's options)
            options["fillcolor"] = options["textboxfillcolor"]
            options["outlinecolor"] = options["textboxoutlinecolor"]
            options["outlinewidth"] = options["textboxoutlinewidth"]
            self.RenderRectangle(relupperleft, relbottomright, options)
        self.drawer.text((x,y), text=text, font=font, fill=options["textcolor"])
    def _BasicLine(self, coords, options):
        """
        draw basic lines with outline, but nothing at start and end
        """
        #first draw outline line
        if options["outlinecolor"]:
            self.drawer.line(coords, fill=options.get("outlinecolor"), width=int(options.get("fillsize")+(options.get("outlinewidth")*2)))
        #then draw fill line which is thinner
        self.drawer.line(coords, fill=options.get("fillcolor"), width=int(options.get("fillsize")))
    def _BasicPolygon(self, coords, options):
        """
        draw polygon with color fill
        """
        # needs more than three xy pairs (more than six flat values)
        if len(coords) > 6:
            self.drawer.polygon(coords, fill=options["fillcolor"], outline=options["outlinecolor"])
    def _BasicCircle(self, coords, options):
        """
        draw points with a symbol path representing a circle
        """
        size = int(options["fillsize"]/2.0)
        x,y = coords
        # bounding box centered on the point
        circlecoords = (x-size, y-size, x+size, y+size)
        self.drawer.ellipse(circlecoords, fill=options["fillcolor"], outline=options["outlinecolor"])
    def _BasicSquare(self, coords, options):
        """
        draw points with a symbol path representing a square
        """
        #build circle
        size = int(options["fillsize"]/2.0)
        x,y = coords
        squarecoords = (x-size, y-size, x+size, y+size)
        #draw
        self.drawer.rectangle(squarecoords, fill=options["fillcolor"], outline=options["outlinecolor"])
    def _Pyramid(self, coords, options):
        """
        draw a pyramid symbol as two slanted lines meeting at a peak
        above the given point
        """
        size = int(options["fillsize"])
        width = int(options["fillwidth"]) #pxls
        #calculate three pyramid coords
        x,y = coords
        leftbase = [x-int(width/2.0), y]
        peak = [x, y-size]
        rightbase = [x+int(width/2.0), y]
        #first draw left line
        leftlinecoords = list(leftbase)
        leftlinecoords.extend(peak)
        self.drawer.line(leftlinecoords, fill=options["outlinecolor"], width=options["outlinewidth"])
        #then draw right line
        rightlinecoords = list(rightbase)
        rightlinecoords.extend(peak)
        self.drawer.line(rightlinecoords, fill=options["outlinecolor"], width=options["outlinewidth"])
class _Pydraw_Renderer:
    """
    this class can be called on to draw each feature with pydraw as long as
    it is given instructions via a color/size/options dictionary
    NOTE: this class is not yet finished, only supports polygons for now...
    """
    #NEED TO RECEIVE GENERATOR OF TRANSFORMED COORDS FROM MAPCANVAS
    #ALSO NEEDS THE Aggdraw.Draw(img) OBJECT
    def __init__(self):
        # pydraw is imported lazily and published as a module-level global
        global pydraw
        import pydraw
        self.sysfontfolders = dict([("windows","C:/Windows/Fonts/"),
                                    ("darwin", "/Library/Fonts/"),
                                    ("linux", "/usr/share/fonts/truetype/") ])
        self.fontfilenames = dict([("default", "TIMES.TTF"),
                                   ("times new roman","TIMES.TTF"),
                                   ("arial","ARIAL.TTF")])
    def NewImage(self):
        """
        this must be called before doing any rendering.
        Note: this replaces any previous image drawn on so be sure to
        retrieve the old image before calling it again to avoid losing work
        """
        #first mode
        mode = "RGBA"
        #then other specs
        # NOTE: mode and dimensions are computed but unused below; pydraw
        # takes width/height/background keywords directly
        width = MAPWIDTH
        height = MAPHEIGHT
        background = MAPBACKGROUND
        dimensions = (width, height)
        self.img = pydraw.Image().new(width=width, height=height, background=background)
        # pydraw draws directly on the image object, so it doubles as drawer
        self.drawer = self.img
    def RenderShape(self, shapeobj, options):
        """
        looks at instructions in options to decide which draw method to use
        """
        multishapes = shapeobj.to_pydraw()
        symbolizer = options.get("symbolizer")
        if shapeobj.type == "polygon":
            if symbolizer:
                # a symbolizer collapses the shape to one symbol at its
                # average center; unknown symbolizer names draw nothing
                if symbolizer == "circle":
                    centercoords = shapeobj.GetAvgCenter()
                    self._BasicCircle(centercoords, options)
                elif symbolizer == "square":
                    centercoords = shapeobj.GetAvgCenter()
                    self._BasicSquare(centercoords, options)
                elif symbolizer == "pyramid":
                    centercoords = shapeobj.GetAvgCenter()
                    self._Pyramid(centercoords, options)
            else:
                for coords in multishapes:
                    self._BasicPolygon(coords, options)
        elif shapeobj.type == "line":
            if symbolizer:
                if symbolizer == "circle":
                    centercoords = shapeobj.GetAvgCenter()
                    self._BasicCircle(centercoords, options)
                elif symbolizer == "square":
                    centercoords = shapeobj.GetAvgCenter()
                    self._BasicSquare(centercoords, options)
                elif symbolizer == "pyramid":
                    centercoords = shapeobj.GetAvgCenter()
                    self._Pyramid(centercoords, options)
            else:
                for coords in multishapes:
                    self._BasicLine(coords, options)
        elif shapeobj.type == "point":
            if symbolizer:
                if symbolizer == "circle":
                    for coords in multishapes:
                        self._BasicCircle(coords, options)
                elif symbolizer == "square":
                    for coords in multishapes:
                        self._BasicSquare(coords, options)
                elif symbolizer == "pyramid":
                    for coords in multishapes:
                        self._Pyramid(coords, options)
            else:
                # points default to circle symbols when no symbolizer is set
                for coords in multishapes:
                    self._BasicCircle(coords, options)
    def RenderText(self, relx, rely, text, options):
        # only plain text is supported; any "texteffect" request is skipped
        if not options.get("texteffect"):
            self._BasicText(relx, rely, text, options)
    def RenderRectangle(self, upperleft, bottomright, customoptions):
        # convert the two relative corner positions to pixels and draw the
        # rectangle as a closed polygon ring
        leftrelx, uprely = upperleft
        leftx,upy = (int(MAPWIDTH*leftrelx), int(MAPHEIGHT*uprely))
        rightrelx, downrely = bottomright
        rightx,downy = (int(MAPWIDTH*rightrelx), int(MAPHEIGHT*downrely))
        rectanglecoords = [leftx,upy, rightx,upy, rightx,downy, leftx,downy, leftx,upy]
        self._BasicPolygon(rectanglecoords, customoptions)
    def RenderCircle(self, relx, rely, fillsize, customoptions):
        # note: mutates customoptions in place by storing "fillsize"
        customoptions["fillsize"] = fillsize
        x = int(MAPWIDTH*relx)
        y = int(MAPHEIGHT*rely)
        self._BasicCircle((x,y), customoptions)
    def RenderLine(self, startpos, stoppos, customoptions):
        # convert relative start/stop positions to a flat [x1,y1,x2,y2] list
        startrelx, startrely = startpos
        startxy = [int(MAPWIDTH*startrelx), int(MAPHEIGHT*startrely)]
        stoprelx, stoprely = stoppos
        stopxy = [int(MAPWIDTH*stoprelx), int(MAPHEIGHT*stoprely)]
        linecoords = startxy
        linecoords.extend(stopxy)
        self._BasicLine(linecoords, customoptions)
    def GetImage(self):
        # NOTE(review): calls a private pydraw helper and does not pass
        # self.img -- presumably returns a Tkinter image of the last drawing;
        # verify against the pydraw version in use
        return pydraw._tkimage()
    def SaveImage(self, savepath):
        self.img.save(savepath)
    #Internal use only
    def _BasicText(self, relx, rely, text, options):
        """
        draws basic text, no effects
        """
        fontlocation = self.sysfontfolders[OSSYSTEM]+self.fontfilenames[options["textfont"]]
        # NOTE(review): this class only imports pydraw, yet the methods below
        # reference aggdraw; this raises NameError unless an
        # _Aggdraw_Renderer was constructed first (which globalizes aggdraw)
        font = aggdraw.Font(color=options["textcolor"], file=fontlocation, size=options["textsize"], opacity=options["textopacity"])
        fontwidth, fontheight = self.drawer.textsize(text, font)
        textanchor = options.get("textanchor")
        # NOTE(review): if no "textanchor" option is given, x and y are never
        # assigned and the drawer.text call below raises NameError
        if textanchor:
            textanchor = textanchor.lower()
            if textanchor == "center":
                x = int(MAPWIDTH*relx) - int(fontwidth/2.0)
                y = int(MAPHEIGHT*rely) - int(fontheight/2.0)
            else:
                # compass-letter anchors shift from the centered default
                x = int(MAPWIDTH*relx) - int(fontwidth/2.0)
                y = int(MAPHEIGHT*rely) - int(fontheight/2.0)
                if "n" in textanchor:
                    y = int(MAPHEIGHT*rely)
                elif "s" in textanchor:
                    y = int(MAPHEIGHT*rely) - int(fontheight)
                if "e" in textanchor:
                    x = int(MAPWIDTH*relx) - int(fontwidth)
                elif "w" in textanchor:
                    x = int(MAPWIDTH*relx)
        if options.get("textboxfillcolor") or options.get("textboxoutlinecolor"):
            relfontwidth, relfontheight = (fontwidth/float(MAPWIDTH), fontheight/float(MAPHEIGHT))
            relxmid,relymid = (x/float(MAPWIDTH)+relfontwidth/2.0,y/float(MAPHEIGHT)+relfontheight/2.0)
            relupperleft = (relxmid-relfontwidth*options["textboxfillsize"]/2.0, relymid-relfontheight*options["textboxfillsize"]/2.0)
            relbottomright = (relxmid+relfontwidth*options["textboxfillsize"]/2.0, relymid+relfontheight*options["textboxfillsize"]/2.0)
            # reuse the generic fill/outline keys so the textbox can be
            # drawn via RenderRectangle (overwrites the caller's options)
            options["fillcolor"] = options["textboxfillcolor"]
            options["outlinecolor"] = options["textboxoutlinecolor"]
            options["outlinewidth"] = options["textboxoutlinewidth"]
            self.RenderRectangle(relupperleft, relbottomright, options)
        self.drawer.text((x,y), text, font)
    def _BasicLine(self, coords, options):
        """
        draw basic lines with outline, but nothing at start and end
        """
        #first draw outline line
        if options["outlinecolor"]:
            outlinepen = aggdraw.Pen(options["outlinecolor"], options["fillsize"]+options["outlinewidth"])
            self.drawer.line(coords, outlinepen)
        #then draw fill line which is thinner
        if options["fillcolor"]:
            fillpen = aggdraw.Pen(options["fillcolor"], options["fillsize"])
            self.drawer.line(coords, fillpen)
    def _BasicPolygon(self, coords, options):
        """
        draw polygon with color fill
        """
        self.drawer.drawpolygon(coords, fillcolor=options["fillcolor"], outlinecolor=options["outlinecolor"], outlinewidth=options["outlinewidth"], outlinejoinstyle=None)
    def _BasicCircle(self, coords, options):
        """
        draw points with a symbol path representing a circle
        """
        #build circle
        size = int(options["fillsize"]/2.0)
        x,y = coords
        circlecoords = (x-size, y-size, x+size, y+size)
        #set symbol options
        args = []
        if options["fillcolor"]:
            fillbrush = aggdraw.Brush(options["fillcolor"])
            args.append(fillbrush)
        if options["outlinecolor"]:
            outlinepen = aggdraw.Pen(options["outlinecolor"], options["outlinewidth"])
            args.append(outlinepen)
        #draw
        self.drawer.ellipse(circlecoords, *args)
    def _BasicSquare(self, coords, options):
        """
        draw points with a symbol path representing a square
        """
        #build circle
        size = int(options["fillsize"]/2.0)
        x,y = coords
        squarecoords = (x-size, y-size, x+size, y+size)
        #set symbol options
        args = []
        if options["fillcolor"]:
            fillbrush = aggdraw.Brush(options["fillcolor"])
            args.append(fillbrush)
        if options["outlinecolor"]:
            outlinepen = aggdraw.Pen(options["outlinecolor"], options["outlinewidth"])
            args.append(outlinepen)
        #draw
        self.drawer.rectangle(squarecoords, *args)
    def _Pyramid(self, coords, options):
        """
        draw a pyramid symbol as two slanted lines meeting at a peak;
        nothing is drawn when no outlinecolor is set.
        """
        if options["outlinecolor"]:
            size = int(options["fillsize"])
            width = int(options["fillwidth"]) #pxls
            #calculate three pyramid coords
            x,y = coords
            leftbase = [x-int(width/2.0), y]
            peak = [x, y-size]
            rightbase = [x+int(width/2.0), y]
            #first draw left line
            leftlinecoords = list(leftbase)
            leftlinecoords.extend(peak)
            outlinepen = aggdraw.Pen(options["outlinecolor"], options["outlinewidth"])
            self.drawer.line(leftlinecoords, outlinepen)
            #then draw right line
            rightlinecoords = list(rightbase)
            rightlinecoords.extend(peak)
            outlinepen = aggdraw.Pen(options["outlinecolor"], options["outlinewidth"])
            self.drawer.line(rightlinecoords, outlinepen)
    def _PyramidScape(self, coords, options):
        """
        similar to pyramid, except pyramids stretch across all of map horizontally at specified y interval, and goes up and down by aggregating all values in each x area of its respective y range.
        """
        # not implemented yet
        pass
class _Aggdraw_Renderer:
    """
    this class can be called on to draw each feature with aggdraw as long as
    it is given instructions via a color/size/options dictionary
    """
    #NEED TO RECEIVE GENERATOR OF TRANSFORMED COORDS FROM MAPCANVAS
    #ALSO NEEDS THE Aggdraw.Draw(img) OBJECT
    def __init__(self):
        # aggdraw and PIL are imported lazily and published as module-level
        # globals so the rest of this class can reference them
        global aggdraw, PIL
        import aggdraw, PIL, PIL.Image, PIL.ImageDraw, PIL.ImageTk
        self.sysfontfolders = dict([("windows","C:/Windows/Fonts/"),
                                    ("darwin", "/Library/Fonts/"),
                                    ("linux", "/usr/share/fonts/truetype/") ])
        self.fontfilenames = dict([("default", "TIMES.TTF"),
                                   ("times new roman","TIMES.TTF"),
                                   ("arial","ARIAL.TTF")])
    def NewImage(self):
        """
        this must be called before doing any rendering.
        Note: this replaces any previous image drawn on so be sure to
        retrieve the old image before calling it again to avoid losing work
        """
        #first mode
        mode = "RGBA"
        #then other specs
        width = int(MAPWIDTH)
        height = int(MAPHEIGHT)
        background = MAPBACKGROUND
        dimensions = (width, height)
        self.img = PIL.Image.new(mode, dimensions, background)
        self.drawer = aggdraw.Draw(self.img)
    def RenderShape(self, shapeobj, options):
        """
        looks at instructions in options to decide which draw method to use
        """
        multishapes = shapeobj.to_aggdraw()
        symbolizer = options.get("symbolizer")
        if shapeobj.type == "polygon":
            if symbolizer:
                # a symbolizer collapses the shape to one symbol at its
                # average center; unknown symbolizer names draw nothing
                if symbolizer == "circle":
                    centercoords = shapeobj.GetAvgCenter()
                    self._BasicCircle(centercoords, options)
                elif symbolizer == "square":
                    centercoords = shapeobj.GetAvgCenter()
                    self._BasicSquare(centercoords, options)
                elif symbolizer == "pyramid":
                    centercoords = shapeobj.GetAvgCenter()
                    self._Pyramid(centercoords, options)
            else:
                for coords in multishapes:
                    self._BasicPolygon(coords, options)
        elif shapeobj.type == "line":
            if symbolizer:
                if symbolizer == "circle":
                    centercoords = shapeobj.GetAvgCenter()
                    self._BasicCircle(centercoords, options)
                elif symbolizer == "square":
                    centercoords = shapeobj.GetAvgCenter()
                    self._BasicSquare(centercoords, options)
                elif symbolizer == "pyramid":
                    centercoords = shapeobj.GetAvgCenter()
                    self._Pyramid(centercoords, options)
            else:
                for coords in multishapes:
                    self._BasicLine(coords, options)
        elif shapeobj.type == "point":
            if symbolizer:
                if symbolizer == "circle":
                    for coords in multishapes:
                        self._BasicCircle(coords, options)
                elif symbolizer == "square":
                    for coords in multishapes:
                        self._BasicSquare(coords, options)
                elif symbolizer == "pyramid":
                    for coords in multishapes:
                        self._Pyramid(coords, options)
            else:
                # points default to circle symbols when no symbolizer is set
                for coords in multishapes:
                    self._BasicCircle(coords, options)
    def RenderText(self, relx, rely, text, options):
        # only plain text is supported; any "texteffect" request is skipped
        if not options.get("texteffect"):
            self._BasicText(relx, rely, text, options)
    def RenderRectangle(self, upperleft, bottomright, customoptions):
        # convert the two relative corner positions to pixels and draw the
        # rectangle as a closed polygon ring
        leftrelx, uprely = upperleft
        leftx,upy = (int(MAPWIDTH*leftrelx), int(MAPHEIGHT*uprely))
        rightrelx, downrely = bottomright
        rightx,downy = (int(MAPWIDTH*rightrelx), int(MAPHEIGHT*downrely))
        rectanglecoords = [leftx,upy, rightx,upy, rightx,downy, leftx,downy, leftx,upy]
        self._BasicPolygon(rectanglecoords, customoptions)
    def RenderCircle(self, relx, rely, fillsize, customoptions):
        # note: mutates customoptions in place by storing "fillsize"
        customoptions["fillsize"] = fillsize
        x = int(MAPWIDTH*relx)
        y = int(MAPHEIGHT*rely)
        self._BasicCircle((x,y), customoptions)
    def RenderLine(self, startpos, stoppos, customoptions):
        # convert relative start/stop positions to a flat [x1,y1,x2,y2] list
        startrelx, startrely = startpos
        startxy = [int(MAPWIDTH*startrelx), int(MAPHEIGHT*startrely)]
        stoprelx, stoprely = stoppos
        stopxy = [int(MAPWIDTH*stoprelx), int(MAPHEIGHT*stoprely)]
        linecoords = startxy
        linecoords.extend(stopxy)
        self._BasicLine(linecoords, customoptions)
    def GetImage(self):
        # flush commits pending aggdraw operations into the PIL image
        self.drawer.flush()
        return PIL.ImageTk.PhotoImage(self.img)
    def SaveImage(self, savepath):
        self.drawer.flush()
        self.img.save(savepath)
    #Internal use only
    def _BasicText(self, relx, rely, text, options):
        """
        draws basic text, no effects
        """
        fontlocation = self.sysfontfolders[OSSYSTEM]+self.fontfilenames[options["textfont"]]
        font = aggdraw.Font(color=options["textcolor"], file=fontlocation, size=options["textsize"], opacity=options["textopacity"])
        fontwidth, fontheight = self.drawer.textsize(text, font)
        textanchor = options.get("textanchor")
        # default to centered placement; previously x/y were left unassigned
        # when no "textanchor" option was given, raising NameError below
        x = int(MAPWIDTH*relx) - int(fontwidth/2.0)
        y = int(MAPHEIGHT*rely) - int(fontheight/2.0)
        if textanchor:
            textanchor = textanchor.lower()
            if textanchor != "center":
                # compass-letter anchors shift from the centered default
                if "n" in textanchor:
                    y = int(MAPHEIGHT*rely)
                elif "s" in textanchor:
                    y = int(MAPHEIGHT*rely) - int(fontheight)
                if "e" in textanchor:
                    x = int(MAPWIDTH*relx) - int(fontwidth)
                elif "w" in textanchor:
                    x = int(MAPWIDTH*relx)
        if options.get("textboxfillcolor") or options.get("textboxoutlinecolor"):
            relfontwidth, relfontheight = (fontwidth/float(MAPWIDTH), fontheight/float(MAPHEIGHT))
            relxmid,relymid = (x/float(MAPWIDTH)+relfontwidth/2.0,y/float(MAPHEIGHT)+relfontheight/2.0)
            relupperleft = (relxmid-relfontwidth*options["textboxfillsize"]/2.0, relymid-relfontheight*options["textboxfillsize"]/2.0)
            relbottomright = (relxmid+relfontwidth*options["textboxfillsize"]/2.0, relymid+relfontheight*options["textboxfillsize"]/2.0)
            # reuse the generic fill/outline keys so the textbox can be
            # drawn via RenderRectangle (overwrites the caller's options)
            options["fillcolor"] = options["textboxfillcolor"]
            options["outlinecolor"] = options["textboxoutlinecolor"]
            options["outlinewidth"] = options["textboxoutlinewidth"]
            self.RenderRectangle(relupperleft, relbottomright, options)
        self.drawer.text((x,y), text, font)
    def _BasicLine(self, coords, options):
        """
        draw basic lines with outline, but nothing at start and end
        """
        #first draw outline line
        if options["outlinecolor"]:
            outlinepen = aggdraw.Pen(options["outlinecolor"], options["fillsize"]+options["outlinewidth"])
            self.drawer.line(coords, outlinepen)
        #then draw fill line which is thinner
        if options["fillcolor"]:
            fillpen = aggdraw.Pen(options["fillcolor"], options["fillsize"])
            self.drawer.line(coords, fillpen)
    def _BasicPolygon(self, coords, options):
        """
        draw polygon with color fill
        """
        args = []
        if options["fillcolor"]:
            fillbrush = aggdraw.Brush(options["fillcolor"])
            args.append(fillbrush)
        if options["outlinecolor"]:
            outlinepen = aggdraw.Pen(options["outlinecolor"], options["outlinewidth"])
            args.append(outlinepen)
        self.drawer.polygon(coords, *args)
    def _BasicCircle(self, coords, options):
        """
        draw points with a symbol path representing a circle
        """
        #build circle
        size = int(options["fillsize"]/2.0)
        x,y = coords
        circlecoords = (x-size, y-size, x+size, y+size)
        #set symbol options
        args = []
        if options["fillcolor"]:
            fillbrush = aggdraw.Brush(options["fillcolor"])
            args.append(fillbrush)
        if options["outlinecolor"]:
            outlinepen = aggdraw.Pen(options["outlinecolor"], options["outlinewidth"])
            args.append(outlinepen)
        #draw
        self.drawer.ellipse(circlecoords, *args)
    def _BasicSquare(self, coords, options):
        """
        draw points with a symbol path representing a square
        """
        #build square bounding box
        size = int(options["fillsize"]/2.0)
        x,y = coords
        squarecoords = (x-size, y-size, x+size, y+size)
        #set symbol options
        args = []
        if options["fillcolor"]:
            fillbrush = aggdraw.Brush(options["fillcolor"])
            args.append(fillbrush)
        if options["outlinecolor"]:
            outlinepen = aggdraw.Pen(options["outlinecolor"], options["outlinewidth"])
            args.append(outlinepen)
        #draw
        self.drawer.rectangle(squarecoords, *args)
    def _Pyramid(self, coords, options):
        """
        draw a pyramid symbol as two slanted lines meeting at a peak;
        nothing is drawn when no outlinecolor is set.
        """
        if options["outlinecolor"]:
            size = int(options["fillsize"])
            width = int(options["fillwidth"]) #pxls
            #calculate three pyramid coords
            x,y = coords
            leftbase = [x-int(width/2.0), y]
            peak = [x, y-size]
            rightbase = [x+int(width/2.0), y]
            #first draw left line
            leftlinecoords = list(leftbase)
            leftlinecoords.extend(peak)
            outlinepen = aggdraw.Pen(options["outlinecolor"], options["outlinewidth"])
            self.drawer.line(leftlinecoords, outlinepen)
            #then draw right line
            rightlinecoords = list(rightbase)
            rightlinecoords.extend(peak)
            outlinepen = aggdraw.Pen(options["outlinecolor"], options["outlinewidth"])
            self.drawer.line(rightlinecoords, outlinepen)
    def _PyramidScape(self, coords, options):
        """
        similar to pyramid, except pyramids stretch across all of map horizontally at specified y interval, and goes up and down by aggregating all values in each x area of its respective y range.
        """
        # not implemented yet
        pass
class _PyCairo_Renderer:
"""
This class can be called on to draw each feature with PyCairo as long as
it is given instructions via a color/size/options dictionary
"""
#NEED TO RECEIVE GENERATOR OF TRANSFORMED COORDS FROM MAPCANVAS
#ALSO NEEDS THE Aggdraw.Draw(img) OBJECT
def __init__(self):
global cairo
import cairo
self.fontnames = dict([("default", "cursive"),
("serif", "serif"),
("sans-serif", "sans-serif"),
("cursive","cursive"),
("fantasy", "fantasy"),
("monospace","monospace") ])
def NewImage(self):
"""
This must be called before doing any rendering.
Note: this replaces any previous image drawn on so be sure to
retrieve the old image before calling it again to avoid losing work
"""
#first mode
mode = cairo.FORMAT_ARGB32
#then other specs
width = MAPWIDTH
height = MAPHEIGHT
background = MAPBACKGROUND
self.img = cairo.ImageSurface(mode, int(MAPWIDTH), int(MAPHEIGHT))
self.drawer = cairo.Context(self.img)
if background:
backgroundcolor = self.__hex_to_rgb(background)
self.drawer.set_source_rgb(*backgroundcolor)
self.drawer.rectangle(0,0,MAPWIDTH,MAPHEIGHT)
self.drawer.fill()
def RenderText(self, relx, rely, text, options):
if not options.get("texteffect"):
self._BasicText(relx, rely, text, options)
def RenderRectangle(self, upperleft, bottomright, customoptions):
leftrelx, uprely = upperleft
leftx,upy = (int(MAPWIDTH*leftrelx), int(MAPHEIGHT*uprely))
rightrelx, downrely = bottomright
rightx,downy = (int(MAPWIDTH*rightrelx), int(MAPHEIGHT*downrely))
rectanglecoords = [(leftx,upy), (rightx,upy), (rightx,downy), (leftx,downy), (leftx,upy)]
self._BasicPolygon(rectanglecoords, customoptions)
def RenderCircle(self, relx, rely, fillsize, customoptions):
customoptions["fillsize"] = fillsize
x = int(MAPWIDTH*relx)
y = int(MAPHEIGHT*rely)
self._BasicCircle([(x,y)], customoptions)
def RenderLine(self, startpos, stoppos, customoptions):
startrelx, startrely = startpos
startxy = (int(MAPWIDTH*startrelx), int(MAPHEIGHT*startrely))
stoprelx, stoprely = stoppos
stopxy = (int(MAPWIDTH*stoprelx), int(MAPHEIGHT*stoprely))
linecoords = [startxy, stopxy]
self._BasicLine(linecoords, customoptions)
def RenderShape(self, shapeobj, options):
"""
looks at instructions in options to decide which draw method to use
"""
#possibly use an options filterer here to enure all needed options
#are given, otherwise snap to default
#............
multishapes = shapeobj.to_pycairo()
for coords in multishapes:
if shapeobj.type == "polygon":
self._BasicPolygon(coords, options)
elif shapeobj.type == "line":
self._BasicLine(coords, options)
elif shapeobj.type == "point":
self._BasicCircle(coords, options)
    def GetImage(self):
        """
        return the rendered surface as a Tkinter PhotoImage, going through
        a temporary file on disk which is removed afterwards.
        """
        # NOTE(review): the temp file is written by write_to_png but named
        # .gif; tk.PhotoImage accepts PNG data only on Tk 8.6+ -- confirm
        # against the supported Tk versions
        self.img.write_to_png("tempgif.gif")
        gifimg = tk.PhotoImage(file="tempgif.gif")
        os.remove("tempgif.gif")
        return gifimg
    def SaveImage(self, savepath):
        """
        save the rendered surface to savepath.
        Only .png output is supported; any other file extension is
        silently ignored (nothing is written).
        """
        if savepath.endswith(".png"):
            self.img.write_to_png(savepath)
#Internal use only
    def __hex_to_rgb(self, hexcolor):
        # convert a hex color string to an (r, g, b) tuple of floats using
        # the colour package's Color class
        return colour.Color(hexcolor).rgb
    def _BasicText(self, relx, rely, text, options):
        """
        Draws basic text, no effects.
        relx/rely are relative (0-1) map positions; the text is centered
        on that point using cairo's measured text extents.
        """
        self.drawer.select_font_face(self.fontnames[options["textfont"]])
        self.drawer.set_font_size(options["textsize"]) # em-square height is 90 pixels
        _, _, fontwidth, fontheight, _, _ = self.drawer.text_extents(text)
        x = int(MAPWIDTH*relx) - int(fontwidth/2.0)
        y = int(MAPHEIGHT*rely) + int(fontheight/2.0) #NOTICE: for some odd reason height has to be plussed, not minused
        self.drawer.move_to(x, y) # move to point (x, y) = (10, 90)
        textcolor = self.__hex_to_rgb(options["textcolor"])
        self.drawer.set_source_rgb(*textcolor) # yellow
        self.drawer.show_text(text)
        self.drawer.stroke()
def _BasicLine(self, coords, options):
"""
Draw basic lines with outline, but nothing at start and end
"""
if len(coords) >= 2:
#outline symbolics
outlinecolor = self.__hex_to_rgb(options["outlinecolor"])
self.drawer.set_source_rgb(*outlinecolor) # Solid color
self.drawer.set_line_width(options.get("fillsize")+(options.get("outlinewidth")*2))
#draw outline
xy = coords[0]
self.drawer.move_to(*xy)
for xy in coords[1:]:
self.drawer.line_to(*xy)
self.drawer.stroke_preserve()
#fill symbolics
fillcolor = self.__hex_to_rgb(options["fillcolor"])
self.drawer.set_source_rgb(*fillcolor) # Solid color
self.drawer.set_line_width(options.get("fillsize"))
#then draw fill line which is thinner
xy = coords[0]
self.drawer.move_to(*xy)
for xy in coords[1:]:
self.drawer.line_to(*xy)
self.drawer.stroke_preserve()
def _BasicPolygon(self, coords, options):
"""
Draw polygon with color fill
"""
if len(coords) >= 3:
#define outline symbolics
outlinecolor = self.__hex_to_rgb(options["outlinecolor"])
self.drawer.set_source_rgb(*outlinecolor) # Solid color
self.drawer.set_line_width(options["outlinewidth"])
#...self.drawer.set_line_join(cairo.LINE_JOIN_ROUND)
#first starting point
xy = coords[0]
self.drawer.move_to(*xy)
#then add path for each new vertex
for xy in coords[1:]:
self.drawer.line_to(*xy)
self.drawer.close_path()
self.drawer.stroke_preserve()
#then fill insides
fillcolor = self.__hex_to_rgb(options["fillcolor"])
self.drawer.set_source_rgb(*fillcolor)
self.drawer.fill()
def _BasicCircle(self, coords, options):
"draw points with a symbol path representing a circle"
#define outline symbolics
outlinecolor = self.__hex_to_rgb(options["outlinecolor"])
self.drawer.set_source_rgb(*outlinecolor) # Solid color
self.drawer.set_line_width(options["outlinewidth"])
#draw circle
size = int(options["fillsize"]/2.0)
x,y = coords[0] #0 necessary bc pycairo receives a list of coordinate pairs, and with points there is only one pair
self.drawer.arc(x, y, size, 0, 2*math.pi)
self.drawer.stroke_preserve()
#fill circle
fillcolor = self.__hex_to_rgb(options["fillcolor"])
self.drawer.set_source_rgb(*fillcolor)
self.drawer.fill()
class _Renderer:
#builtins
def __init__(self):
if RENDERER == "tkinter":
self.renderer = _TkCanvas_Renderer()
elif RENDERER == "PIL":
self.renderer = _PIL_Renderer()
elif RENDERER == "pydraw":
self.renderer = _Pydraw_Renderer()
elif RENDERER == "aggdraw":
self.renderer = _Aggdraw_Renderer()
elif RENDERER == "pycairo":
self.renderer = _PyCairo_Renderer()
#automatically create blank image
self.NewImage()
self.layers = dict()
#custom methods
    def NewImage(self):
        # Delegate to the active backend to start a fresh blank map image.
        self.renderer.NewImage()
    def ViewShapefile(self, shapefilepath, customoptions):
        # Render the shapefile and its map title, then open the result
        # in an interactive viewer window.
        self._RenderShapefile(shapefilepath, customoptions)
        self._RenderMapTitle(shapefilepath, customoptions)
        self._ViewRenderedShapefile()
    def SaveShapefileImage(self, shapefilepath, savepath, customoptions):
        # Render the shapefile and its map title, then write the result
        # to an image file at savepath.
        self._RenderShapefile(shapefilepath, customoptions)
        self._RenderMapTitle(shapefilepath, customoptions)
        self._SaveRenderedShapefile(savepath)
#internal use only
def _RelSizesToPixels(self, customoptions):
customoptions = customoptions.copy()
customoptions["fillsize"] = MAPWIDTH*customoptions["fillsize"]/100.0
customoptions["fillwidth"] = MAPWIDTH*customoptions["fillwidth"]/100.0
customoptions["fillheight"] = MAPHEIGHT*customoptions["fillheight"]/100.0
customoptions["outlinewidth"] = MAPWIDTH*customoptions["outlinewidth"]/100.0
return customoptions
def _RenderMapTitle(self, shapefilepath, customoptions):
#unless not specified, default maptitle is set to name of shapefile
if customoptions.get("maptitle", "not set") == "not set":
shapefilename = shapefilepath.split("\\")[-1]
shapefilename = ".".join(shapefilename.split(".")[:-1])
customoptions["maptitle"] = shapefilename
#unless asked not to show maptitle, generate default textoptions except large text size
if customoptions.get("maptitle"):
textoptions = _CheckTextOptions(dict([("textsize",0.0452)]))
self._RenderText(0.5, 0.05, customoptions["maptitle"], textoptions)
    def _RenderText(self, relx, rely, text, textoptions):
        # Delegate text drawing to the active backend; relx/rely are 0-1
        # fractions of the map width/height.
        self.renderer.RenderText(relx, rely, text, textoptions)
    def _RenderRectangle(self, upperleft, bottomright, customoptions):
        # Convert percentage-based symbol sizes to pixels, then delegate.
        customoptions = self._RelSizesToPixels(customoptions)
        self.renderer.RenderRectangle(upperleft, bottomright, customoptions)
def _RenderCircle(self, relx, rely, fillsize, customoptions):
customoptions["fillsize"] = fillsize
customoptions = self._RelSizesToPixels(customoptions)
fillsize = customoptions["fillsize"]
self.renderer.RenderCircle(relx, rely, fillsize, customoptions)
    def _RenderLine(self, startpos, stoppos, customoptions):
        #NOTE: TO ADD BETTER LINES DRAWING, DRAW AS POLYGON.
        #CALCULATE POLYCOORDS OF LINEWIDTH BASED ON:
        #http://mathforum.org/library/drmath/view/68285.html
        #hmm...
        # Convert percentage-based symbol sizes to pixels, then delegate.
        customoptions = self._RelSizesToPixels(customoptions)
        self.renderer.RenderLine(startpos, stoppos, customoptions)
    def _RenderShape(self, shape, customoptions):
        # Convert percentage-based symbol sizes to pixels, then let the
        # backend dispatch on the shape's geometry type.
        customoptions = self._RelSizesToPixels(customoptions)
        self.renderer.RenderShape(shape, customoptions)
def _RenderShapefile(self, shapefilepath, customoptions):
"this one loads a filepath from scratch, does not take preloaded layers"
#create shapefile generator
shapefile = Shapefile(shapefilepath)
#exclude values if specified
excludequery = customoptions.get("excludequery")
if excludequery:
shapefile.SelectByQuery(excludequery, inverted=True)
#then iterate through shapes and render each
shapefile.progresstext = "rendering"
for eachshape in shapefile:
#then send to be rendered
self._RenderShape(eachshape, customoptions)
def _RenderLayer(self, layer):
"renders a preloaded layer"
#create shapefile generator
shapefile = layer.fileobj
customoptions = layer.customoptions
#exclude values if specified
excludequery = customoptions.get("excludequery")
if excludequery:
shapefile.SelectByQuery(excludequery, inverted=True)
#then iterate through shapes and render each
shapefile.progresstext = "rendering"
for eachshape in shapefile:
#then send to be rendered
self._RenderShape(eachshape, customoptions)
    def _AddLayerInfo(self, layername, allclassifications):
        # Record a layer's classification info for later lookup (e.g. legend building).
        self.layers[layername] = allclassifications
def _ViewRenderedShapefile(self):
#finally open image in tkinter
if RENDERER == "tkinter":
#if tkinter is the renderer then all that is needed is to run the mainloop
self.renderer.RunTk()
else:
def ViewInTkinter():
#setup GUI
window = tk.Tk()
window.wm_title("Static MapCanvas Viewer")
window_frame = tk.Frame(window)
window_frame.pack()
#embed image in a canvas
tkimg = self.renderer.GetImage()
screenwidth = window.winfo_screenwidth()
viewimgwidth,viewimgheight = (MAPWIDTH,MAPHEIGHT)
if MAPWIDTH >= screenwidth:
viewimgwidth,viewimgheight = (screenwidth,int(screenwidth/2.0))
resizedimg = self.renderer.img.resize((viewimgwidth,viewimgheight), PIL.Image.ANTIALIAS)
tkimg = PIL.ImageTk.PhotoImage(image=resizedimg)
window.wm_state('zoomed')
canvas = tk.Canvas(window_frame, width=viewimgwidth, height=viewimgheight, bg="white")
canvas.pack()
x0,y0,x1,y1 = ( -int(viewimgwidth/50.0), int(viewimgwidth/50.0), viewimgwidth-int(viewimgwidth/50.0), viewimgheight+int(viewimgwidth/50.0) )
if MAPBACKGROUND:
canvas.create_rectangle(x0,y0,x1,y1, fill="Gray80", outline="") #this is the shadow
canvas.create_image(0,0, anchor="nw", image=tkimg)
canvas.create_rectangle(0,0,viewimgwidth,viewimgheight, fill="", outline=Color("black")) #this is the map outline edge
#make image pannable
def mouseovermap(event):
global mouseovermapvar
window.config(cursor="fleur") #draft_large
mouseovermapvar = True
def mouseoutofmap(event):
global mouseovermapvar
window.config(cursor="")
mouseovermapvar = False
def activatedrag(event):
global mouseclicked
if mouseovermapvar == True:
mouseclicked = True
def deactivatedrag(event):
global mouseclicked
mouseclicked = False
def mark(event):
canvas.scan_mark(event.x, event.y)
def dragto(event):
try:
if mouseclicked == True:
canvas.scan_dragto(event.x, event.y, 1)
except:
pass
canvas.bind("<Enter>", mouseovermap, "+")
canvas.bind("<Leave>", mouseoutofmap, "+")
window.bind("<Button-1>", mark, "+")
window.bind("<Motion>", dragto, "+")
window.bind("<Button-1>", activatedrag, "+")
window.bind("<ButtonRelease-1>", deactivatedrag, "+")
#place save button to enable saving map image
def imagesavefiledialog():
savepath = tkFileDialog.asksaveasfilename(defaultextension=".png", initialfile="unnamed_map")
self._SaveRenderedShapefile(savepath)
saveimg = "R0lGODlhAAEAAfcAABEAEUNDQ0tLSx9Cnx5EohxIpxtKqxhPshdSthhQtBVVuxRYvhNbwz5uwj5xxTVzzDx1yz15zjN30jR61Tt91DJ/3EB3y0F6zkp9zEB/00h/0D6A1jWA3D2D2z+I3jOF4jmH4DSJ5jyL5DOO7TqO6TaQ7TyT7U+AzFOCzFqGzF2IzEaB00yD0kCF2kiH2UWJ3EuL21OG0VSJ1FuM0lON2V+Q1VOS3VuU3GOLzWmOzm6RznSUz2OO0WSS1WuT1G+Y1mKV2mSa3mya23KW03iW0HWa1Xyb03Ke23yf2Heg3H2i20CM5EmP4UKR50yT5EGV7UqW6UaY7kub7lOV4liX4VWZ5lyb5FGf7lme6WKe5Gye4FSh71qj716m8GWi52ui5GKl7Gun6mSo7muq7XOk4num4Xao5H2r5XSs63qv6nex73yy7WSp8Gqt8XCv8XOx8nq18paJlpmNmZyTnKGWoaOco6meqaaipqqlqqurq7GmsbOss7mtubOzs7uzu7y7vIKd0oah1YOk2oim2Yap3oqr3oOs44qv44ew5oyy5YS17Iq26oy67ZG255O87YK38IO684u+9JK+8MK2wsS8xMq+yo7B9JXD9ZnG9sXExcvEy8zMzNHF0dTL1NjN2NTU1NzT3Nzc3ODX4OPc4+Tj5Ovm6+zs7PDt8PT09Pj2+P///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAKsALAAAAAAAAQABAAj+AFcJHEiwoMGDCBMqXMiwocOHECNKnEixosWLGDNq3Mixo8ePIEOKHEmypMmTKFOqXMmypcuXMGPKnEmzps2bOHPq3Mmzp8+fQIMKHUq0qNGjSJMqXcq0qdOnUKNKnUq1qtWrWLNqFcmgq9evYMOKHUu2rNmzaNOqXcu2LdoFcBUgQBDUrd27ePPq3cs3LNwFCuT+7Eu4sOHDiMv+BTy3Z+LHkCNLNruAwWLAgnWGxcS5s+fOqkKLHk26tOnTqFOrXs26tevXsE1/no3pUly5CXJupj07tu/fwIMLH86a92fbcREouLnbuGfi0KNLn07cuWfbDAIryE2zuXXO1MP+ix9P/vulzskRcI/p/Tum0Z/iy59Pv779+/jz69/Pv7////+N5l5nlyiAWQIHsAfWgM+JBuCDEEYo4YQU7icgg5cgtx2CL7U3IHwVhijiiCRKeCGDjmBy4AEJsrSbJAyC52CJNNZoY4knDniJI44EhsABBhiwkoe8wfgZeUgmqWRsMXbG41wIBpkSkQwuaeWVWIbWZIqY8LgdkAYUcBKVVWZp5pnUNdkljzyyGGQBYpJEZplo1mnnb1tyxmMjjYAJJwEjhXXeoLWdx5klBNamKKF3NupocRlyZuikhDq5ZyOFvElAAQOE5F2lx4Fa6aOkljoapYVigqiklq6JaSH+g7w5AAGdfgRWhrjmGimBuuZqySWmBvsoroXmWmxtMEoiySV8JgJrIHBySgAAtnrVa7G78hqpse8J622d2KZ63q/jYqKsJI7wecizgNA6K7UefdXrJeT+Wlu92OKKKIg39uvvvxaKpmu9+qpK77k8JrLuIIC0SwCtAMDLkbz5Drwtvb4aagm/AHfssb+nzssttugy0sghhAwSiMMQS7yRvJFkSDCxv1pis6r11txtaB/37DOOA
tuM8dAy22yzJJFIwggjChMiiCAOvxtxR/JeEgnBNReMqNBDG83xz2CHHaBoM9ess9EHo7tI01AbsenDEbuc0VdGo2301XXnrff+xt/2neXegFsiieBLL4LIIU8b4fbDA8RNtVeAdx243n5XbqXe9AKetCWFH+50wwW87fjEkONteiSo4z153Za3juTqeaO+NCOGG5L44nBPTXpXqFtydeqmw24zJHy7bvx0wt8tO+2I2N7w4gM0rvvLXgFvferD+4438ZbAwfrx4EOXvffEl8898ZFAwoj6ihhiiBJKKB56y3Jj9NX113dvM+reW1J+9uELoHAgwb3s1Y2A+1sf7dpnu/gRYX7Sq99FvgIJ4BEwfagzHwJtBof+eQ8OkBCgCH1zPqORj4DnQyEjFIGIM7zPgaGTmgQtcr8KohCDKOwf8Tp4QP/ZbIT+QHQNCrunwxSqEBKPUET7zgA/IzyQcaOjXlcI+MHuofCKWIREFa/4NbF58Yv1GU0Wx4jCR5jxEWtoXhmSgIQi7OBhuZthRSg4RBCSMYsf5OKMwMjHPorxjmM84xrWcIYyKKEIRCAC4yL4uCmC8JGPzCEVtahFEBJxh13soyZ/JsZI2tGTOUTjINNwBjIkoQhD2AEEo6iRrzzSf5OsJCXtOEkedjCEe9ykLn3WyVh+UpJwMOMaFEHKMpShCEXQwSqn10qvdPCZ0IymNJ9ZyQ4G8ZqqoeY0pbkGOKwhDWkwAxmO8IMfKBOOrJybM6u5TUi205rYjGdp3rlNNawBDeH+TMIRUHnOODaSAR18QzsFSs9nyvOgoikoNNXAUDWgQZxfIOcPcgDHaTFTnV2BA0GfuVFpdlSaCEXoR6HZUYE2FA0P1ecPdkBRd6XTfl55g0xnStOaClSmGrXpG0J6UJrmVKczdYMbUApRIfzABy2l3z+BytSa5jSnPJUnU596UzUIFaVhIIMWjIpUODJydwxoqk7bIFOy0pSsUY1nWcdaVrIKdahoyOpWgeADHDwsdC+dYEzX2lSzNjWt2BQrTd/qhjGEIQxf2CpSc7BMOVLkK22IrGTfMNnIUlayZMWsGySbyV16tl+jqSxmMyvZzbZhDIZFbBCE0AMe4KCx/yT+7WhnS9vSbrazn80t0EJT29qiNrVf+EIQgNBau0LxojDtimTZwIbRNte5kW3uc6OLW91at0KjmW5vMcsGMYTBC+AdbmtVEEOLOnYikKWtdtuw3tpW97rwhVBooUvf047Bu+ElrmvnZ97YMve/zGWvdAFMYAFLNxWADSKB/ytgA/9XDGIAQxayMNwaqIC8McwrDb3ShQYveMDR/bCAE6xgEA84wCeGMHitEIQbWJi8xz2vRL7C3C78tws4JrCNa7zgHZMYiDy+8Y7ZYOMd45gLErbCDVw8gxTwV8Nz9AqR2zBkHX+4ykTuwo9HqGMs37jGYsBCFljsYhWkwKtQfiz+h3HM5jYXOcdujrOWtyxANmc5zncGAxi4wAUxU0HJTY4B4/oLVjkb2s13fjOd63zoOO9ZDF4AAxaUfIMmOxmdyNVrV+LMhUazudNxXnQAPX3kI/MZyX9ecpNRANtCk3oLhoZ1m0Udvi7ImtNuPjUWdk2FG9hABim4tFJdvYVOw1rWfEb2qUEta1m/N77Q7s9olH1qHDf7yFvYNa9t8OsUsBrTMo7IV7YAay4029xdMHayjU3uNpP72dGON35GY+t127rdbCY3kvvM60rLQAbfjl6a0cvhaq/71Me2drptfe92w1veEJ/PtBnOcGM3/Njm3vUUrMDtGaAABWjO9Ib+u7Lskt+b4ss+ebMfHvGI05va7KZ4sfmNBSpQgdv//rY/wWpucxf75/VuN77JTfSE0xp8Qm920RNO7itg4QpTmMKvZRCDE8Twq1JkwM8Pru+lL53hRT/68bxOdLAXXdtYiDrOq/5kkUe5K2TvetLjHnaxu+7YdF/6Fbbg9CvgnAVVD3m4ITJun+e96Bdfut3v/vXDa1zt/
2Z7jP95+MpvQQpEX3zrms55omO+6FcI/RWqUIUpwMAGLDjBCSo68Bl7xfNL/zy5MS/7y3td85bje+drL3vRkz7qU6AB4FfvUrerGe6fT/7sba985Xse95Vr+t7j/nnRh94JpoeBDFL+X97Wi/v1to993mUve+j7be97b37srV96qQv/BCto++Af8hX1S4H2zF8+/mvP8pbLe9r3N337R3uYJ3pQcIDZN3ysZ3wEh3z7l3+e94DJh3n953/RNhq0l34Q2HtQMHrtBwMwwALcB27/dH/5Z4LM94Czd3+fV4EWGF8AGHqXh38zOHsGeIBMYAMhmHqCR3koaIJACIQpSIAz6IIveF0YuIIzSIQmeAUd6IRT4AQw4AIssALEt3NZx4JaOIE/CIRXEIRSYH59s4RLCIb3BwVScIBQ4ARSGIIngAGD5n2E93r2N4BE+IXpR3ti+C0DOIQpGHoI2IYieIWEloVbWIP+W5iIiriH3qKFNYiILIiHTsiGU5h6GHB1ckh/XqGIiViGh6iFjCgsZviJ9xcFUGCKayiFVbgCl0iCYMWJsCgFURAFnnh/oRgsnBgFsZiKTPACL5B68eeKhhiLxLiFt2gqxbiFutiBbOiLwIiJDOh6XSGLsqiL1piLsXiMpaKFs0iN3FiNp3iATsAEU7gCVriA8+cQ9ReLuuiNpeiO7aiNpPKO3+iOLGiKTrCGL+ACVmh1m1IAmaiOm3iN3niN1tiOBymLpxiGuXSER4iBBfmOBgmOUNAE47iP5ngBcRiN39cVCPmR1EiQ3diO3GiEDplbo/EEJFmN8FiK4ViRvcj+jxjQiljYTB7ZjSyJkDlZjQk5i1FgkifpWRCZhk9AjQuJkz4pjk3gi/x4AhawSAHZEPXXkyO5k0VZigeJhkAZlLqEgU9wlUV5kAl5iqZokU7wAuaoATSJdTbJAD75ljz5lrqoknL5lvL4KHQpl3kpl6fYBGvYi+a4AhbQallXl4bpk2EZl3Z5l42imDlZlyZQlk3Qiy2QkT0IVrO4l4c5l1KgmT7JmI2ymZ4ZBU0gmR6Alqx4Ad3HkXN4k3qZmUn5lVEQloYJmncym4i5mT5pAhUJBadZmRegmsLYlm+5l3n5lcg5myp5nD9pm3VCl2FJm7IJm1DAm02wlC+wASv+EJyE2ZbLCZuIOZ2d2ZnGiZvOWSfkqZzJmZtRYAKReZ2T+QLAqZGTB1bImZwmcJ/6SZ73qZzNeZ5m0p/6KaDtCZ9L6QHayZ3DiVEMAJ3+qZ7LOaARipwAeibJeaESypvu2QQi8JvbOZjFl45S6RX3mZ8SeqISupVcqUkpeaImWqLVeZ0eMKPaGQH0OWxZh6I6iqIquqJ81KI7ipzuaQIkIAJN4AEd0AIUcAEgiqPeGZlD+gQv+qJfmZ9TKqVf2aM++kWhgWBSSqUlOqUbaqQvgKAUEAFPWZ9ZB6WR2Z46CqZUqqVbGjad0KWpcKUDaqVPUJ0VaQJHiqArYKOrKaL+DCEBXlECQ2qlepqo7omliuqoqYBgFXolWFqpJnqpVdoET0ACJLAESwACLZABEQABl7mmUFqljtqoVRqlj5qfqiCpk7okrTqrX5qfJNAESyACLdACG3CmpLqgycUAjDqsxFqs7hmrWEKk7kkCxrqsX8mpIgACHbABETCq0EioC/EVxtoEzVqriYqsV9KsxMqpRRqt0+qr8vdPw8qsQzoCiVqr7IqoJoCo4GolxFoCIiCviUquRRoC0kqto1qqbTmk+qqv82qwnLqsjFqvsiquyqqwHGquGzAB1qqmbYmoJZCxB0sC7koC8qqxzPqxCUsCDKskB0ukzMqpGJuyJeD+sZwaAtHKARRAAQ8AAekKVvOasxnLrgTrsTo7ryGrsSWbJC17sBqLsUaLsuRqrhNAszYbouqasR9bAu5qAiPgsSUgpVJ7tEcrp3MKNqNhtBirsUbrsi1LAiHgrzJLsQ8gsAzKtWQ7Au4qtSMgte6JtC1bAl77tbwkG
nAbtFubty8rAmpLAWzbnW+7tQfrrnUbuFi7s42bsXvLtx8zGlursoGbuS/rrxXQtDU7qP+UuRort0V7uTsrtVg7uZTbMZYLuHa7s2J7tYQrrYYbAQ+AuMGasaTbuJErt7xbtxy7u5I7tOQBvHVLulSbuSMQAhnrrx/QuTR7uxabuMlLtcj+m7e+67HHq73WS7zkYb2+27K/y7slkLZpCwKdOwE167a5C77Xe7zG675Uq70j4L3jobtUa7XHC77MSrpp+wHo27QQIL0yFLrCa710W73AG7wITLL2Gx7ji8D7m73L668BrL7SW5MM6rsj8AEc/MEgHMIiPMIkDMLui7wH3Lu/i792y7iKy7hWa7V5q7hwm7liO7YJHL7wO74jXAEh7MMfDMQlPMRyWwHPWwESIAEPQMCF2JZE/MRQHMUcvLVT/MHVe8WRC77Jy7z4O8GvK7p4q7OBm7Tzur8SfMInLMUcLMRq7Ls+bMQVgMRLfLNZ18Z2fMc7nMIIvMe928W6q7/++Zu/xyvDYDzGOFzIdJvHE7zIJczGROzIjbzGz8sBSty20xusRYzHH+zBmmzF8Ju8VZzFfXzFf6zCLuzCOHyyiPzFY2u0Uyy/ivzIa9zJs+zBz1vJbTs/UckQX0HLIMzJvhy+WqzF76vAuku+ZpzDMpzFh0zDYHzDi3vMZozCwTzLaszJcSzHluykGyzL1+zLsKzDxqy8fQzDVBzDg1zDrwvNzkzO1BzMkOzLcBzHlcy+msYA1ZzP4pzCn/zJfJzAiGrG6CzIzizG7azOh7zICszDdhzPTwzJH2DL9DzHtNLE3azP+izBu7vDLEy+fJzOyky+N8y1YjvSNgzKG63+wymNx3HM0nIb0bdcz8B6zyDs0BgNxSjNyKAM0BsduKc8zs3LynBb0ieNw++804zszTfNABEdxwxQAQwgAej4TzeN0Tl91R+tvKUsyCZtyK28yiZdxsScxlVt00Pc1COAxBJAx05c1eD8yuL8zz59zCFNyl5NtoXc1eQ81l7s1g1NzzLNzZh8x8Ds19KM0hrd0QB9zAM9t3Od12CdtAmd0sScz2b9xBKdxEtszyOHz37dycO80iyMxc/806O8ypCN0IsbwSotzZ991iPA1BVAyZtd0bucrV7x0q+Nx4ctvHE9yqIMyjFM2s9816idw5S90prMxpdNwpw8yYFt0YP+vdt37NpITc103c9029iK3cwkXdBfbbdl3NetTd1Q3NRJrMRTDVbmDdqVnczjrN2mrdcHbdwITcWUrdGW3dAd3MHQrcRsfdG7TdYlnNj7fL2K7dFArdX27d3qrMr9zNHv3N5B7LvoHdhsKeAVntGxPMQGLt/XzeCPnblRUNxFXd9+fMbKTeEhDNMxXdsaPN0sjtP+/NtezODMLOKiG9ZCbcPjTeD6PeO/fMTafK1UHcXNbdX53dpYfMA7PdrHbd9EXd9wnd1CLsK2/OIZLN00/dkETuPTXN6IzORUPOKsLMb0veNHrdIdPuNGTOSajbtdvuFpfeX7zNNn/OR2jdr+OR7lY2zQPr3m133lhX3h6q3LrKmJXeHST0zXXx7CsPzRrxzf8B3Uyvu4O+7gJu7HKy7PwczUR6zZ25zhMm7nBR7awhzXj93ni721Jd7jqQzrYyvhnP7oMw7nME7qcx7FQJ7Ie77YDG3FaCzs2N3dv+7nyH7XbA7bqe7aeszrWN7BcTwBmu0uXN7ZUvzoxozgomvKJgzX4M7qHs3qZr7jU67KNLzag57tOd3TzT7C7c7BLt651T7T2E7E8Q7pw4zsIhzOaYzfpIzCOt7qJfDqfs7jEq7GZsvDve3skN7pMJ2+uX7bCtHLYA7vC03u+H3qko7MCs7thQzyGWvw6wz+3mPO2tA+4ade4C3+0hK/5RSfEBbv4Y0+zatc89gt5nv96xq/6ZluyLGryRKsvw/f6VWs4hbe1NRO0YK96/re5g/v6CfP8mSd3V1c6cRt1yL/5zUM6K9r9DWvu
8Eb9UntyWbv4s87ARjM2W/HAFUfyjS/wnyO7xEewU+uwqPd81JL8ojM40Zt61QP9m288RYu7WpP0QUMVl/e21SP2DfP8kt+5zyt4N0O5X1/+SGP5N+Ov0T/9MG+7Bv93GkvAbYb4Lkr+VtP8KkdmVwP+Att4Hmf1WW+8999+Xqt7nAv+B9MyFGe41iv2x8QAtBd24lfx2Wv9wve4EMN+GYv1+7+nOz3PeVgXea+zddDXMZECv3C7OjHK/qjP8DrXceHDf1Rzs5cS/ep/r6+b/nafexT++BpnsNA3uiHjOnHDeKu/b8cMLEP4ABwInAAAQDAKoIFDR5EmHAVA4YMSowoETEiRIgSLU6cSLHixYgmIkbh6LHjCJIlTZ7UWPHkQ4wbWXKEeVFkTJgzO5aYSfKhTpYrUaKkSYJmS4kuN5b8ECLEBw4TJkh4QEDqAIEDFV7FurAhxpc6XcL8ivOm2JoeTYj8mTalyYwcVYYdGtKmRJBkLc4l6nXn27R3JQq9SAIu16JgSZJQypTChAdRp1bNGhlhQwZfecbFbFds3Zpq1bb+vHxRY2HSMfF+zCxR5Nm9PUtmXMs2debBRUcITcyhwoMIDwpIJQBZ8nCtDEu/pGl5rEzNq8l6/tla72zC1DHPPA1b5U7ufamH3R6XpNLcFSj0nkpVIPHhlK2Lpmkzu10T0Pu+Tv6WOvblpmuO1cun2FYK6j2LwhKMBMSWagqqqH4Lbj32InMPLAPNGsqjKMxi7SzKPgQxRBFHJLFEynYzMUUVQ+xQNRKww7C//1SLKMEQEOOgKQoaA049qybEqsLjmBvLuZtEqmtDsThksoQVn4QySimnDLEjI816kSwM54vpNgVBCMGDHHd0AMIIfwRSISHjy2w1JEFyjkMrxaL+sk4776xzyzndlLEJImVUcIQQRAgBBA4W6w1CH9MMcqvZ5Osvxj1jlNNDPC8lcQEF7FzgziVxapG1LCV9dCIFlTJ0gh0hAO5MRq+q0EicRoAxJIs4K4EzPc/idUnK8gA2WGH7CJbYP4jtI1ll+zj2jz8ygfaPTTLZpNpNPsH2k1C2DYUUb0kxJVxxUSGX3FRIKTeVVFRhd9112YU3XnnnpZeyUPUEjLXl5pLUphBKWNBQRB0jYNFXE1ozpNruyk7JXY+0lCFhJy622GTzWLZZZzeGFlprr82W226/FdcUVEw2uVxwzVVFXXXjffddemeW195PycpyOT9BtbJf1Tz+wo3QDnSE4MGChTv4oAppPbA+PfelETVdP+W16oh+pThrjJHdOmNmn33W449Bxpbbb8EdN9xy0zXX3ZZblplmuWtuCEYsH75ZS7+W+zfgQydIFDikky5ISFpJqu9w+ubkCNcjqd7SI6y1BpZYjCtfNlmNnc0kbGqtzVZbkc9GW+21UXEZFbhffntu1+N1jz+9TQDsT/l2vkuoBUUwtILeHBvAVcINokxxj5h2+mmql++1aiadj5gByi+vHPOuM+a4c7GrLVtb0c82pfSU13a5fLfljfv1eW2GfkmrO9Q3VOYBjkgEEnjnYIMNHmD1MQmHJ4i9aDUqxQmlSQ97Hqj+nge9qk1uesiyXOY09zXOUcuCnyDb98BXspN1kHwue1vM0qe+mdmreax5ggLd575KzQ9oDynUB4b2gP2ZaXDDM2F9ZhUnLfEncixkYBBN4ECKXWxYGescBcHmsU98DlubCIX3tkWKkXmLg+NTF+rMB0ISvi6HQlRgryDnQrEARgSEMtT+INC/o/0PgMVzGsRoFL9cjRF+kRNi9KZ3uYtxTVmbYBYFtUetUHwsdKMj2bg8mK7ytcuRMetiCesWRjedcIU+JOP9RoDGHG2gaAQoQPBuSDib0comh8PbD51HyTyeJQpEDJYAZDlLWtbSlrf8XAYRmUhFkk+LIDxfJOX+9kUGqpCVOFEQGV8omEKBAAQPcFCPRpm02CWOZwjECZx21TwVLtB5sATWLcU5TlqO7ZBTHFnJUja+X4oQb
q0TZr3q5k1K8qxnxsxkCexXqPx50jeCcyMOt5I8lZwFeXnbZpNYWUwnNURr5IToLUHXPW1REXylW6S52vmyYMZzfQMNYs/gt8Jr2rFGX2LK/jZQJv+hSaAMiSPPXnRQUtnxjq3sFTjzEFGeztKQIetWFdXZy7Zx9J3BHGEkiXnC+N2xpPKTkyYR48x+frKlACzcJFuIx2PiEXobch5Yq/ZKh2atp+QMwCyphUG2SrGKicxoURvZUUd6VBX2egKv8pr+V5yuspsiPamCeGeoCEQAAr8RZUBJOdCnXvJmfe2rWHUqgLRGtLK0vKwsfyq6KW7wiqebq9voale8moCvDARrGK/JTavJ9FTO7MAGogmhaR6MfY+rVDG9mkexutIEYJ2sZWVZ2czOMgBiO+QnLOrZdYLWfPBMqke/uNex6vaEL+LQqDD0IhKAoLuxNexhr4rV4jgES8rcUzet1tu+6rS4Z01rZkEHMpF1Vnym+yAw3cU6eJJ2nqaFLBhVCzTaYem1YzJsq2r7KhMqE4wBxilZJWbWAFSYshaOb4YvvOH4avZaH+6e2awovrhqcXXvrKtd2bVUAPs2pA9OYQp7paD+LzlzA6tS1IIZVU0XHvB9EM6je4drYcx2OMPErXCFcwlU+4bvs+xMhYn3CzMVw26eG2IvTgdsNRnrC2Dd7e6YrNpGly4Wprh1rEjzmCshRgHL3ywrxTCM5CTXGckX7jD3dKnct973dBvlYn8FrdT/AvmmNy1BXpdE4y/lqLDiTWyZqekoGZORzYbOclj1SOE5E3e4Q54zZS+cS/rW14pONh2UfxlC1kVXmJRhb6bbB9kV6s6ZIACvBMQrFR2n6bYPbuVCv/pbFxNbwtLLmp2VvWxmV3ha3GNyUHmJMuducbRVNuFp+QpWWc96lQr9i417079Ik7e8rIUYsHmbaW7+8+rYWmt2vJs90WuZetpqS1mU9Y3UEFYZXpR5wl4DDuFMC3ukJsA1ByKwIxsqdtLGqWc9v83b6orVze4GbpwnJm+O23labS2biEeM7z8DOtCuJjRMB27a3rpZ2AZ/8UlxvQHA7dpgWG3wz5o6cdRSfKztdm/HhW7IUvdZnXF9rmjRh+2GaNvd6wVVt318rxLcOn+FJdjN31jogx96vZr27ZvD/lXKGFFYQh/6WkPMZ9JhFL8mZ/XS/QvTFhO7ur+FUza9LfGzMPrWsuWfgh1uW8Yac4EwL3Zk8b5pOaNd3qswZ8jNhmqiso2jRk0x01Ve91719iOyTq3Bafz3DaD+p2BafykDeM4nbw/b9W8W+4aC3uxVOP7j9OWstMGFtpOxE9AobrW/S/uEdoPdm2Jcs99BsAGFGzbHg2fwJB1raLCD/ulD1Hge7nCHOXC89kPfLGeZ+3ZGivbaKs628V0fRPZSUvnLx/rzJU14uvO9lad9epYtjnF3z97x83Yit7KvU0sbRvo9mUG5LjKh4gOJWNM0+7O7qhEs3omt/fknXoO+HeM6xNM063Mx2Ks4/0O776swEnw2kBO53eOgP0u61knAlGMABtSQqKO4/RMi7uouEPAA/XE+gJq/6KM76nNAGVw8Gcy4CWu8CgOA/1M2eguxJksb37O2RnJBzYv+Qf77raiDPTUzNiCCHsH6rhszPQz8QQ08s3uhvutLvLGLQBFkQo+bqNx7K8qjtvyCpCr0t7u6MovLFetjtwicuCb4kgpMsNPrNSBhMUzrOYu7uA90JTd8wyR7Nl1KQVQjFyncon4bNOmqm+JzNwVqRL2LQLELIu4CgSbQQR60uUOcEPZpvZ5bxJ/rQLuTvewLlkhkNj2jKLNJp6EquVabsjz8t06kRZdbPH1pxBdrpRqLrZqTP3NLRKYCMlJsuQ+sRSTcOFxsQieqt4oSKj+DMmuLO2FcQFfCv/dpNxtcQ15RENohAVQcGjUSvDL0NcYSwvxbQ27zRMnKvjsAFm3+bMI4/J455D1VW7UpS58XnBtYuz7iG0WHSUN2rLEdj
D8fhMamCyLqujsiZEPY27+Lu0ZkS0KADADIA7GQ0SACTDUWvLw71EROVLmAE7hi88QsHMW+EqyZ86RdEx6cw8iMvDtAND4s28dqxD5sPDuSlMT5kry2+6xq4yL+IsemE7iVS8fq05DrA0nnaQJBpMDSK8QBQD0zU708ckg1VENqpEljwzssg0RmI0EAhLbuWa57Y0lgGsepjMlt47/UakBZjLAnIAHBXAIR6ICheTTE6smtC0L1G7Zk/Eit3JC8UktY4j47W0JcDD97U0GUIb/Qgi69vMK1DCssyyaI1Er+QGwC2hFEHby6NXpG8opG/LNBo2TAgGs3yizCo2QAywkWf1TKpcwgt7qoAvSllkRAYdRDukvHLeQ2NkvGPmTDCFzNM8I18CpEMrvIxqyac2S/xIs9ABu4gWtEt7TFfwzOACC6phw/TGzBQMu8VyPGsaojonQlbUrNZfTK66y5VczAeuTO7hTK8vwq/TMtmdRNYwtJeEvPzdoWtts9jLrEalsdpRPN+yzP+gTPn/sItuSV1XyRMxIBMRFDCCiT4BnLhyvLVpK6AV088gSw8Pytt4zEJYu25aJD98TLuFFI12HIjbw0OLnKzqsjF/MTE+jK+9HJ8HKA9GBF9kg/p3P+zPw8xhi1Uo9c0GRLMjwTNTvjMCITTmirr2/ENx3dL5fMQ3vBMtwsQrWMNanJQiVJ0sJsAVyjAApYAQfQU8RKUfpb0YysRvtkxJ8bTwRV0EfUOH9Ezw7rUmb7UuK6veQaQF+EyqhEU/RriDejTHZzUw1pwPrEia4EGBFYAhBogQ6ggAxYIz09vT59lQWAVdXjK6c7RwN1RLGiTAQ9S7GbrFDb0ka9My49LgdNQRLLqH1TukuduxWNAjYNTzf9S1D91Dkt1cPMgAxg0gZw0v8EEliF1bPAP++szbQcu0ItRgVlCFiFt0cV1i31UjzTReI0OpILR/16yU1UwE401LD+I0UPjdNPnTFBVNJTvVMmdYAGEEuxfFLiUIBOYYB2NFSNnEV8pEXfKlTdLM/gErWNvaX3kq84jKImuy/fa6cKnZkelSSGeNbdlNHalM68IwEPuE4xNCw9bQCETdiFHQ4FUAAEYIjVbDHvrFK0BEFig9GivTiNHafieq/hOsFzQqeRo7Z8kyvzOz/5hClBvcljVMtPxM+q84AXiK1UrdmDxdmc5dYJSQCe3RQGEIEmENpAldGKBVeMZbmtnNHz3Kmz4ti+FYDIY0+pba7ya0l8XdaxG1TQG0IFagIPcNzYwtY1sgCzbdXf0NnhQACfbVvsEtAAq83JlMkDZcuk1dv+jW1a4xKny5pEqNU9PwOtkk3WCwXBBG1OQGy5MELFmStYEz1YDGjVASiAqqBHRkGAnmUIBQBavbLYmbxYXT1QXTXUQXVYZPNHRd1bviUnwA1ZgvysU7BDBFRWuYmuhliA5o3Y54Ve6DUtBVmC9l2Cw7yxDLAACJjcm/1dHxneV8nc4mWId0TS5A1ajezKs1hNuDWtAZbJ1Qy41YQCKDABB0ZSh10AivFH7IUo5GJdoXrKu9zRuoIkuuJRdumUTqlKJAXXJZBJFH4CFCYBFD7QwWzhFV5h94XfVJVcPXWAhEXby52Q4mVbtwXaJwDg5BViAxbiAzVic03gs4Bg4Ir+Ve3Tvuq9XgvGJQcdyHsLl1PwXueCOwuVyhRTulgtX0FM4IBTYRUOuMEczyUYTAArTPd937ENr8nVU7E8vTNJ26RZ2x8WxD6umgFmTQIm4K7s4xlGYRYu4CiwnyhoAhF4YG9dACmW4o6lYgHIBE2YRLIRmVEYBebqPe/d4u9Nuikc5f2C5CIW4neUYRQWARleZVde5VZ2Xw9oAf3BVgug44O9X+HN46TZ301BADYWRBMQ0SRt5D4WxCWgnSVoAmVm5v/tSmauzkZ+22OG5O3DZmwGFine5mDxA2DZAwjKBD/YmOyBlk6olk4ABaASBVFwSlPQYnHR4i0OZQOsWlL+n
kJY3RRCLuBofgJZbt9Vdt8VPiNnpuEKPI8ImFwLuFkdVlheNjes2N/MVYCCZoKCFlFSheNjbuZmhuMzimaP5udqbgK2VYBsRumUVlRF3QNw1oM92AM/kGkKooSahhZN0AR07oTQ4SxO7uRvKYWgLoV5Juqi1uI/OwV1SepUOGpz8d4sKhd9VoB31Gg4bt+qtuqsnmUPgF8exGU61lYdDl6IjmisSIAEmGgQWALH9YAwYes3XoIWOKMW0GoPcF+63uoXYOsXWAKeLd47qIPABmzAFmzBTmk8uAM8wAOXDudkkWly5hxKuGmc3oRO2OlPAAVQCAXNFgVOPptRKAX+UhDqcBFqodbioTbq005tox5qtg3mwmxmvO6A9p1tOJbr9sVr2rbrFqjlDLhTCvhqynVoXs7fiD6As0buw+wArmZuWu4A3jZV3rbWU6Vl3q5T6+Zttmbrw5zowJ4D766D7w7s8SbvOrgDO8BmxVbsPHhpPeCDPvADPpgEP6CEP7DpS9YETqhsy8aWzNZsbvHpAP9p0S7tAjfwA0fwoEZrBEgA7LbuuI7rU8XuOI7rDnCBU33uWrZl4P7qmw3rViXush4OAziAEi/xCtSfFLduCmgBFuftDMBu/dFw3t4AGtefFUBxEz+AOeDxHvfx8OZx7ivvOrADIrcD9NYDPND+gyV37z3gA/n2g0mw75quBE2oBE7Ib07oBC1XZ0/I7M0WhVBo5zEfcwHn5FI4c9BWczRf8zVnczQvBR3PgA6A8Tmnc+uucRlPcf1xgT1P8WvlcFy22bDW4RAX8RE3gEQn8RW4U99O1VS91kiH9Gu9ABZfgWtldDzF9BVIcU5fgQMg8R3ncTmYA1L38VP3cTpQdTog7yO3AyVncph+8ieP8kmYBCqvhCrnhF3ncsv2hF/3cv8m82En9mI3dmI3cQOI9GVf9g0AdA3YAA1gdklP1Qv46lxuaB3G414+9FVQdEUvrAi4gAsorGuFAFW1gAwg93Vfd0of93VfAXGPgBX+iPcV+PY4wHc5iAN933d9J/V/R3U6mINVpwNXf/VXZ/KXpnX5tvVbv/Vcv3Jel3hP4ARgt/iL/3VQ8ARR2HhQ4PiNb2eQB/Yx1/iQ//Z3V3d1/2r5fXdrL9hxD3RctvZr93CE/V1D73biKICd5/k1ugDJvfb5pd+g//lr//lxx+WalVyeLwB8d/qnh3p+l3o5MPWBJ/iCd/UkT/hZ5wOZbvhJqARKCHuIj3iJ33WKr/i0x/i1Z3u2Z3qjD3qij/u5d4D6vVkMIHRDJOucZw+m33lQgpC/D3ypGHxQInzD53nEB/zB93vFF3zEF/zEd/zA/43KP3zAx3zLX/zIJ/z+wsf8w3/8vwf9zWf8xK/83zAA4Eh9Alj91Ed91m+V9HDoHeZhvl+F2Mf93C98zY99MyF84AXe3s/9y/f94Td+w28V3zeT4j9+4GB+57/85pf+6Z/+2ad9brd9g8D94CmY7u9+7p+K8Bd/7+/+UAL+HkH/9Ff/8W8V7gf/7w9/9x/+95f/9if/O05/96d/+7//9+f/9QcIAgMEEihocADChAgBMGy46iHEiBInUqxo8WJEhQQLDjy4USBCkARDJgRZsuRIjgMVrhR5smXHmAZTdlR50iRNmBx3yiSZcqaBnzpxqvRIEibLpA0aOsTo9CnUi0ynUq1q9SrWrFq3cu1r6vUr2LBYo5ItS1Ys2rRq17Jt65ah2bhyMb6ta/cu3rxN5/Lt6/cv4MCCBxMubPgw4sSKFzNu7Pgx5MiSJ1OubPky5syaN3Pu7Pkz6NCiR5Mubfo06tSqV7Nu7fo17NiyZ9Oubfs27ty4AwIAOw=="
savebutton = tk.Button(window_frame, command=imagesavefiledialog, relief="solid")
savebutton.img = tk.PhotoImage(data=saveimg).subsample(8,8)
savebutton["image"] = savebutton.img
savebutton.place(x=5, y=5, anchor="nw")
#open window
window.mainloop()
tkthread = threading.Thread(target=ViewInTkinter)
tkthread.start()
    def _SaveRenderedShapefile(self, savepath):
        # The tkinter backend draws straight to a live canvas widget and
        # cannot export it, so saving only works for image-based backends.
        if RENDERER == "tkinter":
            raise AttributeError("The Tkinter map renderer does not have a function to save the map as an image \
due to the limited options of the Tkinter Canvas. If possible try using any of the other renderers instead")
        else:
            self.renderer.SaveImage(savepath)
############## FINALLY, DEFINE FRONT-END USER FUNCTIONS
#INTERACTIVE INPUT HELPERS
def AskNumber(text="unknown task"):
    """
    Asks the user to interactively input a number (float or int) at any point in the script, and returns the input number.
    | __option__ | __description__
    | --- | ---
    | *text | an optional string to identify for what purpose the chosen number will be used.
    """
    # loop instead of recursing so repeated bad input cannot exhaust the
    # interpreter's recursion limit (the original retried via recursion)
    while True:
        try:
            innumber = input("\n\nwrite a comma or integer number to use for "+str(text)+" (example: 15 or 15.83)\nnumber = ")
        except NameError:
            # Python 2 input() evals its input; an unquoted word raises NameError
            print("""\n---> unknown error""")
            continue
        # Python 2 input() may already have evaluated to a number
        if isinstance(innumber, (float, int)):
            return innumber
        # Python 3 input() returns a string, so also try parsing it
        # (the original's isinstance check could never succeed on Python 3)
        try:
            return int(innumber)
        except (TypeError, ValueError):
            pass
        try:
            return float(innumber)
        except (TypeError, ValueError):
            print("""\n---> error: the number must be either a floating point comma or integer number""")
def AskString(text="unknown task"):
    """
    Asks the user to interactively input a string at any point in the script, and returns the input string.
    | __option__ | __description__
    | --- | ---
    | *text | an optional string to identify for what purpose the chosen string will be used.
    """
    # basestring only exists on Python 2; fall back to str on Python 3
    # (the original NameError'd on Python 3 at the isinstance check)
    try:
        stringtypes = basestring
    except NameError:
        stringtypes = str
    # loop instead of recursing so repeated bad input cannot exhaust the
    # interpreter's recursion limit
    while True:
        try:
            instring = input("\n\nwrite a string to use for "+str(text)+", enclosed in quoatation marks (example: 'this is my string')\nstring = ")
        except NameError:
            # Python 2 input() evals its input; an unquoted word raises NameError
            print("""\n---> error: the string must be enclosed by quotation marks""")
            continue
        if isinstance(instring, stringtypes):
            return instring
        print("""\n---> error: the string must be enclosed by quotation marks""")
def AskShapefilePath(text="unknown task"):
    """
    Pops up a temporary tk window asking user to visually choose a shapefile.
    Returns the chosen shapefile path as a text string. Also prints it as text in case
    the user wants to remember which shapefile was picked and hardcode it in the script.
    | __option__ | __description__
    | --- | ---
    | *text | an optional string to identify what purpose the shapefile was chosen for when printing the result as text.
    """
    # hidden root window so only the file dialog is visible
    dialogroot = tk.Tk()
    dialogroot.state("withdrawn")
    shapefilepath = tkFileDialog.askopenfilename(parent=dialogroot, filetypes=[("shapefile",".shp")], title="choose shapefile for "+text)
    dialogroot.destroy()
    # echo the choice so the user can hardcode it later
    print("you picked the following shapefile for <"+str(text)+">:\n"+str(shapefilepath)+"\n\n")
    return shapefilepath
def AskFieldName(shapefilepath, text="unknown task"):
    """
    Loads and prints the available fieldnames of a shapefile, and asks the user which one to choose.
    Returns the chosen fieldname as a string.
    | __option__ | __description__
    | --- | ---
    | *text | an optional string to identify for what purpose the chosen fieldname will be used.
    """
    tempshapefile = Shapefile(shapefilepath)
    print("valid fieldnames:")
    for fieldname in tempshapefile.fieldnames:
        print(" %s" % fieldname)
    # basestring only exists on Python 2; fall back to str on Python 3
    # (the original NameError'd on Python 3 at the isinstance check)
    try:
        stringtypes = basestring
    except NameError:
        stringtypes = str
    # loop instead of recursing so repeated bad input cannot exhaust the
    # interpreter's recursion limit
    while True:
        try:
            fieldname = input("\n\nselecting fieldname for "+str(text)+"\nchoose from the above fieldnames (and surround by quotation marks)\nfield = ")
        except NameError:
            # Python 2 input() evals its input; an unquoted word raises NameError
            print("""\n---> error: the fieldname must be given as a string enclosed by quotation marks (example "field1" or 'field1')""")
            continue
        if not isinstance(fieldname, stringtypes):
            print("""\n---> error: the fieldname must be given as a string enclosed by quotation marks (example "field1" or 'field1')""")
        elif fieldname in tempshapefile.fieldnames:
            return fieldname
        else:
            print("\n---> error: the specified fieldname does not match any of the valid fieldnames, try again")
def AskColor(text="unknown graphics"):
    """
    Pops up a temporary tk window asking user to visually choose a color.
    Returns the chosen color as a hex string. Also prints it as text in case
    the user wants to remember which color was picked and hardcode it in the script.
    | __option__ | __description__
    | --- | ---
    | *text | an optional string to identify what purpose the color was chosen for when printing the result as text.
    """
    # hidden root window so only the color chooser dialog is visible
    dialogroot = tk.Tk()
    dialogroot.state("withdrawn")
    rgb, hexcolor = tkColorChooser.askcolor(parent=dialogroot, title="choose color for "+text)
    dialogroot.destroy()
    # echo the choice so the user can hardcode it later
    print("you picked the following color for "+str(text)+": "+str(hexcolor))
    # normalize through the colour library to a canonical hex string
    return colour.Color(hexcolor).hex
#GENERAL UTILITIES
def _FolderLoop(folder, filetype=""):
    """
    A generator that iterates through all files in a folder tree, either in a for loop or by using next() on it.
    Filetype can be set to only grab files that have the specified file-extension. If filetype is a tuple then grabs all filetypes listed within it.

    Yields (directory path with trailing separator, bare filename, ".ext") tuples.
    """
    for dirpath, dirnames, filenames in os.walk(folder):
        # use os.sep so paths work on every platform (the original
        # hard-coded the Windows "\\" separator)
        eachdir = dirpath + os.sep
        for eachfile in filenames:
            # endswith accepts a tuple, so filetype may list several extensions;
            # the default "" matches every file
            if eachfile.endswith(filetype):
                eachfilename = ".".join(eachfile.split(".")[:-1])
                eachfiletype = "." + eachfile.split(".")[-1]
                yield (eachdir, eachfilename, eachfiletype)
def ShapefileFolder(folder):
    """
    A generator that will loop through a folder and all its subfolder and return information of every shapefile it finds. Information returned is a tuple with the following elements (string name of current subfolder, string name of shapefile found, string of the shapefile's file extension(will always be '.shp'))
    | __option__ | __description__
    | --- | ---
    | folder | a path string of the folder to check for shapefiles.
    """
    # delegate the actual tree walk, keeping only .shp files
    shapefileinfo = _FolderLoop(folder, filetype=".shp")
    for folderpath, shapefilename, extension in shapefileinfo:
        yield (folderpath, shapefilename, extension)
class Layer:
"""
Creates and returns a thematic layer instance (a visual representation of a geographic file) that can be symbolized and used to add to a map.
| __option__ | __description__
| --- | ---
| filepath | the path string of the geographic file to add, including the file extension.
| **customoptions | any series of named arguments of how to style the shapefile visualization (optional). Valid arguments are: fillcolor, fillsize (determines the circle size for point shapefiles, line width for line shapefiles, and has no effect for polygon shapefiles), outlinecolor, outlinewidth. For more info see the special section on how to stylize a layer.
"""
    def __init__(self, filepath, **customoptions):
        # Path of the underlying geographic file, including extension.
        self.filepath = filepath
        # Load the shapefile up front so the layer can be drawn repeatedly.
        self.fileobj = Shapefile(shapefilepath=filepath, progresstext="loading layer")
        # Validate/normalize the styling options (fillcolor, fillsize, ...).
        self.customoptions = _CheckOptions(customoptions)
        # No classification rules attached yet; set via AddClassification.
        self.classifier = None
def AddClassification(self, symboltype, valuefield, symbolrange=None, classifytype="equal interval", nrclasses=5):
"""
Adds a classification/instruction to the layer on how to symbolize a particular symbol part (e.g. fillcolor) based on a shapefile's attribute values.
| __option__ | __description__ | __input__
| --- | --- | ---
| symboltype | a string indicating which type of symbol the classification should apply to. | any of: "fillsize", "fillwidth", "fillheight", "fillcolor", "outlinewidth", "outlinecolor"
| valuefield | a string with the name of a shapefile attribute field whose values will be used to inform the classification. | string
| symbolrange | a list or tuple of the range of symbol values that should be used for the symbol type being classified. You only need to assign the edge/breakpoints in an imaginary gradient of symbol values representing the transition from low to high value classes; the values in between will be interpolated if needed. The symbol values must be floats or integers when classifying a size-based symbol type, or hex color strings when classifying a color-based symbol type. | list or tuple
| classifytype | a string with the name of the mathematical algorithm used to calculate the break points that separate the classes in the attribute values. | For valid classification type names see list below
| nrclasses | an integer or float for how many classes to subdivide the data and symbol values into. | Integer or float
Valid names for the classifytype option are:
- __"categorical"__
Assigns a unique class/symbol color to each unique attribute value, so can only be used when classifying color-based symbol types
- __"equal classes"__
Makes sure that there are equally many features in each class, which means that features with the same attribute values can be found in multiple classes
- __"equal interval"__
Classes are calculated so that each class only contains features that fall within a value range that is equally large for all classes
- __"natural breaks"__
The Fisher-Jenks natural breaks algorithm, adapted from the Python implementation by Daniel J. Lewis (http://danieljlewis.org/files/2010/06/Jenks.pdf), is used to find 'natural' breaks in the shapefile dataset, i.e. where the value range within each class is as similar as possible and where the classes are as different as possible from each other. This algorithm is notorious for being slow for large datasets, so for datasets larger than 1000 records the calculation will be limited to a random sample of 1000 records (thanks to Carston Farmer for that idea, see: http://www.carsonfarmer.com/2010/09/adding-a-bit-of-classification-to-qgis/), and in addition that calculation will be performed 6 times, with the final break points being the sample mean of all the calculations. For large datasets this means that the natural breaks algorithm and the resultant map classification may turn out differently each time; however, the results should be somewhat consistent especially due to the random nature of the approach and the multiple sample means
"""
if not self.classifier:
#create classifier if this is the first classification being added
self.classifier = _Classifier()
self.classifier.AddClassification(symboltype, valuefield, symbolrange=symbolrange, classifytype=classifytype, nrclasses=nrclasses)
def AssignTime(self, yearfield=0, monthfield=1, dayfield=1, hourfield=0, minutefield=0, secondfield=0):
"""
Assigns a field to contain the time dimension of a shapefile. Used by the NewMap SaveTimeSequence method to determine the time of multiple shapefiles simultaneously.
"""
self.fileobj.AssignTime(yearfield=yearfield, monthfield=monthfield, dayfield=dayfield, hourfield=hourfield, minutefield=minutefield, secondfield=secondfield)
#RENDERING OPTIONS
def SetRenderingOptions(renderer="not set", numpyspeed="not set", reducevectors="not set"):
    """
    Sets certain rendering options that apply to all visualizations or map images.
    | __option__ | __description__
    | --- | ---
    | *renderer | which Python module will be used for rendering (must be installed). Valid values: 'aggdraw' (default), 'PIL', 'pycairo', 'tkinter'. Tkinter ships with Python but is slow, memory-limited and cannot save images; PyCairo is currently slow for line shapefiles.
    | *numpyspeed | whether to use numpy to speed up shapefile reading and coordinate-to-pixel conversion. True (default) or False.
    | *reducevectors | whether to reduce the number of vectors rendered; faster but may lower quality, especially for line shapefiles. True or False (default).
    """
    global RENDERER, NUMPYSPEED, REDUCEVECTORS
    if renderer != "not set":
        RENDERER = renderer
    if numpyspeed != "not set":
        NUMPYSPEED = numpyspeed
        # map dimensions are stored in a numpyspeed-dependent format,
        # so they must be recalculated whenever this toggles
        _UpdateMapDims()
    if reducevectors != "not set":
        REDUCEVECTORS = reducevectors
#STYLE CUSTOMIZING
def Color(basecolor, intensity="not specified", brightness="not specified", style=None):
    """
    Returns a hex color string built from the options specified.
    NOTE: since v0.2.0 basecolor, intensity, and brightness no longer default
    to random, and Color() can no longer be called empty (a basecolor is
    always required).
    | __option__ | __description__ | __input__
    | --- | --- | ---
    | basecolor | the human-like name of a color; always required, may be 'random'. | string
    | *intensity | how strong the color should be, or 'random'. | float between 0 and 1
    | *brightness | how light or dark the color should be, or 'random'. | float between 0 and 1
    | *style | a named style that overrides brightness and intensity (optional). | one of: 'strong', 'dark', 'matte', 'bright', 'pastelle'
    """
    grayscale = basecolor in ("black","white","gray")
    #resolve intensity/brightness first
    if style and not grayscale:
        # a named style overrides any manual intensity and brightness
        intensity = COLORSTYLES[style]["intensity"]
        brightness = COLORSTYLES[style]["brightness"]
    elif grayscale:
        # black/white/gray: random intensity/brightness would start producing
        # colors, so only an explicit 'random' brightness is honored
        if brightness == "random":
            brightness = random.randrange(20,80)/100.0
    else:
        if intensity == "random":
            intensity = random.randrange(20,80)/100.0
        elif intensity == "not specified":
            intensity = 0.7
        if brightness == "random":
            brightness = random.randrange(20,80)/100.0
        elif brightness == "not specified":
            brightness = 0.5
    #then build the color
    if grayscale:
        # only honor gray brightness if the user specified or randomized it
        if brightness == "not specified":
            return colour.Color(color=basecolor).hex
        return colour.Color(color=basecolor, luminance=brightness).hex
    if basecolor == "random":
        return colour.Color(pick_for=random.randrange(300), saturation=intensity, luminance=brightness).hex
    # custom made color
    return colour.Color(color=basecolor, saturation=intensity, luminance=brightness).hex
class _SymbolClass:
def __init__(self, classmin, classmax, minvalue, maxvalue, classvalue, classsymbol):
"_min and _max are not attr values meant for user, but used for membership determination internally and can be different from attr"
self._min = classmin
self._max = classmax
self.min = minvalue
self.max = maxvalue
self.classvalue = classvalue
self.classsymbol = classsymbol
class _Classifier:
    """
    Internal use only
    A classifier that holds a set of instructions on how to classify a shapefile's visual symbols based on its attribute values.
    The classifier can hold multiple classifications, one for each symbol (e.g. fillsize and fillcolor), and these are added with the AddClassification method.
    When a layer is passed to a rendering operations its classifier is used as the recipe on how to symbolize the shapefile.
    This classifier is also needed to render a shapefile's legend.
    *Takes no arguments*
    """
    def __init__(self):
        # {uniqid: {symboltype: raw attribute value}}, filled via AddValue
        self.values = dict()
        # {uniqid: {symboltype: assigned class symbol}}, filled by CalculateClasses
        self.symbols = dict()
        # list of classification recipe dicts, one per symboltype
        self.allclassifications = []
        self.name = "unnamed classifier"
    def AddClassification(self, symboltype, valuefield, symbolrange=None, classifytype="equal interval", nrclasses=5):
        # Register one classification recipe. Gradual (non-categorical)
        # classifications interpolate symbols from symbolrange, so it is required.
        if not symbolrange and classifytype!="categorical":
            raise TypeError("since you have chosen a gradual classification you must specify a range of symbol values to choose from")
        classification = dict([("symboltype",symboltype),
                               ("valuefield",valuefield),
                               ("symbolrange",symbolrange),
                               ("classifytype",classifytype),
                               ("nrclasses",nrclasses) ])
        self.allclassifications.append(classification)
    def AddCustomClass(self, symboltype, valuefield, valuemin, valuemax):
        # Not yet implemented.
        #first loop through existing classes and delete/reset maxmin values to make room for the new class value range
        #then create and insert class at appropriate position
        pass
    def AddValue(self, index, symboltype, value):
        # Record one feature's attribute value for the given symboltype,
        # keyed by the feature's unique id.
        if self.values.get(index):
            #add to dict if already exits
            self.values[index][symboltype] = value
        else:
            #or create new dict if not
            self.values[index] = dict([(symboltype, value)])
            self.symbols[index] = dict()
    def CalculateClasses(self, classification):
        # Compute the classes for one classification recipe and assign a
        # class symbol to every feature value collected so far.
        classifytype = classification.get("classifytype")
        #calculate classes based on classifytype
        if classifytype.lower() == "categorical":
            self._UniqueCategories(classification)
            self.__AssignMembershipByUnique(classification)
        elif classifytype.lower() == "equal interval":
            self._EqualInterval(classification)
            self.__AssignMembershipByValue(classification)
        elif classifytype.lower() == "equal classes":
            self._EqualClasses(classification)
            self.__AssignMembershipByIndex(classification)
        elif classifytype.lower() == "natural breaks":
            self._NaturalBreaks(classification)
            self.__AssignMembershipByValue(classification)
        else:
            raise TypeError("classifytype must be one of: ...")
    def GetSymbol(self, uniqid, symboltype):
        # Look up the symbol assigned to a feature for one symboltype;
        # returns None implicitly when the feature or symboltype has no symbol.
        #for each class test value for membership
        featuresymbols = self.symbols.get(uniqid)
        if featuresymbols:
            symbol = featuresymbols.get(symboltype)
            if symbol:
                return symbol
    def GetValues(self):
        # NOTE(review): self.sortedvalues only exists after one of the
        # class-calculation methods has run — confirm callers respect that.
        return self.sortedvalues
    def GetClassifications(self):
        return self.allclassifications
    # INTERNAL USE ONLY
    def __AssignMembershipByValue(self, classification):
        # Assign each feature the symbol of the first class whose
        # _min/_max value range contains the feature's attribute value.
        symboltype = classification.get("symboltype")
        classes = classification.get("classes")
        #loop through values and assign class symbol to each for the specified symboltype
        for uniqid, value in self.sortedvalues:
            value = value[symboltype]
            #for each class test value for membership
            for eachclass in classes:
                #membership true if within minmax range of that class
                if value >= eachclass._min and value <= eachclass._max:
                    #assign classsymbol
                    self.symbols[uniqid][symboltype] = eachclass.classsymbol
                    break
    def __AssignMembershipByIndex(self, classification):
        # Assign symbols by the feature's position in the sorted value list
        # (used by "equal classes", where _min/_max are list indexes).
        symboltype = classification.get("symboltype")
        classes = classification.get("classes")
        #loop through values and assign class symbol to each for the specified symboltype
        for index, (uniqid, value) in enumerate(self.sortedvalues):
            value = value[symboltype]
            #for each class test value for membership
            for eachclass in classes:
                #membership true if within minmax range of that class
                if index >= eachclass._min and index <= eachclass._max:
                    #assign classsymbol
                    self.symbols[uniqid][symboltype] = eachclass.classsymbol
                    break
    def __AssignMembershipByUnique(self, classification):
        # Walk the sorted values and advance to the next class each time a new
        # unique value is seen; classes were created in the same sorted order
        # by _UniqueCategories, so the generator stays in sync.
        symboltype = classification.get("symboltype")
        classes = (each for each in classification.get("classes"))
        #loop through values and assign class symbol to each for the specified symboltype
        oldvalue = None
        for index, (uniqid, value) in enumerate(self.sortedvalues):
            value = value[symboltype]
            if value != oldvalue:
                eachclass = next(classes)
            self.symbols[uniqid][symboltype] = eachclass.classsymbol
            oldvalue = value
    def __CustomSymbolRange(self, classification):
        # Resize the user-given symbolrange to exactly nrclasses symbols:
        # numeric ranges are interpolated/shrunk, hex color ranges are blended
        # through their rgb values.
        symbolrange = classification.get("symbolrange")
        nrclasses = classification.get("nrclasses")
        #first create pool of possible symbols from raw inputted symbolrange
        if isinstance(symbolrange[0], (int,float)):
            #create interpolated or shrinked nr range
            symbolrange = listy.Resize(symbolrange, nrclasses)
        elif isinstance(symbolrange[0], basestring):
            #create color gradient by blending color rgb values
            rgbcolors = [colour.hex2rgb(eachhex) for eachhex in symbolrange]
            rgbgradient = listy.Resize(rgbcolors, nrclasses)
            symbolrange = [colour.rgb2hex(eachrgb) for eachrgb in rgbgradient]
            #alternative color spectrum hsl interpolation
            ###rgbcolors = [colour.rgb2hsl(colour.hex2rgb(eachhex)) for eachhex in symbolrange]
            ###rgbgradient = listy.Resize(rgbcolors, nrclasses)
            ###symbolrange = [colour.rgb2hex(colour.hsl2rgb(eachrgb
        #update classification with new symbolrange
        classification["symbolrange"] = symbolrange
    def _UniqueCategories(self, classification):
        """
        Remember, with unique categories the symbolrange doesn't matter, and only works for colors
        """
        symboltype = classification.get("symboltype")
        classifytype = classification.get("classifytype")
        if not "color" in symboltype:
            raise TypeError("the categorical classification can only be used with color related symboltypes")
        #initiate
        # sort all (uniqid, valuedict) pairs by their value dicts so equal
        # attribute values end up adjacent
        self.sortedvalues = sorted([(uniqid, value) for uniqid, value in self.values.iteritems()], key=operator.itemgetter(1))
        sortedvalues = [value[symboltype] for uniqid,value in self.sortedvalues]
        #populate classes
        classes = []
        #then set symbols
        # one class per unique value, each with a random color symbol
        olduniq = None
        for index, uniq in enumerate(sortedvalues):
            if uniq != olduniq:
                classsymbol = Color("random")
                classmin = uniq
                classmax = uniq
                minvalue = classmin
                maxvalue = classmax
                #create and add class
                classes.append( _SymbolClass(classmin, classmax, minvalue, maxvalue, index, classsymbol) )
                olduniq = uniq
        classification["classes"] = classes
    def _EqualInterval(self, classification):
        # Build nrclasses classes whose value ranges are all equally wide.
        symboltype = classification.get("symboltype")
        symbolrange = classification.get("symbolrange")
        classifytype = classification.get("classifytype")
        nrclasses = classification.get("nrclasses")
        #initiate
        self.__CustomSymbolRange(classification)
        symbolrange = classification["symbolrange"]
        self.sortedvalues = sorted([(uniqid, value) for uniqid, value in self.values.iteritems()], key=operator.itemgetter(1))
        sortedvalues = [value[symboltype] for uniqid,value in self.sortedvalues]
        lowerbound = sortedvalues[0]
        upperbound = sortedvalues[-1]
        # NOTE(review): int() truncation means intervalsize can be 0 for
        # narrow or float-valued ranges — confirm intended
        intervalsize = int( (upperbound-lowerbound)/float(nrclasses) )
        #populate classes
        classmin = lowerbound
        classes = []
        for index, classsymbol in enumerate(symbolrange):
            # last class absorbs any rounding remainder up to the upper bound
            if index == nrclasses-1:
                classmax = upperbound
            else:
                classmax = classmin+intervalsize
            #determine min and max value
            minvalue = classmin
            maxvalue = classmax
            #create and add class
            classes.append( _SymbolClass(classmin, classmax, minvalue, maxvalue, index, classsymbol) )
            #prep for next
            classmin = classmax
        classification["classes"] = classes
    def _EqualClasses(self, classification):
        # Build nrclasses classes that each contain (roughly) equally many
        # features; _min/_max are positions in the sorted value list, while
        # min/max report the attribute values at those positions.
        symboltype = classification.get("symboltype")
        symbolrange = classification.get("symbolrange")
        classifytype = classification.get("classifytype")
        nrclasses = classification.get("nrclasses")
        #initiate
        self.__CustomSymbolRange(classification)
        symbolrange = classification["symbolrange"]
        self.sortedvalues = sorted([(uniqid, value) for uniqid, value in self.values.iteritems()], key=operator.itemgetter(1))
        sortedvalues = [value[symboltype] for uniqid,value in self.sortedvalues]
        classsize = int( len(sortedvalues)/float(nrclasses) )
        #populate classes
        classmin = 0
        classes = []
        for index, classsymbol in enumerate(symbolrange):
            if index == nrclasses-1:
                classmax = len(sortedvalues)-1
            else:
                classmax = classmin+classsize
            #determine min and max value
            minvalue = sortedvalues[classmin]
            maxvalue = sortedvalues[classmax]
            #create and add class
            classes.append( _SymbolClass(classmin, classmax, minvalue, maxvalue, index, classsymbol) )
            #prep for next
            classmin = classmax
        classification["classes"] = classes
    def _NaturalBreaks(self, classification):
        # Build classes at the Fisher-Jenks "natural" break points of the data.
        symboltype = classification.get("symboltype")
        symbolrange = classification.get("symbolrange")
        classifytype = classification.get("classifytype")
        nrclasses = classification.get("nrclasses")
        #initiate
        self.__CustomSymbolRange(classification)
        symbolrange = classification["symbolrange"]
        self.sortedvalues = sorted([(uniqid, value) for uniqid, value in self.values.iteritems()], key=operator.itemgetter(1))
        sortedvalues = [value[symboltype] for uniqid,value in self.sortedvalues]
        lowerbound = sortedvalues[0]
        upperbound = sortedvalues[-1]
        def getJenksBreaks(dataList, numClass ):
            "taken from http://danieljlewis.org/files/2010/06/Jenks.pdf"
            dataList = sorted(dataList)
            #in mat1, populate empty classlists of zeros
            zeros = [0 for j in xrange(0,numClass+1)]
            zeroandones = [0]
            zeroandones.extend([1 for i in xrange(1,numClass+1)])
            mat1 = [list(zeros), zeroandones]
            mat1.extend([list(zeros) for i in xrange(2,len(dataList)+1)])
            #...while classes in element 1 are set to 1, except for first class which remains zero
            for i in xrange(1,numClass+1):
                mat1[1][i] = 1
            #in mat2, classes in element 0 and 1 are set to 0
            mat2 = [list(zeros),list(zeros)]
            #...while the classes in elements 2 and up are set to infinity, except for first class which is a zero
            mat2classes = [0]
            mat2classes.extend([float('inf') for i in xrange(1,numClass+1)])
            mat2ext = [list(mat2classes) for j in xrange(2,len(dataList)+1)]
            mat2.extend(mat2ext)
            #then the main work (everything prior to this has been optimized/changed from original code)
            # dynamic-programming pass: mat2 accumulates within-class variance,
            # mat1 records the best break index for each (size, class) pair
            v = 0.0
            for l in xrange(2,len(dataList)+1):
                s1 = 0.0
                s2 = 0.0
                w = 0.0
                for m in xrange(1,l+1):
                    i3 = l - m + 1
                    val = float(dataList[i3-1])
                    s2 += val * val
                    s1 += val
                    w += 1
                    v = s2 - (s1 * s1) / w
                    i4 = i3 - 1
                    if i4 != 0:
                        for j in xrange(2,numClass+1):
                            if mat2[l][j] >= (v + mat2[i4][j - 1]):
                                mat1[l][j] = i3
                                mat2[l][j] = v + mat2[i4][j - 1]
                mat1[l][1] = 1
                mat2[l][1] = v
            # backtrack through mat1 to recover the actual break values
            k = len(dataList)
            kclass = []
            for i in xrange(0,numClass+1):
                kclass.append(dataList[0])
            kclass[numClass] = float(dataList[-1])
            countNum = numClass
            while countNum >= 2:
                #print "rank = " + str(mat1[k][countNum])
                id = int((mat1[k][countNum]) - 2)
                #print "val = " + str(dataList[id])
                kclass[countNum - 1] = dataList[id]
                k = int((mat1[k][countNum] - 1))
                countNum -= 1
            return kclass
        #populate classes
        highthresh = 1000
        if len(sortedvalues) > highthresh:
            #the idea of using random sampling for large datasets came from a blogpost by Carson Farmer. I just added the idea of calculating the breaks several times and using the sample means for the final break values.
            #see: http://www.carsonfarmer.com/2010/09/adding-a-bit-of-classification-to-qgis/
            allrandomsamples = []
            samplestotake = 6
            for _ in xrange(samplestotake):
                # each sample is pinned to the true data bounds so the averaged
                # breaks always span the full value range
                randomsample = sorted(random.sample(sortedvalues, highthresh))
                randomsample[0] = lowerbound
                randomsample[-1] = upperbound
                tempbreaks = getJenksBreaks(randomsample, nrclasses)
                allrandomsamples.append(tempbreaks)
            jenksbreaks = [sum(allbreakvalues)/float(len(allbreakvalues)) for allbreakvalues in itertools.izip(*allrandomsamples)]
        else:
            jenksbreaks = getJenksBreaks(sortedvalues, nrclasses)
        # first break is the lower bound itself; consume the rest as class maxima
        breaksgen = (each for each in jenksbreaks[1:])
        classmin = lowerbound
        classes = []
        for index, classsymbol in enumerate(symbolrange):
            classmax = next(breaksgen)
            #determine min and max value
            minvalue = classmin
            maxvalue = classmax
            #create and add class
            classes.append( _SymbolClass(classmin, classmax, minvalue, maxvalue, index, classsymbol) )
            #prep for next
            classmin = classmax
        classification["classes"] = classes
def _CheckOptions(customoptions):
    """
    Internal use only.
    Returns a copy of the styling options dict with defaults filled in for
    anything the user did not supply.
    """
    opts = customoptions.copy()
    # colors use a sentinel so an explicit None (meaning "no fill/outline")
    # is respected rather than replaced
    if opts.get("fillcolor", "not specified") == "not specified":
        opts["fillcolor"] = Color("random")
    # size options simply fall back to defaults when missing or falsy
    for sizekey, sizedefault in (("fillsize", 0.4), ("fillwidth", 1.2), ("fillheight", 0.8)):
        if not opts.get(sizekey):
            opts[sizekey] = sizedefault
    if opts.get("outlinecolor", "not specified") == "not specified":
        opts["outlinecolor"] = Color("black")
    if not opts.get("outlinewidth"):
        opts["outlinewidth"] = 0.09 #percent of map
    return opts
def _CheckTextOptions(customoptions):
    """
    Internal use only.
    Returns a copy of the text styling options dict with defaults filled in,
    converting a user-given relative textsize into a pixel-based font size.
    """
    opts = customoptions.copy()
    #text and font
    if not opts.get("textfont"):
        opts["textfont"] = "default"
    if not opts.get("textsize"):
        opts["textsize"] = MAPWIDTH*0.0055 #equivalent to textsize 7
    else:
        # the user supplies textsize as a percent height of MAPWIDTH:
        # convert to pixel height, then to an approximate font size
        pixelheight = MAPWIDTH*opts["textsize"]
        opts["textsize"] = int(round(pixelheight*0.86))
    if not opts.get("textcolor"):
        opts["textcolor"] = Color("black")
    if not opts.get("textopacity"):
        opts["textopacity"] = 255
    if not opts.get("texteffect"):
        opts["texteffect"] = None
    if not opts.get("textanchor"):
        opts["textanchor"] = "center"
    #text background box
    if not opts.get("textboxfillcolor"):
        opts["textboxfillcolor"] = None
    elif opts.get("textboxoutlinecolor","not specified") == "not specified":
        # a filled box defaults to a black outline unless told otherwise
        opts["textboxoutlinecolor"] = Color("black")
    if not opts.get("textboxfillsize"):
        opts["textboxfillsize"] = 1.1 #proportion size of text bounding box
    if not opts.get("textboxoutlinecolor"):
        opts["textboxoutlinecolor"] = None
    if not opts.get("textboxoutlinewidth"):
        opts["textboxoutlinewidth"] = 1.0 #percent of fill, not of map
    if not opts.get("textboxopacity"):
        opts["textboxopacity"] = 0 #both fill and outline
    return opts
def _ScreenToWorldCoords(xy):
    """
    Internal use only.
    Converts a single screen pixel coordinate to a world coordinate.
    """
    pixelx, pixely = xy
    fracx = pixelx/float(MAPWIDTH)
    fracy = pixely/float(MAPHEIGHT)
    # screen y grows downward while world y grows upward, hence the 1-fracy flip
    return (XMIN+fracx*XWIDTH, YMIN+(1-fracy)*YHEIGHT)
#QUICK TASKS
def ViewShapefile(shapefilepath, **customoptions):
    """
    Quick task to visualize a shapefile and show it in a Tkinter window.
    | __option__ | __description__
    | --- | ---
    | shapefilepath | the path string of the shapefile.
    | **customoptions | any series of named styling arguments (optional): fillcolor, fillsize (circle size for point shapefiles, line width for line shapefiles, no effect for polygons), outlinecolor, outlinewidth.
    """
    styleoptions = _CheckOptions(customoptions)
    # a throwaway renderer is enough for a one-off view
    _Renderer().ViewShapefile(shapefilepath, styleoptions)
def SaveShapefileImage(shapefilepath, savepath, **customoptions):
    """
    Quick task to save a shapefile to an image.
    | __option__ | __description__
    | --- | ---
    | shapefilepath | the path string of the shapefile.
    | savepath | the path string of where to save the image, including the image type extension.
    | **customoptions | any series of named styling arguments (optional): fillcolor, fillsize (circle size for point shapefiles, line width for line shapefiles, no effect for polygons), outlinecolor, outlinewidth.
    """
    styleoptions = _CheckOptions(customoptions)
    # a throwaway renderer is enough for a one-off save
    _Renderer().SaveShapefileImage(shapefilepath, savepath, styleoptions)
#MAP BUILDING
class NewMap:
"""
Creates and returns a new map based on previously defined mapsettings.
*Takes no arguments*
"""
def __init__(self):
    # each map instance gets its own renderer to draw onto
    self.renderer = _Renderer()
def AddShape(self, shapeobj, **customoptions):
    """
    Adds an individual shape instead of an entire file.
    | __option__ | __description__
    | --- | ---
    | shapeobj | a shape instance; currently only works with the PyShpShape instances returned when looping through a geovis Shapefile instance
    | **customoptions | any number of named arguments to style the shape
    """
    styleoptions = _CheckOptions(customoptions)
    # wrap geojson-like objects so the renderer can treat all shapes uniformly
    if hasattr(shapeobj, "__geo_interface__"):
        shapeobj = _GeojShape(shapeobj.__geo_interface__)
    self.renderer._RenderShape(shapeobj, styleoptions)
def AddToMap(self, layer):
    """
    Add and render a layer instance to the map.
    | __option__ | __description__
    | --- | ---
    | layer | the layer instance that you wish to add to the map
    """
    # unclassified layers render directly; classified ones go through
    # the auto-classification pipeline first
    if not layer.classifier:
        self.renderer._RenderLayer(layer)
    else:
        self._AutoClassifyShapefile(layer)
def AddLegend(self, layer, upperleft, bottomright, legendtitle="not specified", boxcolor=Color("gray",brightness=0.8), boxoutlinecolor=Color("black"), boxoutlinewidth=0.08):
    """
    Draws a basic legend for a given layer.
    | __option__ | __description__
    | --- | ---
    | layer | the layer instance whose legend you wish to add to the map
    | upperleft | the upperleft corner of the legend as a list or tuple of the relative x and y position, each a float between 0-1
    | bottomright | the bottomright corner of the legend as a list or tuple of the relative x and y position, each a float between 0-1
    | legendtitle | the title of the legend as a string, by default uses the filename of the underlying shapefile
    | boxcolor | the hex color of the rectangle box that contains the legend, set to None to not render the box, default is a lightgray.
    | boxoutlinecolor | the hex color of the outline of the rectangle box that contains the legend, set to None to not render the outline, default is black.
    | boxoutlinewidth | the thickness of the boxoutline color relative to the box size, so 0.10 is 10 percent of the box size
    """
    classifier = layer.classifier
    #first set positions
    relx1,rely1 = upperleft
    relx2,rely2 = bottomright
    relxcenter = sum([relx1,relx2])/2.0
    relycenter = sum([rely1,rely2])/2.0
    # padding factors are currently unused (the *(1-pad) terms are commented out)
    xpad = 0.1
    ypad = 0.2
    legendwidth = (relx2-relx1)#*(1-xpad)
    legendheight = (rely2-rely1)#*(1-ypad)
    #multiple classifications are placed side by side
    allclassifications = classifier.GetClassifications()
    relxincr = legendwidth/float(len(allclassifications))
    #draw legendbox and title if any
    if boxcolor or boxoutlinecolor:
        boxoptions = dict([("fillcolor",boxcolor),
                           ("outlinecolor",boxoutlinecolor),
                           ("outlinewidth",boxoutlinewidth)])
        boxoptions = _CheckOptions(boxoptions)
        self.DrawRectangle(upperleft, bottomright, **boxoptions)
    if legendtitle:
        if legendtitle == "not specified":
            legendtitle = classifier.name
        titleoptions = dict([("textsize",0.023),
                             ("textboxfillcolor",Color("white")),
                             ("textboxfillsize",1.2),
                             ("textanchor","s")])
        print("") #to fix weird tk text positioning error
        self.AddText(relxcenter, rely1, legendtitle, **titleoptions)
    #then...
    # NOTE(review): _Classifier.__init__ does not define a symbolizer
    # attribute — presumably it is assigned elsewhere; verify before relying
    # on fillsize legends
    symbolizer = classifier.symbolizer
    relx = relx1+relxincr/2.0
    for classification in allclassifications:
        classes = classification.get("classes")
        symboltype = classification.get("symboltype")
        #classes are placed under each other
        relyincr = legendheight/float(len(classes)+2)
        #place symbol fieldname source
        rely = rely1+(relyincr/2.0)
        self.AddText(relx, rely, text=classification.get("valuefield"), textsize=0.0127, textboxfillcolor=None, textboxoutlinecolor=None)
        #place symboltype text
        rely += relyincr/2.0
        self.AddText(relx, rely, text="(%s)"%symboltype, textsize=0.0127, textboxfillcolor=None, textboxoutlinecolor=None)
        rely += relyincr/2.0
        # split the remaining column into a symbol half and a label half
        tempwidth = relxincr
        xtox = (relx-tempwidth/2.0,relx+tempwidth/2.0)
        ytoy = (rely,rely+relyincr)
        partitions = guihelper.PartitionSpace(xtox,ytoy,partitions=2,padx=0.02,pady=0,direction="horizontal")
        leftpart,rightpart = partitions
        #place each class symbol and label text
        for eachclass in reversed(classes):
            symbol = eachclass.classsymbol
            if symboltype == "fillcolor":
                # a filled swatch rectangle with the class value range beside it
                tempwidth = (relx+relxincr-relx)
                classupperleft = (leftpart.w[0],rely)
                classbottomright = (leftpart.e[0],rely+relyincr)
                self.DrawRectangle(classupperleft, classbottomright, fillcolor=symbol)
                #place label text
                if eachclass.min != eachclass.max:
                    textlabel = "%s - %s" %(eachclass.min,eachclass.max)
                else:
                    textlabel = "%s" %eachclass.min
                self.AddText(rightpart.center[0],rely+relyincr/2.0,text=textlabel, textsize=0.0111)
            elif symboltype == "fillsize":
                # render a sample point shape at the class's symbol size;
                # the branches differ only in the vertical anchoring of the symbol
                if symbolizer:
                    if symbolizer == "circle":
                        tempoptions = _CheckOptions(dict(fillsize=symbol, fillcolor=None, symbolizer=symbolizer))
                        symbolheight = self.renderer._RelSizesToPixels(tempoptions)["fillsize"]/float(MAPHEIGHT)
                        temprelx,temprely = (leftpart.center[0], rely+relyincr*len(classes)-symbolheight/2.0)
                        xy = [[_ScreenToWorldCoords((temprelx*MAPWIDTH, temprely*MAPHEIGHT))]]
                        shape = _PyShpShape(shapefile=None, fieldnames=None, uniqid=None, coords=xy, shapetype="point")
                        self.renderer._RenderShape(shape, tempoptions)
                        rely -= relyincr
                        #place label text
                        if eachclass.min != eachclass.max:
                            textlabel = "%s - %s" %(eachclass.min,eachclass.max)
                        else:
                            textlabel = "%s" %eachclass.min
                        self.AddText(rightpart.center[0], rely+relyincr*(len(classes)+1)-symbolheight, text=textlabel, textsize=0.0111)
                    elif symbolizer == "square":
                        tempoptions = _CheckOptions(dict(fillsize=symbol, fillcolor=None, symbolizer=symbolizer))
                        symbolheight = self.renderer._RelSizesToPixels(tempoptions)["fillsize"]/float(MAPHEIGHT)
                        temprelx,temprely = (leftpart.center[0], rely+relyincr*len(classes)-symbolheight/2.0)
                        xy = [[_ScreenToWorldCoords((temprelx*MAPWIDTH, temprely*MAPHEIGHT))]]
                        shape = _PyShpShape(shapefile=None, fieldnames=None, uniqid=None, coords=xy, shapetype="point")
                        self.renderer._RenderShape(shape, tempoptions)
                        rely -= relyincr
                        #place label text
                        if eachclass.min != eachclass.max:
                            textlabel = "%s - %s" %(eachclass.min,eachclass.max)
                        else:
                            textlabel = "%s" %eachclass.min
                        self.AddText(rightpart.center[0], rely+relyincr*(len(classes)+1)-symbolheight, text=textlabel, textsize=0.0111)
                    elif symbolizer == "pyramid":
                        # pyramids are anchored at their base, so no half-height offset
                        tempoptions = _CheckOptions(dict(fillsize=symbol, fillcolor=None, symbolizer=symbolizer))
                        symbolheight = self.renderer._RelSizesToPixels(tempoptions)["fillsize"]/float(MAPHEIGHT)
                        temprelx,temprely = (leftpart.center[0], rely+relyincr*len(classes))
                        xy = [[_ScreenToWorldCoords((temprelx*MAPWIDTH, temprely*MAPHEIGHT))]]
                        shape = _PyShpShape(shapefile=None, fieldnames=None, uniqid=None, coords=xy, shapetype="point")
                        self.renderer._RenderShape(shape, tempoptions)
                        rely -= relyincr
                        #place label text
                        if eachclass.min != eachclass.max:
                            textlabel = "%s - %s" %(eachclass.min,eachclass.max)
                        else:
                            textlabel = "%s" %eachclass.min
                        self.AddText(rightpart.center[0], rely+relyincr*(len(classes)+1)-symbolheight, text=textlabel, textsize=0.0111)
                    else:
                        # any other symbolizer is rendered like a circle/square
                        tempoptions = _CheckOptions(dict(fillsize=symbol, fillcolor=None, symbolizer=symbolizer))
                        symbolheight = self.renderer._RelSizesToPixels(tempoptions)["fillsize"]/float(MAPHEIGHT)
                        temprelx,temprely = (leftpart.center[0], rely+relyincr*len(classes)-symbolheight/2.0)
                        xy = [[_ScreenToWorldCoords((temprelx*MAPWIDTH, temprely*MAPHEIGHT))]]
                        shape = _PyShpShape(shapefile=None, fieldnames=None, uniqid=None, coords=xy, shapetype="point")
                        self.renderer._RenderShape(shape, tempoptions)
                        rely -= relyincr
                        #place label text
                        if eachclass.min != eachclass.max:
                            textlabel = "%s - %s" %(eachclass.min,eachclass.max)
                        else:
                            textlabel = "%s" %eachclass.min
                        self.AddText(rightpart.center[0], rely+relyincr*(len(classes)+1)-symbolheight, text=textlabel, textsize=0.0111)
            elif symboltype == "outlinecolor":
                # a colored line sample with the class value range beside it
                tempwidth = (relx+relxincr-relx)/3.0
                startpos, endpos = ((leftpart.w[0],rely+relyincr/2.0),(leftpart.e[0],rely+relyincr/2.0))
                self.DrawLine(startpos, endpos, fillcolor=symbol)
                #place label text
                if eachclass.min != eachclass.max:
                    textlabel = "%s - %s" %(eachclass.min,eachclass.max)
                else:
                    textlabel = "%s" %eachclass.min
                self.AddText(rightpart.center[0],rely+relyincr/2.0,text=textlabel, textsize=0.0111)
            elif symboltype == "outlinewidth":
                # a black line sample at the class's width
                tempwidth = (relx+relxincr-relx)/3.0
                startpos, endpos = ((leftpart.w[0],rely+relyincr/2.0),(leftpart.e[0],rely+relyincr/2.0))
                self.DrawLine(startpos, endpos, fillcolor=Color("black"), fillsize=symbol)
                #place label text
                if eachclass.min != eachclass.max:
                    textlabel = "%s - %s" %(eachclass.min,eachclass.max)
                else:
                    textlabel = "%s" %eachclass.min
                self.AddText(rightpart.center[0],rely+relyincr/2.0,text=textlabel, textsize=0.0111)
            rely += relyincr
        relx += relxincr
def AddText(self, relx, rely, text, **textoptions):
    """
    Writes text on the map.

    | __option__ | __description__
    | --- | ---
    | relx | the relative x position of the text's centerpoint, a float between 0-1
    | rely | the relative y position of the text's centerpoint, a float between 0-1
    | text | the text to add to the map, as a string
    | **textoptions | any number of named arguments to style the text
    """
    validated = _CheckTextOptions(textoptions)
    self.renderer._RenderText(relx, rely, text, validated)
def DrawRectangle(self, upperleft, bottomright, **customoptions):
    """
    Draws a rectangle on the map.

    | __option__ | __description__
    | --- | ---
    | upperleft | the upperleft corner of the rectangle as a list or tuple of the relative x and y position, each a float between 0-1
    | bottomright | the bottomright corner of the rectangle as a list or tuple of the relative x and y position, each a float between 0-1
    | **customoptions | any number of named arguments to style the rectangle
    """
    validated = _CheckOptions(customoptions)
    self.renderer._RenderRectangle(upperleft, bottomright, validated)
def DrawCircle(self, relx, rely, fillsize, **customoptions):
    """
    Draws a circle on the map.

    | __option__ | __description__
    | --- | ---
    | relx | the relative x position of the circle's centerpoint, a float between 0-1
    | rely | the relative y position of the circle's centerpoint, a float between 0-1
    | fillsize | the size of the circle (same meaning as the general fillsize styling option)
    | **customoptions | any number of named arguments to style the circle
    """
    validated = _CheckOptions(customoptions)
    self.renderer._RenderCircle(relx, rely, fillsize, validated)
def DrawLine(self, startpos, stoppos, **customoptions):
    """
    Draws a line on the map.

    | __option__ | __description__
    | --- | ---
    | startpos | a list or tuple of the relative x and y position where the line should start, each a float between 0-1
    | stoppos | a list or tuple of the relative x and y position where the line should end, each a float between 0-1
    | **customoptions | any number of named arguments to style the line
    """
    validated = _CheckOptions(customoptions)
    self.renderer._RenderLine(startpos, stoppos, validated)
def ViewMap(self):
    """
    View the created map embedded in a Tkinter window. The map image can be
    panned, but not zoomed. A 'save image' button allows saving the image
    interactively.

    *Takes no arguments*
    """
    self.renderer._ViewRenderedShapefile()
def SaveMap(self, savepath):
    """
    Save the map to an image file.

    | __option__ | __description__
    | --- | ---
    | savepath | the string path for where you wish to save the map image. Image type extension must be specified ('.png','.gif',...)
    """
    self.renderer._SaveRenderedShapefile(savepath)
def SaveTimeSequence(self, timelayers, savefolder, starttime, endtime, timeinterval):
    """
    Cycles through time at specified intervals, and saves map images displaying the shape features that match each of the given time periods.
    Note: In order for shapes to vary over time, their shapefiles must have been time-defined with the AssignTime method. No constant layers are currently allowed.
    Still experimental: Currently creates new map for each time period (so previous drawings or text won't show), and does not allow for constant layers...

    | __option__ | __description__
    | --- | ---
    | timelayers | a list of time-enabled layer instances to draw across time. Layers can be time-enabled by using their AssignTime method.
    | savefolder | the string path for where you wish to save all the map images. Images are automatically saved as .png.
    | starttime | a datetime.datetime instance of where to start the time sequence
    | endtime | a datetime.datetime instance of where to end the time sequence, the timepoint itself will not be included
    | timeinterval | a datetime.timedelta instance of how much time to increase for each time frame in the sequence, ie how large of a timeperiod each map will represent
    """
    # first create folder if not exists
    if not os.path.lexists(savefolder):
        os.makedirs(savefolder)
    # then loop through time
    # NOTE(review): the query string passed to SelectByQuery below references
    # curtimemin/curtimemax by name, presumably evaluated against module
    # globals elsewhere -- which would be why these are declared global here
    # instead of being plain locals. Verify against SelectByQuery's
    # implementation.
    global curtimemin,curtimemax
    curtimemin = starttime
    curtimemax = starttime + timeinterval
    while curtimemin < endtime:
        # a fresh map per time window (previous drawings/text are not kept)
        newmap = NewMap()
        for layer in timelayers:
            shapefile = layer.fileobj
            # keep only shapes whose time falls inside the current window
            shapefile.SelectByQuery("shape.GetTime() >= curtimemin and shape.GetTime() < curtimemax")
            newmap.AddToMap(layer)
            shapefile.ClearSelection()
        # filename is the window's start time with filesystem-safe separators
        newmap.SaveMap(savefolder+"/"+str(curtimemin).replace("-","_").replace(":","_")+".png")
        # advance the window by one interval
        curtimemin += timeinterval
        curtimemax += timeinterval
###INTERNAL USE ONLY
def _AutoClassifyShapefile(self, layer):
    # Classifies a layer's shapefile attribute values into symbol classes and
    # renders every shape that received at least one classified symbol.
    # Internal use only.
    shapefilepath = layer.filepath  # NOTE(review): unused in this method
    classifier = layer.classifier
    options = layer.customoptions
    ####CLASSIFY ONE SHAPEFILE OPTION
    allclassifications = classifier.allclassifications
    #create shapefile
    shapefile = layer.fileobj
    classifier.name = shapefile.filename
    classifier.symbolizer = options.get("symbolizer")
    #exclude values if specified
    excludequery = options.get("excludequery")
    if excludequery:
        shapefile.SelectByQuery(excludequery, inverted=True)
    #classify values into symbols
    shapefile.progresstext = "classifying"
    #first populate values from classification fields
    for eachshape in shapefile:
        # pair field names with this shape's attribute values
        row = dict(zip(shapefile.fieldnames, eachshape.GetAttributes()))
        for classification in allclassifications:
            field_to_classify = classification["valuefield"]
            attributevalue = row[field_to_classify]
            classifier.AddValue(eachshape.id, classification["symboltype"], attributevalue)
    #then calculate classes
    for classification in allclassifications:
        classifier.CalculateClasses(classification)
    #then send classifier to renderer so can remember its layer and legend properties
    self.renderer._AddLayerInfo(shapefile.filename, classifier)
    ####RENDER THAT CLASSIFIED SHAPEFILE
    #loop sorted/classified ids and get and render each
    shapefile.progresstext = "rendering shapes"
    for shape in shapefile:
        classificationsuccess = False
        #populate a custom options dict based on classifications
        # NOTE(review): this mutates layer.customoptions in place, so symbols
        # assigned for one shape persist as defaults for later shapes that
        # lack their own classification -- confirm this carry-over is intended.
        for classification in allclassifications:
            symboltype = classification["symboltype"]
            #retrieve class color for each shape id
            symbol = classifier.GetSymbol(shape.id, symboltype)
            if symbol:
                options[symboltype] = symbol
                classificationsuccess = True #as long as at least one classification was successful it will display it later, but only those shapes that were not excluded will be given symbols based on classification algorithm (the rest will only use default)
        #render only if at least one of the options were successful
        if classificationsuccess:
            self.renderer._RenderShape(shape, options)
    ####OR RENDER IN SORTED ORDER (NEED TO ADD CUSTOM SHAPE RENDERING ORDER AND SPECIAL SHAPE CACHING TO FILE BEFORE USING THIS BC GRABBING ONE SHAPE AT A TIME IS CURRENTLY QUITE SLOW)
    "finish this later..."
##    #loop sorted/classified ids and get and render each
##    for uniqid, value in messages.ProgressReport(classifier.GetValues(), text="rendering shapes"):
##        #populate a custom options dict based on classifications
##        for classification in allclassifications:
##            symboltype = classification["symboltype"]
##            #retrieve class color for each shape id
##            symbol = classifier.GetSymbol(uniqid, symboltype)
##            if symbol:
##                options[symboltype] = symbol
##        #render only if at least one of the options were successful
##        if options:
##            shape = shapefile.GetShape(uniqid)
##            self.renderer._RenderShape(shape, options)
    ####FINALLY ADD LABELS FOR THAT SHAPEFILE
    "need more work here........."
##    #loop through shapefile
##    shapefile.progresstext = "adding labels"
##    for eachshape in shapefile:
##        #populate a custom options dict based on classifications
##        for classification in allclassifications:
##            symboltype = classification["symboltype"]
##            #retrieve class color for each shape id
##            symbol = classifier.GetSymbol(eachshape.id, symboltype)
##            if symbol:
##                options[symboltype] = symbol
##        #render only if at least one of the options were successful
##        if options:
##            #LABEL TEXT
##            textoptions = _CheckTextOptions(dict([("textsize",20)]))
##            x,y = eachshape._MapCoords([eachshape.GetCenter()])
##            relx, rely = (x/float(MAPWIDTH), y/float(MAPHEIGHT))
##            self.AddText(relx, rely, str(eachshape.id), **textoptions)
#MAP SPECS
def SetMapDimensions(width, height):
    """
    Sets the width and height of the next map image. At startup the width and height are set to the dimensions of the window screen.

    | __option__ | __description__
    | --- | ---
    | width | the pixel width of the final map image to be rendered, an integer.
    | height | the pixel height of the final map image to be rendered, an integer.
    """
    global MAPWIDTH, MAPHEIGHT
    MAPWIDTH, MAPHEIGHT = width, height
    _UpdateMapDims()
def SetMapBackground(mapbackground):
    """
    Sets the mapbackground of the next map to be made. At startup the mapbackground is transparent (None).

    | __option__ | __description__
    | --- | ---
    | mapbackground | takes a hex color string, as can be created with the Color function. It can also be None for a transparent background (default).
    """
    global MAPBACKGROUND
    MAPBACKGROUND = mapbackground
def SetMapZoom(x2x, y2y):
    """
    Zooms the map to the given mapextents.

    | __option__ | __description__
    | --- | ---
    | x2x | a two-item list of the x-extents in longitude format, from the leftmost to the rightmost longitude, default is full extent [-180, 180]
    | y2y | a two-item list of the y-extents in latitude format, from the bottommost to the topmost latitude, default is full extent [-90, 90]
    """
    global XMIN,XMAX,YMIN,YMAX
    XMIN,XMAX = (x2x[0],x2x[1])
    YMIN,YMAX = (y2y[0],y2y[1])
    global XWIDTH,YHEIGHT,XOFFSET,YOFFSET
    # requested extents before any aspect-ratio correction
    inxwidth = XMAX-XMIN
    inyheight = YMAX-YMIN
    #SUMTIN WEIRD, BOTH ZOOMS NEED TO BE TRUE AND MAYBE CHANGED, NOT JUST ONE, SO MAYBE ALWAYS TIMES UP AND NEVER DIVIDE DOWN
    if inxwidth > inyheight*PROJ_XYRATIO:
        #automatically override xlimits to be centered middle of given extents, but with a new width thats proportional to the projection widt/height ratio
        midx = sum(x2x)/float(len(x2x))
        # NOTE(review): the branch condition compares against
        # inyheight*PROJ_XYRATIO, yet the half-width here divides by the
        # ratio -- if PROJ_XYRATIO is width/height these should match
        # (multiply, not divide). Possibly related to the author's comment
        # above; confirm before changing.
        halfxwidth = inyheight/PROJ_XYRATIO/2.0
        XMIN,XMAX = (midx-halfxwidth, midx+halfxwidth)
    elif inyheight*PROJ_XYRATIO > inxwidth:
        #automatically override ylimits to be centered middle of given extents, but with a new height thats proportional to the projection widt/height ratio
        midy = sum(y2y)/float(len(y2y))
        halfyheight = inxwidth/PROJ_XYRATIO/2.0
        YMIN,YMAX = (midy-halfyheight, midy+halfyheight)
    # northwest-corner offset used to translate world to screen coordinates;
    # computed from the ORIGINAL input extents (see comment below)
    nw = (-1*min(x2x),max(y2y))
    #cant use old width/height from original input but instead recalculate using the updated X/YMAX/MIN bc they were changed to preserve a certain ratio
    XWIDTH = XMAX-XMIN
    YHEIGHT = YMAX-YMIN
    XOFFSET = nw[0]
    YOFFSET = nw[1]
    _UpdateMapDims()
### END OF SCRIPT ###
|
word2vec.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec mini-batched skip-gram model.
Trains the model described in:
(Mikolov, et. al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does traditional minibatching.
The key ops used are:
* placeholder for feeding in tensors for each example.
* embedding_lookup for fetching rows from the embedding matrix.
* sigmoid_cross_entropy_with_logits to calculate the loss.
* GradientDescentOptimizer for optimizing the loss.
* skipgram custom op that does input processing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
# Load the custom input-processing ops (skipgram_word2vec) compiled alongside
# this script; word2vec_ops.so must have been built beforehand.
word2vec = tf.load_op_library(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'word2vec_ops.so'))

# Command-line flags: data locations, model size and training schedule.
flags = tf.app.flags

flags.DEFINE_string("save_path", None, "Directory to write the model and "
                    "training summaries.")
flags.DEFINE_string("train_data", None, "Training text file. "
                    "E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
    "eval_data", None, "File consisting of analogies of four tokens."
    "embedding 2 - embedding 1 + embedding 3 should be close "
    "to embedding 4."
    "See README.md for how to get 'questions-words.txt'.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
    "epochs_to_train", 15,
    "Number of epochs to train. Each epoch processes the training data once "
    "completely.")
flags.DEFINE_float("learning_rate", 0.2, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 100,
                     "Negative samples per training example.")
flags.DEFINE_integer("batch_size", 16,
                     "Number of training examples processed per step "
                     "(size of a minibatch).")
flags.DEFINE_integer("concurrent_steps", 12,
                     "The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
                     "The number of words to predict to the left and right "
                     "of the target word.")
flags.DEFINE_integer("min_count", 5,
                     "The minimum number of word occurrences for it to be "
                     "included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
                   "Subsample threshold for word occurrence. Words that appear "
                   "with higher frequency will be randomly down-sampled. Set "
                   "to 0 to disable.")
flags.DEFINE_boolean(
    "interactive", False,
    "If true, enters an IPython interactive session to play with the trained "
    "model. E.g., try model.analogy(b'france', b'paris', b'russia') and "
    "model.nearby([b'proton', b'elephant', b'maxwell'])")
flags.DEFINE_integer("statistics_interval", 5,
                     "Print statistics every n seconds.")
flags.DEFINE_integer("summary_interval", 5,
                     "Save training summary to file every n seconds (rounded "
                     "up to statistics interval).")
flags.DEFINE_integer("checkpoint_interval", 600,
                     "Checkpoint the model (i.e. save the parameters) every n "
                     "seconds (rounded up to statistics interval).")

FLAGS = flags.FLAGS
class Options(object):
  """All word2vec hyper-parameters, copied from the command-line FLAGS."""

  def __init__(self):
    # Model options: the embedding dimension.
    self.emb_dim = FLAGS.embedding_size

    # Training options.
    self.train_data = FLAGS.train_data            # training text file
    self.num_samples = FLAGS.num_neg_samples      # negative samples per example
    self.learning_rate = FLAGS.learning_rate      # initial learning rate
    # After this many epochs the learning rate has decayed linearly to zero
    # and training stops.
    self.epochs_to_train = FLAGS.epochs_to_train
    self.concurrent_steps = FLAGS.concurrent_steps  # concurrent training steps
    self.batch_size = FLAGS.batch_size            # examples per training step
    # Words predicted to the left and right of the target word.
    self.window_size = FLAGS.window_size
    # Minimum occurrences for a word to enter the vocabulary.
    self.min_count = FLAGS.min_count
    # Subsampling threshold for frequent words.
    self.subsample = FLAGS.subsample

    # Reporting/checkpointing cadence, in seconds (summary and checkpoint
    # intervals round up to the nearest statistics interval).
    self.statistics_interval = FLAGS.statistics_interval
    self.summary_interval = FLAGS.summary_interval
    self.checkpoint_interval = FLAGS.checkpoint_interval

    # Where summaries and checkpoints are written; created on demand.
    self.save_path = FLAGS.save_path
    if not os.path.exists(self.save_path):
      os.makedirs(self.save_path)

    # Eval options: the analogy question file.
    self.eval_data = FLAGS.eval_data
class Word2Vec(object):
  """Word2Vec model (Skipgram)."""

  def __init__(self, options, session):
    # options: an Options instance; session: the tf.Session used to build
    # and run the graph. Building happens eagerly in the constructor.
    self._options = options
    self._session = session
    self._word2id = {}  # word -> vocab id
    self._id2word = []  # vocab id -> word
    self.build_graph()
    self.build_eval_graph()
    self.save_vocab()

  def read_analogies(self):
    """Reads through the analogy question file.

    Returns:
      questions: a [n, 4] numpy array containing the analogy question's
      word ids.
      questions_skipped: questions skipped due to unknown words.
    """
    questions = []
    questions_skipped = 0
    with open(self._options.eval_data, "rb") as analogy_f:
      for line in analogy_f:
        if line.startswith(b":"):  # Skip comments.
          continue
        words = line.strip().lower().split(b" ")
        # Unknown words map to None, which disqualifies the question.
        ids = [self._word2id.get(w.strip()) for w in words]
        if None in ids or len(ids) != 4:
          questions_skipped += 1
        else:
          questions.append(np.array(ids))
    print("Eval analogy file: ", self._options.eval_data)
    print("Questions: ", len(questions))
    print("Skipped: ", questions_skipped)
    self._analogy_questions = np.array(questions, dtype=np.int32)

  def forward(self, examples, labels):
    """Build the graph for the forward pass."""
    opts = self._options

    # Declare all variables we need.
    # Embedding: [vocab_size, emb_dim]
    init_width = 0.5 / opts.emb_dim
    emb = tf.Variable(
        tf.random_uniform(
            [opts.vocab_size, opts.emb_dim], -init_width, init_width),
        name="emb")
    self._emb = emb

    # Softmax weight: [vocab_size, emb_dim]. Transposed.
    sm_w_t = tf.Variable(
        tf.zeros([opts.vocab_size, opts.emb_dim]),
        name="sm_w_t")

    # Softmax bias: [vocab_size].
    sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name="sm_b")

    # Global step: scalar, i.e., shape [].
    self.global_step = tf.Variable(0, name="global_step")

    # Nodes to compute the nce loss w/ candidate sampling.
    labels_matrix = tf.reshape(
        tf.cast(labels,
                dtype=tf.int64),
        [opts.batch_size, 1])

    # Negative sampling: draw noise words from the unigram distribution
    # raised to the 0.75 power, excluding duplicates within a draw.
    sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
        true_classes=labels_matrix,
        num_true=1,
        num_sampled=opts.num_samples,
        unique=True,
        range_max=opts.vocab_size,
        distortion=0.75,
        unigrams=opts.vocab_counts.tolist()))

    # Embeddings for examples: [batch_size, emb_dim]
    example_emb = tf.nn.embedding_lookup(emb, examples)

    # Weights for labels: [batch_size, emb_dim]
    true_w = tf.nn.embedding_lookup(sm_w_t, labels)
    # Biases for labels: [batch_size, 1]
    true_b = tf.nn.embedding_lookup(sm_b, labels)

    # Weights for sampled ids: [num_sampled, emb_dim]
    sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
    # Biases for sampled ids: [num_sampled, 1]
    sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)

    # True logits: [batch_size, 1]
    true_logits = tf.reduce_sum(tf.multiply(example_emb, true_w), 1) + true_b

    # Sampled logits: [batch_size, num_sampled]
    # We replicate sampled noise labels for all examples in the batch
    # using the matmul.
    sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
    sampled_logits = tf.matmul(example_emb,
                               sampled_w,
                               transpose_b=True) + sampled_b_vec
    return true_logits, sampled_logits

  def nce_loss(self, true_logits, sampled_logits):
    """Build the graph for the NCE loss."""

    # cross-entropy(logits, labels)
    opts = self._options
    true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(true_logits), logits=true_logits)
    sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.zeros_like(sampled_logits), logits=sampled_logits)

    # NCE-loss is the sum of the true and noise (sampled words)
    # contributions, averaged over the batch.
    nce_loss_tensor = (tf.reduce_sum(true_xent) +
                       tf.reduce_sum(sampled_xent)) / opts.batch_size
    return nce_loss_tensor

  def optimize(self, loss):
    """Build the graph to optimize the loss function."""

    # Optimizer nodes.
    # Linear learning rate decay, floored at 0.0001 of the initial rate.
    opts = self._options
    words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
    lr = opts.learning_rate * tf.maximum(
        0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
    self._lr = lr
    optimizer = tf.train.GradientDescentOptimizer(lr)
    train = optimizer.minimize(loss,
                               global_step=self.global_step,
                               gate_gradients=optimizer.GATE_NONE)
    self._train = train

  def build_eval_graph(self):
    """Build the eval graph."""
    # Eval graph

    # Each analogy task is to predict the 4th word (d) given three
    # words: a, b, c. E.g., a=italy, b=rome, c=france, we should
    # predict d=paris.

    # The eval feeds three vectors of word ids for a, b, c, each of
    # which is of size N, where N is the number of analogies we want to
    # evaluate in one batch.
    analogy_a = tf.placeholder(dtype=tf.int32)  # [N]
    analogy_b = tf.placeholder(dtype=tf.int32)  # [N]
    analogy_c = tf.placeholder(dtype=tf.int32)  # [N]

    # Normalized word embeddings of shape [vocab_size, emb_dim].
    nemb = tf.nn.l2_normalize(self._emb, 1)

    # Each row of a_emb, b_emb, c_emb is a word's embedding vector.
    # They all have the shape [N, emb_dim]
    a_emb = tf.gather(nemb, analogy_a)  # a's embs
    b_emb = tf.gather(nemb, analogy_b)  # b's embs
    c_emb = tf.gather(nemb, analogy_c)  # c's embs

    # We expect that d's embedding vectors on the unit hyper-sphere is
    # near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
    target = c_emb + (b_emb - a_emb)

    # Compute cosine distance between each pair of target and vocab.
    # dist has shape [N, vocab_size].
    dist = tf.matmul(target, nemb, transpose_b=True)

    # For each question (row in dist), find the top 4 words.
    _, pred_idx = tf.nn.top_k(dist, 4)

    # Nodes for computing neighbors for a given word according to
    # their cosine distance.
    nearby_word = tf.placeholder(dtype=tf.int32)  # word id
    nearby_emb = tf.gather(nemb, nearby_word)
    nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
    nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
                                         min(1000, self._options.vocab_size))

    # Nodes in the construct graph which are used by training and
    # evaluation to run/feed/fetch.
    self._analogy_a = analogy_a
    self._analogy_b = analogy_b
    self._analogy_c = analogy_c
    self._analogy_pred_idx = pred_idx
    self._nearby_word = nearby_word
    self._nearby_val = nearby_val
    self._nearby_idx = nearby_idx

  def build_graph(self):
    """Build the graph for the full model."""
    opts = self._options
    # The training data. A text file. The custom op also reports vocabulary
    # statistics and produces the (examples, labels) skip-gram batches.
    (words, counts, words_per_epoch, self._epoch, self._words, examples,
     labels) = word2vec.skipgram_word2vec(filename=opts.train_data,
                                          batch_size=opts.batch_size,
                                          window_size=opts.window_size,
                                          min_count=opts.min_count,
                                          subsample=opts.subsample)
    (opts.vocab_words, opts.vocab_counts,
     opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
    opts.vocab_size = len(opts.vocab_words)
    print("Data file: ", opts.train_data)
    print("Vocab size: ", opts.vocab_size - 1, " + UNK")
    print("Words per epoch: ", opts.words_per_epoch)
    self._examples = examples
    self._labels = labels
    self._id2word = opts.vocab_words
    for i, w in enumerate(self._id2word):
      self._word2id[w] = i
    true_logits, sampled_logits = self.forward(examples, labels)
    loss = self.nce_loss(true_logits, sampled_logits)
    tf.summary.scalar("NCE loss", loss)
    self._loss = loss
    self.optimize(loss)

    # Properly initialize all variables.
    tf.global_variables_initializer().run()

    self.saver = tf.train.Saver()

  def save_vocab(self):
    """Save the vocabulary to a file so the model can be reloaded."""
    opts = self._options
    with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
      for i in xrange(opts.vocab_size):
        # NOTE(review): under Python 3, %-formatting these utf-8 bytes into a
        # str writes the repr (e.g. "b'word'") rather than the word itself;
        # this looks Python-2 oriented -- confirm the target interpreter
        # before changing.
        vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode("utf-8")
        f.write("%s %d\n" % (vocab_word,
                             opts.vocab_counts[i]))

  def _train_thread_body(self):
    # Worker loop: run training steps until the input pipeline reports that
    # a new epoch has started.
    initial_epoch, = self._session.run([self._epoch])
    while True:
      _, epoch = self._session.run([self._train, self._epoch])
      if epoch != initial_epoch:
        break

  def train(self):
    """Train the model."""
    opts = self._options

    initial_epoch, initial_words = self._session.run([self._epoch, self._words])

    summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(opts.save_path, self._session.graph)
    # Launch concurrent worker threads; the main thread only reports stats,
    # writes summaries and checkpoints.
    workers = []
    for _ in xrange(opts.concurrent_steps):
      t = threading.Thread(target=self._train_thread_body)
      t.start()
      workers.append(t)

    last_words, last_time, last_summary_time = initial_words, time.time(), 0
    last_checkpoint_time = 0
    while True:
      time.sleep(opts.statistics_interval)  # Reports our progress once a while.
      (epoch, step, loss, words, lr) = self._session.run(
          [self._epoch, self.global_step, self._loss, self._words, self._lr])
      now = time.time()
      last_words, last_time, rate = words, now, (words - last_words) / (
          now - last_time)
      print("Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\r" %
            (epoch, step, lr, loss, rate), end="")
      sys.stdout.flush()
      if now - last_summary_time > opts.summary_interval:
        summary_str = self._session.run(summary_op)
        summary_writer.add_summary(summary_str, step)
        last_summary_time = now
      if now - last_checkpoint_time > opts.checkpoint_interval:
        self.saver.save(self._session,
                        os.path.join(opts.save_path, "model.ckpt"),
                        global_step=step.astype(int))
        last_checkpoint_time = now
      if epoch != initial_epoch:
        break

    for t in workers:
      t.join()

    return epoch

  def _predict(self, analogy):
    """Predict the top 4 answers for analogy questions."""
    idx, = self._session.run([self._analogy_pred_idx], {
        self._analogy_a: analogy[:, 0],
        self._analogy_b: analogy[:, 1],
        self._analogy_c: analogy[:, 2]
    })
    return idx

  def eval(self):
    """Evaluate analogy questions and reports accuracy."""

    # How many questions we get right at precision@1.
    correct = 0

    try:
      total = self._analogy_questions.shape[0]
    except AttributeError as e:
      # read_analogies() must have been called first.
      raise AttributeError("Need to read analogy questions.")

    start = 0
    while start < total:
      limit = start + 2500  # evaluate in chunks to bound the feed size
      sub = self._analogy_questions[start:limit, :]
      idx = self._predict(sub)
      start = limit
      for question in xrange(sub.shape[0]):
        for j in xrange(4):
          if idx[question, j] == sub[question, 3]:
            # Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
            correct += 1
            break
          elif idx[question, j] in sub[question, :3]:
            # We need to skip words already in the question.
            continue
          else:
            # The correct label is not the precision@1
            break
    print()
    print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
                                              correct * 100.0 / total))

  def analogy(self, w0, w1, w2):
    """Predict word w3 as in w0:w1 vs w2:w3."""
    wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
    idx = self._predict(wid)
    for c in [self._id2word[i] for i in idx[0, :]]:
      if c not in [w0, w1, w2]:
        print(c)
        return
    print("unknown")

  def nearby(self, words, num=20):
    """Prints out nearby words given a list of words."""
    ids = np.array([self._word2id.get(x, 0) for x in words])
    vals, idx = self._session.run(
        [self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
    for i in xrange(len(words)):
      print("\n%s\n=====================================" % (words[i]))
      for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
        print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
  """Drop into an IPython session seeded with *local_ns* plus this module's globals."""
  # An interactive shell is useful for debugging/development.
  import IPython
  namespace = {}
  if local_ns:
    namespace.update(local_ns)
  # Globals are applied last, matching the original precedence on name clashes.
  namespace.update(globals())
  IPython.start_ipython(argv=[], user_ns=namespace)
def main(_):
  """Train a word2vec model."""
  required = (FLAGS.train_data, FLAGS.eval_data, FLAGS.save_path)
  if not all(required):
    print("--train_data --eval_data and --save_path must be specified.")
    sys.exit(1)
  opts = Options()
  with tf.Graph().as_default(), tf.Session() as session:
    with tf.device("/cpu:0"):
      model = Word2Vec(opts, session)
      model.read_analogies()  # Read analogy questions
    for _ in xrange(opts.epochs_to_train):
      model.train()  # Process one epoch
      model.eval()  # Eval analogies.
    # Perform a final save.
    model.saver.save(session,
                     os.path.join(opts.save_path, "model.ckpt"),
                     global_step=model.global_step)
    if FLAGS.interactive:
      # E.g.,
      # [0]: model.analogy(b'france', b'paris', b'russia')
      # [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
      _start_shell(locals())


if __name__ == "__main__":
  tf.app.run()
|
demo_neptusinterface.py | # Copyright (c) 2018, CNRS-LAAS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
if __name__ == "__main__":
    import threading

    import fire_rs.neptus_interface as nifc

    def give_me_a_plan():
        """Compute and return a UAV observation plan over a simulated fire."""
        import numpy as np
        import fire_rs.uav_planning as op
        from fire_rs.firemodel import propagation
        from fire_rs.geodata.geo_data import TimedPoint, GeoData
        from fire_rs.planning.planning import FireMapper, FlightConf, Planner, PlanningEnvironment, \
            UAVConf, Waypoint
        from fire_rs.planning.display import TrajectoryDisplayExtension, plot_plan_trajectories

        # Geographic environment (elevation, landcover, wind...)
        wind = (10., np.pi / 4)  # 10m/s = 36km/h
        area = ((478500.0, 483500.0), (6210000.0, 6215000.0))
        env = PlanningEnvironment(area, wind_speed=wind[0], wind_dir=wind[1],
                                  planning_elevation_mode='flat', flat_altitude=0)

        # Fire applied to the previous environment
        ignition_point = TimedPoint(area[0][0] + 2500.0, area[1][0] + 2100.0, 0)
        fire = propagation.propagate_from_points(env, ignition_point, 120 * 60)  # 120 minutes

        # Configure some flight
        base_wp_1 = Waypoint(area[0][1] - 150., area[1][1] - 100., 0., 0.)
        start_t = 60 * 60  # 60 minutes after the ignition
        uavconf = UAVConf.x8()
        uavconf.max_flight_time = 30 * 60
        fgconf_1 = FlightConf(uavconf, start_t, base_wp_1)

        # Write down the desired VNS configuration
        conf_vns = {
            "full": {
                "max_restarts": 5,
                "max_time": 10.0,
                "neighborhoods": [
                    {"name": "dubins-opt",
                     "max_trials": 100,
                     "generators": [
                         {"name": "MeanOrientationChangeGenerator"},
                         {"name": "RandomOrientationChangeGenerator"},
                         {"name": "FlipOrientationChangeGenerator"}]},
                    {"name": "one-insert",
                     "max_trials": 100,
                     "select_arbitrary_trajectory": True,
                     "select_arbitrary_position": False},
                ]
            }
        }
        conf = {
            'min_time': fgconf_1.start_time,
            'max_time': fgconf_1.start_time + fgconf_1.uav.max_flight_time,
            'save_every': 1,
            'save_improvements': True,
            'discrete_elevation_interval': 0,
            'vns': conf_vns['full']
        }
        conf['vns']['configuration_name'] = 'full'

        ####################################
        # 1st PLAN
        fire1 = fire.ignitions()
        pl = Planner(env, fire1, [fgconf_1], conf)
        pl.compute_plan()
        return pl.search_result.final_plan()

    f1 = lambda x: print(x)  # message callback: echo to stdout
    f2 = lambda x: None      # message callback: discard

    imccomm = nifc.IMCComm()
    gcs = None

    def gcs_run():
        """Construct the GCS (stored in the module-level `gcs`)."""
        global gcs
        gcs = nifc.GCS(imccomm, f1, f2)

    t_imc = threading.Thread(target=imccomm.run, daemon=True)
    t_gcs = threading.Thread(target=gcs_run, daemon=True)
    # BUG FIX: the original called Thread.run(), which executes the target in
    # the *current* thread (blocking forever on imccomm.run and never spawning
    # anything); start() launches the targets in real daemon threads.
    t_imc.start()
    t_gcs.start()
    # Wait for gcs_run to finish constructing the GCS so `gcs` is not None
    # when used below.
    t_gcs.join(timeout=10.0)
    print("IMCComm esta funcionando")

    a_plan = give_me_a_plan()

    # Try to load the plan onto the vehicle, with a couple of retries.
    k = nifc.GCSCommandOutcome.Unknown
    retries = 2
    while retries > 0 and k != nifc.GCSCommandOutcome.Success:
        k = gcs.load(a_plan, 0, a_plan.name(), "x8-02")
        if k != nifc.GCSCommandOutcome.Success:
            print("Load plan failed")
            print("Retrying")
            retries -= 1
        else:
            print("Plan loaded")

    # Start the (possibly not loaded) plan, as in the original demo.
    k = gcs.start("saop_" + a_plan.name(), "x8-02")
    if k != nifc.GCSCommandOutcome.Success:
        print("Start plan failed")
    else:
        print("Plan started")

    # Keep the process (and its daemon threads) alive until the user exits.
    while True:
        lll = input("e for exit")
        if lll == "e":
            break
|
radio_session.py | import time
import datetime
from datetime import datetime
import serial
import threading
import json
import traceback
import queue
import requests
import subprocess
import glob
import os
import pty
from multiprocessing import Process, Queue
from elasticsearch import Elasticsearch
from .data_consumers import Datastore, Logger
from .http_cmd import create_radio_session_endpoint
from tlm.oauth2 import *
from .uplink_timer import UplinkTimer
class RadioSession(object):
    '''
    Represents a connection session with a Flight Computer's Quake radio.

    This class is used by the simulation software and user command prompt to read and write to a
    flight computer's Quake radio. The connection is fundamentally stateless, since the state of the
    connection (connected or disconnected) is managed by Iridium.

    TODO this class needs to be thread-safe. Protect uplinks via a lock, and protect data sharing
    between the check_for_downlink and the read_state functions.
    '''

    def __init__(self, device_name, imei, uplink_console, port, send_queue_duration,
                 send_lockout_duration, simulation_run_dir, tlm_config):
        '''
        Initializes state session with the Quake radio.

        Args:
            device_name: human-readable name of the flight computer.
            imei: IMEI of the Quake radio; used for telemetry indices and uplink emails.
            uplink_console: console used to encode uplink packets (create_uplink/get_val).
            port: local TCP port for the HTTP command endpoint.
            send_queue_duration: seconds an uplink waits in the queue before being sent.
            send_lockout_duration: seconds before send time during which the timer can
                no longer be paused.
            simulation_run_dir: run directory (not used directly here).
            tlm_config: telemetry/webservice configuration dictionary.
        '''
        # Device connection
        self.device_name = device_name
        self.imei = imei
        self.port = port

        # Uplink timer: fires send_uplink() once send_queue_duration elapses.
        self.timer = UplinkTimer(send_queue_duration, self.send_uplink)

        # Radio session and the http endpoints communicate information about the state of the timer
        # by passing messages over the queue.
        q = Queue()
        self.check_queue_msgs = True
        self.check_queue_thread = threading.Thread(
            target=self.check_queue, args=(q,), name="Uplink queue check thread")
        self.check_queue_thread.start()

        # HTTP Endpoints
        self.flask_app = create_radio_session_endpoint(self, q)
        self.flask_app.config["uplink_console"] = uplink_console
        self.flask_app.config["imei"] = imei

        try:
            self.http_thread = Process(
                name=f"{self.device_name} HTTP Command Endpoint",
                target=self.flask_app.run, kwargs={"port": str(self.port)})
            self.http_thread.start()
            print(f"{self.device_name} HTTP command endpoint is running at http://localhost:{self.port}")
        except:
            print(f"Unable to start {self.device_name} HTTP command endpoint at http://localhost:{self.port}")
            traceback.print_exc()

        self.send_queue_duration = send_queue_duration
        self.send_lockout_duration = send_lockout_duration
        if send_lockout_duration > send_queue_duration:
            # TODO shift this logic down into write_state.
            print("Error: send_lockout_duration is greater than send_queue_duration.")

        # Uplink console
        self.uplink_console = uplink_console

        # Flask server connection
        self.flask_server = tlm_config["webservice"]["server"]
        self.flask_port = tlm_config["webservice"]["port"]

        # email
        self.username = tlm_config["email_username"]
        self.password = tlm_config["email_password"]

    def check_queue(self, msg_queue):
        '''
        Continuously reads and carries out requests from the HTTP endpoints.

        BUG FIX: the original blocked indefinitely on queue.get(), so
        disconnect() could hang forever joining this thread. We now poll with
        a timeout so the loop notices check_queue_msgs being set to False.
        '''
        while self.check_queue_msgs:
            try:
                # Poll so we can re-check the shutdown flag periodically.
                msg = msg_queue.get(timeout=1.0)
            except queue.Empty:
                continue

            if msg == "time":
                time_left = self.timer.time_left()
                msg_queue.put(str(time_left))
            elif msg == "pause":
                if not self.timer.is_alive():
                    msg_queue.put("Timer not running")
                elif self.timer.run_time() < self.send_queue_duration - self.send_lockout_duration:
                    # Still outside the lockout window; pausing is allowed.
                    if self.timer.pause():
                        msg_queue.put("Paused timer")
                    else:
                        msg_queue.put("Unable to pause timer")
                else:
                    msg_queue.put("Unable to pause timer")
            elif msg == "resume":
                if self.timer.is_alive():
                    msg_queue.put("Timer already running")
                elif self.timer.resume():
                    msg_queue.put("Resumed timer")
                else:
                    msg_queue.put("Unable to resume timer")
            elif msg == "view":
                if not os.path.exists("uplink.json"):
                    msg_queue.put("No queued uplink")
                else:
                    with open('uplink.json', 'r') as telem_file:
                        queued_uplink = json.load(telem_file)
                    msg_queue.put(queued_uplink)

    def read_state(self, field, timeout=None):
        '''
        Read state by posting a request for data to the Flask server.

        Returns the raw response body (text) of the statefield report query.
        '''
        headers = {
            'Accept': 'text/html',
        }
        payload = {
            "index": "statefield_report_" + str(self.imei),
            "field": str(field)
        }
        response = requests.get(
            'http://' + self.flask_server + ':' + str(self.flask_port) + '/search-es',
            params=payload, headers=headers)
        return response.text

    def write_multiple_states(self, fields, vals, timeout=None):
        '''
        Uplink multiple state variables. Return success of write.

        Reads from the most recent Iridium Report whether or
        not RadioSession is able to send uplinks.
        '''
        assert len(fields) == len(vals)

        if self.uplink_queued():
            # An uplink is already pending; don't queue another one.
            return False

        # Map each field to its converted value.
        updated_fields = {field: self.uplink_console.get_val(val)
                          for field, val in zip(fields, vals)}

        # Create a JSON file to hold the uplink
        with open('uplink.json', 'w') as telem_file:
            json.dump(updated_fields, telem_file)

        # Start the timer. Timer will send uplink once after waiting for the
        # configured send queue duration.
        t = threading.Thread(target=self.timer.start, name="Uplink timer thread")
        t.start()
        return True

    def write_state(self, field, val, timeout=None):
        '''
        Uplink one state variable. Return success of write.
        '''
        return self.write_multiple_states([field], [val], timeout)

    def uplink_queued(self):
        '''
        Check if an uplink is currently queued to be sent by Iridium
        (i.e. if the most recently sent uplink was confirmed to be
        received by the spacecraft). Can be used by ptest to determine
        whether or not to send an uplink autonomously.
        '''
        headers = {
            'Accept': 'text/html',
        }
        payload = {
            "index": "iridium_report_" + str(self.imei),
            "field": "send-uplinks"
        }

        # Only query the telemetry service when one is configured.
        tlm_service_active = self.flask_server != ""
        if tlm_service_active:
            response = requests.get(
                'http://' + self.flask_server + ':' + str(self.flask_port) + '/search-es',
                params=payload, headers=headers)

        if tlm_service_active and response.text.lower() == "true":
            # "send-uplinks" true means the channel is clear: nothing queued.
            return False
        return True

    def send_uplink(self):
        '''
        Encode the queued uplink.json into an SBD packet and email it to
        Iridium. Returns True on success; cleans up the files either way.
        '''
        if not os.path.exists("uplink.json"):
            return False

        # Extract the json telemetry data from the queued uplink json file
        with open("uplink.json", 'r') as uplink:
            queued_uplink = json.load(uplink)

        # Get an updated list of the field and values
        fields, vals = queued_uplink.keys(), queued_uplink.values()

        # Create an uplink packet
        success = self.uplink_console.create_uplink(fields, vals, "uplink.sbd") \
            and os.path.exists("uplink.sbd")

        if success:
            # Send the uplink to Iridium
            to = "data@sbd.iridium.com"
            sender = "pan.ssds.qlocate@gmail.com"
            subject = self.imei
            msgHtml = ""
            msgPlain = ""
            SendMessage(sender, to, subject, msgHtml, msgPlain, 'uplink.sbd')

            # Remove uplink files/cleanup
            os.remove("uplink.sbd")
            os.remove("uplink.json")
            return True
        else:
            os.remove("uplink.json")
            return False

    def disconnect(self):
        '''Quits the Quake connection, and stores message log and field telemetry to file.'''
        print(
            f' - Terminating console connection to and saving logging/telemetry data for radio connection to {self.device_name}.'
        )
        # check_queue polls its queue with a timeout, so this join returns
        # within about a second of the flag flipping.
        self.check_queue_msgs = False
        self.check_queue_thread.join()
        self.http_thread.terminate()
        self.http_thread.join()
|
main_vec.py | import sys
import json
# local
# sys.path.append('/Users/zixianma/Desktop/Sophomore/Summer/CURIS/PIC/multiagent-particle-envs')
# server
sys.path.append('/sailhome/zixianma/PIC/multiagent-particle-envs')
import argparse
import math
from collections import namedtuple
from itertools import count
import numpy as np
from eval import eval_model_q
import copy
import torch
from ddpg_vec import DDPG
from ddpg_vec_hetero import DDPGH
import random
from replay_memory import ReplayMemory, Transition
from utils import *
import os
import time
from utils import n_actions, copy_actor_policy
from ddpg_vec import hard_update
import torch.multiprocessing as mp
from multiprocessing import Queue
from multiprocessing.sharedctypes import Value
import sys
from torch.utils.tensorboard import SummaryWriter
# Command-line configuration for the PIC/DDPG trainer.
# NOTE(review): argparse's `type=bool` (used by --ou_noise, --param_noise and
# --benchmark) is a known pitfall: bool("False") is True, so any non-empty
# string enables the flag. Left unchanged to keep the CLI compatible; prefer
# action='store_true' or a str2bool converter when the interface can change.
parser = argparse.ArgumentParser(description='PyTorch REINFORCE example')
parser.add_argument('--scenario', required=True,
                    help='name of the environment to run')
parser.add_argument('--gamma', type=float, default=0.95, metavar='G',
                    help='discount factor for reward (default: 0.99)')
parser.add_argument('--tau', type=float, default=0.01, metavar='G',
                    help='discount factor for model (default: 0.001)')
parser.add_argument('--ou_noise', type=bool, default=True)
parser.add_argument('--param_noise', type=bool, default=False)
parser.add_argument('--train_noise', default=False, action='store_true')
parser.add_argument('--noise_scale', type=float, default=0.3, metavar='G',
                    help='initial noise scale (default: 0.3)')
parser.add_argument('--final_noise_scale', type=float, default=0.3, metavar='G',
                    help='final noise scale (default: 0.3)')
parser.add_argument('--exploration_end', type=int, default=60000, metavar='N',
                    help='number of episodes with noise (default: 100)')
parser.add_argument('--seed', type=int, default=9, metavar='N',
                    help='random seed (default: 4)')
parser.add_argument('--batch_size', type=int, default=1024, metavar='N',
                    help='batch size (default: 128)')
parser.add_argument('--num_steps', type=int, default=25, metavar='N',
                    help='max episode length (default: 1000)')
parser.add_argument('--num_episodes', type=int, default=100000, metavar='N',
                    help='number of episodes (default: 1000)')
parser.add_argument('--hidden_size', type=int, default=128, metavar='N',
                    help='number of episodes (default: 128)')
parser.add_argument('--updates_per_step', type=int, default=8, metavar='N',
                    help='model updates per simulator step (default: 5)')
parser.add_argument('--critic_updates_per_step', type=int, default=8, metavar='N',
                    help='model updates per simulator step (default: 5)')
parser.add_argument('--replay_size', type=int, default=1000000, metavar='N',
                    help='size of replay buffer (default: 1000000)')
parser.add_argument('--actor_lr', type=float, default=1e-4,
                    help='(default: 1e-4)')
parser.add_argument('--critic_lr', type=float, default=1e-3,
                    help='(default: 1e-3)')
parser.add_argument('--fixed_lr', default=False, action='store_true')
parser.add_argument('--num_eval_runs', type=int, default=1000, help='number of runs per evaluation (default: 5)')
parser.add_argument("--exp_name", type=str, help="name of the experiment")
# local
# parser.add_argument("--save_dir", type=str, default="./ckpt_plot",
#                     help="directory in which training state and model should be saved")
# server
parser.add_argument("--save_dir", type=str, default="/scr/zixianma/pic",
                    help="directory in which training state and model should be saved")
parser.add_argument('--static_env', default=False, action='store_true')
parser.add_argument('--critic_type', type=str, default='mlp', help="Supports [mlp, gcn_mean, gcn_max]")
parser.add_argument('--actor_type', type=str, default='mlp', help="Supports [mlp, gcn_max]")
parser.add_argument('--critic_dec_cen', default='cen')
parser.add_argument("--env_agent_ckpt", type=str, default='ckpt_plot/simple_tag_v5_al0a10_4/agents.ckpt')
parser.add_argument('--shuffle', default=None, type=str, help='None|shuffle|sort')
parser.add_argument('--episode_per_update', type=int, default=4, metavar='N',
                    help='max episode length (default: 1000)')
parser.add_argument('--episode_per_actor_update', type=int, default=4)
parser.add_argument('--episode_per_critic_update', type=int, default=4)
parser.add_argument('--steps_per_actor_update', type=int, default=100)
parser.add_argument('--steps_per_critic_update', type=int, default=100)
#parser.add_argument('--episodes_per_update', type=int, default=4)
parser.add_argument('--target_update_mode', default='soft', help='soft | hard | episodic')
parser.add_argument('--cuda', default=False, action='store_true')
parser.add_argument('--eval_freq', type=int, default=1000)
parser.add_argument('--benchmark', type=bool, default=True)
# alignment policy specific
# parser.add_argument('--extra_rew', type=float, default=0.0)
args = parser.parse_args()

# Derive a default experiment name from scenario/critic/update-mode/seed.
if args.exp_name is None:
    args.exp_name = args.scenario + '_' + args.critic_type + '_' + args.target_update_mode + '_hiddensize' \
                    + str(args.hidden_size) + '_' + str(args.seed)

# Echo the full configuration for the run log.
print("=================Arguments==================")
for k, v in args.__dict__.items():
    print('{}: {}'.format(k, v))
print("========================================")
# Environment, seeding, and learner construction.
torch.set_num_threads(1)
device = torch.device("cuda:0" if torch.cuda.is_available() and args.cuda else "cpu")

env = make_env(args.scenario, None, benchmark=args.benchmark)
n_agents = env.n
env.seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
num_adversaries = env.world.num_adversaries if hasattr(env.world, 'num_adversaries') else 0
# extra_rew = args.extra_rew
# NOTE(review): this rebinds the imported helper `n_actions` to its result,
# so the function is no longer callable afterwards. Works, but fragile.
n_actions = n_actions(env.action_space)
obs_dims = [env.observation_space[i].shape[0] for i in range(n_agents)]
obs_dims.insert(0, 0)

# Heterogeneous scenarios use the grouped learner (DDPGH); otherwise plain
# DDPG. `agent` trains on `device`; `eval_agent` is a CPU copy that is
# shipped to the evaluation process.
if 'hetero' in args.scenario:
    import multiagent.scenarios as scenarios
    groups = scenarios.load(args.scenario + ".py").Scenario().group
    agent = DDPGH(args.gamma, args.tau, args.hidden_size,
                  env.observation_space[0].shape[0], n_actions[0], n_agents, obs_dims, 0,
                  args.actor_lr, args.critic_lr,
                  args.fixed_lr, args.critic_type, args.train_noise, args.num_episodes,
                  args.num_steps, args.critic_dec_cen, args.target_update_mode, device, groups=groups)
    eval_agent = DDPGH(args.gamma, args.tau, args.hidden_size,
                       env.observation_space[0].shape[0], n_actions[0], n_agents, obs_dims, 0,
                       args.actor_lr, args.critic_lr,
                       args.fixed_lr, args.critic_type, args.train_noise, args.num_episodes,
                       args.num_steps, args.critic_dec_cen, args.target_update_mode, 'cpu', groups=groups)
else:
    agent = DDPG(args.gamma, args.tau, args.hidden_size,
                 env.observation_space[0].shape[0], n_actions[0], n_agents, obs_dims, 0,
                 args.actor_lr, args.critic_lr,
                 args.fixed_lr, args.critic_type, args.actor_type, args.train_noise, args.num_episodes,
                 args.num_steps, args.critic_dec_cen, args.target_update_mode, device)
    eval_agent = DDPG(args.gamma, args.tau, args.hidden_size,
                      env.observation_space[0].shape[0], n_actions[0], n_agents, obs_dims, 0,
                      args.actor_lr, args.critic_lr,
                      args.fixed_lr, args.critic_type, args.actor_type, args.train_noise, args.num_episodes,
                      args.num_steps, args.critic_dec_cen, args.target_update_mode, 'cpu')
memory = ReplayMemory(args.replay_size)

feat_dims = []
for i in range(n_agents):
    feat_dims.append(env.observation_space[i].shape[0])

# Find main agents index: the larger of the (at most two) groups of agents
# sharing the same observation dimensionality.
unique_dims = list(set(feat_dims))
agents0 = [i for i, feat_dim in enumerate(feat_dims) if feat_dim == unique_dims[0]]
if len(unique_dims) > 1:
    agents1 = [i for i, feat_dim in enumerate(feat_dims) if feat_dim == unique_dims[1]]
    main_agents = agents0 if len(agents0) >= len(agents1) else agents1
else:
    main_agents = agents0

# Bookkeeping for the training loop below.
rewards = []
benchmarks = []
total_numsteps = 0
updates = 0

exp_save_dir = os.path.join(args.save_dir, args.exp_name)
os.makedirs(exp_save_dir, exist_ok=True)
best_eval_reward, best_good_eval_reward, best_adversary_eval_reward = -1000000000, -1000000000, -1000000000
start_time = time.time()

# Save an initial checkpoint of the (untrained) policy.
copy_actor_policy(agent, eval_agent)
torch.save({'agents': eval_agent}, os.path.join(exp_save_dir, 'agents_best.ckpt'))

# log
with open(os.path.join(exp_save_dir, 'args.json'), 'w') as args_file:
    json.dump(args.__dict__, args_file)
writer = SummaryWriter(exp_save_dir)

# for mp test: evaluation runs in a separate process fed via test_q.
test_q = Queue()
done_training = Value('i', False)
p = mp.Process(target=eval_model_q, args=(test_q, done_training, args))
p.start()
# Main training loop: one iteration per episode.
for i_episode in range(args.num_episodes):
    obs_n = env.reset()
    episode_reward = 0
    agents_benchmark = [[] for _ in range(n_agents)]
    episode_step = 0
    agents_rew = [[] for _ in range(n_agents)]
    # NOTE(review): episode_benchmark is only initialized for the scenarios
    # below; any other scenario name would hit a NameError at episode end.
    if 'simple_tag' in args.scenario:
        episode_benchmark = [0 for _ in range(2)]
    elif 'simple_coop_push' in args.scenario or 'spread' in args.scenario:
        episode_benchmark = [0 for _ in range(3)]
    while True:
        # action_n_1 = [agent.select_action(torch.Tensor([obs]).to(device), action_noise=True, param_noise=False).squeeze().cpu().numpy() for obs in obs_n]
        # All agents' actions are computed in one batched forward pass.
        action_n = agent.select_action(torch.Tensor(obs_n).to(device), action_noise=True,
                                       param_noise=False).squeeze().cpu().numpy()
        next_obs_n, reward_n, done_n, info = env.step(action_n)
        benchmark_n = np.asarray(info['n'])
        total_numsteps += 1
        episode_step += 1
        terminal = (episode_step >= args.num_steps)

        # Flatten the joint transition into single tensors for the replay buffer.
        action = torch.Tensor(action_n).view(1, -1)
        mask = torch.Tensor([[not done for done in done_n]])
        next_x = torch.Tensor(np.concatenate(next_obs_n, axis=0)).view(1, -1)
        reward = torch.Tensor([reward_n])
        x = torch.Tensor(np.concatenate(obs_n, axis=0)).view(1, -1)
        memory.push(x, action, mask, next_x, reward)
        for i, r in enumerate(reward_n):
            agents_rew[i].append(r)
        episode_reward += np.sum(reward_n)

        # Accumulate per-scenario benchmark metrics.
        if "simple_tag" in args.scenario:
            # collisions for adversaries only
            episode_benchmark[0] += sum(benchmark_n[:num_adversaries, 0])
            # min distance for good agents only
            episode_benchmark[1] += sum(benchmark_n[num_adversaries:, 1])
        elif 'simple_coop_push' in args.scenario or 'spread' in args.scenario:
            for i in range(len(episode_benchmark)):
                episode_benchmark[i] += sum(benchmark_n[:, i])
        obs_n = next_obs_n
        n_update_iter = 5

        # Gradient updates once the buffer has enough samples.
        if len(memory) > args.batch_size:
            if total_numsteps % args.steps_per_actor_update == 0:
                for _ in range(args.updates_per_step):
                    # transitions = memory.sample(args.batch_size)
                    transitions, indice = memory.sample(args.batch_size)
                    batch = Transition(*zip(*transitions))
                    # batch = process_fn(batch, memory.memory, indice, extra_rew=extra_rew, num_agents=n_agents, num_adversaries=num_adversaries)
                    # NOTE(review): `i` here is the leftover index from the
                    # enumerate loops above, not a deliberately chosen agent
                    # id — confirm this argument is intentional.
                    policy_loss = agent.update_actor_parameters(batch, i, args.shuffle)
                    updates += 1
                writer.add_scalar('policy_loss/train', policy_loss, i_episode)
                print('episode {}, p loss {}, p_lr {}'.
                      format(i_episode, policy_loss, agent.actor_lr))
            if total_numsteps % args.steps_per_critic_update == 0:
                value_losses = []
                for _ in range(args.critic_updates_per_step):
                    # transitions = memory.sample(args.batch_size)
                    transitions, indice = memory.sample(args.batch_size)
                    batch = Transition(*zip(*transitions))
                    # batch = process_fn(batch, memory.memory, indice,extra_rew=extra_rew, num_agents=n_agents,num_adversaries=num_adversaries)
                    value_losses.append(agent.update_critic_parameters(batch, i, args.shuffle))
                    updates += 1
                value_loss = np.mean(value_losses)
                writer.add_scalar('value_loss/train', value_loss, i_episode)
                print('episode {}, q loss {}, q_lr {}'.
                      format(i_episode, value_loss, agent.critic_optim.param_groups[0]['lr']))
                if args.target_update_mode == 'episodic':
                    hard_update(agent.critic_target, agent.critic)

        if done_n[0] or terminal:
            print('train episode reward:', episode_reward)
            for i, b in enumerate(episode_benchmark):
                print('train episode benchmark %d: %.2f' % (i, b))
            episode_step = 0
            break
    if not args.fixed_lr:
        agent.adjust_lr(i_episode)

    # Per-episode TensorBoard logging.
    writer.add_scalar('reward/train', episode_reward, i_episode)
    if "simple_tag" in args.scenario:
        writer.add_scalar('collision/train', episode_benchmark[0], i_episode)
        writer.add_scalar('dist/train', episode_benchmark[1], i_episode)
    elif 'simple_coop_push' in args.scenario:
        writer.add_scalar('collision/train', episode_benchmark[0], i_episode)
        writer.add_scalar('avg_dist/train', episode_benchmark[1], i_episode)
        writer.add_scalar('occupied_target/train', episode_benchmark[2], i_episode)
    elif 'spread' in args.scenario:
        writer.add_scalar('collision/train', episode_benchmark[0], i_episode)
        writer.add_scalar('min_dist/train', episode_benchmark[1], i_episode)
        writer.add_scalar('occupied_target/train', episode_benchmark[2], i_episode)
    rewards.append(episode_reward)
    benchmarks.append(episode_benchmark)

    # if (i_episode + 1) % 1000 == 0 or ((i_episode + 1) >= args.num_episodes - 50 and (i_episode + 1) % 4 == 0):
    # Periodically hand the current policy to the evaluation process.
    if (i_episode + 1) % args.eval_freq == 0:
        # NOTE(review): value_loss/policy_loss are NameErrors here if no
        # gradient update happened before the first eval — confirm eval_freq
        # always exceeds the replay warm-up period.
        tr_log = {'num_adversary': num_adversaries,
                  'best_good_eval_reward': best_good_eval_reward,
                  'best_adversary_eval_reward': best_adversary_eval_reward,
                  'exp_save_dir': exp_save_dir, 'total_numsteps': total_numsteps,
                  'value_loss': value_loss, 'policy_loss': policy_loss,
                  'i_episode': i_episode, 'start_time': start_time}
        copy_actor_policy(agent, eval_agent)
        test_q.put([eval_agent, tr_log])

env.close()
time.sleep(5)
done_training.value = True
|
ProXPN.py | #!/usr/bin/env python2
import os, sys, subprocess, time, datetime
import multiprocessing as mp
# Placeholders filled in by startVPN() from the command-line selection.
CONFIG=''
ACCOUNT=''
# Directory holding the .ovpn profiles; AUTHFILE supplies credentials.
CONFDIR='<your/home/directory/>'
AUTHFILE=CONFDIR + 'auth.txt'
# NOTE(review): COMMAND ends with the directory path and is later
# string-concatenated with CONFIG, so CONFDIR must keep a trailing slash.
COMMAND='sudo /usr/sbin/openvpn --config %s' % (CONFDIR)
LOCKDIR='<your/home/directory/>/.log/'
LOCKFILE=LOCKDIR + 'openvpn.lock'
# Draws a horizontal rule; os.system returns the exit status, so LINE holds 0.
LINE=(os.system("printf '%*s\n' \"${COLUMNS:-$(tput cols)}\" '' | tr ' ' -"))
r=0
print("It is preferable not to travel with a dead man.\n\t--Henri Michaux")
# Make sure the lock and config directories exist before doing anything else.
if not os.path.exists(LOCKDIR):
    os.makedirs(LOCKDIR)
if not os.path.exists(CONFDIR):
    print("Configuration directory doesn't exist.")
    print("Creating '%s' now" % CONFDIR)
    os.makedirs(CONFDIR)
    print("You'll probably want to populate it from here:")
    print("http://proxpn.com/updater/locations.html")
    exit(0)
def EXTIP():
    """Return the current external IP address as reported by ipecho.net."""
    # Shell out to curl and capture its stdout.
    pipe = subprocess.Popen("curl -s http://ipecho.net/plain",
                            shell=True, stdout=subprocess.PIPE).stdout
    raw = pipe.read()
    return raw.decode()
def DATED():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    # strftime already yields a str, so no extra conversion is needed.
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def getIP():
    """Periodically print the external IP while the VPN lockfile exists, then exit."""
    retries = 2
    while retries > 0:
        try:
            # Keep reporting the external IP every 5s for as long as the
            # openvpn lockfile is present.
            while os.path.exists(LOCKFILE):
                CURSORUP = '\033[F'
                ERASELN = '\033[K'
                print(CURSORUP + ERASELN + DATED() + ": Current external IP: %s" % (EXTIP()))
                time.sleep(5)
        # NOTE(review): bare except hides the real failure (network, decode, ...).
        except:
            print("error getting IP.")
            retries = retries - 1
        # NOTE(review): this unconditional exit(0) means the retry loop runs
        # at most once; `retries` is effectively unused — confirm intent.
        exit(0)
def startVPN():
    """Pick an .ovpn profile from argv[1], take the lockfile, and run openvpn (Python 2)."""
    try:
        # No selection given: print the menu and fall through.
        # NOTE(review): the menu labels (Seattle - 3, Frankfurt - 4, ...) do
        # not match the profile mapping below (3 -> Los Angeles, 4 -> Zurich,
        # 5 -> Toronto) — confirm which list is current.
        if (len(sys.argv) != 2):
            print 'Australia - 1'
            print 'New York - 2'
            print 'Seattle - 3'
            print 'Frankfurt - 4'
            print 'Los Angeles - 5'
            print 'Miami - 6'
            print 'Amsterdam - 7'
            print 'Sweden - 8'
            print 'London - 9'
        else:
            ACCOUNT=sys.argv[1]
            # Single-instance guard: refuse to start if a lockfile exists,
            # otherwise write our PID into a fresh one.
            if os.path.isfile(LOCKFILE):
                L=open(LOCKFILE,'r')
                LPID=L.readline()
                print ("already running %s" % (LPID))
                sys.exit(0)
            else:
                os.getpid()
                L=open(LOCKFILE,'w')
                L.write('%s' % (os.getpid()))
                L.close()
            #print 'RESOLV.CONF:'
            #os.system("cat /etc/resolv.conf")
            # Map the menu number to an .ovpn profile filename.
            # 'Australia - 1'
            if (ACCOUNT == '1'):
                CONFIG='udp-au3_udp.ovpn'
            # 'New York - 2'
            elif (ACCOUNT == '2'):
                CONFIG='udp-bny2_udp.ovpn'
            # 'Los Angeles - 3'
            elif (ACCOUNT == '3'):
                CONFIG='udp-la3_udp.ovpn'
            # 'Zurich - 4'
            elif (ACCOUNT == '4'):
                CONFIG='udp-zch1_udp.ovpn'
            # 'Toronto - 5'
            elif (ACCOUNT == '5'):
                CONFIG='udp-tor1_udp.ovpn'
            # 'Miami - 6'
            elif (ACCOUNT == '6'):
                CONFIG='udp-mfl2_udp.ovpn'
            # 'Amsterdam - 7'
            elif (ACCOUNT == '7'):
                CONFIG='udp-ams1_udp.ovpn'
            # 'Sweden - 8'
            elif (ACCOUNT == '8'):
                CONFIG='udp-swe1_udp.ovpn'
            # 'London - 9'
            elif (ACCOUNT == '9'):
                CONFIG='udp-uk1_udp.ovpn'
            else:
                print 'no such VPN profile exists'
                sys.exit(0)
            print CONFIG
            print ('%s%s' % (COMMAND,CONFIG))
            # Swap in the default resolv.conf while the tunnel is up; openvpn
            # blocks here until it exits, then the original file is restored.
            os.system('sudo mv /etc/resolv.conf /etc/resolv.conf.old')
            os.system('sudo cp /etc/resolv.conf.DEFAULT /etc/resolv.conf;sudo chmod a+r /etc/resolv.conf')
            os.system('%s%s' % (COMMAND,CONFIG))
            os.system('sudo mv /etc/resolv.conf.old /etc/resolv.conf')
            time.sleep(1)
            print 'RESOLV.CONF:'
            os.system("cat /etc/resolv.conf")
            # Bounce the wireless radio and remove the lockfile, then show a
            # small countdown before exiting.
            secs = 5
            print ''
            os.system("rfkill block 2")
            time.sleep(2)
            os.system("rm -f %s" % LOCKFILE)
            os.system("rfkill unblock 2")
            while ( secs > 0 ):
                CURSORUP = '\033[F'
                ERASELN = '\033[K'
                discoball = str(secs)
                print(CURSORUP + ERASELN + discoball)
                time.sleep(1)
                secs=secs - 1
            print('Thanks for calling!')
    # NOTE(review): bare except swallows every error (including KeyboardInterrupt).
    except:
        print('OOPS!')
        print('Thanks for calling!')
        # The "reset" command may be needed to allow the shell to work properly -issues on Kali Linux & other Debian/Ubuntu distros.)
        #os.system('reset')
if __name__=='__main__':
    # A leftover lockfile means a previous run died: clean it up and do
    # nothing else this invocation (the VPN only starts when no lock exists).
    if (os.path.exists(LOCKFILE)):
        print("removing lockfile: %s" % LOCKFILE)
        os.system("rm -f %s" % LOCKFILE)
    else:
        # Run the IP monitor and the VPN starter as separate processes.
        p1 = mp.Process(target = getIP)
        p1.start()
        p2 = mp.Process(target = startVPN)
        p2.start()
    exit(0)
|
worker.py | """
Background job workers
"""
import logging
import traceback
from datetime import timedelta
from multiprocessing import Process
from sched import scheduler
from time import monotonic, sleep
import sentry_sdk
from google.protobuf import empty_pb2
from couchers.db import get_engine, session_scope
from couchers.jobs.definitions import JOBS, SCHEDULE
from couchers.jobs.enqueue import queue_job
from couchers.metrics import jobs_counter
from couchers.models import BackgroundJob, BackgroundJobState
logger = logging.getLogger(__name__)
def process_job():
    """
    Attempt to process one job from the job queue. Returns False if no job was found, True if a job was processed,
    regardless of failure/success.
    """
    logger.debug(f"Looking for a job")

    with session_scope(isolation_level="REPEATABLE READ") as session:
        # a combination of REPEATABLE READ and SELECT ... FOR UPDATE SKIP LOCKED makes sure that only one transaction
        # will modify the job at a time. SKIP UPDATE means that if the job is locked, then we ignore that row, it's
        # easier to use SKIP LOCKED vs NOWAIT in the ORM, with NOWAIT you get an ugly exception from deep inside
        # psycopg2 that's quite annoying to catch and deal with
        job = (
            session.query(BackgroundJob).filter(BackgroundJob.ready_for_retry).with_for_update(skip_locked=True).first()
        )

        if not job:
            logger.debug(f"No pending jobs")
            return False

        # we've got a lock for a job now, it's "pending" until we commit or the lock is gone
        logger.info(f"Job #{job.id} grabbed")
        job.try_count += 1

        # Look up the payload type and handler for this job type.
        message_type, func = JOBS[job.job_type]

        try:
            # NOTE(review): `ret` is unused; the handler is run for its side effects.
            ret = func(message_type.FromString(job.payload))
            job.state = BackgroundJobState.completed
            jobs_counter.labels(job.job_type.name, job.state.name, str(job.try_count), "").inc()
            logger.info(f"Job #{job.id} complete on try number {job.try_count}")
        except Exception as e:
            logger.exception(e)
            sentry_sdk.capture_exception(e)

            if job.try_count >= job.max_tries:
                # if we already tried max_tries times, it's permanently failed
                job.state = BackgroundJobState.failed
                logger.info(f"Job #{job.id} failed on try number {job.try_count}")
            else:
                job.state = BackgroundJobState.error
                # exponential backoff
                job.next_attempt_after += timedelta(seconds=15 * (2 ** job.try_count))
                logger.info(f"Job #{job.id} error on try number {job.try_count}, next try at {job.next_attempt_after}")
            # add some info for debugging
            jobs_counter.labels(job.job_type.name, job.state.name, str(job.try_count), type(e).__name__).inc()
            job.failure_info = traceback.format_exc()

        # exiting ctx manager commits and releases the row lock
        return True
def service_jobs():
    """
    Service jobs in an infinite loop
    """
    # multiprocessing uses fork() which in turn copies file descriptors, so the engine may have connections in its pool
    # that we don't want to reuse. This is the SQLALchemy-recommended way of clearing the connection pool in this thread
    get_engine().dispose()
    # This line is commented out because it is possible that this code runs twice
    # That leads to a crash because 8001 is already in use
    # We should fix that problem soon
    # start_http_server(8001, registry=jobs_process_registry)
    while True:
        # Query again immediately after a processed job; back off for a
        # second only when the queue came up empty.
        found_job = process_job()
        if not found_job:
            sleep(1)
def _run_job_and_schedule(sched, schedule_id):
    """Queue one scheduled job, then re-arm the scheduler for the next run."""
    job_type, frequency = SCHEDULE[schedule_id]
    logger.info(f"Processing job of type {job_type}")

    # Wake ourselves up again once `frequency` has elapsed.
    sched.enter(
        frequency.total_seconds(),
        1,
        _run_job_and_schedule,
        argument=(sched, schedule_id),
    )

    # Hand the job off to the background queue.
    queue_job(job_type, empty_pb2.Empty())
def run_scheduler():
    """
    Schedules jobs according to schedule in .definitions
    """
    # fork() copies file descriptors, so the engine may hold pooled
    # connections we must not reuse — dispose() clears the pool (the
    # SQLAlchemy-recommended approach).
    get_engine().dispose()

    sched = scheduler(monotonic, sleep)

    # Arm every scheduled job to fire immediately; each one re-schedules
    # itself afterwards via _run_job_and_schedule.
    for schedule_id, _ in enumerate(SCHEDULE):
        sched.enter(0, 1, _run_job_and_schedule, argument=(sched, schedule_id))

    sched.run()
def _run_forever(func):
    """Run `func` in an endless loop, logging crashes and backing off before restarting."""
    while True:
        logger.critical("Background worker starting")
        try:
            func()
        except Exception as e:
            logger.critical("Unhandled exception in background worker", exc_info=e)
            # cool off in case we have some programming error to not hammer the database
            sleep(60)
def start_jobs_scheduler():
    """Start the scheduler loop in a background process and return its Process handle."""
    # Renamed from `scheduler`, which shadowed the module-level
    # `from sched import scheduler` import used by run_scheduler.
    scheduler_process = Process(target=_run_forever, args=(run_scheduler,))
    scheduler_process.start()
    return scheduler_process
def start_jobs_worker():
    """Spawn the job-servicing loop in its own process and hand back the Process handle."""
    worker_process = Process(target=_run_forever, args=(service_jobs,))
    worker_process.start()
    return worker_process
|
main.py | import re
from flask import Flask, render_template, request, redirect, url_for, session
from flask_mysqldb import MySQL
import MySQLdb.cursors
from prometheus_flask_exporter import PrometheusMetrics
from pager import Pager
import os
import pymongo
import sys
import yaml
import logging
import pickle
from PIL import Image as im
import json
import threading
import time
# Make connection to MongoDB with photo data
with open("api_config.yaml") as yaml_file:
    # safe_load avoids arbitrary Python object construction; yaml.load
    # without an explicit Loader is deprecated and unsafe.
    config_dict = yaml.safe_load(yaml_file)["config_dictionary"]

# Clear out PNGs left over from a previous run (os.remove instead of
# shelling out to `rm`, which was slower and shell-injection-prone).
for i in os.listdir('static/images'):
    if 'png' in i:
        os.remove(f"static/images/{i}")

db = pymongo.MongoClient(
    'mongo1:27017',
    username=config_dict['mongo_user'],
    password=config_dict['mongo_password'],
    authSource=config_dict['mongo_database'],
    authMechanism='SCRAM-SHA-256')[config_dict['mongo_database']]
# Fail fast if the database is unreachable.
try:
    db.list_collections()
except Exception as e:
    logging.error(f"Problem with connection to MongoDB\n{e.args}")
    sys.exit(2)
collection_photos = db[config_dict['collection_photos']]
collection_labels = db[config_dict['collection_labels']]

# Per-user search history shared across request handlers.
user_history = {}

app = Flask(__name__)
metrics = PrometheusMetrics(app)
metrics.info('app_info', 'Application info', version='1.0.3')

# Change this to your secret key (can be anything, it's for extra protection)
app.secret_key = 'your secret key'

# Enter your database connection details below
app.config['MYSQL_HOST'] = 'mysql-db'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = 'toor'
app.config['MYSQL_DB'] = 'users'

# Intialize MySQL
mysql = MySQL(app)
# http://localhost:5000/pythonlogin/ - this will be the login page, we need to use both GET and POST requests
@app.route('/', methods=['GET', 'POST'])
@app.route('/pythonlogin/', methods=['GET', 'POST'])
def login():
    """Render the login form and authenticate submitted credentials against MySQL."""
    # Output message if something goes wrong...
    msg = ''
    # Check if "username" and "password" POST requests exist (user submitted form)
    if request.method == 'POST' and 'username' in request.form and 'password' in request.form:
        # Create variables for easy access
        username = request.form['username']
        password = request.form['password']
        # Check if account exists using MySQL
        # SECURITY NOTE(review): passwords are stored and compared in plain
        # text. The query itself is parameterized (safe from SQL injection),
        # but credentials should be hashed — confirm against the schema
        # before changing.
        cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
        cursor.execute('SELECT * FROM accounts WHERE username = %s AND password = %s', (username, password,))
        # Fetch one record and return result
        account = cursor.fetchone()
        # If account exists in accounts table in out database
        if account:
            # Create session data, we can access this data in other routes
            session['loggedin'] = True
            session['id'] = account['id']
            session['username'] = account['username']
            # Redirect to home page
            return redirect(url_for('home'))
        else:
            # Account doesnt exist or username/password incorrect
            msg = 'Incorrect username/password!'
    # Show the login form with message (if any)
    return render_template('index.html', msg=msg)
# http://localhost:5000/python/logout - this will be the logout page
@app.route('/pythonlogin/logout')
def logout():
    """Clear the user's session data and send them back to the login page."""
    for session_key in ('loggedin', 'id', 'username'):
        session.pop(session_key, None)
    return redirect(url_for('login'))
# http://localhost:5000/pythinlogin/register - this will be the registration page, we need to use both GET and POST
# requests
@app.route('/pythonlogin/register', methods=['GET', 'POST'])
def register():
    """Render the registration form and create a new account after validation."""
    # Output message if something goes wrong...
    msg = ''
    # Check if "username", "password" and "email" POST requests exist (user submitted form)
    if request.method == 'POST' and 'username' in request.form and 'password' in request.form and 'email' in request.form:
        # Create variables for easy access
        username = request.form['username']
        password = request.form['password']
        email = request.form['email']
        # Check if account exists using MySQL
        cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
        cursor.execute('SELECT * FROM accounts WHERE username = %s', (username,))
        account = cursor.fetchone()
        # If account exists show error and validation checks
        if account:
            msg = 'Account already exists!'
        elif not re.match(r'[^@]+@[^@]+\.[^@]+', email):
            msg = 'Invalid email address!'
        elif not re.match(r'^[A-Za-z0-9]+$', username):
            msg = 'Username must contain only characters and numbers!'
        elif not username or not password or not email:
            msg = 'Please fill out the form!'
        else:
            # Account doesnt exists and the form data is valid, now insert new account into accounts table
            # SECURITY NOTE(review): the password is inserted in plain text;
            # it should be hashed before storage — confirm against login().
            cursor.execute('INSERT INTO accounts VALUES (NULL, %s, %s, %s)', (username, password, email,))
            mysql.connection.commit()
            msg = 'You have successfully registered!'
    elif request.method == 'POST':
        # Form is empty... (no POST data)
        msg = 'Please fill out the form!'
    # Show registration form with message (if any)
    return render_template('register.html', msg=msg)
# http://localhost:5000/pythinlogin/home - this will be the home page, only accessible for loggedin users
@app.route('/pythonlogin/home')
def home():
    """Render the home page for authenticated users; bounce others to login."""
    # Guard clause: anonymous visitors go back to the login form.
    if 'loggedin' not in session:
        return redirect(url_for('login'))
    return render_template('home.html', username=session['username'])
# http://localhost:5000/pythonlogin/profile - profile page, only accessible for logged-in users
@app.route('/pythonlogin/profile')
def profile():
    """Show the logged-in user's account details, else bounce to login."""
    if 'loggedin' not in session:
        # User is not logged in - redirect to the login page
        return redirect(url_for('login'))
    # Fetch the full account row so the template can display it
    cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
    cursor.execute('SELECT * FROM accounts WHERE id = %s', (session['id'],))
    return render_template('profile.html', account=cursor.fetchone())
@app.route('/pythonlogin/find_by_tag/<index>')
def browser(index):
    """Show the image at *index* in the current user's search history.

    Next/previous links wrap around with modulo so browsing never runs off
    either end. Redirects to login when unauthenticated and to home when
    there are no results for this user.
    """
    # Guard: every other page requires a session; without it session['id'] raises
    if 'loggedin' not in session:
        return redirect(url_for('login'))
    # We need the user's name to look up their history
    cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
    cursor.execute('SELECT * FROM accounts WHERE id = %s', (session['id'],))
    account = cursor.fetchone()
    user = account['username']
    # .get avoids a KeyError when no search has been run for this user yet
    history = user_history.get(user)
    # Case: no pictures found
    if not history:
        return redirect(url_for('home'))
    # Wrap out-of-range indices instead of raising IndexError
    index = int(index) % len(history)
    photo_data = history[index]
    # Render a browser with the image id, its label info and wrap-around links
    return render_template("browser.html", data=photo_data[0], all_info=photo_data[1],
                           next_pic=f"{(index + 1) % len(history)}",
                           previous_pic=f"{(index - 1) % len(history)}")
# Function that selects only the photo entries matching the requested labels
def get_info(founded, labels):
    """Return the entries of founded["labels"] that share at least one tag
    with *labels*.

    Each entry's tags start at position 4 (the first four items are other
    metadata), so only entry[4:] is compared against the label set.
    """
    wanted = set(labels)
    # Keep an entry when any of its tags (elements 4 onward) matches a label
    return [entry for entry in founded["labels"] if wanted & set(entry[4:])]
# Function that saves the matching images and records them in the user's history
def get_photos(labels, found, user):
    """For up to 20 matches, decode the stored photo, convert BGR->RGB,
    write it to static/images/<id>.png and append [id, matching label info]
    to the user's browsing history.

    Runs on a worker thread started by goto(); it only appends to
    user_history[user], which goto() initialises beforehand.
    """
    for match in found[:20]:
        # Load the pickled photo array from the photo collection.
        # NOTE(security): pickle.loads on stored blobs is only safe as long
        # as the database contents are trusted.
        raw = collection_photos.find_one({"id": match['id']})['photo']
        photo = im.fromarray(pickle.loads(raw))
        # Stored channel order is BGR; reorder to RGB before saving
        b, g, r = photo.split()
        photo = im.merge("RGB", (r, g, b))
        photo.save(f'static/images/{match["id"]}.png')
        # Record which elements of this photo matched the requested labels
        info = get_info(match, labels)
        user_history[user].append([match["id"], info])
# http://localhost/goto - initialize the acquisition of photos and redirect
# to /pythonlogin/find_by_tag/0
@app.route('/goto', methods=['POST', 'GET'])
def goto():
    """Kick off a background download of photos matching the submitted tags.

    Builds lower-case and Capitalized variants of every comma-separated tag,
    queries the label collection, starts a worker thread that saves the
    matching images, then redirects to the first result after a short grace
    period so the first image exists when the page loads.
    """
    # Guard: session['id'] below requires an authenticated user
    if 'loggedin' not in session:
        return redirect(url_for('login'))
    # We need the user's name to key their history
    cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
    cursor.execute('SELECT * FROM accounts WHERE id = %s', (session['id'],))
    account = cursor.fetchone()
    user = account['username']
    user_history[user] = []
    text = request.form['index']
    # Collect all matching information from the database. Skip empty
    # fragments (e.g. "a,,b" or a trailing comma) which would otherwise
    # crash the x[0] capitalisation step with an IndexError.
    labels = [x.strip().lower() for x in text.split(',') if x.strip()]
    big_labels = [x[0].upper() + x[1:] for x in labels]
    labels = list(set(labels + big_labels))
    found = [*collection_labels.find({'labels': {"$elemMatch": {"$elemMatch": {"$in": labels}}}})]
    # Start the thread which saves the matching images to disk
    try:
        t = threading.Thread(target=get_photos, args=(labels, found, user))
        t.start()
    except RuntimeError:  # Thread.start() failure; narrowed from a bare except
        logging.error("Unsuccessful initialization of downloading photos")
    # Give the worker a head start before showing the first result
    time.sleep(2)
    # Redirect to first matching element
    return redirect('/pythonlogin/find_by_tag/0')
# http://localhost:5000/add_stream - register a new stream URL for the current user
@app.route('/add_stream', methods=['POST', 'GET'])
def add_stream():
    """Store a stream URL in app_settings for the logged-in user.

    Duplicate URLs are silently rejected; in every case the user is sent
    back to their profile page.
    """
    # Guard: session['id'] below requires an authenticated user
    if 'loggedin' not in session:
        return redirect(url_for('login'))
    # We need the user's name to record ownership of the URL
    cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
    cursor.execute('SELECT * FROM accounts WHERE id = %s', (session['id'],))
    account = cursor.fetchone()
    user = account['username']
    text = request.form['index'].strip()
    # Refuse duplicates: find_one avoids materialising the whole cursor
    # (replaces leftover debug prints around a list-built check)
    if db["app_settings"].find_one({"url": text}) is not None:
        return redirect('/pythonlogin/profile')
    record = {"url": text, "user": user, "inserted": int(time.time())}
    try:
        db["app_settings"].insert_one(record)
        # Lazy %-style args instead of eager f-strings in logging calls
        logging.info("%s has added url %s to db", user, text)
    except Exception:  # narrowed from a bare except; log and fall through
        logging.error("Unsuccessful insertion of %s for user %s", text, user)
    return redirect('/pythonlogin/profile')
if __name__ == '__main__':
    # Bind the development server to this machine's hostname on the HTTP port
    with open('/etc/hostname', 'r') as hostfile:
        host = hostfile.read().strip()
    app.run(host=host, port=80)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.