source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
client.py | import json
import socket
import time
from threading import Thread
import pygame
class Client(object):
    """UDP client for the bomberman-style game.

    One daemon thread receives authoritative state from the server, one
    daemon thread renders it, and the main thread forwards keyboard input.
    """

    def __init__(self, server_path, name):
        """`server_path` is "host:port"; `name` identifies this player."""
        # Server-pushed state per player:
        # {name: {'x': int, 'y': int, 'bombs': [{'x': .., 'y': ..}, ...]}}
        self.players_props = {}
        self.name = name
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.ip, self.port = server_path.split(':')

    def _send_to_server(self, data):
        """Fire-and-forget one UDP datagram (bytes) to the server."""
        self.socket.sendto(data, (self.ip, int(self.port)))

    def _handle_client_events(self):
        """Wait for one pygame event and forward movement/bomb actions.

        Returns False when the window was closed, True otherwise.
        """
        event = pygame.event.wait()
        if event.type == pygame.QUIT:
            return False
        if event.type == pygame.KEYDOWN:
            # Symbolic key constants replace the old hard-coded codes
            # (273, 274, 275, 276, 32), which were pygame 1.x values and
            # are wrong under pygame 2.
            key_actions = {
                pygame.K_UP: 'up',
                pygame.K_DOWN: 'down',
                pygame.K_RIGHT: 'right',
                pygame.K_LEFT: 'left',
                pygame.K_SPACE: 'deploy_bomb',
            }
            action = key_actions.get(event.key)
            if action is not None:
                payload = {'name': self.name, 'action': action}
                self._send_to_server(json.dumps(payload).encode('utf-8'))
        return True

    def _bomb_cycle(self, name):
        """After a 3s fuse, ask the server to explode `name`'s bomb."""
        time.sleep(3)
        payload = {'name': name, 'action': 'explode_bomb'}
        self._send_to_server(json.dumps(payload).encode('utf-8'))

    def _update_player(self, decoded_data):
        """Merge one server state message into `players_props`.

        A growing bomb list starts a fuse-timer thread; a shrinking one
        pops the oldest locally-known bomb.
        """
        player_name = decoded_data['name']
        server_x = decoded_data['x']
        server_y = decoded_data['y']
        bombs = decoded_data['bombs']
        if player_name not in self.players_props:
            self.players_props[player_name] = {'bombs': []}
        props = self.players_props[player_name]
        props['x'] = server_x
        props['y'] = server_y
        if len(bombs) > len(props['bombs']):
            props['bombs'] = bombs
            # daemon=True: a pending fuse must not keep the process alive.
            bomb_cycle = Thread(target=self._bomb_cycle, args=(player_name,))
            bomb_cycle.daemon = True
            bomb_cycle.start()
        elif len(bombs) < len(props['bombs']):
            try:
                props['bombs'].pop(0)
            except IndexError:
                pass  # list already empty; nothing to remove

    def _rcv_from_server(self):
        """Receive-and-apply loop; runs forever on a daemon thread."""
        while True:
            data, _ = self.socket.recvfrom(4096)
            decoded_data = json.loads(data)
            print(decoded_data)
            self._update_player(decoded_data)

    def _draw_map(self, window):
        """Paint the green floor with a black 40px wall border (15x15 grid)."""
        window.fill((0, 255, 0, 255))
        surface = pygame.Surface((40, 40))
        for i in range(15):
            window.blit(surface, (i * 40, 0))
            window.blit(surface, (0, i * 40))
            window.blit(surface, (14 * 40, i * 40))
            window.blit(surface, (i * 40, 14 * 40))

    def _draw(self, window):
        """Render loop; runs forever on a daemon thread."""
        while True:
            self._draw_map(window)
            for player_name in self.players_props:
                pos_x = self.players_props[player_name]['x']
                pos_y = self.players_props[player_name]['y']
                bombs = self.players_props[player_name]['bombs']
                pygame.draw.circle(
                    window,
                    (255, 255, 255, 255),
                    (pos_x, pos_y),
                    20)
                for bomb in bombs:
                    pygame.draw.circle(
                        window,
                        (0, 0, 0),
                        (bomb['x'], bomb['y']),
                        20)
            pygame.display.update()

    def run(self):
        """Open the window, start worker threads, and pump input events."""
        pygame.init()
        window = pygame.display.set_mode((600, 600))
        # Daemon threads: the original non-daemon threads kept the process
        # alive forever after the window was closed.
        rcv = Thread(target=self._rcv_from_server)
        rcv.daemon = True
        rcv.start()
        draw = Thread(target=self._draw, args=(window,))
        draw.daemon = True
        draw.start()
        while True:
            if not self._handle_client_events():
                break
        print(self.players_props)
        pygame.quit()
|
executor_service.py | # Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A service wrapper around an executor that makes it accessible over gRPC."""
import asyncio
import functools
import queue
import sys
import threading
import traceback
from typing import Iterable
import uuid
import weakref
from absl import logging
import grpc
from tensorflow_federated.proto.v0 import executor_pb2
from tensorflow_federated.proto.v0 import executor_pb2_grpc
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.common_libs import tracing
from tensorflow_federated.python.core.impl.executors import executor_factory
from tensorflow_federated.python.core.impl.executors import executor_serialization
def _set_invalid_arg_err(context: grpc.ServicerContext, err):
  """Logs the current traceback and fails `context` with INVALID_ARGUMENT."""
  logging.error(traceback.format_exc())
  context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
  context.set_details(str(err))
def _set_unknown_err(context: grpc.ServicerContext, err):
  """Logs the current traceback and fails `context` with UNKNOWN."""
  logging.error(traceback.format_exc())
  context.set_code(grpc.StatusCode.UNKNOWN)
  context.set_details(str(err))
class ExecutorService(executor_pb2_grpc.ExecutorServicer):
  """A wrapper around a target executor that makes it into a gRPC service."""

  def __init__(self, ex_factory: executor_factory.ExecutorFactory, *args,
               **kwargs):
    """Wraps `ex_factory`; a concrete executor is built per SetCardinalities."""
    py_typecheck.check_type(ex_factory, executor_factory.ExecutorFactory)
    super().__init__(*args, **kwargs)
    self._ex_factory = ex_factory
    # Set by SetCardinalities; None until then (see the `executor` property).
    self._executor = None
    # Protects `self._values`, which is accessed from gRPC handler threads.
    self._lock = threading.Lock()
    # The keys in this dictionary are value ids (the same as what we return
    # in the gRPC responses), and the values are `concurrent.futures.Future`
    # instances (this may, and probably will change as we flesh out the rest
    # of this implementation).
    self._values = {}

    def run_loop(loop):
      loop.run_forever()
      loop.close()

    # All executor coroutines run on this private event loop, which lives on
    # a dedicated daemon thread; requests are marshalled onto it
    # thread-safely via `_run_coro_threadsafe_with_tracing`.
    self._event_loop = asyncio.new_event_loop()
    self._event_loop.set_task_factory(
        tracing.propagate_trace_context_task_factory)
    self._thread = threading.Thread(
        target=functools.partial(run_loop, self._event_loop), daemon=True)
    self._thread.start()

    def finalize(loop, thread):
      loop.call_soon_threadsafe(loop.stop)
      thread.join()

    # Stop the loop and join its thread when the service is garbage
    # collected; `weakref.finalize` does not keep `self` alive.
    weakref.finalize(self, finalize, self._event_loop, self._thread)

  def _run_coro_threadsafe_with_tracing(self, coro):
    """Runs `coro` on `self._event_loop` inside the current trace spans."""
    with tracing.with_trace_context_from_rpc():
      return asyncio.run_coroutine_threadsafe(
          tracing.wrap_coroutine_in_current_trace_context(coro),
          self._event_loop)

  @property
  def executor(self):
    """The configured executor; raises until SetCardinalities has run."""
    if self._executor is None:
      raise RuntimeError('The executor service has not yet been configured '
                         'with cardinalities and cannot execute any '
                         'concrete requests.')
    return self._executor

  async def _HandleRequest(
      self,
      req: executor_pb2.ExecuteRequest,
      context: grpc.ServicerContext,
      response_queue: queue.Queue,
  ):
    """Dispatches one streamed request and enqueues its response."""
    try:
      which = req.WhichOneof('request')
      logging.debug('Received request of type %s, seq_no %s', which,
                    req.sequence_number)
      if not which:
        raise RuntimeError('Must set a request type')
      if which == 'create_value':
        response = executor_pb2.ExecuteResponse(
            create_value=self.CreateValue(req.create_value, context))
      elif which == 'create_call':
        response = executor_pb2.ExecuteResponse(
            create_call=self.CreateCall(req.create_call, context))
      elif which == 'create_struct':
        response = executor_pb2.ExecuteResponse(
            create_struct=self.CreateStruct(req.create_struct, context))
      elif which == 'create_selection':
        response = executor_pb2.ExecuteResponse(
            create_selection=self.CreateSelection(req.create_selection,
                                                  context))
      elif which == 'compute':
        response = executor_pb2.ExecuteResponse(
            compute=await self._Compute(req.compute, context))
      elif which == 'dispose':
        response = executor_pb2.ExecuteResponse(
            dispose=self.Dispose(req.dispose, context))
      elif which == 'set_cardinalities':
        response = executor_pb2.ExecuteResponse(
            set_cardinalities=self.SetCardinalities(req.set_cardinalities,
                                                    context))
      else:
        raise RuntimeError('Unknown request type')
      # Echo the sequence number so the client can pair request/response.
      response.sequence_number = req.sequence_number
      response_queue.put_nowait(response)
    except Exception as err:  # pylint:disable=broad-except
      _set_unknown_err(context, err)
      # Still enqueue an (empty) response so the stream count stays in step.
      response = executor_pb2.ExecuteResponse()
      response_queue.put_nowait(response)

  def Execute(
      self,
      request_iter: Iterable[executor_pb2.ExecuteRequest],
      context: grpc.ServicerContext,
  ) -> Iterable[executor_pb2.ExecuteResponse]:
    """Yields responses to streaming requests."""
    logging.debug('Bidi Execute stream created')
    # Responses and end-of-stream sentinel objects flow through this queue
    # from the event loop / request thread back to this generator.
    response_queue = queue.Queue()

    class RequestIterFinished:
      """Marker object indicating how many requests were received."""

      def __init__(self, n_reqs):
        self._n_reqs = n_reqs

      def get_n_reqs(self):
        return self._n_reqs

    class RequestIterBroken:
      """Marker object indicating breakage in the request iterator."""
      pass

    def request_thread_fn():
      # Drains the request iterator, scheduling one handler coroutine per
      # request on the service's event loop.
      n_reqs = 0
      try:
        for req in request_iter:
          asyncio.run_coroutine_threadsafe(
              self._HandleRequest(req, context, response_queue),
              self._event_loop)
          n_reqs += 1
        response_queue.put_nowait(RequestIterFinished(n_reqs))
      except Exception as e:  # pylint: disable=broad-except
        logging.exception(
            'Exception %s caught in request thread; closing stream.', e)
        response_queue.put_nowait(RequestIterBroken())
        return

    threading.Thread(target=request_thread_fn, daemon=True).start()
    # This generator is finished when the request iterator is finished and we
    # have yielded a response for every request.
    n_responses = 0
    target_responses = sys.maxsize
    while n_responses < target_responses:
      response = response_queue.get()
      if isinstance(response, executor_pb2.ExecuteResponse):
        n_responses += 1
        logging.debug('Returning response of type %s with sequence no. %s',
                      response.WhichOneof('response'), response.sequence_number)
        yield response
      elif isinstance(response, RequestIterFinished):
        # Now the exact number of responses still owed is known.
        target_responses = response.get_n_reqs()
      elif isinstance(response, RequestIterBroken):
        # If the request iterator is broken, we want to break out of the loop
        # without waiting for the remainder of the responses; the executors can
        # be in an arbitrary state at this point.
        break
      else:
        raise ValueError('Illegal response object: {}'.format(response))
    logging.debug('Closing bidi Execute stream')

  def SetCardinalities(
      self,
      request: executor_pb2.SetCardinalitiesRequest,
      context: grpc.ServicerContext,
  ) -> executor_pb2.SetCardinalitiesResponse:
    """Sets the cardinality for the executor service."""
    py_typecheck.check_type(request, executor_pb2.SetCardinalitiesRequest)
    try:
      cardinalities_dict = executor_serialization.deserialize_cardinalities(
          request.cardinalities)
      self._executor = self._ex_factory.create_executor(cardinalities_dict)
      return executor_pb2.SetCardinalitiesResponse()
    except (ValueError, TypeError) as err:
      _set_invalid_arg_err(context, err)
      return executor_pb2.SetCardinalitiesResponse()

  def ClearExecutor(
      self,
      request: executor_pb2.ClearExecutorRequest,
      context: grpc.ServicerContext,
  ) -> executor_pb2.ClearExecutorResponse:
    """Clears the service Executor-related state."""
    py_typecheck.check_type(request, executor_pb2.ClearExecutorRequest)
    self._executor = None
    self._ex_factory.clean_up_executors()
    return executor_pb2.ClearExecutorResponse()

  def CreateValue(
      self,
      request: executor_pb2.CreateValueRequest,
      context: grpc.ServicerContext,
  ) -> executor_pb2.CreateValueResponse:
    """Creates a value embedded in the executor."""
    py_typecheck.check_type(request, executor_pb2.CreateValueRequest)
    try:
      with tracing.span('ExecutorService.CreateValue', 'deserialize_value'):
        value, value_type = (
            executor_serialization.deserialize_value(request.value))
      value_id = str(uuid.uuid4())
      coro = self.executor.create_value(value, value_type)
      # Scheduled, not awaited: the returned id refers to a pending future.
      future_val = self._run_coro_threadsafe_with_tracing(coro)
      with self._lock:
        self._values[value_id] = future_val
      return executor_pb2.CreateValueResponse(
          value_ref=executor_pb2.ValueRef(id=value_id))
    except (ValueError, TypeError) as err:
      _set_invalid_arg_err(context, err)
      return executor_pb2.CreateValueResponse()

  def CreateCall(
      self,
      request: executor_pb2.CreateCallRequest,
      context: grpc.ServicerContext,
  ) -> executor_pb2.CreateCallResponse:
    """Creates a call embedded in the executor."""
    py_typecheck.check_type(request, executor_pb2.CreateCallRequest)
    try:
      function_id = str(request.function_ref.id)
      argument_id = str(request.argument_ref.id)
      with self._lock:
        function_val = self._values[function_id]
        # An empty argument id means a zero-argument call.
        argument_val = self._values[argument_id] if argument_id else None

      async def _processing():
        function = await asyncio.wrap_future(function_val)
        argument = await asyncio.wrap_future(
            argument_val) if argument_val is not None else None
        return await self.executor.create_call(function, argument)

      coro = _processing()
      result_fut = self._run_coro_threadsafe_with_tracing(coro)
      result_id = str(uuid.uuid4())
      with self._lock:
        self._values[result_id] = result_fut
      return executor_pb2.CreateCallResponse(
          value_ref=executor_pb2.ValueRef(id=result_id))
    except (ValueError, TypeError) as err:
      _set_invalid_arg_err(context, err)
      return executor_pb2.CreateCallResponse()

  def CreateStruct(
      self,
      request: executor_pb2.CreateStructRequest,
      context: grpc.ServicerContext,
  ) -> executor_pb2.CreateStructResponse:
    """Creates a struct embedded in the executor."""
    py_typecheck.check_type(request, executor_pb2.CreateStructRequest)
    try:
      with self._lock:
        elem_futures = [self._values[e.value_ref.id] for e in request.element]
      elem_names = [
          str(elem.name) if elem.name else None for elem in request.element
      ]

      async def _processing():
        # Await all element futures concurrently, preserving order.
        elem_values = await asyncio.gather(
            *[asyncio.wrap_future(v) for v in elem_futures])
        elements = list(zip(elem_names, elem_values))
        struct = structure.Struct(elements)
        return await self.executor.create_struct(struct)

      result_fut = self._run_coro_threadsafe_with_tracing(_processing())
      result_id = str(uuid.uuid4())
      with self._lock:
        self._values[result_id] = result_fut
      return executor_pb2.CreateStructResponse(
          value_ref=executor_pb2.ValueRef(id=result_id))
    except (ValueError, TypeError) as err:
      _set_invalid_arg_err(context, err)
      return executor_pb2.CreateStructResponse()

  def CreateSelection(
      self,
      request: executor_pb2.CreateSelectionRequest,
      context: grpc.ServicerContext,
  ) -> executor_pb2.CreateSelectionResponse:
    """Creates a selection embedded in the executor."""
    py_typecheck.check_type(request, executor_pb2.CreateSelectionRequest)
    try:
      with self._lock:
        source_fut = self._values[request.source_ref.id]

      async def _processing():
        source = await asyncio.wrap_future(source_fut)
        # Selection is by name or by index, depending on the oneof set.
        which_selection = request.WhichOneof('selection')
        if which_selection == 'name':
          coro = self.executor.create_selection(source, name=request.name)
        else:
          coro = self.executor.create_selection(source, index=request.index)
        return await coro

      result_fut = self._run_coro_threadsafe_with_tracing(_processing())
      result_id = str(uuid.uuid4())
      with self._lock:
        self._values[result_id] = result_fut
      return executor_pb2.CreateSelectionResponse(
          value_ref=executor_pb2.ValueRef(id=result_id))
    except (ValueError, TypeError) as err:
      _set_invalid_arg_err(context, err)
      return executor_pb2.CreateSelectionResponse()

  def Compute(
      self,
      request: executor_pb2.ComputeRequest,
      context: grpc.ServicerContext,
  ) -> executor_pb2.ComputeResponse:
    """Computes a value embedded in the executor."""
    # Blocking wrapper over the async implementation below.
    return self._run_coro_threadsafe_with_tracing(
        self._Compute(request, context)).result()

  async def _Compute(
      self,
      request: executor_pb2.ComputeRequest,
      context: grpc.ServicerContext,
  ) -> executor_pb2.ComputeResponse:
    """Asynchronous implementation of `Compute`."""
    py_typecheck.check_type(request, executor_pb2.ComputeRequest)
    try:
      value_id = str(request.value_ref.id)
      with self._lock:
        future_val = asyncio.wrap_future(self._values[value_id])
      val = await future_val
      result_val = await val.compute()
      val_type = val.type_signature
      value_proto, _ = executor_serialization.serialize_value(
          result_val, val_type)
      return executor_pb2.ComputeResponse(value=value_proto)
    except (ValueError, TypeError) as err:
      _set_invalid_arg_err(context, err)
      return executor_pb2.ComputeResponse()

  def Dispose(
      self,
      request: executor_pb2.DisposeRequest,
      context: grpc.ServicerContext,
  ) -> executor_pb2.DisposeResponse:
    """Disposes of a value, making it no longer available for future calls."""
    py_typecheck.check_type(request, executor_pb2.DisposeRequest)
    try:
      with self._lock:
        for value_ref in request.value_ref:
          del self._values[value_ref.id]
    except KeyError as err:
      _set_invalid_arg_err(context, err)
    return executor_pb2.DisposeResponse()
|
server.py | #!/usr/bin/env python3
import binascii
import threading
from time import *
import socketserver
from string import hexdigits
from Crypto.Util.number import getPrime, inverse, bytes_to_long, long_to_bytes
# User-facing greeting printed on connect; the "Blind" remark hints at the
# RSA blinding attack this challenge is built around.
banner = """
Welcome to my supreme signing server!
Send me a signed command, I will verify and do it for you, I will also sign your commands, but don't tinker too much with them though!
I'm not Blind, I can see through your cunning ruse, sometimes!
"""
# Path to the flag file read by Service.get_flag() at connect time.
FLAG_FILE = "flag.txt"
class RSA:
    """2048-bit textbook RSA used by the signing service.

    Messages are hex strings. `sign` refuses any message whose hex decodes
    to a plaintext starting with "peek"; `verify` is plain modular
    exponentiation with the public exponent.
    """

    def __init__(self):
        self.e = 0x10001
        p, q = getPrime(1024), getPrime(1024)
        self.n = p * q
        phi = (p - 1) * (q - 1)
        self.d = inverse(self.e, phi)

    def get_public_key(self):
        """Return the public key as an (e, n) tuple."""
        return (self.e, self.n)

    def sign(self, msg):
        """Sign a hex-encoded message; -1 if it encodes a 'peek' command."""
        forbidden_prefix = binascii.hexlify("peek".encode()).decode()
        if msg.startswith(forbidden_prefix):
            return -1
        as_int = bytes_to_long(binascii.unhexlify(msg.encode()))
        return pow(as_int, self.d, self.n)

    def verify(self, msg):
        """Recover (sig ** e) mod n from a hex-encoded signature."""
        as_int = bytes_to_long(binascii.unhexlify(msg.encode()))
        return pow(as_int, self.e, self.n)
class Service(socketserver.BaseRequestHandler):
    """Per-connection handler for the signing server.

    Offers a sign/verify menu over a fresh RSA key per connection, and
    reveals the flag only for a valid signature of "peek flag".
    """

    # handle() will always run first
    def handle(self):
        self.get_flag()
        rsa = RSA()
        self.send(banner)
        while True:
            choice = self.receive("1. Sign\n2. Verify\nYour choice: ").decode()
            if choice == "1":
                cmd = self.receive("Command to sign: ").decode()
                if not self.assure_hex(cmd):
                    self.send("Please send a hex string!\n")
                    continue
                signed_msg = rsa.sign(cmd)
                if signed_msg != -1:
                    self.send("Message signed successfully!\n" + self.num_to_hex_str(signed_msg))
                else:
                    self.send("Ah ah, don't tinker with the commands!")
            elif choice == "2":
                cmd = self.receive("Command to verify: ").decode()
                if not self.assure_hex(cmd):
                    self.send("Please send a hex string!\n")
                    continue
                verified_cmd = rsa.verify(cmd)
                verified_cmd = long_to_bytes(verified_cmd)
                try:
                    # could be jibberish ¯\_(ツ)_/¯
                    verified_cmd = verified_cmd.decode()
                    if verified_cmd == "peek flag":
                        self.send("Here is the flag!\n" + self.flag)
                        break
                    elif verified_cmd == "get pubkey":
                        self.send("Here is the public key!\n" + str(rsa.get_public_key()) + "\n")
                    else:
                        self.send("Command executed!")
                        break
                # Narrowed from a bare `except:` — only the decode of
                # arbitrary recovered bytes is expected to fail here.
                except UnicodeDecodeError:
                    self.send("There's something wrong with your command!")
                    break
            else:
                break

    def num_to_hex_str(self, num):
        """Big integer -> hex string (no 0x prefix)."""
        return binascii.hexlify(long_to_bytes(num)).decode()

    def hex_str_to_num(self, string):
        """Hex string -> big integer."""
        return bytes_to_long(binascii.unhexlify(string.encode()))

    def assure_hex(self, string):
        """True iff `string` is decodable hex: hex digits AND even length.

        The even-length requirement is new: odd-length input previously
        passed this check and then crashed `binascii.unhexlify`, killing
        the handler thread.
        """
        return len(string) % 2 == 0 and all(c in hexdigits for c in string)

    def get_flag(self):
        # Cache the flag on the handler so handle() can serve it.
        with open(FLAG_FILE, "r") as f:
            self.flag = f.read()

    def send(self, string, newline=True):
        """Send str or bytes to the peer, appending a newline by default."""
        if type(string) is str:
            string = string.encode("utf-8")
        if newline:
            string = string + b"\n"
        self.request.sendall(string)

    def receive(self, prompt=": "):
        """Prompt the peer and return their stripped reply (bytes)."""
        self.send(prompt, newline=False)
        return self.request.recv(1000).strip()
class ThreadedService(
    socketserver.ThreadingMixIn,
    socketserver.TCPServer,
):
    """Threaded TCP server; one handler thread per connection.

    `socketserver.DatagramRequestHandler` was removed from the bases: it is
    a UDP *request handler*, not a server mixin, and had no business in a
    TCP server's MRO.
    """
    pass
def main():
    """Bind the signing service and idle forever in the main thread."""
    port = 20314
    host = "103.245.249.107"
    service = Service
    # Must be set on the class (i.e. before construction): TCPServer
    # binds the socket inside __init__, so assigning the flag on the
    # instance afterwards — as the old code did — had no effect.
    ThreadedService.allow_reuse_address = True
    server = ThreadedService((host, port), service)
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.daemon = True
    server_thread.start()
    print("Server started on " + str(server.server_address) + "!")
    # Now let the main thread just wait...
    while True:
        sleep(10)


if __name__ == "__main__":
    main()
deribit_interface.py | # AUTOGENERATED! DO NOT EDIT! File to edit: 00_deribit_interface.ipynb (unless otherwise specified).
# Public API of this notebook-generated module.
__all__ = ['thread_decor', 'Deribit']
# Cell
from websocket import create_connection
import websocket
from datetime import datetime
import json
import threading
# Cell
def thread_decor(my_func):
    """Decorator: run the wrapped callable on a new background thread.

    The wrapper now returns the started `Thread` so callers can join it
    (previously it returned None, so this is backward compatible), and
    `functools.wraps` preserves the target's name and docstring.
    """
    import functools  # local import: the module header does not import it

    @functools.wraps(my_func)
    def wrapper(*args, **kwargs):
        my_thread = threading.Thread(target=my_func, args=args, kwargs=kwargs)
        my_thread.start()
        return my_thread
    return wrapper
# Cell
class Deribit:
    """Minimal JSON-RPC-over-websocket client for the Deribit API v2."""

    def __init__(self, test, only_public=False, client_ID=False, client_secret=False):
        """Connect and (unless public-only) authenticate.

        test: use the test.deribit.com sandbox when truthy.
        only_public: skip authentication; ignored when credentials are given.
        """
        # Always record the endpoint: previously it was skipped on the
        # public-only path, which broke start_orderbook_update there.
        self.WSS_url = 'wss://test.deribit.com/ws/api/v2' if test else 'wss://deribit.com/ws/api/v2'
        if client_ID or client_secret:
            only_public = False
        if only_public:
            # NOTE(review): no websocket is opened in this mode, so only the
            # subscription helper works; _sender-based calls will fail.
            self.logwritter(msg='WARNING! Only public methods available!')
            return
        self._auth(client_ID, client_secret, self.WSS_url)

    def logwritter(self, msg, filename='log.log'):
        """Print `msg` and append it, timestamped, to `filename`."""
        out = datetime.now().strftime("\n[%Y%m%d,%H:%M:%S] ") + str(msg)
        print(out)
        # Context manager closes the handle; the old code leaked it.
        with open(filename, 'a') as log_file:
            log_file.write(out)

    def _auth(self, client_ID, client_secret, WSS_url):
        """Open the websocket and authenticate with client credentials."""
        try:
            self._WSS = create_connection(WSS_url)
            msg = {"jsonrpc": "2.0",
                   "id": 9929,
                   "method": "public/auth",
                   "params": {
                       "grant_type": "client_credentials",
                       "client_id": client_ID,
                       "client_secret": client_secret
                   }
                   }
            result = self._sender(msg)
            # Log success only after the request actually went out
            # (previously 'Auth OK' was logged before sending anything).
            self.logwritter('Auth OK\n############')
            return result
        except Exception as er:
            self.logwritter('auth error:' + str(er))

    def _sender(self, msg):
        """Send one JSON-RPC message and return the 'result' field."""
        out = None  # defined up-front so the except path cannot NameError
        try:
            self.logwritter(msg['method'])
            self._WSS.send(json.dumps(msg))
            out = json.loads(self._WSS.recv())
            return out['result']
        except Exception as er:
            self.logwritter(str(out))
            self.logwritter('_sender error: ' + str(er))

    def make_order(self,
                   side,
                   instrument_name,
                   amount,
                   type_ord='limit',
                   label=None,
                   price=None,
                   time_in_force='good_til_cancelled',
                   max_show=False,
                   post_only=True,
                   reduce_only=False,
                   trigger=None):
        """Place a private/buy or private/sell order; returns the result."""
        if side not in ('buy', 'sell'):
            self.logwritter('ERROR: incorect param "side" for make_order')
            return
        msg = {
            "jsonrpc": "2.0",
            "id": 5275,
            "method": "private/" + str(side),
            "params": {
                "instrument_name": instrument_name,
                "amount": amount,
                "type": type_ord,
                "label": label,
                "price": price,
                "time_in_force": time_in_force,
                "post_only": post_only,
                "reduce_only": reduce_only,
                "trigger": trigger}
        }
        # max_show is only forwarded when explicitly requested.
        if max_show:
            msg['params']['max_show'] = max_show
        return self._sender(msg)

    def edit_order(self,
                   order_id,
                   amount,
                   price,
                   post_only=True,
                   stop_price=None):
        """Edit amount/price of an open order via private/edit."""
        msg = {
            "jsonrpc": "2.0",
            "id": 5275,
            "method": "private/edit",
            "params": {
                "order_id": order_id,
                "amount": amount,
                "price": price,
                # post_only / stop_price intentionally not forwarded yet.
            }
        }
        return self._sender(msg)

    def cancel_order(self, order_id):
        """Cancel one order via private/cancel."""
        msg = {
            "jsonrpc": "2.0",
            "id": 5275,
            "method": "private/cancel",
            "params": {
                "order_id": order_id}
        }
        return self._sender(msg)

    def get_order_state(self, order_id):
        """Fetch the state of one order via private/get_order_state."""
        msg = {
            "jsonrpc": "2.0",
            "id": 5275,
            "method": "private/get_order_state",
            "params": {
                "order_id": order_id}
        }
        return self._sender(msg)

    def get_order_book(self, instrument_name, depth=1):
        """Fetch the public order book for an instrument."""
        msg = {
            "jsonrpc": "2.0",
            "id": None,
            "method": "public/get_order_book",
            "params": {
                "instrument_name": instrument_name,
                "depth": depth}
        }
        return self._sender(msg)

    def get_chart_data(self, instrument_name,
                       start_timestamp, end_timestamp,
                       resolution):
        """Fetch TradingView-style OHLC data for an instrument."""
        msg = {
            "jsonrpc": "2.0",
            "id": None,
            "method": "public/get_tradingview_chart_data",
            "params": {
                "instrument_name": instrument_name,
                "start_timestamp": start_timestamp,
                'end_timestamp': end_timestamp,
                'resolution': resolution}
        }
        return self._sender(msg)

    @thread_decor
    def start_orderbook_update(self, instrument_name='BTC-PERPETUAL'):
        """Subscribe to the 100ms order-book channel on a background thread.

        The latest snapshot is kept in `self.Orderbook`.
        """
        # The first message on the channel is the subscription ack, not
        # order-book data — skip it.
        self.__first = True
        msg = {"jsonrpc": "2.0",
               "method": "public/subscribe",
               "id": 42,
               "params": {
                   "channels": ["book." + str(instrument_name) + ".none.1.100ms"]}
               }
        try:
            def on_message(ws, message):
                print(message)
                if self.__first:
                    self.__first = False
                    return
                self.Orderbook = json.loads(message)['params']['data']

            def on_error(ws, error):
                self.logwritter('Orderbook updater error: ' + str(error))

            def on_close(ws):
                self.logwritter('Orderbook updater error:closed connect')

            def on_open(ws):
                ws.send(json.dumps(msg))

            websocket.enableTrace(True)
            ws = websocket.WebSocketApp(self.WSS_url,
                                        on_message=on_message,
                                        on_error=on_error,
                                        on_close=on_close)
            ws.on_open = on_open
            ws.run_forever()
        except Exception as er:
            self.logwritter('Orderbook updater error: ' + str(er))
MainScreen.py | __author__ = 'heroico'
import os
import logging
import weakref
import tkFileDialog
import tkMessageBox
import Tkinter
from subprocess import call
from threading import Thread
import metax.MainScreenView as MainScreenView
import metax.MetaXcanUITask as MetaXcanUITask
import metax.Exceptions as Exceptions
import MetaXcan
import metax.Formats as Formats
import metax.GWASUtilities as GWASUtilities
import metax.ZScoreCalculation as ZScoreCalculation
import metax.Normalization as Normalization
from Utilities import TS
from Utilities import checkSubdirectorySanity
# Default input/output locations; MainScreen.__init__ uses each of these
# only when the corresponding file/folder actually exists.
GWAS_INPUT_DEFAULT = "data/GWAS"
BETA_FOLDER = "intermediate/beta"
COVARIANCE_FILE = "data/covariance.DGN-WB_0.5.txt.gz"
WEIGHT_DB_PATH = "data/DGN-WB_0.5.db"
OUTPUT_PATH = "results/zscores.csv"
# Label used by the UI for skippable options.
SKIP = "Skip"
class MainScreen(object):
def __init__(self, root, app):
self.running = False
self.root = root
self.app = weakref.ref(app)
self.process = None
self.monitor = None
self.poll = None
self.cwd = os.getcwd()
self.gwas_folder = "."
if os.path.exists(GWAS_INPUT_DEFAULT):
self.gwas_folder = GWAS_INPUT_DEFAULT
self.beta_folder = BETA_FOLDER
self.weight_db_path = "."
if os.path.exists(WEIGHT_DB_PATH):
self.weight_db_path = WEIGHT_DB_PATH
self.output_path = OUTPUT_PATH
# self.compressed_on = Tkinter.BooleanVar()
# self.compressed_on.set(False)
self.gwas_file_pattern_value = Tkinter.StringVar()
self.separator_value = Tkinter.StringVar()
self.snp_value = Tkinter.StringVar()
self.snp_value.set("SNP")
self.non_effect_allele_value = Tkinter.StringVar()
self.non_effect_allele_value.set("A2")
self.effect_allele_value = Tkinter.StringVar()
self.effect_allele_value.set("A1")
self.or_on = Tkinter.BooleanVar()
self.or_on.set(False)
self.or_value = Tkinter.StringVar()
self.or_value.set("OR")
self.beta_on = Tkinter.BooleanVar()
self.beta_on.set(False)
self.beta_value = Tkinter.StringVar()
self.beta_value.set("BETA")
self.beta_sign_on = Tkinter.BooleanVar()
self.beta_sign_on.set(False)
self.beta_sign_value = Tkinter.StringVar()
self.beta_sign_value.set("BETA_SIGN")
self.beta_z_on = Tkinter.BooleanVar()
self.beta_z_on.set(False)
self.beta_z_value = Tkinter.StringVar()
self.beta_z_value.set("Z")
self.p_on = Tkinter.BooleanVar()
self.p_on.set(False)
self.p_value = Tkinter.StringVar()
self.p_value.set("P")
self.se_on = Tkinter.BooleanVar()
self.se_on.set(False)
self.se_value = Tkinter.StringVar()
self.se_value.set("SE")
self.frequency_on = Tkinter.BooleanVar()
self.frequency_on.set(True)
self.frequency_value = Tkinter.StringVar()
self.frequency_value.set("")
self.covariance_file = "."
if os.path.exists(COVARIANCE_FILE):
self.covariance_file = COVARIANCE_FILE
self.view = MainScreenView.MainScreenView(root, self)
###########
# UI events
###########
def exclusiveToggle(self, boolean_var, entry, exclusive_vars, exclusive_entries):
self.toggleOption(boolean_var, entry)
on = boolean_var.get()
for i,v in enumerate(exclusive_vars):
e = exclusive_entries[i]
if on:
self.setOption(v, e, False)
    def setOption(self, boolean_var, entry, state):
        """Force an option's checkbox to `state` and sync its entry widget."""
        boolean_var.set(state)
        self.toggleOption(boolean_var, entry)
def toggleOption(self, boolean_var, entry):
on = boolean_var.get()
entry.config(state=(Tkinter.NORMAL if on else Tkinter.DISABLED))
def gwasFolderButtonPressed(self):
rel = self.folderButtonPressed(self.view.gwas_button, True)
if rel:
self.gwas_folder = rel
def betaFolderButtonPressed(self):
rel = self.folderButtonPressed(self.view.beta_folder_button, False)
if rel:
self.beta_folder = rel
def weightDBButtonPressed(self):
rel = self.openFileButtonPressed(self.view.weight_db_button)
if rel:
self.weight_db_path = rel
def outputButtonPressed(self):
rel = self.saveFileButtonPressed(self.view.output_button)
if rel:
self.output_path = rel
def covarianceFileButtonPressed(self):
rel = self.openFileButtonPressed(self.view.covariance_file_button)
if rel:
self.covariance_file = rel
    def quitButtonPressed(self):
        """Exit the Tk main loop."""
        self.root.quit()
def actionButtonPressed(self):
if not self.running:
self.run()
else:
self.interrupt()
def folderButtonPressed(self, button, must_exist):
dir = tkFileDialog.askdirectory(mustexist=must_exist)
if len(dir) == 0:
return None
rel = os.path.relpath(dir, self.cwd)
button.config(text=rel)
return rel
def saveFileButtonPressed(self, button):
file = tkFileDialog.asksaveasfilename()
if len(file) == 0:
return None
rel = os.path.relpath(file, self.cwd)
button.config(text=rel)
return rel
def openFileButtonPressed(self, button):
file = tkFileDialog.askopenfilename()
if len(file) == 0:
return None
rel = os.path.relpath(file, self.cwd)
button.config(text=rel)
return rel
#task
def run(self):
should_run = self.checkClearToRun()
if not should_run:
return
self.running = True
self.view.runMode()
self.launchTask()
def checkClearToRun(self):
# sane = checkSubdirectorySanity(self.cwd, self.beta_folder)
# if not sane:
# tkMessageBox.showwarning( "Beta Folder", "Beta folder cannot be current directory, or ancestor.")
# return False
# clear, message, clean_up_beta, clean_up_results = self.checkGenerated()
# if not clear:
# answer = tkMessageBox.askokcancel(TS("Warning!"), message, icon=tkMessageBox.ERROR)
# if answer:
# self.cleanUpGenerated(clean_up_beta, clean_up_results)
# else:
# return False
return True
def checkGenerated(self):
clear = True
beta_empty = True
if os.path.exists(self.beta_folder):
beta_empty = len(os.listdir(self.beta_folder)) == 0
results_clear = not os.path.exists(self.output_path)
clean_up_beta = False
clean_up_results = False
message = None
if not beta_empty and not results_clear:
clear = False
clean_up_beta = True
clean_up_results = True
message = TS("Path for results already exists, and intermediate folder is already occupied."
"Should we delete them to move forward (potentially dangerous), or do you wish to cancel?")
elif not results_clear:
clear = False
clean_up_results = True
message = TS("Path for results already exists. Do you wish to cancel, or should we delete it to move forward (potentially dangerous)?")
elif not beta_empty:
clear = False
clean_up_beta = True
message = TS("Intermediate folder is already occupied. Should we delete it to move forward (potentially dangerous), or do you wish to cancel? (dangerous)")
return clear, message, clean_up_beta, clean_up_results
def cleanUpGenerated(self, clean_up_beta, clean_up_results):
if clean_up_beta:
command = "rm -rf " + self.beta_folder
call(command.split())
if clean_up_results:
command = "rm -rf " + self.output_path
call(command.split())
def interrupt(self):
logging.info("interrupting")
if self.process and self.process.is_alive():
self.process.terminate()
while(True):
code = self.process.exitcode
if code != None and not self.process.is_alive():
break
self.stop()
def stop(self):
if not self.running:
logging.info("short stop")
return
logging.info("stopping")
self.running = False
self.view.configureMode()
if self.monitor:
self.monitor.give_up = True
self.monitor = None
self.process = None
def launchTask(self):
self.view.text.delete("1.0", Tkinter.END)
self.running = True
work = self.buildWork()
process, monitor = MetaXcanUITask.runWork(work, self.taskCallback)
self.process = process
self.monitor = monitor
poll = Thread(target=self.pollProcess)
self.poll = poll
poll.daemon = True
poll.start()
    def taskCallback(self, queue):
        """Append the next chunk of task output to the console and scroll."""
        self.view.text.insert(Tkinter.END, queue.get())
        self.view.text.see(Tkinter.END)
def pollProcess(self):
code = None
while(self.running):
if self.process and self.process.is_alive():
code = self.process.exitcode
if code:
break
else:
break
logging.info("poll stopping %s", str(code) if code else "-")
self.stop()
# MetaxCan process
def buildWork(self):
class MetaXcanArgs(object):
def __init__(self, source):
self.verbosity = "10"
self.weight_db_path = source.weight_db_path
self.gwas_folder = source.gwas_folder
self.snp_column = source.snp_value.get()
self.non_effect_allele_column = source.non_effect_allele_value.get()
self.effect_allele_column = source.effect_allele_value.get()
self.or_column = source.or_value.get() if source.or_on.get() else None
self.beta_column = source.beta_value.get() if source.beta_on.get() else None
self.beta_sign_column = source.beta_sign_value.get() if source.beta_sign_on.get() else None
self.zscore_column = source.beta_z_value.get() if source.beta_z_on.get() else None
self.frequency_column = source.frequency_value.get() if source.frequency_on.get() else None
self.se_column = source.se_value.get() if source.se_on.get() else None
self.pvalue_column = source.p_value.get() if source.p_on.get() else None
self.gwas_file_pattern = source.gwas_file_pattern_value.get() if len(source.gwas_file_pattern_value.get()) else None
self.separator = source.separator_value.get() if len(source.separator_value.get()) else None
# TODO: implement this
self.skip_until_header = None
self.throw = True
self.verbosity = "10"
self.keep_ens_version = False
self.model_db_path = source.weight_db_path
self.output_file = source.output_path
self.covariance = source.covariance_file
self.throw = True
self.overwrite = True
beta_args = MetaXcanArgs(source=self)
#TODO: maybe connect stuff together so that M03 passes stuff to M04
class MetaxcanWorkWrapper(object):
def __init__(self,args):
self.args = args
def run(self):
try:
MetaXcan.run(self.args)
except Exceptions.ReportableException, e:
logging.error(e.msg)
except Exception as e:
logging.info("Exception when running task: %s", str(e))
finally:
pass
class WorkWrapper(object):
def __init__(self, works):
self.works = works
def run(self):
try:
#delete as we go so that stuff gets deleted
self.works = list(reversed(self.works))
for i in xrange(len(self.works) - 1, -1, -1):
work = self.works[i]
work.run()
del self.works[i]
except Exceptions.ReportableException, e:
logging.error(e.msg)
except Exception as e:
logging.info("Exception when running task: %s", str(e))
finally:
pass
work = MetaxcanWorkWrapper(beta_args)
return work |
test_crud.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
System tests for Create, Update, Delete. (CRUD)
"""
import datetime
import functools
import operator
import os
import random
import threading
import zlib
try:
from unittest import mock
except ImportError:
import mock
import pytest
import test_utils.system
from google.cloud import ndb
from google.cloud.ndb import _cache
from google.cloud.ndb import global_cache as global_cache_module
from tests.system import KIND, eventually
USE_REDIS_CACHE = bool(os.environ.get("REDIS_CACHE_URL"))
def _equals(n):
return functools.partial(operator.eq, n)
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
def test_retrieve_entity_with_caching(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
client_context.set_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
assert key.get() is entity
def test_retrieve_entity_with_global_cache(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
global_cache = global_cache_module._InProcessGlobalCache()
cache_dict = global_cache_module._InProcessGlobalCache.cache
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
cache_key = _cache.global_cache_key(key._key)
assert cache_key in cache_dict
patch = mock.patch("google.cloud.ndb._datastore_api._LookupBatch.add")
patch.side_effect = Exception("Shouldn't call this")
with patch:
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
def test_retrieve_entity_with_redis_cache(ds_entity, redis_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
cache_key = _cache.global_cache_key(key._key)
assert redis_context.global_cache.redis.get(cache_key) is not None
patch = mock.patch("google.cloud.ndb._datastore_api._LookupBatch.add")
patch.side_effect = Exception("Shouldn't call this")
with patch:
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_not_found(ds_entity):
entity_id = test_utils.system.unique_resource_id()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_nested_tasklet(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
@ndb.tasklet
def get_foo(key):
entity = yield key.get_async()
raise ndb.Return(entity.foo)
key = ndb.Key(KIND, entity_id)
assert get_foo(key).result() == 42
@pytest.mark.usefixtures("client_context")
def test_retrieve_two_entities_in_parallel(ds_entity):
entity1_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity1_id, foo=42, bar="none")
entity2_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity2_id, foo=65, bar="naan")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key1 = ndb.Key(KIND, entity1_id)
key2 = ndb.Key(KIND, entity2_id)
@ndb.tasklet
def get_two_entities():
entity1, entity2 = yield key1.get_async(), key2.get_async()
raise ndb.Return(entity1, entity2)
entity1, entity2 = get_two_entities().result()
assert isinstance(entity1, SomeKind)
assert entity1.foo == 42
assert entity1.bar == "none"
assert isinstance(entity2, SomeKind)
assert entity2.foo == 65
assert entity2.bar == "naan"
@pytest.mark.usefixtures("client_context")
def test_insert_entity(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
# Make sure strings are stored as strings in datastore
ds_entity = ds_client.get(key._key)
assert ds_entity["bar"] == "none"
@pytest.mark.usefixtures("client_context")
def test_insert_entity_with_stored_name_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.StringProperty()
bar = ndb.StringProperty(name="notbar")
entity = SomeKind(foo="something", bar="or other")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == "something"
assert retrieved.bar == "or other"
ds_entity = ds_client.get(key._key)
assert ds_entity["notbar"] == "or other"
@pytest.mark.usefixtures("client_context")
def test_insert_roundtrip_naive_datetime(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.DateTimeProperty()
entity = SomeKind(foo=datetime.datetime(2010, 5, 12, 2, 42))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == datetime.datetime(2010, 5, 12, 2, 42)
@pytest.mark.usefixtures("client_context")
def test_datetime_w_tzinfo(dispose_of, ds_client):
class timezone(datetime.tzinfo):
def __init__(self, offset):
self.offset = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.offset
def dst(self, dt):
return datetime.timedelta(0)
mytz = timezone(-4)
class SomeKind(ndb.Model):
foo = ndb.DateTimeProperty(tzinfo=mytz)
bar = ndb.DateTimeProperty(tzinfo=mytz)
entity = SomeKind(
foo=datetime.datetime(2010, 5, 12, 2, 42, tzinfo=timezone(-5)),
bar=datetime.datetime(2010, 5, 12, 2, 42),
)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == datetime.datetime(2010, 5, 12, 3, 42, tzinfo=mytz)
assert retrieved.bar == datetime.datetime(2010, 5, 11, 22, 42, tzinfo=mytz)
def test_parallel_threads(dispose_of, namespace):
client = ndb.Client(namespace=namespace)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
def insert(foo):
with client.context(cache_policy=False):
entity = SomeKind(foo=foo, bar="none")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
assert retrieved.bar == "none"
thread1 = threading.Thread(target=insert, args=[42], name="one")
thread2 = threading.Thread(target=insert, args=[144], name="two")
thread1.start()
thread2.start()
thread1.join()
thread2.join()
@pytest.mark.usefixtures("client_context")
def test_large_json_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.JsonProperty()
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_compressed_json_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.JsonProperty(compressed=True)
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_compressed_blob_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.BlobProperty(compressed=True)
foo = b"abc" * 100
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_compressed_repeated_local_structured_property(dispose_of, ds_client):
class Dog(ndb.Model):
name = ndb.StringProperty()
class House(ndb.Model):
dogs = ndb.LocalStructuredProperty(Dog, repeated=True, compressed=True)
entity = House()
dogs = [Dog(name="Mika"), Dog(name="Mocha")]
entity.dogs = dogs
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.dogs == dogs
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_with_legacy_compressed_property(
ds_entity_with_meanings,
):
class SomeKind(ndb.Model):
blob = ndb.BlobProperty()
value = b"abc" * 1000
compressed_value = zlib.compress(value)
entity_id = test_utils.system.unique_resource_id()
ds_entity_with_meanings(
{"blob": (22, compressed_value)},
KIND,
entity_id,
**{"blob": compressed_value}
)
key = ndb.Key(KIND, entity_id)
retrieved = key.get()
assert retrieved.blob == value
@pytest.mark.usefixtures("client_context")
def test_large_pickle_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.PickleProperty()
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_key_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.KeyProperty()
key_value = ndb.Key("Whatevs", 123)
entity = SomeKind(foo=key_value)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == key_value
@pytest.mark.usefixtures("client_context")
def test_multiple_key_properties(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.KeyProperty(kind="Whatevs")
bar = ndb.KeyProperty(kind="Whatevs")
foo = ndb.Key("Whatevs", 123)
bar = ndb.Key("Whatevs", 321)
entity = SomeKind(foo=foo, bar=bar)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
assert retrieved.bar == bar
assert retrieved.foo != retrieved.bar
def test_insert_entity_with_caching(client_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
client_context.set_cache_policy(None) # Use default
entity = SomeKind(foo=42, bar="none")
key = entity.put()
with client_context.new(cache_policy=False).use():
# Sneaky. Delete entity out from under cache so we know we're getting
# cached copy.
key.delete()
eventually(key.get, _equals(None))
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
def test_insert_entity_with_global_cache(dispose_of, client_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
global_cache = global_cache_module._InProcessGlobalCache()
cache_dict = global_cache_module._InProcessGlobalCache.cache
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
cache_key = _cache.global_cache_key(key._key)
assert not cache_dict
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
assert cache_key in cache_dict
entity.foo = 43
entity.put()
# This is py27 behavior. I can see a case being made for caching the
# entity on write rather than waiting for a subsequent lookup.
assert cache_key not in cache_dict
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
def test_insert_entity_with_redis_cache(dispose_of, redis_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
cache_key = _cache.global_cache_key(key._key)
assert redis_context.global_cache.redis.get(cache_key) is None
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
assert redis_context.global_cache.redis.get(cache_key) is not None
entity.foo = 43
entity.put()
# This is py27 behavior. I can see a case being made for caching the
# entity on write rather than waiting for a subsequent lookup.
assert redis_context.global_cache.redis.get(cache_key) is None
@pytest.mark.usefixtures("client_context")
def test_update_entity(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
entity.foo = 56
entity.bar = "high"
assert entity.put() == key
retrieved = key.get()
assert retrieved.foo == 56
assert retrieved.bar == "high"
@pytest.mark.usefixtures("client_context")
def test_insert_entity_in_transaction(dispose_of):
commit_callback = mock.Mock()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
def save_entity():
ndb.get_context().call_on_commit(commit_callback)
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
return key
key = ndb.transaction(save_entity)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
commit_callback.assert_called_once_with()
@pytest.mark.usefixtures("client_context")
def test_update_entity_in_transaction(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
def update_entity():
key = ndb.Key(KIND, entity_id)
entity = key.get()
entity.foo = 56
entity.bar = "high"
assert entity.put() == key
return key
key = ndb.transaction(update_entity)
retrieved = key.get()
assert retrieved.foo == 56
assert retrieved.bar == "high"
@pytest.mark.usefixtures("client_context")
def test_parallel_transactions():
def task(delay):
@ndb.tasklet
def callback():
transaction = ndb.get_context().transaction
yield ndb.sleep(delay)
assert ndb.get_context().transaction == transaction
raise ndb.Return(transaction)
return callback
future1 = ndb.transaction_async(task(0.1))
future2 = ndb.transaction_async(task(0.06))
ndb.wait_all((future1, future2))
assert future1.get_result() != future2.get_result()
@pytest.mark.usefixtures("client_context")
def test_delete_entity(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
assert key.delete() is None
assert key.get() is None
assert key.delete() is None
def test_delete_entity_with_caching(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
client_context.set_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
assert key.delete() is None
assert key.get() is None
assert key.delete() is None
def test_delete_entity_with_global_cache(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
cache_key = _cache.global_cache_key(key._key)
global_cache = global_cache_module._InProcessGlobalCache()
cache_dict = global_cache_module._InProcessGlobalCache.cache
with client_context.new(global_cache=global_cache).use():
assert key.get().foo == 42
assert cache_key in cache_dict
assert key.delete() is None
assert cache_key not in cache_dict
# This is py27 behavior. Not entirely sold on leaving _LOCKED value for
# Datastore misses.
assert key.get() is None
assert cache_dict[cache_key][0] == b"0"
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
def test_delete_entity_with_redis_cache(ds_entity, redis_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
cache_key = _cache.global_cache_key(key._key)
assert key.get().foo == 42
assert redis_context.global_cache.redis.get(cache_key) is not None
assert key.delete() is None
assert redis_context.global_cache.redis.get(cache_key) is None
# This is py27 behavior. Not entirely sold on leaving _LOCKED value for
# Datastore misses.
assert key.get() is None
assert redis_context.global_cache.redis.get(cache_key) == b"0"
@pytest.mark.usefixtures("client_context")
def test_delete_entity_in_transaction(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
def delete_entity():
assert key.delete() is None
assert key.get().foo == 42 # not deleted until commit
ndb.transaction(delete_entity)
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_delete_entity_in_transaction_then_rollback(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
def delete_entity():
assert key.delete() is None
raise Exception("Spurious error")
with pytest.raises(Exception):
ndb.transaction(delete_entity)
assert key.get().foo == 42
@pytest.mark.usefixtures("client_context")
def test_allocate_ids():
class SomeKind(ndb.Model):
pass
keys = SomeKind.allocate_ids(5)
assert len(keys) == 5
for key in keys:
assert key.id()
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_get_by_id(ds_entity):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
entity = SomeKind.get_by_id(entity_id)
assert entity.foo == 42
@pytest.mark.usefixtures("client_context")
def test_get_or_insert_get(ds_entity):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
name = "Inigo Montoya"
assert SomeKind.get_by_id(name) is None
ds_entity(KIND, name, foo=42)
entity = SomeKind.get_or_insert(name, foo=21)
assert entity.foo == 42
@pytest.mark.usefixtures("client_context")
def test_get_or_insert_insert(dispose_of):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
name = "Inigo Montoya"
assert SomeKind.get_by_id(name) is None
entity = SomeKind.get_or_insert(name, foo=21)
dispose_of(entity._key._key)
assert entity.foo == 21
@pytest.mark.usefixtures("client_context")
def test_get_or_insert_get_in_transaction(ds_entity):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
name = "Inigo Montoya"
assert SomeKind.get_by_id(name) is None
def do_the_thing():
ds_entity(KIND, name, foo=42)
return SomeKind.get_or_insert(name, foo=21)
entity = ndb.transaction(do_the_thing)
assert entity.foo == 42
@pytest.mark.usefixtures("client_context")
def test_insert_entity_with_structured_property(dispose_of):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind)
entity = SomeKind(foo=42, bar=OtherKind(one="hi", two="mom"))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar.one == "hi"
assert retrieved.bar.two == "mom"
assert isinstance(retrieved.bar, OtherKind)
def test_insert_entity_with_structured_property_legacy_data(
client_context, dispose_of, ds_client
):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind)
with client_context.new(legacy_data=True).use():
entity = SomeKind(foo=42, bar=OtherKind(one="hi", two="mom"))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar.one == "hi"
assert retrieved.bar.two == "mom"
assert isinstance(retrieved.bar, OtherKind)
ds_entity = ds_client.get(key._key)
assert ds_entity["foo"] == 42
assert ds_entity["bar.one"] == "hi"
assert ds_entity["bar.two"] == "mom"
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_with_legacy_structured_property(ds_entity):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind)
entity_id = test_utils.system.unique_resource_id()
ds_entity(
KIND, entity_id, **{"foo": 42, "bar.one": "hi", "bar.two": "mom"}
)
key = ndb.Key(KIND, entity_id)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar.one == "hi"
assert retrieved.bar.two == "mom"
assert isinstance(retrieved.bar, OtherKind)
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_with_legacy_repeated_structured_property(ds_entity):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind, repeated=True)
entity_id = test_utils.system.unique_resource_id()
ds_entity(
KIND,
entity_id,
**{"foo": 42, "bar.one": ["hi", "hello"], "bar.two": ["mom", "dad"]}
)
key = ndb.Key(KIND, entity_id)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar[0].one == "hi"
assert retrieved.bar[0].two == "mom"
assert retrieved.bar[1].one == "hello"
assert retrieved.bar[1].two == "dad"
assert isinstance(retrieved.bar[0], OtherKind)
assert isinstance(retrieved.bar[1], OtherKind)
@pytest.mark.usefixtures("client_context")
def test_insert_expando(dispose_of):
class SomeKind(ndb.Expando):
foo = ndb.IntegerProperty()
entity = SomeKind(foo=42)
entity.expando_prop = "exp-value"
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.expando_prop == "exp-value"
@pytest.mark.usefixtures("client_context")
def test_insert_polymodel(dispose_of):
class Animal(ndb.PolyModel):
one = ndb.StringProperty()
class Feline(Animal):
two = ndb.StringProperty()
class Cat(Feline):
three = ndb.StringProperty()
entity = Cat(one="hello", two="dad", three="i'm in jail")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved, Animal)
assert isinstance(retrieved, Cat)
assert retrieved.one == "hello"
assert retrieved.two == "dad"
assert retrieved.three == "i'm in jail"
@pytest.mark.usefixtures("client_context")
def test_insert_autonow_property(dispose_of):
class SomeKind(ndb.Model):
foo = ndb.StringProperty()
created_at = ndb.DateTimeProperty(indexed=True, auto_now_add=True)
updated_at = ndb.DateTimeProperty(indexed=True, auto_now=True)
entity = SomeKind(foo="bar")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved.created_at, datetime.datetime)
assert isinstance(retrieved.updated_at, datetime.datetime)
@pytest.mark.usefixtures("client_context")
def test_insert_nested_autonow_property(dispose_of):
class OtherKind(ndb.Model):
created_at = ndb.DateTimeProperty(indexed=True, auto_now_add=True)
updated_at = ndb.DateTimeProperty(indexed=True, auto_now=True)
class SomeKind(ndb.Model):
other = ndb.StructuredProperty(OtherKind)
entity = SomeKind(other=OtherKind())
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved.other.created_at, datetime.datetime)
assert isinstance(retrieved.other.updated_at, datetime.datetime)
@pytest.mark.usefixtures("client_context")
def test_uninitialized_property(dispose_of):
class SomeKind(ndb.Model):
foo = ndb.StringProperty(required=True)
entity = SomeKind()
with pytest.raises(ndb.exceptions.BadValueError):
entity.put()
@mock.patch(
"google.cloud.ndb._datastore_api.make_call",
mock.Mock(side_effect=Exception("Datastore shouldn't get called.")),
)
def test_crud_without_datastore(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
global_cache = global_cache_module._InProcessGlobalCache()
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
context.set_datastore_policy(False) # Don't use Datastore
key = ndb.Key(KIND, entity_id)
SomeKind(foo=42, bar="none", baz="night", _key=key).put()
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
key.delete()
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_computed_key_property(dispose_of):
"""Regression test for #284.
https://github.com/googleapis/python-ndb/issues/284
"""
class AModel(ndb.Model):
s_foo = ndb.StringProperty()
class BModel(ndb.Model):
s_bar = ndb.StringProperty()
key_a = ndb.KeyProperty(kind="AModel", indexed=True)
class CModel(ndb.Model):
s_foobar = ndb.StringProperty()
key_b = ndb.KeyProperty(kind="BModel", indexed=True)
key_a = ndb.ComputedProperty( # Issue here
lambda self: self.key_b.get().key_a if self.key_b else None,
)
key_a = AModel(s_foo="test").put()
dispose_of(key_a._key)
key_b = BModel(s_bar="test", key_a=key_a).put()
dispose_of(key_b._key)
key_c = CModel(s_foobar="test", key_b=key_b).put()
dispose_of(key_c._key)
entity = key_c.get()
assert entity.key_a == key_a
assert entity.key_b == key_b
@pytest.mark.usefixtures("client_context")
def test_user_property(dispose_of):
class SomeKind(ndb.Model):
user = ndb.UserProperty()
user = ndb.User("somebody@example.com", "gmail.com")
entity = SomeKind(user=user)
key = entity.put()
dispose_of(key._key)
retreived = key.get()
assert retreived.user.email() == "somebody@example.com"
assert retreived.user.auth_domain() == "gmail.com"
@pytest.mark.usefixtures("client_context")
def test_user_property_different_user_class(dispose_of):
class SomeKind(ndb.Model):
user = ndb.UserProperty()
class User(object):
def email(self):
return "somebody@example.com"
def auth_domain(self):
return "gmail.com"
def user_id(self):
return None
entity = SomeKind(user=User())
key = entity.put()
dispose_of(key._key)
retreived = key.get()
assert retreived.user.email() == "somebody@example.com"
assert retreived.user.auth_domain() == "gmail.com"
@pytest.mark.usefixtures("client_context")
def test_repeated_empty_strings(dispose_of):
"""Regression test for issue # 300.
https://github.com/googleapis/python-ndb/issues/300
"""
class SomeKind(ndb.Model):
foo = ndb.StringProperty(repeated=True)
entity = SomeKind(foo=["", ""])
key = entity.put()
dispose_of(key._key)
retreived = key.get()
assert retreived.foo == ["", ""]
@pytest.mark.usefixtures("redis_context")
def test_multi_get_weirdness_with_redis(dispose_of):
"""Regression test for issue #294.
https://github.com/googleapis/python-ndb/issues/294
"""
class SomeKind(ndb.Model):
foo = ndb.StringProperty()
objects = [SomeKind(foo=str(i)) for i in range(10)]
keys = ndb.put_multi(objects)
for key in keys:
dispose_of(key._key)
ndb.get_multi(keys)
one_object = random.choice(keys).get()
one_object.foo = "CHANGED"
one_object.put()
objects_upd = ndb.get_multi(keys)
keys_upd = [obj.key for obj in objects_upd]
assert len(keys_upd) == len(keys)
assert len(set(keys_upd)) == len(set(keys))
assert set(keys_upd) == set(keys)
@pytest.mark.usefixtures("client_context")
def test_multi_with_lots_of_keys(dispose_of):
"""Regression test for issue #318.
https://github.com/googleapis/python-ndb/issues/318
"""
N = 1001
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
foos = list(range(N))
entities = [SomeKind(foo=foo) for foo in foos]
keys = ndb.put_multi(entities)
dispose_of(*(key._key for key in keys))
assert len(keys) == N
entities = ndb.get_multi(keys)
assert [entity.foo for entity in entities] == foos
ndb.delete_multi(keys)
entities = ndb.get_multi(keys)
assert entities == [None] * N
@pytest.mark.usefixtures("client_context")
def test_allocate_a_lot_of_keys():
N = 1001
class SomeKind(ndb.Model):
pass
keys = SomeKind.allocate_ids(N)
assert len(keys) == N
@pytest.mark.usefixtures("client_context")
def test_delete_multi_with_transactional(dispose_of):
"""Regression test for issue #271
https://github.com/googleapis/python-ndb/issues/271
"""
N = 10
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
@ndb.transactional()
def delete_them(entities):
ndb.delete_multi([entity.key for entity in entities])
foos = list(range(N))
entities = [SomeKind(foo=foo) for foo in foos]
keys = ndb.put_multi(entities)
dispose_of(*(key._key for key in keys))
entities = ndb.get_multi(keys)
assert [entity.foo for entity in entities] == foos
assert delete_them(entities) is None
entities = ndb.get_multi(keys)
assert entities == [None] * N
@pytest.mark.usefixtures("client_context")
def test_compressed_text_property(dispose_of, ds_client):
    """Regression test for #277

    https://github.com/googleapis/python-ndb/issues/277
    """

    class SomeKind(ndb.Model):
        foo = ndb.TextProperty(compressed=True)

    key = SomeKind(foo="Compress this!").put()
    dispose_of(key._key)

    # Reading back through ndb must transparently decompress.
    assert key.get().foo == "Compress this!"

    # Reading through the raw datastore client must show zlib-compressed bytes.
    raw_entity = ds_client.get(key._key)
    assert zlib.decompress(raw_entity["foo"]) == b"Compress this!"
def test_insert_entity_with_repeated_local_structured_property_legacy_data(
    client_context, dispose_of, ds_client
):
    """Regression test for #326

    https://github.com/googleapis/python-ndb/issues/326
    """

    class OtherKind(ndb.Model):
        one = ndb.StringProperty()
        two = ndb.StringProperty()

    class SomeKind(ndb.Model):
        foo = ndb.IntegerProperty()
        bar = ndb.LocalStructuredProperty(OtherKind, repeated=True)

    # legacy_data=True exercises the legacy serialization path that
    # issue #326 is about.
    with client_context.new(legacy_data=True).use():
        entity = SomeKind(
            foo=42,
            bar=[
                OtherKind(one="hi", two="mom"),
                OtherKind(one="and", two="dad"),
            ],
        )
        key = entity.put()
        dispose_of(key._key)

        retrieved = key.get()

        assert retrieved.foo == 42
        assert retrieved.bar[0].one == "hi"
        assert retrieved.bar[0].two == "mom"
        assert retrieved.bar[1].one == "and"
        assert retrieved.bar[1].two == "dad"

        # values must round-trip as model instances, not raw dicts
        assert isinstance(retrieved.bar[0], OtherKind)
        assert isinstance(retrieved.bar[1], OtherKind)
|
threading_test.py | import threading
import requests
from queue import Queue
# Serializes the HTTP request + print in search_wiki() across worker threads.
request_lock = threading.Lock()
# Work queue of titles; worker threads pull from it until the process exits.
q = Queue()
# Accumulated search results; in CPython list.append is protected by the GIL,
# so worker threads may append without extra locking.
dvds = []
def main():
    """Fan ten DVD titles out to four worker threads and print the results.

    Workers run threader() forever; they are daemonic so the process can
    exit once q.join() confirms every title has been processed.
    """
    titles = [
        'Ferdinand',
        'Victoria. The complete second season',
        'The man who invented Christmas',
        'Black Panther',
        'Star Wars. The last Jedi',
        'Coco',
        'The shape of water',
        'All the money in the world',
        'The breadwinner',
        'The Post'
    ]

    # Removed the dead `t.return_queue = q` assignment (it was only read by
    # commented-out debug code) and the commented-out debug print itself.
    for _ in range(4):
        t = threading.Thread(target=threader)
        t.daemon = True
        t.start()

    for title in titles:
        q.put(title)

    q.join()  # block until every queued title has been task_done()'d
    print('main dvds:', dvds)
def search_wiki(title):
    """Query the Wikipedia opensearch API for *title* (as a film).

    Returns a list of dicts with 'title', 'description' and 'wiki_url'
    keys, one per search hit.
    """
    url = 'https://en.wikipedia.org/w/api.php'
    # Passing the query via params= lets requests URL-encode it properly;
    # the previous string concatenation left spaces and punctuation in the
    # title unescaped.
    params = {
        'action': 'opensearch',
        'format': 'json',
        'search': title + ' film',
    }
    # NOTE: holding the lock around the HTTP call serializes the workers'
    # requests; it keeps output tidy but removes most of the parallelism.
    with request_lock:
        res = requests.get(url, params=params)
        print('title:', title, 'res.json()[0]:', res.json()[0])
        data = res.json()
    # opensearch responses are [query, [titles], [descriptions], [urls]]
    hits = []
    for idx, hit in enumerate(data[1]):
        hits.append({
            'title': hit,
            'description': data[2][idx],
            'wiki_url': data[3][idx]
        })
    return hits
def threader():
    # Worker loop: runs forever; the thread is daemonic so it dies with main().
    while True:
        title = q.get()  # blocks until a title is available
        dvds.append(search_wiki(title))
        q.task_done()  # pairs with q.join() in main()
# Script entry point.
if __name__ == '__main__':
    main()
|
test_selenium.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
example.py
~~~~~~~~~
A simple command line application to run flask apps.
:copyright: 2019 Miller
:license: BSD-3-Clause
"""
# Known bugs that can't be fixed here:
# - synopsis() cannot be prevented from clobbering existing
# loaded modules.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
# --------------------------------------------------------- common routines
import re
import threading
import time
import unittest
from selenium import webdriver
from booktags.flaskapp import create_app, db
from booktags import fake
from booktags.flaskapp.model.models import Role, User, Post
class SeleniumTestCase(unittest.TestCase):
    """End-to-end tests driven through a headless Chrome browser."""

    client = None  # shared WebDriver; stays None when Chrome is unavailable

    @classmethod
    def setUpClass(cls):
        # start Chrome
        options = webdriver.ChromeOptions()
        options.add_argument('headless')
        try:
            cls.client = webdriver.Chrome(chrome_options=options)
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # are not swallowed. Leaving cls.client as None makes setUp()
            # skip every test instead of erroring out.
            pass

        # skip these tests if the browser could not be started
        if cls.client:
            # create the application
            cls.app = create_app('testing')
            cls.app_context = cls.app.app_context()
            cls.app_context.push()

            # suppress logging to keep unittest output clean
            import logging
            logger = logging.getLogger('werkzeug')
            logger.setLevel("ERROR")

            # create the database and populate with some fake data
            db.create_all()
            Role.insert_roles()
            fake.users(10)
            fake.posts(10)

            # add an administrator user
            admin_role = Role.query.filter_by(name='Administrator').first()
            admin = User(email='john@example.com',
                         username='john', password='cat',
                         role=admin_role, confirmed=True)
            db.session.add(admin)
            db.session.commit()

            # start the Flask server in a thread
            cls.server_thread = threading.Thread(target=cls.app.run,
                                                 kwargs={'debug': False})
            cls.server_thread.start()

            # give the server a second to ensure it is up
            time.sleep(1)

    @classmethod
    def tearDownClass(cls):
        if cls.client:
            # stop the flask server and the browser
            cls.client.get('http://localhost:5000/shutdown')
            cls.client.quit()
            cls.server_thread.join()

            # destroy database
            db.drop_all()
            db.session.remove()

            # remove application context
            cls.app_context.pop()

    def setUp(self):
        if not self.client:
            self.skipTest('Web browser not available')

    def tearDown(self):
        pass

    def test_admin_home_page(self):
        # navigate to home page; raw strings fix the deprecated '\s' escape
        self.client.get('http://localhost:5000/')
        self.assertTrue(re.search(r'Hello,\s+Stranger!', self.client.page_source))

        # navigate to login page
        self.client.find_element_by_link_text('Log In').click()
        self.assertIn('<h1>Login</h1>', self.client.page_source)

        # login
        self.client.find_element_by_name('email').\
            send_keys('john@example.com')
        self.client.find_element_by_name('password').send_keys('cat')
        self.client.find_element_by_name('submit').click()
        self.assertTrue(re.search(r'Hello,\s+john!', self.client.page_source))

        # navigate to the user's profile page
        self.client.find_element_by_link_text('Profile').click()
        self.assertIn('<h1>john</h1>', self.client.page_source)
02_v4l2_common_feed_pipes.py | #!/usr/bin/env python3
from flask import Flask, send_file, make_response, Response, g, request, stream_with_context
from io import BytesIO
import atexit
import errno
import os
import subprocess
import threading
INPUT = '/dev/video0'
FFMPEG = "/home/test/ffmpeg-nvenc/ffmpeg"
app = Flask(__name__)
@app.route('/pic')
def pic():
    """Grab a single frame from the capture device and return it as a PNG.

    On ffmpeg failure, respond 500 with ffmpeg's stderr for debugging.
    """
    cmd = [FFMPEG, '-s', 'uhd2160', '-i', INPUT,
           '-vframes', '1', '-vcodec', 'png', '-f', 'image2pipe', '-']
    app.logger.debug('exec: {}'.format(' '.join(cmd)))
    p = subprocess.Popen(cmd, stdin=None, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, close_fds=True)
    stdout, stderr = p.communicate()
    # communicate() already waited for process exit; the extra p.wait() the
    # original code did was redundant - the exit code is in p.returncode.
    if p.returncode == 0:
        return send_file(BytesIO(stdout), mimetype="image/png")
    else:
        return make_response("<pre>{}</pre>".format(stderr.decode('utf-8', 'replace')), 500)
@app.route('/mpjpeg')
def mpjpeg():
    """Stream the capture device as multipart JPEG (MJPEG over HTTP)."""
    ffmpeg_args = [
        FFMPEG, '-s', 'uhd2160', '-i', INPUT,
        '-f', 'mpjpeg', '-s', 'hd720',
        '-qmin', '1', '-qmax', '6', '-r', '15', '-',
    ]
    return Response(_stream(ffmpeg_args),
                    mimetype="multipart/x-mixed-replace;boundary=ffserver")
@app.route('/ts')
def ts():
    """Stream the capture device as an NVENC-encoded H.264 MPEG-TS."""
    ffmpeg_args = [
        FFMPEG, '-s', 'uhd2160', '-i', INPUT,
        '-f', 'mpegts', '-s', 'hd720',
        '-vcodec', 'h264_nvenc', '-qp', '23',
        '-g', '30', '-bf', '0', '-zerolatency', '1',
        '-strict_gop', '1', '-sc_threshold', '0', '-',
    ]
    return Response(_stream(ffmpeg_args), mimetype="video/ts")
@atexit.register
def teardown(*args):
    # Shut down every shared ffmpeg feed (and its pipes) at interpreter exit.
    app.logger.debug('teardown')
    app.logger.debug(global_ctx)
    global_ctx.close()
def _stream(cmd):
    """Return a flask streaming body relaying the output of *cmd* to the client."""
    app.logger.debug('stream: {}'.format(' '.join(cmd)))

    def generate():
        # One ffmpeg process is shared per distinct command line; each client
        # gets its own reader fd from the shared feed's fan-out buffer.
        with global_ctx.feed(cmd) as feed:
            rpipe = feed.new_reader()
            try:
                while True:
                    chunk = os.read(rpipe, 10240)
                    if not chunk:  # write end closed -> feed finished
                        break
                    yield chunk
            finally:
                # The reader fd belongs to this client; always release it.
                os.close(rpipe)

    return stream_with_context(generate())
class _GlobalContext:
    """Registry of shared ffmpeg feeds, keyed by the exact command line."""

    def __init__(self):
        app.logger.debug('_GlobalContext')
        self._feeds = {}  # command-line string -> _Feed
        self._feed_lock = threading.Lock()  # guards _feeds

    def feed(self, cmd):
        # Return the existing feed for this command line, creating it lazily.
        # NOTE(review): entries are never pruned when a feed stops; the _Feed
        # object is reused (re-opened) by the next request with the same cmd.
        with self._feed_lock:
            feed_id = ' '.join(cmd)
            feed = self._feeds.get(feed_id)
            if feed is None:
                feed = _Feed(cmd)
                self._feeds[feed_id] = feed
            return feed

    def close(self):
        # Shut down every feed; called from the atexit teardown handler.
        with self._feed_lock:
            for feed in self._feeds.values():
                feed._close()
            self._feeds = {}
class _Feed:
    """One shared ffmpeg process whose output is fanned out to many readers.

    Reference-counted via the context-manager protocol: the first __enter__
    starts the process, the last __exit__ stops it.
    """

    def __init__(self, cmd):
        self._acquired = 0  # number of clients currently inside the context
        self._lock = threading.Lock()  # guards _acquired and open/close
        self._process = None  # the ffmpeg subprocess, when open
        self._rpipe = None  # read end of the pipe ffmpeg writes to
        self._cmd = cmd
        self._buffer = None  # _MultiClientBuffer fan-out, when open
        self._thread = None  # pump thread running _buffer_loop
        self._closed = False

    def new_reader(self):
        # Hand out a fresh reader fd from the fan-out buffer.
        # NOTE(review): assumes the feed is already open (_buffer not None).
        app.logger.debug("feed new reader")
        return self._buffer.new_reader()

    def _open(self):
        # Start ffmpeg and the pump thread (called with self._lock held).
        app.logger.debug("feed open")
        self._closed = False
        self._buffer = _MultiClientBuffer()
        self._rpipe, wpipe = os.pipe()
        try:
            try:
                self._process = subprocess.Popen(self._cmd, stdin=None, stdout=wpipe,
                                                 stderr=subprocess.DEVNULL, close_fds=True)
            finally:
                # The parent must close its copy of the write end so that EOF
                # is observed on _rpipe once ffmpeg exits.
                os.close(wpipe)
            thread = threading.Thread(target=self._buffer_loop)
            thread.daemon = True
            thread.start()
            self._thread = thread
        except:
            # Roll back on any startup failure and re-raise.
            if self._rpipe is not None:
                os.close(self._rpipe)
                self._rpipe = None
            self._closed = True
            raise

    def _close(self):
        # Stop the pump thread, ffmpeg and all reader pipes.
        # NOTE(review): if _open() was never called, _buffer is None and this
        # would raise AttributeError - confirm teardown can't hit that path.
        app.logger.debug("feed close")
        self._buffer.close()
        self._closed = True
        p = self._process
        if p:
            p.terminate()
            try:
                p.wait(1.0)
            except subprocess.TimeoutExpired:
                # ffmpeg ignored SIGTERM; force-kill it.
                p.kill()
                p.wait()
            self._process = None
        if self._rpipe:
            os.close(self._rpipe)
            self._rpipe = None
        thread = self._thread
        self._thread = None
        if thread:
            thread.join()

    def _buffer_loop(self):
        # Pump thread: copy ffmpeg output into the fan-out buffer until EOF
        # or until _close() flips _closed.
        # NOTE(review): _close() may close _rpipe while os.read blocks on it
        # in this thread - confirm that race is acceptable here.
        while not self._closed:
            chunk = os.read(self._rpipe, 10240)
            if not chunk:
                break
            self._buffer.write(chunk)

    def __enter__(self):
        with self._lock:
            if self._acquired == 0:
                # First client -> actually start ffmpeg.
                self._open()
            self._acquired += 1
            app.logger.debug("feed enter {}".format(self._acquired))
            return self

    def __exit__(self, *args):
        with self._lock:
            app.logger.debug("feed exit {}".format(self._acquired))
            self._acquired -= 1
            if self._acquired <= 0:
                # Last client left -> stop ffmpeg.
                self._close()
class _MultiClientBuffer:
    """Fans written chunks out to any number of reader pipes."""

    def __init__(self):
        self._pipes = []  # list of (read_fd, write_fd) pairs
        self._pipes_lock = threading.Lock()  # guards _pipes and _closed
        self._closed = False

    def new_reader(self):
        # Create a new OS pipe and return its read end; the caller owns and
        # must close the returned fd.
        with self._pipes_lock:
            if self._closed:
                raise IOError(errno.EIO, "already closed")
            rpipe, wpipe = os.pipe()
            self._pipes.append((rpipe, wpipe))
            return rpipe

    def write(self, chunk):
        # Broadcast chunk to every registered reader; readers whose read end
        # has gone away (BrokenPipeError) are closed and dropped.
        if self._closed:
            return
        pipes_to_del = []
        try:
            # Snapshot under the lock, write outside it so a slow reader
            # cannot block new_reader()/close().
            with self._pipes_lock:
                pipes = list(self._pipes)
            for idx, (_, wpipe) in enumerate(pipes):
                try:
                    os.write(wpipe, chunk)
                except BrokenPipeError:
                    pipes_to_del.append(idx)
                    os.close(wpipe)
        except Exception:
            # NOTE(review): on an unexpected error every pipe is dropped from
            # the registry but the write fds are not closed here - possible
            # fd leak; confirm intent.
            pipes_to_del = range(len(pipes))
            raise
        finally:
            with self._pipes_lock:
                for pipe_idx in reversed(pipes_to_del):
                    del self._pipes[pipe_idx]

    def close(self):
        # Close all write ends (readers then see EOF) and refuse new readers.
        # Read ends are owned by the readers and are closed by them.
        with self._pipes_lock:
            self._closed = True
            for _, wpipe in self._pipes:
                os.close(wpipe)
            self._pipes = []
# Module-level singleton holding all shared ffmpeg feeds.
global_ctx = _GlobalContext()
|
base_crash_reporter.py | # Electrum - lightweight Bitcoin client
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import json
import locale
import traceback
import sys
from .version import ELECTRUM_VERSION
from . import constants
from .i18n import _
from .util import make_aiohttp_session
from .logging import describe_os_version, Logger, get_git_version
class BaseCrashReporter(Logger):
    """Collects crash information and submits it to the crash report server.

    GUI-specific subclasses implement get_user_description() and
    get_wallet_type().
    """
    report_server = "https://crashhub.electrum.org"
    config_key = "show_crash_reporter"
    issue_template = """<h2>Traceback</h2>
<pre>
{traceback}
</pre>

<h2>Additional information</h2>
<ul>
  <li>Xazab Electrum version: {app_version}</li>
  <li>Python version: {python_version}</li>
  <li>Operating system: {os}</li>
  <li>Wallet type: {wallet_type}</li>
  <li>Locale: {locale}</li>
</ul>
"""
    CRASH_MESSAGE = _('Something went wrong while executing Xazab Electrum.')
    CRASH_TITLE = _('Sorry!')
    REQUEST_HELP_MESSAGE = _('To help us diagnose and fix the problem, you can send us a bug report that contains '
                             'useful debug information:')
    DESCRIBE_ERROR_MESSAGE = _("Please briefly describe what led to the error (optional):")
    ASK_CONFIRM_SEND = _("Do you want to send this report?")

    def __init__(self, exctype, value, tb):
        Logger.__init__(self)
        # (exctype, value, tb) as produced by sys.exc_info()
        self.exc_args = (exctype, value, tb)

    def send_report(self, asyncio_loop, proxy, endpoint="/crash", *, timeout=None):
        """Upload the crash report and return the server's response text.

        Raises if this build looks like an altcoin fork that did not
        configure its own report server (do not spam upstream crashhub).
        """
        if constants.net.GENESIS[-4:] not in ["4943", "e26f"] and ".electrum.org" in BaseCrashReporter.report_server:
            # Gah! Some kind of altcoin wants to send us crash reports.
            raise Exception(_("Missing report URL."))
        report = self.get_traceback_info()
        report.update(self.get_additional_info())
        report = json.dumps(report)
        coro = self.do_post(proxy, BaseCrashReporter.report_server + endpoint, data=report)
        response = asyncio.run_coroutine_threadsafe(coro, asyncio_loop).result(timeout)
        return response

    async def do_post(self, proxy, url, data):
        """POST *data* to *url* through *proxy*; returns the response body."""
        async with make_aiohttp_session(proxy) as session:
            async with session.post(url, data=data, raise_for_status=True) as resp:
                return await resp.text()

    def get_traceback_info(self):
        """Return a dict describing the exception: message, trace and crash id."""
        exc_string = str(self.exc_args[1])
        stack = traceback.extract_tb(self.exc_args[2])
        readable_trace = "".join(traceback.format_list(stack))
        # The "id" entry groups identical crashes server-side; renamed the
        # local from `id` to avoid shadowing the builtin.
        crash_id = {
            "file": stack[-1].filename,
            "name": stack[-1].name,
            "type": self.exc_args[0].__name__
        }
        return {
            "exc_string": exc_string,
            "stack": readable_trace,
            "id": crash_id
        }

    def get_additional_info(self):
        """Return environment details (versions, OS, locale, user description)."""
        args = {
            "app_version": get_git_version() or ELECTRUM_VERSION,
            "python_version": sys.version,
            "os": describe_os_version(),
            "wallet_type": "unknown",
            "locale": locale.getdefaultlocale()[0] or "?",
            "description": self.get_user_description()
        }
        try:
            args["wallet_type"] = self.get_wallet_type()
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # are not swallowed. Maybe the wallet isn't loaded yet.
            pass
        return args

    def _get_traceback_str(self) -> str:
        return "".join(traceback.format_exception(*self.exc_args))

    def get_report_string(self):
        """Render the human-readable HTML report shown to the user."""
        info = self.get_additional_info()
        info["traceback"] = self._get_traceback_str()
        return self.issue_template.format(**info)

    def get_user_description(self):
        # Implemented by the GUI-specific subclass.
        raise NotImplementedError

    def get_wallet_type(self) -> str:
        # Implemented by the GUI-specific subclass.
        raise NotImplementedError
def trigger_crash():
    """Raise a TestingException on a fresh thread to exercise the reporter."""
    # note: do not change the type of the exception, the message,
    # or the name of this method. All reports generated through this
    # method will be grouped together by the crash reporter, and thus
    # don't spam the issue tracker.
    import threading

    class TestingException(Exception):
        pass

    def crash_test():
        raise TestingException("triggered crash for testing purposes")

    crasher = threading.Thread(target=crash_test)
    crasher.start()
|
py3test_runners.py | # -*- coding: utf-8 -*-
"""
Testing connection observer runner API that should be fulfilled by any runner
- submit
- wait_for
These integration tests check cooperation of the 3 players:
connection_observer - runner - connection
Main focus is on the runner and its correctness.
"""
__author__ = 'Grzegorz Latuszek'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = 'grzegorz.latuszek@nokia.com'
import re
import threading
import time
import platform
import importlib
import asyncio
import mock
import pytest
import contextlib
import datetime
from moler.connection_observer import ConnectionObserver
from moler.util.loghelper import disabled_logging
# --------------------------------------------------------------------
# Testing data path from connection to connection observer
# Runner is involved in data path establishing/dropping/securing
# --------------------------------------------------------------------
@pytest.mark.asyncio
async def test_observer_gets_all_data_of_connection_after_it_is_submitted_to_background(observer_runner):
    # in other words: after returning from runner.submit() no data can be lost, no races

    # Raw 'def' usage note:
    # This functionality works as well when runner is used inside raw def function
    # since it only uses runner.submit() + awaiting time
    # in other words - runner is running over some time period
    # The only difference is that raw def function may use only standalone_runner (which is subset of observer_runner)
    # and inside test you exchange 'await asyncio.sleep()' with 'time.sleep()'
    from moler.threaded_moler_connection import ThreadedMolerConnection

    with disabled_logging():
        durations = []
        for n in range(20):  # need to test multiple times to ensure there are no thread races
            moler_conn = ThreadedMolerConnection()
            net_down_detector = NetworkDownDetector(connection=moler_conn, runner=observer_runner)
            connection = net_down_detector.connection
            start_time = net_down_detector.life_status.start_time = time.time()
            observer_runner.submit(net_down_detector)
            durations.append(time.time() - start_time)

            connection.data_received("61 bytes", datetime.datetime.now())
            connection.data_received("62 bytes", datetime.datetime.now())
            connection.data_received("ping: Network is unreachable", datetime.datetime.now())

            # nothing received before submit() returned may be dropped
            assert net_down_detector.all_data_received == ["61 bytes", "62 bytes", "ping: Network is unreachable"]
    print("\n{}.submit() duration == {}".format(observer_runner.__class__.__name__,
                                                float(sum(durations)) / len(durations)))
def test_runner_secures_observer_against_additional_data_after_observer_is_done(observer_runner):
    """Done observer should not get data even before unsubscribe from moler-connection"""
    # correctly written observer looks like:
    #
    # def data_received(self, data, recv_time):
    #     if not self.done():
    #         parse(data)
    #
    # This test checks if runners secure wrong-written-observers with missing 'if not self.done():'
    from moler.threaded_moler_connection import ThreadedMolerConnection

    with disabled_logging():
        for n in range(20):  # need to test multiple times to ensure there are no thread races
            moler_conn = ThreadedMolerConnection()
            net_down_detector = NetworkDownDetector(connection=moler_conn, runner=observer_runner)
            net_down_detector.life_status.start_time = time.time()  # must start observer lifetime before runner.submit()
            connection = net_down_detector.connection
            # NOTE(review): start_time is assigned twice; this second
            # assignment looks redundant.
            net_down_detector.life_status.start_time = time.time()
            observer_runner.submit(net_down_detector)

            connection.data_received("61 bytes", datetime.datetime.now())
            # the detector completes on the next line; later data must be dropped
            connection.data_received("ping: Network is unreachable", datetime.datetime.now())
            connection.data_received("62 bytes", datetime.datetime.now())

            assert net_down_detector.all_data_received == ["61 bytes", "ping: Network is unreachable"]
def test_runner_secures_observer_against_additional_data_after_runner_shutdown(observer_runner):
    """In-shutdown runner should not pass data to observer even before unsubscribe from moler-connection"""
    # Even without running background feeder
    # we can use correctly constructed secure.data_received(data, datetime.datetime.now())
    # to block passing data from connection to observer while runner is in-shutdown state
    from moler.threaded_moler_connection import ThreadedMolerConnection

    moler_conn = ThreadedMolerConnection()
    # check if shutdown stops all observers running inside given runner
    net_down_detector1 = NetworkDownDetector(connection=moler_conn, runner=observer_runner)
    net_down_detector2 = NetworkDownDetector(connection=moler_conn, runner=observer_runner)
    net_down_detector1.life_status.start_time = time.time()  # must start observer lifetime before runner.submit()
    net_down_detector2.life_status.start_time = time.time()  # must start observer lifetime before runner.submit()
    connection = moler_conn
    observer_runner.submit(net_down_detector1)
    observer_runner.submit(net_down_detector2)

    connection.data_received("61 bytes", datetime.datetime.now())
    observer_runner.shutdown()
    connection.data_received("62 bytes", datetime.datetime.now())

    # only data received before shutdown() may reach the observers
    assert net_down_detector1.all_data_received == ["61 bytes"]
    assert net_down_detector2.all_data_received == ["61 bytes"]
@pytest.mark.asyncio
async def test_runner_unsubscribes_from_connection_after_runner_shutdown(observer_runner):
    """shutdown() must remove every observer subscription from the connection."""
    # see - Raw 'def' usage note
    from moler.threaded_moler_connection import ThreadedMolerConnection

    moler_conn = ThreadedMolerConnection()
    # check if shutdown unsubscribes all observers running inside given runner
    net_down_detector1 = NetworkDownDetector(connection=moler_conn, runner=observer_runner)
    net_down_detector2 = NetworkDownDetector(connection=moler_conn, runner=observer_runner)
    net_down_detector1.life_status.start_time = time.time()  # must start observer lifetime before runner.submit()
    net_down_detector2.life_status.start_time = time.time()  # must start observer lifetime before runner.submit()
    assert len(moler_conn._observers) == 0
    observer_runner.submit(net_down_detector1)
    observer_runner.submit(net_down_detector2)
    assert len(moler_conn._observers) == 2

    observer_runner.shutdown()
    await asyncio.sleep(0.1)  # give async runners time to unsubscribe
    assert len(moler_conn._observers) == 0
# TODO: test_runner_unsubscribes_from_connection_after_observer_is_done
@pytest.mark.asyncio
async def test_runner_doesnt_break_on_exception_raised_inside_observer(observer_runner):
    """Runner should be secured against 'wrongly written' connection-observer"""
    # see - Raw 'def' usage note
    with failing_net_down_detector(fail_on_data="zero bytes",
                                   fail_by_raising=Exception("unknown format"),
                                   runner=observer_runner) as conn_observer:

        connection = conn_observer.connection
        conn_observer.life_status.start_time = time.time()  # must start observer lifetime before runner.submit()
        observer_runner.submit(conn_observer)

        connection.data_received("61 bytes", datetime.datetime.now())
        connection.data_received("zero bytes", datetime.datetime.now())  # raises inside the observer
        connection.data_received("ping: Network is unreachable", datetime.datetime.now())

        # the raising chunk and anything after it must not be recorded
        assert conn_observer.all_data_received == ["61 bytes"]
# --------------------------------------------------------------------
# Testing exceptions handling
# Runner is involved in data path securing
# --------------------------------------------------------------------
# TODO: correct handling/storage of stack-trace of caught exceptions
@pytest.mark.asyncio
async def test_runner_sets_observer_exception_result_for_exception_raised_inside_observer(observer_runner):
    """Runner should correct behaviour of 'wrongly written' connection-observer"""
    # Correctly written observer should not allow exceptions escaping from data_received().
    # Such exceptions should be caught and stored inside observer via set_exception()
    # see - Raw 'def' usage note
    unknown_format_exception = Exception("unknown format")
    with failing_net_down_detector(fail_on_data="zero bytes",
                                   fail_by_raising=unknown_format_exception,
                                   runner=observer_runner) as conn_observer:

        connection = conn_observer.connection
        conn_observer.life_status.start_time = time.time()  # must start observer lifetime before runner.submit()
        observer_runner.submit(conn_observer)

        connection.data_received("61 bytes", datetime.datetime.now())
        connection.data_received("zero bytes", datetime.datetime.now())
        connection.data_received("ping: Network is unreachable", datetime.datetime.now())

        # identity check: the very exception raised inside the observer is stored
        assert conn_observer._exception is unknown_format_exception
@pytest.mark.asyncio
async def test_future_is_not_exception_broken_when_observer_is_exception_broken(observer_runner):
    # Runner created future is involved in data path handling.
    # That handling includes catching/storing exceptions. But such exception is exception of connection_observer
    # and not future itself - future behaviour is OK when it can correctly handle exception of observer.
    # see - Raw 'def' usage note
    with failing_net_down_detector(fail_on_data="zero bytes",
                                   fail_by_raising=Exception("unknown format"),
                                   runner=observer_runner) as conn_observer:

        connection = conn_observer.connection
        conn_observer.life_status.start_time = time.time()  # must start observer lifetime before runner.submit()
        future = observer_runner.submit(conn_observer)

        connection.data_received("61 bytes", datetime.datetime.now())
        connection.data_received("zero bytes", datetime.datetime.now())

        await asyncio.sleep(0.2)
        # the observer broke, but the future itself stays exception-free
        assert future.exception() is None  # assumption here: used future has .exceptions() API
@pytest.mark.asyncio
async def test_future_doesnt_return_result_of_observer(net_down_detector):
    """Future just returns None when it is done"""
    # see - Raw 'def' usage note
    observer_runner = net_down_detector.runner
    connection = net_down_detector.connection
    net_down_detector.life_status.start_time = time.time()  # must start observer lifetime before runner.submit()
    future = observer_runner.submit(net_down_detector)

    connection.data_received("61 bytes", datetime.datetime.now())
    connection.data_received("ping: Network is unreachable", datetime.datetime.now())
    await asyncio.sleep(0.2)
    # the observer result lives on the observer, not on the future
    assert future.result() is None
# --------------------------------------------------------------------
# Testing timeouts handling
#
# Part I - future's reaction on timeout
# Future is a result produced by runner.submit(). Future expresses
# "background life" of connection observer. In part I we test
# pure-background-life without impact of wait_for() API - means
# just send it to background and wait till timeout
# --------------------------------------------------------------------
@pytest.mark.asyncio
async def test_future_timeouts_after_timeout_of_observer(connection_observer):
    """Observer has .timeout member"""
    # see - Raw 'def' usage note
    from moler.exceptions import ResultNotAvailableYet, MolerTimeout
    observer_runner = connection_observer.runner
    connection_observer.timeout = 0.1
    connection_observer.life_status.start_time = time.time()  # must start observer lifetime before runner.submit()
    future = observer_runner.submit(connection_observer)
    with pytest.raises(ResultNotAvailableYet):  # not timed out yet
        connection_observer.result()
    await asyncio.sleep(0.2)
    with pytest.raises(MolerTimeout):
        connection_observer.result()  # we should have exception in connection_observer
    assert future.done()
    if not future.cancelled():  # future for timeouted observer should be either cancelled
        assert future.exception() is None  # or done with no exception inside future itself
@pytest.mark.asyncio
async def test_future_accommodates_to_extending_timeout_of_observer(connection_observer):
    """Extending observer.timeout while it is running postpones the timeout."""
    # see - Raw 'def' usage note
    import logging
    from moler.exceptions import ResultNotAvailableYet, MolerTimeout
    logger = logging.getLogger('moler.runner')
    observer_runner = connection_observer.runner
    connection_observer.timeout = 0.2
    connection_observer.life_status.start_time = time.time()  # must start observer lifetime before runner.submit()
    observer_runner.submit(connection_observer)
    with pytest.raises(ResultNotAvailableYet):  # not timed out yet
        connection_observer.result()
    logger.debug("first await asyncio.sleep(0.1)")
    await asyncio.sleep(0.1)
    logger.debug("after first await asyncio.sleep(0.1)")
    with pytest.raises(ResultNotAvailableYet):  # not timed out yet
        connection_observer.result()
    connection_observer.timeout = 0.5  # EXTEND
    logger.debug("second await asyncio.sleep(0.1)")
    await asyncio.sleep(0.1)
    logger.debug("after second await asyncio.sleep(0.1)")
    with pytest.raises(ResultNotAvailableYet):  # not timed out yet
        connection_observer.result()
    logger.debug("final await asyncio.sleep(0.4)")
    await asyncio.sleep(0.4)
    logger.debug("after final await asyncio.sleep(0.4)")
    with pytest.raises(MolerTimeout):  # should time out
        connection_observer.result()
@pytest.mark.asyncio
async def test_future_accommodates_to_shortening_timeout_of_observer(connection_observer):
    """Shortening observer.timeout while it is running brings the timeout forward."""
    # see - Raw 'def' usage note
    from moler.exceptions import ResultNotAvailableYet, MolerTimeout
    observer_runner = connection_observer.runner
    connection_observer.timeout = 0.2
    connection_observer.life_status.start_time = time.time()  # must start observer lifetime before runner.submit()
    observer_runner.submit(connection_observer)
    with pytest.raises(ResultNotAvailableYet):  # not timed out yet
        connection_observer.result()
    await asyncio.sleep(0.08)
    with pytest.raises(ResultNotAvailableYet):  # not timed out yet
        connection_observer.result()
    connection_observer.timeout = 0.1  # SHORTEN
    await asyncio.sleep(0.04)
    with pytest.raises(MolerTimeout):  # should time out
        connection_observer.result()
# --------------------------------------------------------------------
# Testing timeouts handling
#
# Part II - timeouts while inside wait_for()
# wait_for() API takes observer from background-life into foreground-life
# testing here:
# being inside blocking wait_for() - escape it on timeout
# --------------------------------------------------------------------
def test_wait_for__times_out_on_constructor_timeout(connection_observer):
    """wait_for() with timeout=None honours the observer's own .timeout."""
    from moler.exceptions import MolerTimeout
    observer_runner = connection_observer.runner
    connection_observer.timeout = 0.2
    start_time = connection_observer.life_status.start_time = time.time()
    future = observer_runner.submit(connection_observer)
    with pytest.raises(MolerTimeout):
        observer_runner.wait_for(connection_observer, future,
                                 timeout=None)  # means: use .timeout of observer
        connection_observer.result()  # should raise Timeout
    duration = time.time() - start_time
    assert duration >= 0.2
    assert duration < 0.25
    time.sleep(0.1)  # future may be 'not done yet' (just after timeout) - it should be "in exiting of feed"
    assert future.done()
    if not future.cancelled():  # future for timeouted observer should be either cancelled
        assert future.exception() is None  # or done with no exception inside future itself
def test_wait_for__times_out_on_specified_timeout(connection_observer):
    """An explicit wait_for() timeout shorter than observer.timeout wins."""
    from moler.exceptions import MolerTimeout
    observer_runner = connection_observer.runner
    connection_observer.timeout = 1.5
    connection_observer.terminating_timeout = 0.0
    start_time = connection_observer.life_status.start_time = time.time()
    future = observer_runner.submit(connection_observer)
    time.sleep(0.1)
    with pytest.raises(MolerTimeout):
        wait4_start_time = time.time()  # wait_for() timeout is counted from wait_for() line in code
        observer_runner.wait_for(connection_observer, future,
                                 timeout=0.2)  # means: use timeout of wait_for (shorter then initial one)
        connection_observer.result()  # should raise Timeout
    now = time.time()
    observer_life_duration = now - start_time
    wait4_duration = now - wait4_start_time
    assert wait4_duration >= 0.2
    assert wait4_duration < 0.3
    assert observer_life_duration >= 0.3
    assert observer_life_duration < 0.4
def test_wait_for__times_out_on_earlier_timeout(connection_observer):
    """An observer.timeout earlier than the wait_for() timeout fires first."""
    from moler.exceptions import MolerTimeout
    observer_runner = connection_observer.runner
    connection_observer.timeout = 0.3
    start_time = connection_observer.life_status.start_time = time.time()
    future = observer_runner.submit(connection_observer)
    with pytest.raises(MolerTimeout):
        wait4_start_time = time.time()  # wait_for() timeout is counted from wait_for() line in code
        observer_runner.wait_for(connection_observer, future,
                                 timeout=0.5)  # means: timeout of wait_for longer then initial one
        connection_observer.result()  # should raise Timeout
    now = time.time()
    observer_life_duration = now - start_time
    wait4_duration = now - wait4_start_time
    assert observer_life_duration >= 0.3
    assert observer_life_duration < 0.35
    assert wait4_duration < 0.5
def test_wait_for__tracks_changes_of_observer_timeout__extension(connection_observer):
    """Extending observer.timeout from another thread extends a blocking wait_for()."""
    from moler.exceptions import MolerTimeout
    observer_runner = connection_observer.runner
    connection_observer.timeout = 0.2
    start_time = connection_observer.life_status.start_time = time.time()
    future = observer_runner.submit(connection_observer)

    def modify_observer_timeout():
        time.sleep(0.15)
        connection_observer.timeout = 0.35  # extend while inside wait_for()
    threading.Thread(target=modify_observer_timeout).start()

    with pytest.raises(MolerTimeout):
        observer_runner.wait_for(connection_observer, future,
                                 timeout=None)
        connection_observer.result()  # should raise Timeout
    duration = time.time() - start_time
    assert duration >= 0.35
    assert duration < 0.4
def test_wait_for__tracks_changes_of_observer_timeout__shortening(connection_observer):
    """Shortening observer.timeout from another thread shortens a blocking wait_for()."""
    from moler.exceptions import MolerTimeout
    observer_runner = connection_observer.runner
    connection_observer.timeout = 0.35
    start_time = connection_observer.life_status.start_time = time.time()
    future = observer_runner.submit(connection_observer)

    def modify_observer_timeout():
        time.sleep(0.05)
        connection_observer.timeout = 0.2  # shorten while inside wait_for()
    threading.Thread(target=modify_observer_timeout).start()

    with pytest.raises(MolerTimeout):
        observer_runner.wait_for(connection_observer, future,
                                 timeout=None)
        connection_observer.result()  # should raise Timeout
    duration = time.time() - start_time
    assert duration >= 0.2
    assert duration < 0.25
def test_wait_for__direct_timeout_takes_precedence_over_extended_observer_timeout(connection_observer):
    # this is another variant of test_wait_for__times_out_on_earlier_timeout
    # Explicit wait_for(timeout=0.25) must win even though a background thread
    # extends the observer's own timeout to 0.4 while wait_for() is blocking.
    from moler.exceptions import MolerTimeout
    observer_runner = connection_observer.runner
    connection_observer.timeout = 0.2
    connection_observer.terminating_timeout = 0.0
    start_time = connection_observer.life_status.start_time = time.time()
    future = observer_runner.submit(connection_observer)

    def modify_observer_timeout():
        time.sleep(0.15)
        connection_observer.timeout = 0.4  # extend while inside wait_for()
    threading.Thread(target=modify_observer_timeout).start()

    with pytest.raises(MolerTimeout):
        wait4_start_time = time.time()  # wait_for() timeout is counted from wait_for() line in code
        observer_runner.wait_for(connection_observer, future,
                                 timeout=0.25)  # should take precedence, means: 0.25 sec from now
        connection_observer.result()  # should raise Timeout
    now = time.time()
    observer_life_duration = now - start_time
    wait4_duration = now - wait4_start_time
    # wait_for()'s own 0.25s timeout fired; observer's extended 0.4s did not.
    assert wait4_duration >= 0.25
    assert wait4_duration < 0.35
    assert observer_life_duration > 0.2
    assert observer_life_duration < 0.4
# --------------------------------------------------------------------
# Testing timeouts handling
#
# Part III - on_timeout() callback
# --------------------------------------------------------------------
def test_observer__on_timeout__is_called_once_at_timeout(connection_observer):
    # The observer's on_timeout() callback must fire exactly once when the
    # observer times out.
    from moler.exceptions import MolerTimeout
    observer_runner = connection_observer.runner
    connection_observer.timeout = 0.33
    connection_observer.life_status.start_time = time.time()  # must start observer lifetime before runner.submit()
    future = observer_runner.submit(connection_observer)
    with mock.patch.object(connection_observer, "on_timeout") as timeout_callback:
        with pytest.raises(MolerTimeout):
            observer_runner.wait_for(connection_observer, future,
                                     timeout=0.33)
            connection_observer.result()  # should raise Timeout
        timeout_callback.assert_called_once()
def test_runner_shutdown_cancels_remaining_active_feeders_inside_main_thread(async_runner):
    # Feeder has already started processing inside the event loop;
    # runner.shutdown() must cancel the still-running observer.
    from moler.threaded_moler_connection import ThreadedMolerConnection
    connection_observer = NetworkDownDetector(connection=ThreadedMolerConnection(), runner=async_runner)
    connection_observer.life_status.start_time = time.time()  # must start observer lifetime before runner.submit()
    future = async_runner.submit(connection_observer)
    future._loop.run_until_complete(asyncio.sleep(1.0))  # feeder will start processing inside loop
    # time.sleep(0.5)
    async_runner.shutdown()
    assert connection_observer.cancelled()
def test_runner_shutdown_cancels_remaining_inactive_feeders_inside_main_thread(observer_runner):
    # Feeder was submitted but never entered its event loop;
    # runner.shutdown() must still cancel the pending observer.
    from moler.threaded_moler_connection import ThreadedMolerConnection
    connection_observer = NetworkDownDetector(connection=ThreadedMolerConnection(), runner=observer_runner)
    connection_observer.life_status.start_time = time.time()  # must start observer lifetime before runner.submit()
    future = observer_runner.submit(connection_observer)
    time.sleep(0.2)  # won't enter event loop of future - feeder won't start processing
    observer_runner.shutdown()
    assert connection_observer.cancelled()
def test_runner_shutdown_cancels_remaining_feeders_inside_threads(observer_runner):
    # Several observers submitted from separate threads; a single shutdown()
    # must cancel all of them so the submitting threads can finish.
    from moler.threaded_moler_connection import ThreadedMolerConnection
    observers_pool = []
    for idx in range(3):
        connection_observer = NetworkDownDetector(connection=ThreadedMolerConnection(), runner=observer_runner)
        observers_pool.append(connection_observer)

    def submit_feeder(connection_observer):
        # each thread busy-waits until its observer's future completes
        connection_observer.life_status.start_time = time.time()  # must start observer lifetime before runner.submit()
        future = observer_runner.submit(connection_observer)
        while not future.done():
            time.sleep(0.1)
    th_pool = [threading.Thread(target=submit_feeder, args=(connection_observer,)) for connection_observer in observers_pool]
    for th in th_pool:
        th.start()
    # loop.run_until_complete(remaining_tasks)  # let it enter feeder
    time.sleep(0.5)
    observer_runner.shutdown()
    for th in th_pool:
        th.join()
    assert observers_pool[0].cancelled()
    assert observers_pool[1].cancelled()
    assert observers_pool[2].cancelled()
# def test_observer__on_timeout__is_called_once_at_timeout_threads_races(observer_runner):
# from moler.exceptions import MolerTimeout
# from moler.threaded_moler_connection import ThreadedMolerConnection
#
# with disabled_logging():
# observers_pool = []
# for idx in range(200):
# connection_observer = NetworkDownDetector(connection=ThreadedMolerConnection(), runner=observer_runner)
# connection_observer.timeout = 0.33
# connection_observer.on_timeout = mock.MagicMock()
# observers_pool.append(connection_observer)
#
# def await_on_timeout(connection_observer):
# connection_observer.life_status.start_time = time.time() # must start observer lifetime before runner.submit()
# future = observer_runner.submit(connection_observer)
# with pytest.raises(MolerTimeout):
# observer_runner.wait_for(connection_observer, future, timeout=0.33)
# connection_observer.result() # should raise Timeout
#
# th_pool = [threading.Thread(target=await_on_timeout, args=(connection_observer,)) for connection_observer in observers_pool]
# for th in th_pool:
# th.start()
# for th in th_pool:
# th.join()
#
# for connection_observer in observers_pool:
# timeout_callback = connection_observer.on_timeout
# timeout_callback.assert_called_once()
# --------------------------------------------------------------------
# Testing wait_for() API
#
# (timeouts inside wait_for are covered above)
# Should exit from blocking call when expected data comes.
# Future should be done as well.
# --------------------------------------------------------------------
def test_can_await_connection_observer_to_complete(observer_and_awaited_data):
    # wait_for() must return as soon as the awaited data arrives, leaving
    # both the observer and its future marked done.
    connection_observer, awaited_data = observer_and_awaited_data
    observer_runner = connection_observer.runner
    connection_observer.life_status.start_time = time.time()  # must start observer lifetime before runner.submit()
    future = observer_runner.submit(connection_observer)

    def inject_data():
        # simulate external-IO delivering the awaited data a bit later
        time.sleep(0.1)
        moler_conn = connection_observer.connection
        moler_conn.data_received(awaited_data, datetime.datetime.now())
    ext_io = threading.Thread(target=inject_data)
    ext_io.start()

    observer_runner.wait_for(connection_observer, future,
                             timeout=0.3)
    assert connection_observer.done()  # done but success or failure?
    assert connection_observer.result() is not None  # it should be success
    assert future.done()
    assert future.result() is None
# --------------------------------------------------------------------
# Testing wait_for_iterator() API
#
# Should exit from blocking call when expected data comes.
# Future should be done as well.
# --------------------------------------------------------------------
@pytest.mark.asyncio
async def test_can_async_await_connection_observer_to_complete(observer_and_awaited_data):
    # 'await observer' must complete once the awaited data arrives, leaving
    # both the observer and its future done.
    connection_observer, awaited_data = observer_and_awaited_data
    observer_runner = connection_observer.runner
    connection_observer.life_status.start_time = time.time()  # must start observer lifetime before runner.submit()
    future = observer_runner.submit(connection_observer)
    connection_observer.timeout = 0.3

    def inject_data():
        # simulate external-IO delivering the awaited data a bit later
        time.sleep(0.1)
        moler_conn = connection_observer.connection
        moler_conn.data_received(awaited_data, datetime.datetime.now())
    ext_io = threading.Thread(target=inject_data)
    ext_io.start()

    connection_observer._future = future
    connection_observer.runner = observer_runner
    # connection_observer.__await__ calls connection_observer.runner.wait_for_iterator(connection_observer,
    #                                                                                  connection_observer._future)
    await connection_observer
    assert connection_observer.done()  # done but success or failure?
    assert connection_observer.result() is not None  # it should be success
    assert future.done()
    assert future.result() is None
# --------------------------------------------------------------------
# Testing correct usage
#
# We want to be helpful for users. Even if some usage is 'user fault'
# (like calling long lasting functions inside async code) we want
# to inform about such cases as much as we can. Not always it is possible.
# --------------------------------------------------------------------
@pytest.mark.asyncio
async def test_wait_for__is_prohibited_inside_async_def(async_runner):
    # Async runner must reject the blocking wait_for() when called from
    # 'async def', with a helpful fix-hint in the exception message.
    # can't raise in generic runner since why non-async-runner should bother about being used inside 'async def'
    # using them in such case is end-user error the same way as using time.sleep(2.41) inside 'async def'
    from moler.exceptions import WrongUsage
    from moler.threaded_moler_connection import ThreadedMolerConnection
    # TODO: can we confidently check "called from async def"
    # https://stackoverflow.com/questions/30155138/how-can-i-write-asyncio-coroutines-that-optionally-act-as-regular-functions
    # "magically_determine_if_being_yielded_from() is actually event_loop.is_running()"
    # but that works for asyncio and not for curio/trio
    #
    # Any way to treat wait_for() as awaitable?
    #
    connection_observer = NetworkDownDetector(connection=ThreadedMolerConnection(), runner=async_runner)
    connection_observer.life_status.start_time = time.time()  # must start observer lifetime before runner.submit()
    future = async_runner.submit(connection_observer)
    with pytest.raises(WrongUsage) as err:
        async_runner.wait_for(connection_observer, future)
        connection_observer.result()  # should raise WrongUsage
    assert "Can't call wait_for() from 'async def' - it is blocking call" in str(err.value)
    # check "fix-hint" inside exception
    assert re.findall(r'consider using:\s+await observer\s+instead of:\s+observer.await_done()', str(err.value))
@pytest.mark.asyncio
async def test_wait_for__prohibited_inside_async_def_speaks_in_observer_API(async_runner):
    # observer.await_done() must refuse to run inside 'async def' - the
    # prohibition surfaces through the observer API, not only the runner.
    from moler.exceptions import WrongUsage
    from moler.threaded_moler_connection import ThreadedMolerConnection
    connection_observer = NetworkDownDetector(connection=ThreadedMolerConnection(), runner=async_runner)
    connection_observer.start()  # internally calls async_runner.submit()
    future = async_runner.submit(connection_observer)
    with pytest.raises(WrongUsage) as err:
        connection_observer.await_done()  # internally calls async_runner.wait_for() + connection_observer.result()
    assert "Can't call await_done() from 'async def' - it is blocking call" in str(err.value)
    # check "fix-hint" inside exception
    assert re.findall(r'consider using:\s+await observer\s+instead of:\s+observer.await_done()', str(err.value))
# TODO: test usage of iterable/awaitable
# TODO: handling not awaited futures (infinite background observer, timeouting observer but "failing path stopped"
# --------------------------- resources ---------------------------
def is_python36_or_above():
    """Return True when the interpreter is Python 3.6 or newer.

    Uses sys.version_info (numeric tuple) instead of parsing
    platform.python_version(): the old string comparison ver_major == '3'
    would wrongly return False on any future major version, and the strict
    3-way tuple unpack was fragile against differently-formatted versions.
    """
    return sys.version_info[:2] >= (3, 6)
# bg_runners may be called from both 'async def' and raw 'def' functions
available_bg_runners = []  # 'runner.ThreadPoolExecutorRunner']
# NOTE(review): the line above is immediately overwritten - it appears to be
# kept only as a quick toggle for disabling the thread-pool runner.
available_bg_runners = ['runner.ThreadPoolExecutorRunner']
# standalone_runners may run without giving up control to some event loop (since they create own thread(s))
available_standalone_runners = ['runner.ThreadPoolExecutorRunner']
# async_runners may be called only from 'async def' functions and require already running events-loop
available_async_runners = []
if is_python36_or_above():
    # asyncio-based runners are only registered on Python >= 3.6
    available_bg_runners.append('asyncio_runner.AsyncioRunner')
    available_async_runners.append('asyncio_runner.AsyncioRunner')
    # available_bg_runners.append('asyncio_runner.AsyncioInThreadRunner')
    # available_async_runners.append('asyncio_runner.AsyncioInThreadRunner')
    # available_standalone_runners.append('asyncio_runner.AsyncioInThreadRunner')
    pass
@pytest.yield_fixture(params=available_bg_runners)
def observer_runner(request):
    # Fixture parametrized over every background-capable runner implementation;
    # builds the runner from its dotted 'module.Class' name.
    module_name, class_name = request.param.rsplit('.', 1)
    module = importlib.import_module('moler.{}'.format(module_name))
    runner_class = getattr(module, class_name)
    runner = runner_class()
    # NOTE: AsyncioRunner given here will start without running event loop
    yield runner
    # remove exceptions collected inside ConnectionObserver
    ConnectionObserver.get_unraised_exceptions(remove=True)
    runner.shutdown()
@pytest.yield_fixture(params=available_standalone_runners)
def standalone_runner(request):
    # Fixture parametrized over runners that can run without an external
    # event loop (they create their own thread(s)).
    module_name, class_name = request.param.rsplit('.', 1)
    module = importlib.import_module('moler.{}'.format(module_name))
    runner_class = getattr(module, class_name)
    runner = runner_class()
    yield runner
    # remove exceptions collected inside ConnectionObserver
    ConnectionObserver.get_unraised_exceptions(remove=True)
    runner.shutdown()
@pytest.yield_fixture(params=available_async_runners)
def async_runner(request):
    # Fixture parametrized over runners usable only from 'async def' tests
    # (they require an already running event loop).
    module_name, class_name = request.param.rsplit('.', 1)
    module = importlib.import_module('moler.{}'.format(module_name))
    runner_class = getattr(module, class_name)
    runner = runner_class()
    yield runner
    # remove exceptions collected inside ConnectionObserver
    ConnectionObserver.get_unraised_exceptions(remove=True)
    runner.shutdown()
class NetworkDownDetector(ConnectionObserver):
    """Test observer that completes when 'Network is unreachable' appears."""

    def __init__(self, connection=None, runner=None):
        super(NetworkDownDetector, self).__init__(connection=connection, runner=runner)
        self.all_data_received = []  # every data chunk ever fed to the observer

    def data_received(self, data, recv_time):
        """
        Awaiting change like:
        64 bytes from 10.0.2.15: icmp_req=3 ttl=64 time=0.045 ms
        ping: sendmsg: Network is unreachable
        """
        self.all_data_received.append(data)
        if self.done():
            return  # already finished - further data is only recorded above
        if "Network is unreachable" in data:
            # record the moment the network-down symptom was spotted
            self.set_result(result=time.time())
@pytest.yield_fixture()
def connection_observer(observer_runner):
    # Fresh NetworkDownDetector wired to a new moler connection; indirectly
    # parametrized over all available runners via observer_runner.
    from moler.threaded_moler_connection import ThreadedMolerConnection
    moler_conn = ThreadedMolerConnection()
    observer = NetworkDownDetector(connection=moler_conn, runner=observer_runner)
    yield observer
    # remove exceptions collected inside ConnectionObserver
    ConnectionObserver.get_unraised_exceptions(remove=True)
@pytest.fixture()
def net_down_detector(connection_observer):  # let name say what type of observer it is
    # alias fixture: same object as connection_observer, more descriptive name
    return connection_observer
@contextlib.contextmanager
def failing_net_down_detector(fail_on_data, fail_by_raising, runner):
    # Detector that raises *fail_by_raising* when fed *fail_on_data* - used
    # to test how errors inside data_received() propagate through runners.
    from moler.threaded_moler_connection import ThreadedMolerConnection

    class FailingNetworkDownDetector(NetworkDownDetector):
        def data_received(self, data, recv_time):
            if data == fail_on_data:
                raise fail_by_raising
            return super(FailingNetworkDownDetector, self).data_received(data, recv_time)

    moler_conn = ThreadedMolerConnection()
    failing_detector = FailingNetworkDownDetector(connection=moler_conn, runner=runner)
    yield failing_detector
    # remove exceptions collected inside ConnectionObserver
    ConnectionObserver.get_unraised_exceptions(remove=True)
@pytest.fixture()
def observer_and_awaited_data(connection_observer):
    # Pairs an observer with the exact data line that makes it succeed.
    awaited_data = 'ping: sendmsg: Network is unreachable'
    return connection_observer, awaited_data
@pytest.fixture(scope='module', autouse=True)
def use_loud_event_loop():
    # Install moler's "loud" event-loop policy for the whole module so that
    # asyncio problems surface in test output.
    from moler.asyncio_runner import LoudEventLoopPolicy
    loud_policy = LoudEventLoopPolicy()
    asyncio.set_event_loop_policy(loud_policy)
@pytest.yield_fixture()
def event_loop():
    # Overrides pytest-asyncio's event_loop fixture so leftover feeders are
    # cancelled before the loop is closed.
    from moler.asyncio_runner import cancel_remaining_feeders
    loop = asyncio.get_event_loop_policy().new_event_loop()
    yield loop
    # event_loop fixture is autoloaded by @pytest.mark.asyncio decorator
    # and inside some of our async tests we just submit() observer inside runner without stopping it
    # so, we need to stop all submitted futures
    cancel_remaining_feeders(loop)
    loop.close()
|
multi_echo_server.py |
import socket
from multiprocessing import Process
import time
HOST = "localhost"
PORT = 8001
BUFFER_SIZE = 1024
def main():
    """Accept TCP connections on (HOST, PORT) and spawn a daemon process per client."""
    # create socket, bind and set to listening mode
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        # allow reused address so quick restarts don't hit EADDRINUSE
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((HOST, PORT))
        s.listen(2)
        while True:
            # accept connections and start a Process daemon for handling multiple connections
            conn, addr = s.accept()
            p = Process(target=handle_echo, args=(addr, conn))
            p.daemon = True
            p.start()
            # Close the parent's copy of the connected socket: the child
            # process owns it now.  Without this the parent leaks one file
            # descriptor per client and eventually hits the fd limit.
            conn.close()
            print("Started process ", p)
#echo connections back to client
def handle_echo(addr, conn, buffer_size=1024):
    """Echo one chunk received on *conn* back to the client, then close.

    buffer_size replaces the hard-coded module constant BUFFER_SIZE (1024);
    existing callers passing only (addr, conn) keep identical behaviour.

    NOTE(review): a single recv() means a payload larger than buffer_size is
    only partially echoed - confirm whether clients ever send more.
    """
    print("Connected by ", addr)
    full_data = conn.recv(buffer_size)
    conn.sendall(full_data)
    # half-close both directions so the client sees EOF before we drop the fd
    conn.shutdown(socket.SHUT_RDWR)
    conn.close()
if __name__ == "__main__":
main() |
train.py | # Author: Bichen Wu (bichen@berkeley.edu) 08/25/2016
"""Train"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import sys
import time
import math
import numpy as np
from six.moves import xrange
import tensorflow as tf
import threading
from config import *
from imdb import kitti
from utils.util import *
from nets import *
FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('dataset', 'KITTI', """Currently only support KITTI dataset.""")
tf.app.flags.DEFINE_string('data_path', '', """Root directory of data""")
tf.app.flags.DEFINE_string('image_set', 'train', """ Can be train, trainval, val, or test""")
tf.app.flags.DEFINE_string('train_dir', '/tmp/bichen/logs/squeezeseg/train',
                           """Directory where to write event logs and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000, """Maximum number of batches to run.""")
tf.app.flags.DEFINE_string('net', 'squeezeSeg', """Neural net architecture. """)
tf.app.flags.DEFINE_string('pretrained_model_path', '', """Path to the pretrained model.""")
tf.app.flags.DEFINE_integer('summary_step', 50, """Number of steps to save summary.""")
tf.app.flags.DEFINE_integer('checkpoint_step', 1000, """Number of steps to save checkpoint.""")
tf.app.flags.DEFINE_string('gpu', '0', """gpu id.""")
tf.app.flags.DEFINE_string('classes', 'red', """Extended classes.""")
# Fixed: these string flags had integer defaults (0 and 1), but the code
# compares them against the strings '0' / '1' (see main() and train()), so
# the defaults never matched and the default behaviour was wrong.
tf.app.flags.DEFINE_string('restore', '0', """Start from checkpoint""")
tf.app.flags.DEFINE_string('crf', '1', """Using CRF""")
def train():
  """Train the SqueezeSeg model.

  Builds the network selected by --net/--classes/--crf, starts enqueue
  threads that feed batches from the KITTI imdb, then runs the optimization
  loop with periodic summaries and checkpoints until --max_steps.

  Fixes vs. previous revision:
  - 'ckpt == None' -> 'ckpt is None' (identity check for None)
  - divergence assert no longer references undefined conf_loss/bbox_loss/
    class_loss (that raised NameError instead of the intended message)
  - 'except Exception, e' (Python-2-only syntax) -> 'except Exception as e'
  - dropped redundant f.close() inside the 'with open(...)' block
  """
  assert FLAGS.dataset == 'KITTI', \
      'Currently only support KITTI dataset'

  # NOTE(review): GPU is used only when --gpu is exactly '0'; any other value
  # falls back to CPU-only training - confirm this device-selection intent.
  if FLAGS.gpu == '0':
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
  else:
    os.environ['CUDA_VISIBLE_DEVICES'] = ""  # Train only with CPU

  with tf.Graph().as_default():
    assert FLAGS.net == 'squeezeSeg' or FLAGS.net == 'squeezeSeg32' or FLAGS.net == 'squeezeSeg16', \
        'Selected neural net architecture not supported: {}'.format(FLAGS.net)
    # Pick the model config (and CRF / no-CRF variant) for the chosen net.
    if FLAGS.net == 'squeezeSeg':
      if FLAGS.classes == 'ext':
        mc = kitti_squeezeSeg_config_ext()  # Added ground class
      else:
        mc = kitti_squeezeSeg_config()  # Original training set
      mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
      model = SqueezeSeg(mc)
    elif FLAGS.net == 'squeezeSeg32':
      if FLAGS.classes == 'ext':
        mc = kitti_squeezeSeg32_config_ext()  # Added ground class
      else:
        mc = kitti_squeezeSeg32_config()  # Original training set
      mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
      if FLAGS.crf == '1':  # Using conditional random fields (CRF)
        model = SqueezeSeg32(mc)
      else:  # Disable CRF
        model = SqueezeSeg32x(mc)
    elif FLAGS.net == 'squeezeSeg16':
      if FLAGS.classes == 'ext':
        mc = kitti_squeezeSeg16_config_ext()  # Added ground class
      else:
        mc = kitti_squeezeSeg16_config()  # Original training set
      mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
      if FLAGS.crf == '1':  # Using conditional random fields (CRF)
        model = SqueezeSeg16(mc)
      else:  # Disable CRF
        model = SqueezeSeg16x(mc)
    imdb = kitti(FLAGS.image_set, FLAGS.data_path, mc)

    # save model size, flops, activations by layers
    with open(os.path.join(FLAGS.train_dir, 'model_metrics.txt'), 'w') as f:
      f.write('Number of parameter by layer:\n')
      count = 0
      for c in model.model_size_counter:
        f.write('\t{}: {}\n'.format(c[0], c[1]))
        count += c[1]
      f.write('\ttotal: {}\n'.format(count))
      count = 0
      f.write('\nActivation size by layer:\n')
      for c in model.activation_counter:
        f.write('\t{}: {}\n'.format(c[0], c[1]))
        count += c[1]
      f.write('\ttotal: {}\n'.format(count))
      count = 0
      f.write('\nNumber of flops by layer:\n')
      for c in model.flop_counter:
        f.write('\t{}: {}\n'.format(c[0], c[1]))
        count += c[1]
      f.write('\ttotal: {}\n'.format(count))
    print ('Model statistics saved to {}.'.format(
      os.path.join(FLAGS.train_dir, 'model_metrics.txt')))

    def enqueue(sess, coord):
      # Producer loop: keeps the model's input queue filled until the
      # coordinator requests a stop (or an exception is raised).
      with coord.stop_on_exception():
        while not coord.should_stop():
          # read batch input
          lidar_per_batch, lidar_mask_per_batch, label_per_batch,\
              weight_per_batch = imdb.read_batch()
          feed_dict = {
              model.ph_keep_prob: mc.KEEP_PROB,
              model.ph_lidar_input: lidar_per_batch,
              model.ph_lidar_mask: lidar_mask_per_batch,
              model.ph_label: label_per_batch,
              model.ph_loss_weight: weight_per_batch,
          }
          sess.run(model.enqueue_op, feed_dict=feed_dict)

    # Train from checkpoint (deprecated tf.all_variables / initialize_* APIs
    # are kept intentionally to match the TF 1.x version this code targets).
    ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
    if ckpt is None:
      # Creating a new checkpoint: fresh variables, fresh global step.
      saver = tf.train.Saver(tf.all_variables(), max_to_keep=None)
      summary_op = tf.summary.merge_all()
      init = tf.initialize_all_variables()
      sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
      sess.run(init)
      global_step = 0
    else:
      # Restoring checkpoint.
      var_list = tf.all_variables()
      # Variables that may mismatch in shape when NUM_CLASS changed between runs.
      new_var_list = [variable for variable in var_list if "recurrent_crf" not in variable.name and "conv14_prob" not in variable.name]
      try:
        # Restoring all variables.
        check_point_path = ckpt.model_checkpoint_path
        global_step = int(float(check_point_path.split('/')[-1].split('-')[-1]))
        saver = tf.train.Saver(tf.all_variables(), max_to_keep=None)
        summary_op = tf.summary.merge_all()
        config = tf.ConfigProto(allow_soft_placement=True)
        sess = tf.Session(config=config)
        saver.restore(sess, check_point_path)
      except tf.errors.InvalidArgumentError:
        # Restoring only variables with matching shapes; the rest are randomly initialized.
        print("###########Number of output channels/labels different from checkpoint. Not restoring the Recurrent CRF Layer and conv14 layer###########")
        check_point_path = ckpt.model_checkpoint_path
        global_step = int(float(check_point_path.split('/')[-1].split('-')[-1]))
        saver = tf.train.Saver(new_var_list, max_to_keep=None)
        summary_op = tf.summary.merge_all()
        config = tf.ConfigProto(allow_soft_placement=True)
        sess = tf.Session(config=config)
        saver.restore(sess, check_point_path)
        # initializing CRF parameters and conv14 layer
        r_crf_var_list = [variable for variable in var_list if "recurrent_crf" in variable.name or "conv14_prob" in variable.name]
        init_new_vars_op = tf.initialize_variables(r_crf_var_list)
        sess.run(init_new_vars_op)
      # Setting up global saver (covers all variables for future checkpoints).
      saver = tf.train.Saver(tf.all_variables(), max_to_keep=None)

    summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

    coord = tf.train.Coordinator()
    enq_threads = []
    for _ in range(mc.NUM_ENQUEUE_THREAD):
      eqth = threading.Thread(target=enqueue, args=[sess, coord])
      eqth.start()
      enq_threads.append(eqth)

    run_options = tf.RunOptions(timeout_in_ms=60000)

    try:
      for step in xrange(FLAGS.max_steps):
        start_time = time.time()
        if step % FLAGS.summary_step == 0 or step == FLAGS.max_steps-1:
          # Summary step: also fetch inputs/predictions for visualization.
          op_list = [
              model.lidar_input, model.lidar_mask, model.label, model.train_op,
              model.loss, model.pred_cls, summary_op
          ]
          lidar_per_batch, lidar_mask_per_batch, label_per_batch, \
              _, loss_value, pred_cls, summary_str = sess.run(op_list,
                                                              options=run_options)
          label_image = visualize_seg(label_per_batch[:6, :, :], mc)
          pred_image = visualize_seg(pred_cls[:6, :, :], mc)
          # Run evaluation on the batch
          ious, _, _, _ = evaluate_iou(
              label_per_batch, pred_cls*np.squeeze(lidar_mask_per_batch),
              mc.NUM_CLASS)
          feed_dict = {}
          # Assume that class-0 is the background class
          for i in range(1, mc.NUM_CLASS):
            feed_dict[model.iou_summary_placeholders[i]] = ious[i]
          iou_summary_list = sess.run(model.iou_summary_ops[1:], feed_dict)
          # Run visualization
          viz_op_list = [model.show_label, model.show_depth_img, model.show_pred]
          viz_summary_list = sess.run(
              viz_op_list,
              feed_dict={
                  model.depth_image_to_show: lidar_per_batch[:6, :, :, [4]],
                  model.label_to_show: label_image,
                  model.pred_image_to_show: pred_image,
              }
          )
          # Add summaries
          summary_writer.add_summary(summary_str, step)
          for sum_str in iou_summary_list:
            summary_writer.add_summary(sum_str, step)
          for viz_sum in viz_summary_list:
            summary_writer.add_summary(viz_sum, step)
          # force tensorflow to synchronise summaries
          summary_writer.flush()
        else:
          _, loss_value = sess.run(
              [model.train_op, model.loss], options=run_options)
        duration = time.time() - start_time
        # Fixed message: the old one referenced conf_loss/bbox_loss/class_loss
        # which are undefined here and would raise NameError on divergence.
        assert not np.isnan(loss_value), \
            'Model diverged. Total loss: {}'.format(loss_value)
        if step % 10 == 0:
          num_images_per_step = mc.BATCH_SIZE
          images_per_sec = num_images_per_step / duration
          sec_per_batch = float(duration)
          format_str = ('%s: step %d, loss = %.2f (%.1f images/sec; %.3f '
                        'sec/batch)')
          print (format_str % (datetime.now(), step, loss_value,
                               images_per_sec, sec_per_batch))
          sys.stdout.flush()
        # Save the model checkpoint periodically.
        if step % FLAGS.checkpoint_step == 0 or step == FLAGS.max_steps-1:
          checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
          saver.save(sess, checkpoint_path, global_step=step)
    except Exception as e:
      coord.request_stop(e)
    finally:
      coord.request_stop()
      # Close the input queue so blocked enqueue threads can exit, then join.
      sess.run(model.q.close(cancel_pending_enqueues=True))
      coord.join(enq_threads)
def main(argv=None):  # pylint: disable=unused-argument
  # With --restore '0' the train dir is wiped and recreated so training
  # starts fresh; otherwise train() resumes from any checkpoint found there.
  if FLAGS.restore == '0':
    if tf.gfile.Exists(FLAGS.train_dir):
      tf.gfile.DeleteRecursively(FLAGS.train_dir)
    tf.gfile.MakeDirs(FLAGS.train_dir)
  train()


if __name__ == '__main__':
  # tf.app.run() parses flags and then calls main()
  tf.app.run()
|
concurrent.py | #!/usr/bin/env python3
###########################################################################
##
## Copyright (c) 2014 Adobe Systems Incorporated. All rights reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###########################################################################
import argparse
from urllib.request import urlopen
import yaml
import statistics
import os
import sys
import time
import re
from subprocess import Popen, PIPE
import traceback
from threading import Thread,Lock
import random
import bench_base
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
timesToRun = 4
def runConcurrentExperiment(name, data_dir):
    """Profile query *name* under 1..8 concurrent client threads.

    Results are persisted incrementally to <data_dir>/concurrent/<name>.yaml
    so an interrupted run can resume where it left off.

    Fixes vs. previous revision: yaml.safe_load instead of yaml.load (the
    latter without a Loader is unsafe and deprecated; the data files contain
    only plain lists/dicts/numbers, so behaviour is unchanged), and the
    worker thread catches Exception instead of a bare except.
    """
    global timesToRun
    print("Running concurrent experiment for '" + name + "'.")
    allConcurrentQueries = list(range(1,9))
    cores = 143
    memoryPerWorker = "20g"
    tps = 1500000
    def isDataFinished(concurrentQueries,d):
        # Complete when every expected thread recorded >= timesToRun samples.
        if not d or not isinstance(d,list) or len(d)<concurrentQueries:
            return False
        for thread in d:
            if len(thread) < timesToRun: return False
        return True
    def run(concurrentQueries):
        g_lock = Lock()
        def threadEntry(threadNum):
            def isFinished():
                # All threads (not just this one) must reach timesToRun samples.
                with g_lock:
                    for n in results:
                        if len(n) < timesToRun: return False
                    return True
            try:
                while not isFinished():
                    print(str(threadNum) + ": Calling query.")
                    result = bench_base.runQuery(name, "2014-01-01", "2014-01-07", True)
                    queryExecutionTime = result[2]['TimeMillis']-result[0]['TimeMillis']
                    print(str(threadNum) + ": Query execution time: " +
                          str(queryExecutionTime))
                    with g_lock:
                        results[threadNum].append(queryExecutionTime)
            except Exception:
                # Exception (not bare except) so KeyboardInterrupt/SystemExit
                # are not swallowed inside worker threads.
                print("Error occurred in thread.")
                traceback.print_exc()
        results = [[] for x in range(0,concurrentQueries)]
        threads = [
            Thread(target=threadEntry, args=(i,)) for i in range(0,concurrentQueries)
        ]
        [t.start() for t in threads]
        [t.join() for t in threads]
        return results
    outFilePath = data_dir + "/concurrent/" + name + ".yaml"
    if os.path.isfile(outFilePath):
        with open(outFilePath, "r") as f: data = yaml.safe_load(f)
    else: data = {}
    for concurrentQueries in allConcurrentQueries:
        if concurrentQueries in data and \
           isDataFinished(concurrentQueries,data[concurrentQueries]):
            print("  Already profiled for " + str(concurrentQueries) +
                  " concurrent queries, skipping.")
            continue
        else:
            data[concurrentQueries] = {}
        while not isDataFinished(concurrentQueries,data[concurrentQueries]):
            try:
                bench_base.restartServers()
                bench_base.restartSparkContext(memoryPerWorker, cores)
                # For cache.
                bench_base.runQuery(name, "2014-01-01", "2014-01-07", True, tps)
                data[concurrentQueries] = run(concurrentQueries)
                # persist after every concurrency level so progress survives crashes
                with open(outFilePath, "w") as f:
                    f.write(yaml.dump(data, indent=2, default_flow_style=False))
            except KeyboardInterrupt: sys.exit(-1)
            except Exception:
                print("Exception occurred, retrying.")
                traceback.print_exc()
                data[concurrentQueries] = {}
                pass
    return data
def getStats(data, times_to_run=None):
    """Aggregate per-concurrency timing data into plottable series.

    *data* maps concurrency level -> list of per-thread lists of execution
    times (ms). Returns (x, y, err, minX, maxX, minY, maxY) where y/err are
    the mean/stdev over the first *times_to_run* samples of every thread.

    *times_to_run* defaults to the module-level timesToRun so existing
    callers keep their behaviour; passing it explicitly makes the function
    self-contained (and testable).
    """
    if times_to_run is None:
        times_to_run = timesToRun  # module-level default, kept for compatibility
    x = []; y = []; err = []
    sortedKeys = sorted(data)
    minX = sortedKeys[0]; maxX = sortedKeys[-1]
    # NOTE(review): minY starts from the first raw sample (not a mean), so it
    # can end up below every plotted mean - behaviour preserved as-is.
    minY = data[minX][0][0]; maxY = minY
    for concurrentQueries in sortedKeys:
        x.append(concurrentQueries)
        allTimes = []
        for thread in data[concurrentQueries]:
            allTimes += thread[0:times_to_run]
        m_data = statistics.mean(allTimes)
        if m_data < minY: minY = m_data
        if m_data > maxY: maxY = m_data
        y.append(m_data)
        err.append(statistics.stdev(allTimes))
    return (x,y,err,minX,maxX,minY,maxY)
def plotConcurrent(query, data, data_dir):
    # Plot mean execution time vs. number of concurrent queries (with stdev
    # error bars) and save PDF + PNG under <data_dir>/concurrent/.
    fig = plt.figure()
    ax = plt.subplot(111)
    plt.title(query)
    plt.xlabel("Concurrent Queries")
    plt.ylabel("Execution Time (ms)")
    (x, y, err, minX, maxX, minY, maxY) = getStats(data)
    plt.errorbar(x, y, yerr=err, marker='.', color="black", ecolor="gray")
    # pad x-axis by one unit; leave 2% headroom above the tallest error bar
    plt.axis([minX-1, maxX+1, 0, 1.02*(maxY+max(err))])
    leg = ax.legend(
        ["Caching. 1.5M target partition size. 6 workers."],
        fancybox=True
    )
    leg.get_frame().set_alpha(0.5)
    # plt.grid()
    plt.savefig(data_dir + "/concurrent/pdf/" + query + ".pdf")
    plt.savefig(data_dir + "/concurrent/png/" + query + ".png")
    plt.clf()
    # Print stats.
    def two(s): return "{:.2f}".format(s)
    # NOTE(review): y[1] and y[7] assume data covers concurrency levels up to
    # 8 (range(1,9) in the experiment); fewer levels raise IndexError here.
    print(" & ".join([query, two(y[0]), two(y[1]/y[0]), two(y[7]/y[0])]) + r" \\")
# ---- command-line entry point: collect timing data and/or render plots ----
parser = argparse.ArgumentParser()
parser.add_argument("--collect-data", dest="collect", action="store_true")
parser.add_argument("--create-plots", dest="plot", action="store_true")
parser.add_argument("--data-dir", dest="data_dir", type=str, default=".")
args = parser.parse_args()

# Queries profiled by the experiment, in presentation order.
queries = [
  "Pageviews",
  "TopPages",
  "TopPagesByBrowser",
  "TopPagesByPreviousTopPages",
  "Revenue",
  "TopReferringDomains",
  "RevenueFromTopReferringDomains",
  "RevenueFromTopReferringDomainsFirstVisitGoogle"
]

if args.collect:
  if not os.path.isdir(args.data_dir + "/concurrent"):
    os.makedirs(args.data_dir + "/concurrent")
  for query in queries:
    runConcurrentExperiment(query, args.data_dir)

if args.plot:
  # LaTeX table header matching the per-query row printed by plotConcurrent().
  print(" & ".join(
    ["Query","Serial Time (ms)","2 Concurrent Slowdown","8 Concurrent Slowdown"]
  ) + r" \\ \hline")
  if not os.path.isdir(args.data_dir + "/concurrent/pdf"):
    os.makedirs(args.data_dir + "/concurrent/pdf")
  if not os.path.isdir(args.data_dir + "/concurrent/png"):
    os.makedirs(args.data_dir + "/concurrent/png")
  for query in queries:
    with open(args.data_dir + "/concurrent/" + query + ".yaml", "r") as f:
      # safe_load: no arbitrary YAML tags executed; these files hold only
      # plain lists/dicts/numbers, so behaviour is unchanged.
      data = yaml.safe_load(f)
    plotConcurrent(query, data, args.data_dir)
|
distributed_tuple_observation.py | from multiprocessing import Process
import numpy as np
from kiox.distributed.server import KioxServer
from kiox.distributed.step_sender import StepSender
from kiox.transition_buffer import FIFOTransitionBuffer
from kiox.transition_factory import SimpleTransitionFactory
def rollout():
    """Stream 1000 randomly generated steps to the kiox server, then stop."""
    sender = StepSender("localhost", 8000, 1)
    for step in range(1000):
        # Tuple observation: a 100-dim vector plus a 3x84x84 image.
        observation = (
            np.random.random(100).astype(np.float32),
            np.random.random((3, 84, 84)).astype(np.float32),
        )
        action = np.random.random(4).astype(np.float32)
        reward = np.random.random()
        # Episode boundary every 100 steps (including the very first step).
        terminal = step % 100 == 0
        sender.collect(observation, action, reward, terminal)
    sender.stop()
def main():
    """Run a kiox server, collect one rollout in a subprocess and sample a batch."""
    def transition_buffer_builder():
        # FIFO buffer that keeps at most 1000 transitions.
        return FIFOTransitionBuffer(1000)
    def transition_factory_builder():
        return SimpleTransitionFactory()
    # setup server (tuple observation: a 100-dim vector and a 3x84x84 image)
    server = KioxServer(
        host="localhost",
        port=8000,
        observation_shape=((100,), (3, 84, 84)),
        action_shape=(4,),
        reward_shape=(1,),
        batch_size=8,
        transition_buffer_builder=transition_buffer_builder,
        transition_factory_builder=transition_factory_builder,
    )
    server.start()
    # start rollout in a child process and wait until it finishes
    worker = Process(target=rollout)
    worker.start()
    worker.join()
    # sample one mini-batch and show the shape of each observation element
    minibatch = server.sample()
    print(minibatch.observations[0].shape)
    print(minibatch.observations[1].shape)
    server.stop()
# Run the example only when executed directly (not on import).
if __name__ == "__main__":
    main()
|
main.py | import os
import sys
import random
import requests
from threading import Thread
client = requests.Session()
amount = 0
class astro:
if os.name == "nt":
os.system("cls")
os.system("title Astro Massban")
else:
os.system("clear")
terminal_title = f"Astro Massban"
print(f'\33]0;{terminal_title}\a', end='', flush=True)
print("\u001b[38;5;46m ╔═╗╔═╗╔╦╗╦═╗╔═╗")
print("\u001b[38;5;48m ╠═╣╚═╗ ║ ╠╦╝║ ║")
print("\u001b[38;5;50m ╩ ╩╚═╝ ╩ ╩╚═╚═╝")
print("\u001b[38;5;51m .gg/zeroday\n\n")
token = input("\u001b[38;5;46mtoken@astro# ")
guild = input("\u001b[38;5;46mguild@astro# ")
def requeststart(id, api):
headers = {"Authorization": f"Bot {astro.token}"}
s = client.put(random.choice(api), headers=headers)
if s.status_code in (200, 201, 203, 204, 205, 206, 207, 208, 210):
print(f"\u001b[38;5;46m[Astro] - Executed ID - Status code: {s.status_code}")
if s.status_code == 429:
print(f"\033[91m[Astro] - Rate limited, trying in 1 second.")
if s.status_code == 400:
print(f"\033[91m[Astro] - Bad request, or reached most non-guilded banned members.")
if __name__ == "__main__":
for id in open("ids.txt", "r"):
amount += 1
if os.name == "nt":
os.system(f"title Astro Massban - Requests: {amount}")
else:
terminal_title = f"Astro Massban - Requests: {amount}"
print(f'\33]0;{terminal_title}\a', end='', flush=True)
api = [f"https://discord.com/api/v9/guilds/{astro.guild}/bans/{id}", f"https://discordapp.com/api/v9/guilds/{astro.guild}/bans/{id}", f"https://canary.discord.com/api/v9/guilds/{astro.guild}/bans/{id}", f"https://ptb.discord.com/api/v9/guilds/{astro.guild}/bans/{id}"]
Thread(target=astro.requeststart, args=(id, api)).start()
|
windows.py | from ...third_party import WebsocketServer # type: ignore
from .configurations import ConfigManager
from .configurations import WindowConfigManager
from .diagnostics import ensure_diagnostics_panel
from .diagnostics_manager import is_severity_included
from .logging import debug
from .logging import exception_log
from .message_request_handler import MessageRequestHandler
from .panels import log_server_message
from .promise import Promise
from .protocol import DocumentUri
from .protocol import Error
from .protocol import Location
from .sessions import AbstractViewListener
from .sessions import get_plugin
from .sessions import Logger
from .sessions import Manager
from .sessions import Session
from .settings import userprefs
from .transports import create_transport
from .types import ClientConfig
from .types import matches_pattern
from .typing import Optional, Any, Dict, Deque, List, Generator, Tuple
from .url import parse_uri
from .views import extract_variables
from .views import format_diagnostic_for_panel
from .views import make_link
from .workspace import ProjectFolders
from .workspace import sorted_workspace_folders
from collections import OrderedDict
from collections import deque
from subprocess import CalledProcessError
from time import time
from weakref import ref
from weakref import WeakSet
import functools
import json
import sublime
import threading
import urllib.parse
# Placeholder text shown in the diagnostics panel when there is nothing to report.
_NO_DIAGNOSTICS_PLACEHOLDER = " No diagnostics. Well done!"
def extract_message(params: Any) -> str:
    """Return the "message" field of *params* when it is a dict, else "???"."""
    if isinstance(params, dict):
        return params.get("message", "???")
    return "???"
def set_diagnostics_count(view: sublime.View, errors: int, warnings: int) -> None:
    """Show (or clear) the total error/warning count in the view's status bar.

    The entry is erased when the user preference is disabled. Any failure
    (e.g. the view is gone) is deliberately swallowed.
    """
    try:
        status_key = AbstractViewListener.TOTAL_ERRORS_AND_WARNINGS_STATUS_KEY
        if not userprefs().show_diagnostics_count_in_view_status:
            view.erase_status(status_key)
            return
        view.set_status(status_key, "E: {}, W: {}".format(errors, warnings))
    except Exception:
        pass
class WindowManager(Manager):
    """Coordinates LSP state for one Sublime Text window.

    Owns the set of language-server Sessions, queues AbstractViewListeners
    and attaches them to matching (possibly newly started) sessions, and
    renders the diagnostics panel plus per-view diagnostics counts.
    Methods suffixed ``_async`` are expected to run on ST's async thread.
    """
    DIAGNOSTIC_PHANTOM_KEY = "lsp_diagnostic_phantom"
    def __init__(
        self,
        window: sublime.Window,
        workspace: ProjectFolders,
        configs: WindowConfigManager,
    ) -> None:
        """Create a manager for *window* with its workspace folders and configs."""
        self._window = window
        self._configs = configs
        self._sessions = WeakSet()  # type: WeakSet[Session]
        self._workspace = workspace
        # Listeners waiting to be attached; _new_listener/_new_session carry
        # in-flight state while a server is being started for a listener.
        self._pending_listeners = deque()  # type: Deque[AbstractViewListener]
        self._listeners = WeakSet()  # type: WeakSet[AbstractViewListener]
        self._new_listener = None  # type: Optional[AbstractViewListener]
        self._new_session = None  # type: Optional[Session]
        self._diagnostic_phantom_set = None  # type: Optional[sublime.PhantomSet]
        self._panel_code_phantoms = None  # type: Optional[sublime.PhantomSet]
        self.total_error_count = 0
        self.total_warning_count = 0
        # Show the "no diagnostics" placeholder in the panel right away (main thread).
        sublime.set_timeout(functools.partial(self._update_panel_main_thread, _NO_DIAGNOSTICS_PLACEHOLDER, []))
    def get_config_manager(self) -> WindowConfigManager:
        """Return the per-window client configuration manager."""
        return self._configs
    def on_load_project_async(self) -> None:
        """React to a project (re)load: refresh workspace folders and configs."""
        self.update_workspace_folders_async()
        self._configs.update()
    def on_post_save_project_async(self) -> None:
        # Saving the project file is treated the same as (re)loading it.
        self.on_load_project_async()
    def update_workspace_folders_async(self) -> None:
        """Propagate workspace folder changes to every running session."""
        if self._workspace.update():
            workspace_folders = self._workspace.get_workspace_folders()
            for session in self._sessions:
                session.update_folders(workspace_folders)
    def enable_config_async(self, config_name: str) -> None:
        self._configs.enable_config(config_name)
    def disable_config_async(self, config_name: str) -> None:
        self._configs.disable_config(config_name)
    def open_location_async(
        self,
        location: Location,
        session_name: Optional[str],
        view: sublime.View,
        flags: int = 0,
        group: int = -1
    ) -> Promise[Optional[sublime.View]]:
        """Ask the first matching session (optionally selected by name) to open
        *location*. Resolves to None when no session can handle the view."""
        for session in self.sessions(view):
            if session_name is None or session_name == session.config.name:
                return session.open_location_async(location, flags, group)
        return Promise.resolve(None)
    def register_listener_async(self, listener: AbstractViewListener) -> None:
        """Queue *listener* for attachment to matching sessions."""
        set_diagnostics_count(listener.view, self.total_error_count, self.total_warning_count)
        # Update workspace folders in case the user has changed those since the window was created.
        # There is currently no notification in ST that would notify about folder changes.
        self.update_workspace_folders_async()
        self._pending_listeners.appendleft(listener)
        # Only kick off the dequeue loop if no server start is already in flight.
        if self._new_listener is None:
            self._dequeue_listener_async()
    def unregister_listener_async(self, listener: AbstractViewListener) -> None:
        self._listeners.discard(listener)
    def listeners(self) -> Generator[AbstractViewListener, None, None]:
        yield from self._listeners
    def listener_for_view(self, view: sublime.View) -> Optional[AbstractViewListener]:
        """Return the registered listener for *view*, if any."""
        for listener in self.listeners():
            if listener.view == view:
                return listener
        return None
    def _dequeue_listener_async(self) -> None:
        """Process one pending listener: publish running sessions to it and start
        any still-missing server config.

        Recurses until the pending queue is drained. ``_new_session`` and
        ``_new_listener`` carry state between recursive steps while a server
        is being started.
        """
        listener = None  # type: Optional[AbstractViewListener]
        if self._new_listener is not None:
            listener = self._new_listener
            # debug("re-checking listener", listener)
            self._new_listener = None
        else:
            try:
                listener = self._pending_listeners.pop()
                if not listener.view.is_valid():
                    # debug("listener", listener, "is no longer valid")
                    return self._dequeue_listener_async()
                # debug("adding new pending listener", listener)
                self._listeners.add(listener)
            except IndexError:
                # We have handled all pending listeners.
                self._new_session = None
                return
        if self._new_session:
            self._sessions.add(self._new_session)
        self._publish_sessions_to_listener_async(listener)
        if self._new_session:
            # Drop the freshly started session if no view ended up attaching to it.
            if not any(self._new_session.session_views_async()):
                self._sessions.discard(self._new_session)
                self._new_session.end_async()
            self._new_session = None
        config = self._needed_config(listener.view)
        if config:
            # debug("found new config for listener", listener)
            self._new_listener = listener
            self.start_async(config, listener.view)
        else:
            # debug("no new config found for listener", listener)
            self._new_listener = None
            self._dequeue_listener_async()
    def _publish_sessions_to_listener_async(self, listener: AbstractViewListener) -> None:
        """Notify *listener* about every running session that can handle its view."""
        inside_workspace = self._workspace.contains(listener.view)
        scheme = urllib.parse.urlparse(listener.get_uri()).scheme
        for session in self._sessions:
            if session.can_handle(listener.view, scheme, capability=None, inside_workspace=inside_workspace):
                # debug("registering session", session.config.name, "to listener", listener)
                try:
                    listener.on_session_initialized_async(session)
                except Exception as ex:
                    message = "failed to register session {} to listener {}".format(session.config.name, listener)
                    exception_log(message, ex)
    def window(self) -> sublime.Window:
        return self._window
    def sessions(self, view: sublime.View, capability: Optional[str] = None) -> Generator[Session, None, None]:
        """Yield running sessions that can handle *view* (optionally filtered by *capability*)."""
        inside_workspace = self._workspace.contains(view)
        sessions = list(self._sessions)
        uri = view.settings().get("lsp_uri")
        if not isinstance(uri, str):
            return
        scheme = urllib.parse.urlparse(uri).scheme
        for session in sessions:
            if session.can_handle(view, scheme, capability, inside_workspace):
                yield session
    def get_session(self, config_name: str, file_path: str) -> Optional[Session]:
        return self._find_session(config_name, file_path)
    def _can_start_config(self, config_name: str, file_path: str) -> bool:
        # At most one session per config may handle a given path.
        return not bool(self._find_session(config_name, file_path))
    def _find_session(self, config_name: str, file_path: str) -> Optional[Session]:
        """Return the session named *config_name* that handles *file_path*, if any."""
        inside = self._workspace.contains(file_path)
        for session in self._sessions:
            if session.config.name == config_name and session.handles_path(file_path, inside):
                return session
        return None
    def _needed_config(self, view: sublime.View) -> Optional[ClientConfig]:
        """Return the first config matching *view* that has no session handling it yet."""
        configs = self._configs.match_view(view)
        handled = False
        file_name = view.file_name()
        inside = self._workspace.contains(view)
        for config in configs:
            handled = False
            for session in self._sessions:
                if config.name == session.config.name and session.handles_path(file_name, inside):
                    handled = True
                    break
            if not handled:
                return config
        return None
    def start_async(self, config: ClientConfig, initiating_view: sublime.View) -> None:
        """Start a language server for *config*, triggered by *initiating_view*.

        Handles plugin install/update, transport creation and asynchronous
        initialization; on failure the config is disabled for this window
        and pending-listener processing resumes.
        """
        config = ClientConfig.from_config(config, {})
        file_path = initiating_view.file_name() or ''
        if not self._can_start_config(config.name, file_path):
            # debug('Already starting on this window:', config.name)
            return
        try:
            workspace_folders = sorted_workspace_folders(self._workspace.folders, file_path)
            plugin_class = get_plugin(config.name)
            variables = extract_variables(self._window)
            cwd = None  # type: Optional[str]
            if plugin_class is not None:
                if plugin_class.needs_update_or_installation():
                    config.set_view_status(initiating_view, "installing...")
                    plugin_class.install_or_update()
                additional_variables = plugin_class.additional_variables()
                if isinstance(additional_variables, dict):
                    variables.update(additional_variables)
                cannot_start_reason = plugin_class.can_start(self._window, initiating_view, workspace_folders, config)
                if cannot_start_reason:
                    config.erase_view_status(initiating_view)
                    message = "cannot start {}: {}".format(config.name, cannot_start_reason)
                    self._configs.disable_config(config.name, only_for_session=True)
                    # Continue with handling pending listeners
                    self._new_session = None
                    sublime.set_timeout_async(self._dequeue_listener_async)
                    return self._window.status_message(message)
                cwd = plugin_class.on_pre_start(self._window, initiating_view, workspace_folders, config)
            config.set_view_status(initiating_view, "starting...")
            session = Session(self, self._create_logger(config.name), workspace_folders, config, plugin_class)
            if cwd:
                transport_cwd = cwd  # type: Optional[str]
            else:
                transport_cwd = workspace_folders[0].path if workspace_folders else None
            transport_config = config.resolve_transport_config(variables)
            transport = create_transport(transport_config, transport_cwd, session)
            if plugin_class:
                plugin_class.on_post_start(self._window, initiating_view, workspace_folders, config)
            config.set_view_status(initiating_view, "initialize")
            session.initialize_async(
                variables=variables,
                transport=transport,
                working_directory=cwd,
                init_callback=functools.partial(self._on_post_session_initialize, initiating_view)
            )
            self._new_session = session
        except Exception as e:
            message = "".join((
                "Failed to start {0} - disabling for this window for the duration of the current session.\n",
                "Re-enable by running \"LSP: Enable Language Server In Project\" from the Command Palette.",
                "\n\n--- Error: ---\n{1}"
            )).format(config.name, str(e))
            exception_log("Unable to start subprocess for {}".format(config.name), e)
            if isinstance(e, CalledProcessError):
                print("Server output:\n{}".format(e.output.decode('utf-8', 'replace')))
            self._configs.disable_config(config.name, only_for_session=True)
            config.erase_view_status(initiating_view)
            sublime.message_dialog(message)
            # Continue with handling pending listeners
            self._new_session = None
            sublime.set_timeout_async(self._dequeue_listener_async)
    def _on_post_session_initialize(
        self, initiating_view: sublime.View, session: Session, is_error: bool = False
    ) -> None:
        """Callback after session initialization; on success resume listener dequeue."""
        if is_error:
            session.config.erase_view_status(initiating_view)
            self._new_listener = None
            self._new_session = None
        else:
            sublime.set_timeout_async(self._dequeue_listener_async)
    def _create_logger(self, config_name: str) -> Logger:
        """Build a Logger per the "log_server" user setting.

        No valid logger type yields a no-op RouterLogger; exactly one yields
        that logger directly; several are combined behind a RouterLogger.
        """
        logger_map = {
            "panel": PanelLogger,
            "remote": RemoteLogger,
        }
        loggers = []
        for logger_type in userprefs().log_server:
            if logger_type not in logger_map:
                debug("Invalid logger type ({}) specified for log_server settings".format(logger_type))
                continue
            loggers.append(logger_map[logger_type])
        if len(loggers) == 0:
            return RouterLogger()  # logs nothing
        elif len(loggers) == 1:
            return loggers[0](self, config_name)
        else:
            router_logger = RouterLogger()
            for logger in loggers:
                router_logger.append(logger(self, config_name))
            return router_logger
    def handle_message_request(self, session: Session, params: Any, request_id: Any) -> None:
        """Show a server-initiated message request dialog in the active view."""
        view = self._window.active_view()
        if view:
            MessageRequestHandler(view, session, request_id, params, session.config.name).show()
    def restart_sessions_async(self, config_name: Optional[str] = None) -> None:
        """End matching sessions and re-register every listener to restart them."""
        self._end_sessions_async(config_name)
        listeners = list(self._listeners)
        self._listeners.clear()
        for listener in listeners:
            self.register_listener_async(listener)
    def _end_sessions_async(self, config_name: Optional[str] = None) -> None:
        """End (all, or only *config_name*) sessions and forget them."""
        sessions = list(self._sessions)
        for session in sessions:
            if config_name is None or config_name == session.config.name:
                session.end_async()
                self._sessions.discard(session)
    def get_project_path(self, file_path: str) -> Optional[str]:
        """Return the longest workspace folder that is a prefix of *file_path*, if any."""
        candidate = None  # type: Optional[str]
        for folder in self._workspace.folders:
            if file_path.startswith(folder):
                if candidate is None or len(folder) > len(candidate):
                    candidate = folder
        return candidate
    def should_present_diagnostics(self, uri: DocumentUri) -> Optional[str]:
        """Return a human-readable reason to suppress diagnostics for *uri*, or None."""
        scheme, path = parse_uri(uri)
        if scheme != "file":
            return None
        if not self._workspace.contains(path):
            return "not inside window folders"
        view = self._window.active_view()
        if not view:
            return None
        settings = view.settings()
        if matches_pattern(path, settings.get("binary_file_patterns")):
            return "matches a pattern in binary_file_patterns"
        if matches_pattern(path, settings.get("file_exclude_patterns")):
            return "matches a pattern in file_exclude_patterns"
        if matches_pattern(path, settings.get("folder_exclude_patterns")):
            return "matches a pattern in folder_exclude_patterns"
        return None
    def on_post_exit_async(self, session: Session, exit_code: int, exception: Optional[Exception]) -> None:
        """Handle a server process exit; offer a restart on abnormal termination."""
        self._sessions.discard(session)
        for listener in self._listeners:
            listener.on_session_shutdown_async(session)
        if exit_code != 0 or exception:
            config = session.config
            msg = "".join((
                "{0} exited with status code {1}. ",
                "Do you want to restart it? If you choose Cancel, it will be disabled for this window for the ",
                "duration of the current session. ",
                "Re-enable by running \"LSP: Enable Language Server In Project\" from the Command Palette."
            )).format(config.name, exit_code)
            if exception:
                msg += "\n\n--- Error: ---\n{}".format(str(exception))
            if sublime.ok_cancel_dialog(msg, "Restart {}".format(config.name)):
                for listener in self._listeners:
                    self.register_listener_async(listener)
            else:
                self._configs.disable_config(config.name, only_for_session=True)
    def plugin_unloaded(self) -> None:
        """
        This is called **from the main thread** when the plugin unloads. In that case we must destroy all sessions
        from the main thread. That could lead to some dict/list being mutated while iterated over, so be careful
        """
        self._end_sessions_async()
    def handle_server_message(self, server_name: str, message: str) -> None:
        """Append *message* to the server log panel (scheduled on the main thread)."""
        sublime.set_timeout(lambda: log_server_message(self._window, server_name, message))
    def handle_log_message(self, session: Session, params: Any) -> None:
        self.handle_server_message(session.config.name, extract_message(params))
    def handle_stderr_log(self, session: Session, message: str) -> None:
        self.handle_server_message(session.config.name, message)
    def handle_show_message(self, session: Session, params: Any) -> None:
        sublime.status_message("{}: {}".format(session.config.name, extract_message(params)))
    def update_diagnostics_panel_async(self) -> None:
        """Rebuild the diagnostics panel content and the per-view counts.

        Aggregates diagnostics from all sessions grouped by file path, tracks
        panel rows for phantom placement, then schedules the actual panel
        update on the main thread.
        """
        to_render = []  # type: List[str]
        self.total_error_count = 0
        self.total_warning_count = 0
        listeners = list(self._listeners)
        prephantoms = []  # type: List[Tuple[int, int, str, str]]
        row = 0
        max_severity = userprefs().diagnostics_panel_include_severity_level
        contributions = OrderedDict(
        )  # type: OrderedDict[str, List[Tuple[str, Optional[int], Optional[str], Optional[str]]]]
        for session in self._sessions:
            local_errors, local_warnings = session.diagnostics_manager.sum_total_errors_and_warnings_async()
            self.total_error_count += local_errors
            self.total_warning_count += local_warnings
            for (_, path), contribution in session.diagnostics_manager.filter_map_diagnostics_async(
                    is_severity_included(max_severity), lambda _, diagnostic: format_diagnostic_for_panel(diagnostic)):
                seen = path in contributions
                contributions.setdefault(path, []).extend(contribution)
                if not seen:
                    contributions.move_to_end(path)
        for path, contribution in contributions.items():
            to_render.append("{}:".format(path))
            row += 1
            for content, offset, code, href in contribution:
                to_render.append(content)
                if offset is not None and code is not None and href is not None:
                    prephantoms.append((row, offset, code, href))
                row += content.count("\n") + 1
            to_render.append("")  # add spacing between filenames
            row += 1
        for listener in listeners:
            set_diagnostics_count(listener.view, self.total_error_count, self.total_warning_count)
        characters = "\n".join(to_render)
        if not characters:
            characters = _NO_DIAGNOSTICS_PLACEHOLDER
        sublime.set_timeout(functools.partial(self._update_panel_main_thread, characters, prephantoms))
    def _update_panel_main_thread(self, characters: str, prephantoms: List[Tuple[int, int, str, str]]) -> None:
        """Write *characters* into the diagnostics panel and place code-link phantoms."""
        panel = ensure_diagnostics_panel(self._window)
        if not panel or not panel.is_valid():
            return
        panel.run_command("lsp_update_panel", {"characters": characters})
        if self._panel_code_phantoms is None:
            self._panel_code_phantoms = sublime.PhantomSet(panel, "hrefs")
        phantoms = []  # type: List[sublime.Phantom]
        for row, col, code, href in prephantoms:
            point = panel.text_point(row, col)
            region = sublime.Region(point, point)
            phantoms.append(sublime.Phantom(region, make_link(href, code), sublime.LAYOUT_INLINE))
        self._panel_code_phantoms.update(phantoms)
    def show_diagnostics_panel_async(self) -> None:
        # Do not steal focus from another panel the user has open.
        if self._window.active_panel() is None:
            self._window.run_command("show_panel", {"panel": "output.diagnostics"})
class WindowRegistry(object):
    """Lazily creates and caches one WindowManager per sublime.Window."""
    def __init__(self, configs: ConfigManager) -> None:
        self._windows = {}  # type: Dict[int, WindowManager]
        self._configs = configs
    def lookup(self, window: sublime.Window) -> WindowManager:
        """Return the manager for *window*, creating and caching it on first access."""
        cached = self._windows.get(window.id())
        if cached:
            return cached
        manager = WindowManager(
            window=window,
            workspace=ProjectFolders(window),
            configs=self._configs.for_window(window),
        )
        self._windows[window.id()] = manager
        return manager
    def listener_for_view(self, view: sublime.View) -> Optional[AbstractViewListener]:
        """Find the listener for *view* via the manager of its window, if attached."""
        window = view.window()
        return self.lookup(window).listener_for_view(view) if window else None
    def discard(self, window: sublime.Window) -> None:
        """Forget the manager associated with *window*."""
        self._windows.pop(window.id(), None)
class PanelLogger(Logger):
    """Logger that prints client/server traffic to the LSP log panel."""
    def __init__(self, manager: WindowManager, server_name: str) -> None:
        self._manager = ref(manager)
        self._server_name = server_name
    def stderr_message(self, message: str) -> None:
        """
        Not handled here as stderr messages are handled by WindowManager regardless
        if this logger is enabled.
        """
        pass
    def log(self, message: str, params: Any) -> None:
        """Render *params* (truncated per log_max_size) and hand the line to the manager."""
        def run_on_async_worker_thread() -> None:
            rendered = str(params)
            limit = userprefs().log_max_size
            if 0 < limit <= len(rendered):
                rendered = '<params with {} characters>'.format(len(rendered))
            manager = self._manager()  # weak reference; the window may be gone
            if manager is not None:
                manager.handle_server_message(":", "{}: {}".format(message, rendered))
        sublime.set_timeout_async(run_on_async_worker_thread)
    def outgoing_response(self, request_id: Any, params: Any) -> None:
        if userprefs().log_server:
            self.log(self._format_response(">>>", request_id), params)
    def outgoing_error_response(self, request_id: Any, error: Error) -> None:
        if userprefs().log_server:
            self.log(self._format_response("~~>", request_id), error.to_lsp())
    def outgoing_request(self, request_id: int, method: str, params: Any) -> None:
        if userprefs().log_server:
            self.log(self._format_request("-->", method, request_id), params)
    def outgoing_notification(self, method: str, params: Any) -> None:
        if userprefs().log_server:
            self.log(self._format_notification(" ->", method), params)
    def incoming_response(self, request_id: int, params: Any, is_error: bool) -> None:
        if userprefs().log_server:
            self.log(self._format_response("<~~" if is_error else "<<<", request_id), params)
    def incoming_request(self, request_id: Any, method: str, params: Any) -> None:
        if userprefs().log_server:
            self.log(self._format_request("<--", method, request_id), params)
    def incoming_notification(self, method: str, params: Any, unhandled: bool) -> None:
        if userprefs().log_server:
            self.log(self._format_notification("<? " if unhandled else "<- ", method), params)
    def _format_response(self, direction: str, request_id: Any) -> str:
        return "%s %s %s" % (direction, self._server_name, request_id)
    def _format_request(self, direction: str, method: str, request_id: Any) -> str:
        return "%s %s %s(%s)" % (direction, self._server_name, method, request_id)
    def _format_notification(self, direction: str, method: str) -> str:
        return "%s %s %s" % (direction, self._server_name, method)
class RemoteLogger(Logger):
    """Logger that broadcasts LSP traffic as JSON over a shared websocket server.

    A single WebsocketServer (held in class attributes) is shared by all
    RemoteLogger instances; each instance tags its payloads with a unique
    "<server_name> (<id>)" label so multiple sessions can be told apart.
    """
    PORT = 9981
    DIRECTION_OUTGOING = 1
    DIRECTION_INCOMING = 2
    _ws_server = None  # type: Optional[WebsocketServer]
    _ws_server_thread = None  # type: Optional[threading.Thread]
    _last_id = 0
    def __init__(self, manager: WindowManager, server_name: str) -> None:
        """Register this logger and start the shared websocket server on first use."""
        RemoteLogger._last_id += 1
        self._server_name = '{} ({})'.format(server_name, RemoteLogger._last_id)
        if not RemoteLogger._ws_server:
            try:
                RemoteLogger._ws_server = WebsocketServer(self.PORT)
                RemoteLogger._ws_server.set_fn_new_client(self._on_new_client)
                RemoteLogger._ws_server.set_fn_client_left(self._on_client_left)
                RemoteLogger._ws_server.set_fn_message_received(self._on_message_received)
                self._start_server()
            except OSError as ex:
                # Use the symbolic constant: EADDRINUSE is 48 on macOS but 98 on
                # Linux and 10048 on Windows, so the old literal `48` only matched
                # on macOS.
                if ex.errno == errno.EADDRINUSE:
                    debug('WebsocketServer not started - address already in use')
                    RemoteLogger._ws_server = None
                else:
                    raise ex
    def _start_server(self) -> None:
        """Run the websocket server's loop on a background thread."""
        def start_async() -> None:
            if RemoteLogger._ws_server:
                RemoteLogger._ws_server.run_forever()
        RemoteLogger._ws_server_thread = threading.Thread(target=start_async)
        RemoteLogger._ws_server_thread.start()
    def _stop_server(self) -> None:
        """Shut the shared server down and join its thread."""
        if RemoteLogger._ws_server:
            RemoteLogger._ws_server.shutdown()
            RemoteLogger._ws_server = None
            if RemoteLogger._ws_server_thread:
                RemoteLogger._ws_server_thread.join()
                RemoteLogger._ws_server_thread = None
    def _on_new_client(self, client: Dict, server: WebsocketServer) -> None:
        """Called for every client connecting (after handshake)."""
        debug("New client connected and was given id %d" % client['id'])
        # server.send_message_to_all("Hey all, a new client has joined us")
    def _on_client_left(self, client: Dict, server: WebsocketServer) -> None:
        """Called for every client disconnecting."""
        debug("Client(%d) disconnected" % client['id'])
    def _on_message_received(self, client: Dict, server: WebsocketServer, message: str) -> None:
        """Called when a client sends a message."""
        debug("Client(%d) said: %s" % (client['id'], message))
    def stderr_message(self, message: str) -> None:
        self._broadcast_json({
            'server': self._server_name,
            'time': round(time() * 1000),
            'method': 'stderr',
            'params': message,
            'isError': True,
            'direction': self.DIRECTION_INCOMING,
        })
    def outgoing_request(self, request_id: int, method: str, params: Any) -> None:
        self._broadcast_json({
            'server': self._server_name,
            'id': request_id,
            'time': round(time() * 1000),
            'method': method,
            'params': params,
            'direction': self.DIRECTION_OUTGOING,
        })
    def incoming_response(self, request_id: int, params: Any, is_error: bool) -> None:
        self._broadcast_json({
            'server': self._server_name,
            'id': request_id,
            'time': round(time() * 1000),
            'params': params,
            'direction': self.DIRECTION_INCOMING,
            'isError': is_error,
        })
    def incoming_request(self, request_id: Any, method: str, params: Any) -> None:
        self._broadcast_json({
            'server': self._server_name,
            'id': request_id,
            'time': round(time() * 1000),
            'method': method,
            'params': params,
            'direction': self.DIRECTION_INCOMING,
        })
    def outgoing_response(self, request_id: Any, params: Any) -> None:
        self._broadcast_json({
            'server': self._server_name,
            'id': request_id,
            'time': round(time() * 1000),
            'params': params,
            'direction': self.DIRECTION_OUTGOING,
        })
    def outgoing_error_response(self, request_id: Any, error: Error) -> None:
        self._broadcast_json({
            'server': self._server_name,
            'id': request_id,
            'isError': True,
            'params': error.to_lsp(),
            'time': round(time() * 1000),
            'direction': self.DIRECTION_OUTGOING,
        })
    def outgoing_notification(self, method: str, params: Any) -> None:
        self._broadcast_json({
            'server': self._server_name,
            'time': round(time() * 1000),
            'method': method,
            'params': params,
            'direction': self.DIRECTION_OUTGOING,
        })
    def incoming_notification(self, method: str, params: Any, unhandled: bool) -> None:
        self._broadcast_json({
            'server': self._server_name,
            'time': round(time() * 1000),
            'error': 'Unhandled notification!' if unhandled else None,
            'method': method,
            'params': params,
            'direction': self.DIRECTION_INCOMING,
        })
    def _broadcast_json(self, data: Dict[str, Any]) -> None:
        """Serialize *data* compactly and push it to every connected client."""
        if RemoteLogger._ws_server:
            json_data = json.dumps(data, sort_keys=True, check_circular=False, separators=(',', ':'))
            RemoteLogger._ws_server.send_message_to_all(json_data)
class RouterLogger(Logger):
    """Composite Logger that forwards every call to each registered child logger."""
    def __init__(self) -> None:
        self._loggers = []  # type: List[Logger]
    def append(self, logger: Logger) -> None:
        """Register *logger* as an additional destination."""
        self._loggers.append(logger)
    def stderr_message(self, *args: Any, **kwargs: Any) -> None:
        self._foreach("stderr_message", *args, **kwargs)
    def outgoing_response(self, *args: Any, **kwargs: Any) -> None:
        self._foreach("outgoing_response", *args, **kwargs)
    def outgoing_error_response(self, *args: Any, **kwargs: Any) -> None:
        self._foreach("outgoing_error_response", *args, **kwargs)
    def outgoing_request(self, *args: Any, **kwargs: Any) -> None:
        self._foreach("outgoing_request", *args, **kwargs)
    def outgoing_notification(self, *args: Any, **kwargs: Any) -> None:
        self._foreach("outgoing_notification", *args, **kwargs)
    def incoming_response(self, *args: Any, **kwargs: Any) -> None:
        self._foreach("incoming_response", *args, **kwargs)
    def incoming_request(self, *args: Any, **kwargs: Any) -> None:
        self._foreach("incoming_request", *args, **kwargs)
    def incoming_notification(self, *args: Any, **kwargs: Any) -> None:
        self._foreach("incoming_notification", *args, **kwargs)
    def _foreach(self, method: str, *args: Any, **kwargs: Any) -> None:
        """Invoke *method* with the given arguments on every child logger."""
        for child in self._loggers:
            getattr(child, method)(*args, **kwargs)
|
example_test.py | import http.server
import os
import re
import socket
import ssl
from threading import Thread
import ttfw_idf
from tiny_test_fw import DUT
server_cert = '-----BEGIN CERTIFICATE-----\n' \
'MIIDXTCCAkWgAwIBAgIJAP4LF7E72HakMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV\n'\
'BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX\n'\
'aWRnaXRzIFB0eSBMdGQwHhcNMTkwNjA3MDk1OTE2WhcNMjAwNjA2MDk1OTE2WjBF\n'\
'MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50\n'\
'ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB\n'\
'CgKCAQEAlzfCyv3mIv7TlLkObxunKfCdrJ/zgdANrsx0RBtpEPhV560hWJ0fEin0\n'\
'nIOMpJSiF9E6QsPdr6Q+eogH4XnOMU9JE+iG743N1dPfGEzJvRlyct/Ck8SswKPC\n'\
'9+VXsnOdZmUw9y/xtANbURA/TspvPzz3Avv382ffffrJGh7ooOmaZSCZFlSYHLZA\n'\
'w/XlRr0sSRbLpFGY0gXjaAV8iHHiPDYLy4kZOepjV9U51xi+IGsL4w75zuMgsHyF\n'\
'3nJeGYHgtGVBrkL0ZKG5udY0wcBjysjubDJC4iSlNiq2HD3fhs7j6CZddV2v845M\n'\
'lVKNxP0kO4Uj4D8r+5USWC8JKfAwxQIDAQABo1AwTjAdBgNVHQ4EFgQU6OE7ssfY\n'\
'IIPTDThiUoofUpsD5NwwHwYDVR0jBBgwFoAU6OE7ssfYIIPTDThiUoofUpsD5Nww\n'\
'DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAXIlHS/FJWfmcinUAxyBd\n'\
'/xd5Lu8ykeru6oaUCci+Vk9lyoMMES7lQ+b/00d5x7AcTawkTil9EWpBTPTOTraA\n'\
'lzJMQhNKmSLk0iIoTtAJtSZgUSpIIozqK6lenxQQDsHbXKU6h+u9H6KZE8YcjsFl\n'\
'6vL7sw9BVotw/VxfgjQ5OSGLgoLrdVT0z5C2qOuwOgz1c7jNiJhtMdwN+cOtnJp2\n'\
'fuBgEYyE3eeuWogvkWoDcIA8r17Ixzkpq2oJsdvZcHZPIZShPKW2SHUsl98KDemu\n'\
'y0pQyExmQUbwKE4vbFb9XuWCcL9XaOHQytyszt2DeD67AipvoBwVU7/LBOvqnsmy\n'\
'hA==\n'\
'-----END CERTIFICATE-----\n'
server_key = '-----BEGIN PRIVATE KEY-----\n'\
'MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCXN8LK/eYi/tOU\n'\
'uQ5vG6cp8J2sn/OB0A2uzHREG2kQ+FXnrSFYnR8SKfScg4yklKIX0TpCw92vpD56\n'\
'iAfhec4xT0kT6Ibvjc3V098YTMm9GXJy38KTxKzAo8L35Veyc51mZTD3L/G0A1tR\n'\
'ED9Oym8/PPcC+/fzZ999+skaHuig6ZplIJkWVJgctkDD9eVGvSxJFsukUZjSBeNo\n'\
'BXyIceI8NgvLiRk56mNX1TnXGL4gawvjDvnO4yCwfIXecl4ZgeC0ZUGuQvRkobm5\n'\
'1jTBwGPKyO5sMkLiJKU2KrYcPd+GzuPoJl11Xa/zjkyVUo3E/SQ7hSPgPyv7lRJY\n'\
'Lwkp8DDFAgMBAAECggEAfBhAfQE7mUByNbxgAgI5fot9eaqR1Nf+QpJ6X2H3KPwC\n'\
'02sa0HOwieFwYfj6tB1doBoNq7i89mTc+QUlIn4pHgIowHO0OGawomeKz5BEhjCZ\n'\
'4XeLYGSoODary2+kNkf2xY8JTfFEcyvGBpJEwc4S2VyYgRRx+IgnumTSH+N5mIKZ\n'\
'SXWNdZIuHEmkwod+rPRXs6/r+PH0eVW6WfpINEbr4zVAGXJx2zXQwd2cuV1GTJWh\n'\
'cPVOXLu+XJ9im9B370cYN6GqUnR3fui13urYbnWnEf3syvoH/zuZkyrVChauoFf8\n'\
'8EGb74/HhXK7Q2s8NRakx2c7OxQifCbcy03liUMmyQKBgQDFAob5B/66N4Q2cq/N\n'\
'MWPf98kYBYoLaeEOhEJhLQlKk0pIFCTmtpmUbpoEes2kCUbH7RwczpYko8tlKyoB\n'\
'6Fn6RY4zQQ64KZJI6kQVsjkYpcP/ihnOY6rbds+3yyv+4uPX7Eh9sYZwZMggE19M\n'\
'CkFHkwAjiwqhiiSlUxe20sWmowKBgQDEfx4lxuFzA1PBPeZKGVBTxYPQf+DSLCre\n'\
'ZFg3ZmrxbCjRq1O7Lra4FXWD3dmRq7NDk79JofoW50yD8wD7I0B7opdDfXD2idO8\n'\
'0dBnWUKDr2CAXyoLEINce9kJPbx4kFBQRN9PiGF7VkDQxeQ3kfS8CvcErpTKCOdy\n'\
'5wOwBTwJdwKBgDiTFTeGeDv5nVoVbS67tDao7XKchJvqd9q3WGiXikeELJyuTDqE\n'\
'zW22pTwMF+m3UEAxcxVCrhMvhkUzNAkANHaOatuFHzj7lyqhO5QPbh4J3FMR0X9X\n'\
'V8VWRSg+jA/SECP9koOl6zlzd5Tee0tW1pA7QpryXscs6IEhb3ns5R2JAoGAIkzO\n'\
'RmnhEOKTzDex611f2D+yMsMfy5BKK2f4vjLymBH5TiBKDXKqEpgsW0huoi8Gq9Uu\n'\
'nvvXXAgkIyRYF36f0vUe0nkjLuYAQAWgC2pZYgNLJR13iVbol0xHJoXQUHtgiaJ8\n'\
'GLYFzjHQPqFMpSalQe3oELko39uOC1CoJCHFySECgYBeycUnRBikCO2n8DNhY4Eg\n'\
'9Y3oxcssRt6ea5BZwgW2eAYi7/XqKkmxoSoOykUt3MJx9+EkkrL17bxFSpkj1tvL\n'\
'qvxn7egtsKjjgGNAxwXC4MwCvhveyUQQxtQb8AqGrGqo4jEEN0L15cnP38i2x1Uo\n'\
'muhfskWf4MABV0yTUaKcGg==\n'\
'-----END PRIVATE KEY-----\n'
def get_my_ip():
    """Return the local IP address of the interface used for the default route.

    "Connecting" a UDP socket to a public address sends no packets; it only
    makes the OS pick the outgoing interface, whose address we read back.
    """
    # Bug fix: the original leaked the socket if connect() raised; the
    # context manager guarantees it is closed on every path.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s1:
        s1.connect(('8.8.8.8', 80))
        return s1.getsockname()[0]
def start_https_server(ota_image_dir, server_ip, server_port):
    """Serve *ota_image_dir* over HTTPS on (server_ip, server_port).

    Writes the embedded test certificate and key to PEM files inside the
    image directory, then blocks forever in serve_forever(); intended to
    run in a daemon thread.
    """
    os.chdir(ota_image_dir)
    server_file = os.path.join(ota_image_dir, 'server_cert.pem')
    # 'with' guarantees the PEM files are flushed and closed.
    with open(server_file, 'w+') as cert_file_handle:
        cert_file_handle.write(server_cert)
    key_file = os.path.join(ota_image_dir, 'server_key.pem')
    # Bug fix: the original opened the relative path 'server_key.pem',
    # which only happened to match key_file because of os.chdir() above.
    with open(key_file, 'w+') as key_file_handle:
        key_file_handle.write(server_key)
    httpd = http.server.HTTPServer((server_ip, server_port), http.server.SimpleHTTPRequestHandler)
    # ssl.wrap_socket() is deprecated (removed in Python 3.12); an
    # SSLContext is the supported way to wrap a server socket.
    ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    ssl_context.load_cert_chain(certfile=server_file, keyfile=key_file)
    httpd.socket = ssl_context.wrap_socket(httpd.socket, server_side=True)
    httpd.serve_forever()
@ttfw_idf.idf_example_test(env_tag='Example_WIFI')
def test_examples_protocol_simple_ota_example(env, extra_data):
    """
    steps: |
      1. join AP
      2. Fetch OTA image over HTTPS
      3. Reboot with the new OTA image
    """
    dut1 = env.get_dut('simple_ota_example', 'examples/system/ota/simple_ota_example', dut_class=ttfw_idf.ESP32DUT)
    # check and log bin size
    binary_file = os.path.join(dut1.app.binary_path, 'simple_ota.bin')
    bin_size = os.path.getsize(binary_file)
    ttfw_idf.log_performance('simple_ota_bin_size', '{}KB'.format(bin_size // 1024))
    # start the HTTPS server serving the OTA image in a daemon thread
    host_ip = get_my_ip()
    thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, 8000))
    thread1.daemon = True
    thread1.start()
    dut1.start_app()
    dut1.expect('Loaded app from partition at offset 0x10000', timeout=30)
    try:
        ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
        print('Connected to AP with IP: {}'.format(ip_address))
    except DUT.ExpectTimeout:
        raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
    # Bug fix: the original called thread1.close(), but threading.Thread
    # has no close() method (AttributeError). The daemon server thread is
    # reaped automatically when the test process exits.
    dut1.expect('Starting OTA example', timeout=30)
    print('writing to device: {}'.format('https://' + host_ip + ':8000/simple_ota.bin'))
    dut1.write('https://' + host_ip + ':8000/simple_ota.bin')
    dut1.expect('Loaded app from partition at offset 0x110000', timeout=60)
    dut1.expect('Starting OTA example', timeout=30)
@ttfw_idf.idf_example_test(env_tag='Example_EthKitV1')
def test_examples_protocol_simple_ota_example_ethernet_with_spiram_config(env, extra_data):
    """
    steps: |
      1. join AP
      2. Fetch OTA image over HTTPS
      3. Reboot with the new OTA image
    """
    dut1 = env.get_dut('simple_ota_example', 'examples/system/ota/simple_ota_example', dut_class=ttfw_idf.ESP32DUT, app_config_name='spiram')
    # check and log bin size
    binary_file = os.path.join(dut1.app.binary_path, 'simple_ota.bin')
    bin_size = os.path.getsize(binary_file)
    ttfw_idf.log_performance('simple_ota_bin_size', '{}KB'.format(bin_size // 1024))
    # start the HTTPS server serving the OTA image in a daemon thread
    host_ip = get_my_ip()
    thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, 8000))
    thread1.daemon = True
    thread1.start()
    dut1.start_app()
    dut1.expect('Loaded app from partition at offset 0x10000', timeout=30)
    try:
        ip_address = dut1.expect(re.compile(r' eth ip: ([^,]+),'), timeout=30)
        print('Connected to AP with IP: {}'.format(ip_address))
    except DUT.ExpectTimeout:
        raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
    # Bug fix: the original called thread1.close(), but threading.Thread
    # has no close() method (AttributeError). The daemon server thread is
    # reaped automatically when the test process exits.
    dut1.expect('Starting OTA example', timeout=30)
    print('writing to device: {}'.format('https://' + host_ip + ':8000/simple_ota.bin'))
    dut1.write('https://' + host_ip + ':8000/simple_ota.bin')
    dut1.expect('Loaded app from partition at offset 0x110000', timeout=60)
    dut1.expect('Starting OTA example', timeout=30)
@ttfw_idf.idf_example_test(env_tag='Example_Flash_Encryption_OTA')
def test_examples_protocol_simple_ota_example_with_flash_encryption(env, extra_data):
    """
    steps: |
      1. join AP
      2. Fetch OTA image over HTTPS
      3. Reboot with the new OTA image
    """
    dut1 = env.get_dut('simple_ota_example', 'examples/system/ota/simple_ota_example', dut_class=ttfw_idf.ESP32DUT, app_config_name='flash_enc')
    # check and log bin size
    binary_file = os.path.join(dut1.app.binary_path, 'simple_ota.bin')
    bin_size = os.path.getsize(binary_file)
    ttfw_idf.log_performance('simple_ota_bin_size', '{}KB'.format(bin_size // 1024))
    # erase flash on the device
    print('Erasing the flash in order to have an empty NVS key partiton')
    dut1.erase_flash()
    # start the HTTPS server serving the OTA image in a daemon thread
    host_ip = get_my_ip()
    thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, 8000))
    thread1.daemon = True
    thread1.start()
    dut1.start_app()
    dut1.expect('Loaded app from partition at offset 0x20000', timeout=30)
    dut1.expect('Flash encryption mode is DEVELOPMENT (not secure)', timeout=10)
    try:
        ip_address = dut1.expect(re.compile(r' eth ip: ([^,]+),'), timeout=30)
        print('Connected to AP with IP: {}'.format(ip_address))
    except DUT.ExpectTimeout:
        raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
    # Bug fix: the original called thread1.close(), but threading.Thread
    # has no close() method (AttributeError). The daemon server thread is
    # reaped automatically when the test process exits.
    dut1.expect('Starting OTA example', timeout=30)
    print('writing to device: {}'.format('https://' + host_ip + ':8000/simple_ota.bin'))
    dut1.write('https://' + host_ip + ':8000/simple_ota.bin')
    dut1.expect('Loaded app from partition at offset 0x120000', timeout=60)
    dut1.expect('Flash encryption mode is DEVELOPMENT (not secure)', timeout=10)
    dut1.expect('Starting OTA example', timeout=30)
if __name__ == '__main__':
    # NOTE(review): the test functions are declared with (env, extra_data)
    # but invoked here with no arguments — presumably the
    # ttfw_idf.idf_example_test decorator supplies them when a case is run
    # directly; confirm against the ttfw test framework.
    test_examples_protocol_simple_ota_example()
    test_examples_protocol_simple_ota_example_ethernet_with_spiram_config()
    test_examples_protocol_simple_ota_example_with_flash_encryption()
|
userThread.py | #This thread handles user operations of only 1 user
#It is connected to the matchmaking thread and to the database thread.
#The list of operations is as follows:
#userType: 0 for normal, 1 for facebook
# ID | ARGUMENTS
# 0 --- User signup | userType(fb or normal),id,name,email,password
# 1 --- User login | userType,id,name,email,password
# 2 --- Change password | newPassword
# 3 --- Forgot password | email,name
# 4 --- Confirm password change code| email,name,code
# 5 --- Start game | -
#The separator in the messages can be a space and messages are terminated with \n
#so the final form of the messages is:
# 0 0 userType id name email password
# 1 1 userType id name email password
# 2 2 newPassword
# 3 3 email,name
# 4 4 email,name,code
# 5 5
import socket,Queue
from threading import *
PORT = 11337
#This function-thread listens on a port for connections
#For every accepted client it spawns a dedicated userThread and sends the
#client an initial '0\n' acknowledgement.
def listener(queueToDatabase,queueToMatchMaking,setupSocket):
    #Configure server Socket
    setupSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    #Listen on all interfaces
    setupSocket.bind(('0.0.0.0',PORT))
    setupSocket.setblocking(True)
    while True:
        #settimeout(None) keeps the socket fully blocking; calling
        #listen(1) on every iteration is redundant but harmless.
        setupSocket.settimeout(None)
        setupSocket.listen(1)
        print 'LISTENING'
        replySocket,address = setupSocket.accept()
        #now create a new userThread
        uThread = Thread(target=userThread,args=(replySocket,queueToDatabase,queueToMatchMaking))
        uThread.start()
        replySocket.send('0\n')
        print 'Created new user thread'
    #NOTE(review): the two lines below are unreachable — the while True
    #loop above never exits.
    print('Listener Thread ends now')
    setupSocket.close()
#dbQueue is for communicating with database thread
#matchQueue is for communicating with matchmaking thread
#Serves one connected client: parses space-separated, '\n'-terminated
#commands (see the protocol table at the top of this file), forwards each
#as an operation dict to the database thread, and on a start-game ('5')
#message hands the player token over to the matchmaking thread and exits.
def userThread(replySocket,dbQueue,matchQueue,userType = None,userId = None,name = None,email = None):
    #Private queue on which the database thread posts replies for this user
    answerQueue = Queue.Queue()
    replySocket.settimeout(None)  #block indefinitely on recv
    while True:
        message = replySocket.recv(512)
        #Connection shut down on other side
        if len(message) == 0:
            print 'CLIENT SOCKET SHUT DOWN'
            break
        print "MESSAGE IS " + message
        args = message.split()
        #After game message
        #NOTE(review): an unrecognized opcode (or a userType other than
        #'0'/'1') leaves 'data' unbound and dbQueue.put(data) below raises
        #NameError — confirm clients can only send valid opcodes.
        if (len(args) == 1 and args[0] != '5'):
            continue
        #Now check operation type
        if args[0] == '0':
            #signup: userType id name email password
            userType = args[1]
            userId = args[2]
            name = args[3]
            email = args[4]
            password = args[5]
            #Check user type
            if userType == '0':#normal user
                data = {'operation':0,'answer':answerQueue,'name':name,'email':email,'password':password}
            elif userType == '1':#Facebook user
                data = {'operation':1,'answer':answerQueue,'id':userId,'name':name,'email':email}
        elif args[0] == '1':
            #login: '0' placeholders mean "field not supplied"
            userType = args[1]
            userId = args[2]
            name = None if args[3] == '0' else args[3]
            email = None if args[4] == '0' else args[4]
            password = args[5]
            if userType == '0':#normal user
                data = {'operation':2,'answer':answerQueue,'name':name,'email':email,'password':password}
            elif userType == '1':#Facebook user
                data = {'operation':3,'answer':answerQueue,'id':userId}
        elif args[0] == '2':
            #change password (uses name/email remembered from login/signup)
            password = args[1]
            data = {'operation':6,'answer':answerQueue,'name':name,'email':email,'newPass':password}
        elif args[0] == '3':
            #forgot password
            email = None if args[1] == '0' else args[1]
            name = None if args[2] == '0' else args[2]
            data = {'operation':7,'answer':answerQueue,'name':name,'email':email}
        elif args[0] == '4':
            #confirm password-change code
            email = None if args[1] == '0' else args[1]
            name = None if args[2] == '0' else args[2]
            code = int(args[3])
            data = {'operation':8,'answer':answerQueue,'name':name,'email':email,'code':code}
        elif args[0] == '5':
            #start game: fetch the player record, then hand off to matchmaking
            if userType == '0':
                data = {'operation':9,'answer':answerQueue,'name':name,'email':email}
            elif userType == '1':
                data = {'operation':10,'answer':answerQueue,'id':userId}
            #get user data
            dbQueue.put(data)
            playerToken = answerQueue.get()
            playerToken['type'] = userType
            playerToken['socket'] = replySocket
            #now send to matchmaking thread
            print 'Send data to %s' % name
            replySocket.send('0\n')
            matchQueue.put(playerToken)
            print 'Send data to match making thread'
            break
        #now send data
        dbQueue.put(data)
        result = answerQueue.get()
        print 'result of operation is %r' % result
        #'0' = success, '1' = failure, as seen by the client
        if result:
            replySocket.send('0\n')
        else:
            replySocket.send('1\n')
    #Terminate thread
    print 'User Thread out'
|
train_pg_f18.py | """
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany
"""
import numpy as np
import tensorflow as tf
import gym
import logz
import os
import time
import inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
    """
    Build a feedforward neural network under the given variable scope.

    arguments:
        input_placeholder: placeholder for the state (batch_size, input_size)
        output_size: width of the output layer
        scope: variable scope the network's weights live in
        n_layers: number of hidden layers
        size: width of each hidden layer
        activation: activation applied to hidden layers
        output_activation: activation applied to the output layer

    returns:
        the output tensor of a forward pass through the network
    """
    with tf.variable_scope(scope):
        # Thread the input through n_layers dense hidden layers, then a
        # final dense layer of the requested output width.
        hidden = input_placeholder
        for _ in range(n_layers):
            hidden = tf.layers.dense(hidden, units=size, activation=activation)
        return tf.layers.dense(hidden, units=output_size, activation=output_activation)
def pathlength(path):
    """Return the number of timesteps in a rollout (one reward per step)."""
    reward_seq = path["reward"]
    return len(reward_seq)
def setup_logger(logdir, locals_):
    """Point logz at *logdir* and record train_PG's parameter values.

    arguments:
        logdir: directory logz writes its output to
        locals_: the locals() mapping captured inside train_PG
    """
    # Configure output directory for logging
    logz.configure_output_dir(logdir)
    # Log experimental parameters.
    # Bug fix: inspect.getargspec was deprecated since Python 3.0 and
    # removed in 3.11; getfullargspec is the drop-in replacement for
    # reading the positional argument names.
    args = inspect.getfullargspec(train_PG)[0]
    params = {k: locals_[k] if k in locals_ else None for k in args}
    logz.save_params(params)
#============================================================================================#
# Policy Gradient
#============================================================================================#
class Agent(object):
    """Vanilla policy-gradient agent built on the TF1 graph API.

    Responsibilities: build the policy network (and optional neural-network
    baseline), sample trajectories from a gym environment, estimate returns
    and advantages, and run the policy-gradient update.
    """

    def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args):
        super(Agent, self).__init__()
        # Network / optimization hyperparameters.
        self.ob_dim = computation_graph_args['ob_dim']
        self.ac_dim = computation_graph_args['ac_dim']
        self.discrete = computation_graph_args['discrete']
        self.size = computation_graph_args['size']
        self.n_layers = computation_graph_args['n_layers']
        self.learning_rate = computation_graph_args['learning_rate']
        # Rollout-collection settings.
        self.animate = sample_trajectory_args['animate']
        self.max_path_length = sample_trajectory_args['max_path_length']
        self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
        # Return-estimation settings.
        self.gamma = estimate_return_args['gamma']
        self.reward_to_go = estimate_return_args['reward_to_go']
        self.nn_baseline = estimate_return_args['nn_baseline']
        self.normalize_advantages = estimate_return_args['normalize_advantages']

    def init_tf_sess(self):
        """Create a single-threaded TF session and initialize all variables."""
        tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
        tf_config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=tf_config)
        self.sess.__enter__()  # equivalent to `with self.sess:`
        tf.global_variables_initializer().run()  # pylint: disable=E1101

    def define_placeholders(self):
        """
        Placeholders for batch observations / actions / advantages used in
        the policy gradient loss function.

        See Agent.build_computation_graph for notation

        returns:
            sy_ob_no: placeholder for observations
            sy_ac_na: placeholder for actions
            sy_adv_n: placeholder for advantages
        """
        sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32)
        if self.discrete:
            # Discrete actions are integer indices into the action set.
            sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
        else:
            sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
        sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
        return sy_ob_no, sy_ac_na, sy_adv_n

    def policy_forward_pass(self, sy_ob_no):
        """
        Constructs the symbolic operation for the policy network outputs,
        which are the parameters of the policy distribution p(a|s).

        arguments:
            sy_ob_no: (batch_size, self.ob_dim)

        returns:
            if discrete, the logits of a categorical distribution over actions:
                sy_logits_na: (batch_size, self.ac_dim)
            if continuous, a tuple (mean, log_std) of a Gaussian distribution
            over actions; log_std is a trainable variable, not a network output:
                sy_mean: (batch_size, self.ac_dim)
                sy_logstd: (self.ac_dim,)
        """
        if self.discrete:
            sy_logits_na = build_mlp(sy_ob_no, self.ac_dim, "policy", self.n_layers, self.size)
            return sy_logits_na
        else:
            sy_mean = build_mlp(sy_ob_no, self.ac_dim, "policy", self.n_layers, self.size)
            # One trainable log-std per action dimension, shared across states.
            sy_logstd = tf.get_variable(name="sy_logstd", shape=[self.ac_dim], dtype=tf.float32)
            return (sy_mean, sy_logstd)

    def sample_action(self, policy_parameters):
        """
        Constructs a symbolic operation for stochastically sampling from
        the policy distribution.

        arguments:
            policy_parameters: logits (discrete) or (mean, log_std) (continuous);
                see policy_forward_pass for shapes

        returns:
            sy_sampled_ac: (batch_size,) if discrete,
                           (batch_size, self.ac_dim) if continuous
        """
        if self.discrete:
            sy_logits_na = policy_parameters
            # tf.multinomial draws one action index per row; squeeze drops
            # the trailing sample dimension.
            act_log = tf.nn.log_softmax(sy_logits_na, axis=1)
            sy_sampled_ac = tf.squeeze(tf.multinomial(act_log, 1), -1)
        else:
            sy_mean, sy_logstd = policy_parameters
            # Reparameterization trick: a = mu + sigma * z with z ~ N(0, I).
            z = tf.random_normal(shape=tf.shape(sy_mean), dtype=tf.float32)
            sigma = tf.exp(sy_logstd)
            sy_sampled_ac = sy_mean + sigma*z
        return sy_sampled_ac

    def get_log_prob(self, policy_parameters, sy_ac_na):
        """
        Constructs a symbolic operation computing the log probability of the
        actions that were actually taken, under the current policy.

        arguments:
            policy_parameters: logits (discrete) or (mean, log_std) (continuous)
            sy_ac_na: (batch_size,) if discrete,
                      (batch_size, self.ac_dim) if continuous

        returns:
            sy_logprob_n: (batch_size,)
        """
        if self.discrete:
            sy_logits_na = policy_parameters
            act_log = tf.nn.log_softmax(sy_logits_na, axis=1)
            # Pick out the log-prob of the taken action with a one-hot mask.
            sy_logprob_n = tf.reduce_sum(tf.one_hot(sy_ac_na, self.ac_dim)*act_log, axis=1)
        else:
            sy_mean, sy_logstd = policy_parameters
            # Diagonal-Gaussian log-density, summed over action dimensions.
            pre_sum = -0.5 * (((sy_ac_na-sy_mean)/(tf.exp(sy_logstd)))**2 + 2*sy_logstd + np.log(2*np.pi))
            sy_logprob_n = tf.reduce_sum(pre_sum, axis=1)
        return sy_logprob_n

    def build_computation_graph(self):
        """
        Notes on notation:

        Symbolic variables have the prefix sy_, to distinguish them from the
        numerical values that are computed later in the function.

        Prefixes and suffixes:
        ob - observation
        ac - action
        _no - this tensor should have shape (batch self.size /n/, observation dim)
        _na - this tensor should have shape (batch self.size /n/, action dim)
        _n  - this tensor should have shape (batch self.size /n/)

        Note: batch self.size /n/ is defined at runtime, and until then, the
        shape for that axis is None.
        ----------------------------------------------------------------------------------
        loss: a function of self.sy_logprob_n and self.sy_adv_n that we will
        differentiate to get the policy gradient.
        """
        self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()

        # The policy takes in an observation and produces a distribution
        # over the action space.
        self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)

        # We can sample actions from this action distribution. This will be
        # called in Agent.sample_trajectory() where we generate a rollout.
        self.sy_sampled_ac = self.sample_action(self.policy_parameters)

        # We can also compute the logprob of the actions that were actually
        # taken by the policy. This is used in the loss function.
        self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)

        # Policy-gradient surrogate loss and its training op.
        loss = -tf.reduce_mean(self.sy_logprob_n*self.sy_adv_n)
        self.update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)

        # Optional neural-network baseline: predicts the (rescaled) return
        # from the observation; fitted with an L2 loss.
        if self.nn_baseline:
            self.baseline_prediction = tf.squeeze(build_mlp(
                self.sy_ob_no,
                1,
                "nn_baseline",
                n_layers=self.n_layers,
                size=self.size))
            self.sy_target_n = tf.placeholder(tf.float32, shape=[None, ], name="b_target")
            baseline_loss = tf.nn.l2_loss(self.baseline_prediction - self.sy_target_n)
            self.baseline_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(baseline_loss)

    def sample_trajectories(self, itr, env):
        """Collect rollouts until at least min_timesteps_per_batch timesteps."""
        timesteps_this_batch = 0
        paths = []
        while True:
            # Render only the first episode of every 10th iteration.
            animate_this_episode = (len(paths) == 0 and (itr % 10 == 0) and self.animate)
            path = self.sample_trajectory(env, animate_this_episode)
            paths.append(path)
            timesteps_this_batch += pathlength(path)
            if timesteps_this_batch > self.min_timesteps_per_batch:
                break
        return paths, timesteps_this_batch

    def sample_trajectory(self, env, animate_this_episode):
        """Roll out one episode; returns dict of observation/reward/action arrays."""
        ob = env.reset()
        obs, acs, rewards = [], [], []
        steps = 0
        while True:
            if animate_this_episode:
                env.render()
                time.sleep(0.1)
            obs.append(ob)
            # Sample an action for the single current observation.
            ac = self.sess.run(self.sy_sampled_ac, feed_dict={self.sy_ob_no: np.array([ob])})
            ac = ac[0]
            acs.append(ac)
            ob, rew, done, _ = env.step(ac)
            rewards.append(rew)
            steps += 1
            if done or steps > self.max_path_length:
                break
        path = {"observation" : np.array(obs, dtype=np.float32),
                "reward" : np.array(rewards, dtype=np.float32),
                "action" : np.array(acs, dtype=np.float32)}
        return path

    def sum_of_rewards(self, re_n):
        """
        Monte Carlo estimation of the Q function.

        arguments:
            re_n: list of length num_paths; each element is a numpy array of
                  the rewards for one path

        returns:
            q_n: 1-D array of length sum_of_path_lengths with the estimated
                 Q value for every timestep of every path

        Two estimators, selected by self.reward_to_go:
          * reward_to_go=False (trajectory-based PG): every timestep of a
            path gets that path's full discounted return,
            Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
          * reward_to_go=True: timestep t gets the discounted return from t
            onward, Q_t = sum_{t'=t}^T gamma^(t'-t) r_{t'}.
        """
        q_n = []
        if self.reward_to_go:
            l = 0  # start index of the current path inside q_n
            for re in re_n:
                q_n.extend([0]*len(re))
                q = 0
                # Backward pass: q accumulates the discounted suffix sum.
                for i in range(len(re)-1, -1, -1):
                    q = q*self.gamma+re[i]
                    q_n[l+i] = q
                l += len(re)
        else:
            for re in re_n:
                q = 0
                for i in range(len(re)-1, -1, -1):
                    q = q*self.gamma+re[i]
                # Every timestep of the path shares the full return.
                q_n.extend([q]*len(re))
        q_n = np.array(q_n, dtype=np.float32)
        return q_n

    def compute_advantage(self, ob_no, q_n):
        """
        Computes advantages by (possibly) subtracting a baseline from the
        estimated Q values.

        arguments:
            ob_no: (sum_of_path_lengths, ob_dim)
            q_n:   (sum_of_path_lengths,) estimated Q values

        returns:
            adv_n: (sum_of_path_lengths,) estimated advantages
        """
        if self.nn_baseline:
            # Predict the baseline for each timestep. The baseline network
            # is trained on mean-0/std-1 targets (see update_parameters);
            # NOTE(review): hint #bl1 asks for the prediction to be rescaled
            # to the current batch's Q statistics before subtracting —
            # this implementation subtracts the raw prediction.
            b_n = self.sess.run(self.baseline_prediction, feed_dict={self.sy_ob_no: ob_no})
            adv_n = q_n - b_n
        else:
            adv_n = q_n.copy()
        return adv_n

    def estimate_return(self, ob_no, re_n):
        """
        Estimates the returns over a set of trajectories.

        arguments:
            ob_no: (sum_of_path_lengths, ob_dim)
            re_n:  list of per-path reward arrays (length num_paths)

        returns:
            q_n:   (sum_of_path_lengths,) estimated Q values
            adv_n: (sum_of_path_lengths,) estimated advantages
        """
        q_n = self.sum_of_rewards(re_n)
        adv_n = self.compute_advantage(ob_no, q_n)
        if self.normalize_advantages:
            # Standardize advantages to mean 0 / std 1 — a well-known
            # variance-reduction trick. (Bug fix: the original computed
            # adv_n - np.sum(adv_n), which neither centers nor rescales;
            # the small epsilon guards against a zero-variance batch.)
            adv_n = (adv_n - np.mean(adv_n)) / (np.std(adv_n) + 1e-8)
        return q_n, adv_n

    def update_parameters(self, ob_no, ac_na, q_n, adv_n):
        """
        Update the parameters of the policy and (possibly) the neural
        network baseline, which is trained to approximate the value function.

        arguments:
            ob_no: (sum_of_path_lengths, ob_dim)
            ac_na: (sum_of_path_lengths,)
            q_n:   (sum_of_path_lengths,) estimated Q values
            adv_n: (sum_of_path_lengths,) estimated advantages

        returns:
            nothing
        """
        # (Removed a leftover debug sess.run that evaluated logprobs/samples
        # on ob_no[0:2] and discarded the result.)
        if self.nn_baseline:
            # Fit the baseline to mean-0/std-1 rescaled Q targets for the
            # next iteration (hint #bl2).
            target_n = (q_n-q_n.mean())/q_n.std()
            self.sess.run(self.baseline_update_op, feed_dict={self.sy_ob_no: ob_no,
                                                              self.sy_target_n: target_n})
        # Policy-gradient step on the current batch of rollouts.
        self.sess.run(self.update_op, feed_dict={self.sy_ob_no: ob_no, self.sy_ac_na: ac_na,
                                                 self.sy_adv_n: adv_n})
def train_PG(
        exp_name,
        env_name,
        n_iter,
        gamma,
        min_timesteps_per_batch,
        max_path_length,
        learning_rate,
        reward_to_go,
        animate,
        logdir,
        normalize_advantages,
        nn_baseline,
        seed,
        n_layers,
        size):
    """Train a policy-gradient Agent on `env_name` for `n_iter` iterations.

    Arguments mirror the CLI flags parsed in main(); a falsy
    `max_path_length` falls back to the env spec's max_episode_steps.
    Diagnostics are logged to `logdir` via logz each iteration.
    """
    start = time.time()

    #========================================================================================#
    # Set Up Logger
    #========================================================================================#
    setup_logger(logdir, locals())

    #========================================================================================#
    # Set Up Env
    #========================================================================================#

    # Make the gym environment
    env = gym.make(env_name)

    # Set random seeds (TF graph, numpy, and the env itself)
    tf.set_random_seed(seed)
    np.random.seed(seed)
    env.seed(seed)

    # Maximum length for episodes
    max_path_length = max_path_length or env.spec.max_episode_steps

    # Is this env continuous, or self.discrete?
    discrete = isinstance(env.action_space, gym.spaces.Discrete)

    # Observation and action sizes
    ob_dim = env.observation_space.shape[0]
    ac_dim = env.action_space.n if discrete else env.action_space.shape[0]

    #========================================================================================#
    # Initialize Agent
    #========================================================================================#
    computation_graph_args = {
        'n_layers': n_layers,
        'ob_dim': ob_dim,
        'ac_dim': ac_dim,
        'discrete': discrete,
        'size': size,
        'learning_rate': learning_rate,
        }

    sample_trajectory_args = {
        'animate': animate,
        'max_path_length': max_path_length,
        'min_timesteps_per_batch': min_timesteps_per_batch,
    }

    estimate_return_args = {
        'gamma': gamma,
        'reward_to_go': reward_to_go,
        'nn_baseline': nn_baseline,
        'normalize_advantages': normalize_advantages,
    }

    agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args)

    # build computation graph
    agent.build_computation_graph()

    # tensorflow: config, session, variable initialization
    agent.init_tf_sess()

    #========================================================================================#
    # Training Loop
    #========================================================================================#

    total_timesteps = 0

    for itr in range(n_iter):
        print("********** Iteration %i ************"%itr)
        paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
        total_timesteps += timesteps_this_batch

        # Build arrays for observation, action for the policy gradient update by concatenating
        # across paths
        ob_no = np.concatenate([path["observation"] for path in paths])
        ac_na = np.concatenate([path["action"] for path in paths])
        re_n = [path["reward"] for path in paths]

        q_n, adv_n = agent.estimate_return(ob_no, re_n)
        agent.update_parameters(ob_no, ac_na, q_n, adv_n)

        # Log diagnostics
        returns = [path["reward"].sum() for path in paths]
        ep_lengths = [pathlength(path) for path in paths]
        logz.log_tabular("Time", time.time() - start)
        logz.log_tabular("Iteration", itr)
        logz.log_tabular("AverageReturn", np.mean(returns))
        logz.log_tabular("StdReturn", np.std(returns))
        logz.log_tabular("MaxReturn", np.max(returns))
        logz.log_tabular("MinReturn", np.min(returns))
        logz.log_tabular("EpLenMean", np.mean(ep_lengths))
        logz.log_tabular("EpLenStd", np.std(ep_lengths))
        logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
        logz.log_tabular("TimestepsSoFar", total_timesteps)
        logz.dump_tabular()
        logz.pickle_tf_vars()
def main():
    """Parse command-line arguments and launch one training process per seed.

    Each experiment gets seed = args.seed + 10*e and its own subdirectory of
    the shared log directory.  Experiments run as separate processes because
    Tensorflow does not like repeatedly calling train_PG in the same thread.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--env_name', type=str, default='CartPole-v0')
    parser.add_argument('--exp_name', type=str, default='vpg')
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--discount', type=float, default=1.0)
    parser.add_argument('--n_iter', '-n', type=int, default=100)
    parser.add_argument('--batch_size', '-b', type=int, default=1000)
    parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
    parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
    parser.add_argument('--reward_to_go', '-rtg', action='store_true')
    parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
    parser.add_argument('--nn_baseline', '-bl', action='store_true')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--n_experiments', '-e', type=int, default=1)
    parser.add_argument('--n_layers', '-l', type=int, default=2)
    parser.add_argument('--size', '-s', type=int, default=64)
    args = parser.parse_args()

    # exist_ok avoids the check-then-create race of `if not exists: makedirs`
    os.makedirs('data', exist_ok=True)

    logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
    logdir = os.path.join('data', logdir)
    os.makedirs(logdir, exist_ok=True)

    # ep_len <= 0 means "use the environment's default episode length"
    max_path_length = args.ep_len if args.ep_len > 0 else None

    processes = []

    for e in range(args.n_experiments):
        seed = args.seed + 10*e
        print('Running experiment with seed %d'%seed)

        # Bind the loop's current seed as a default argument: Python closures
        # capture variables late, so without this binding the function could
        # observe a later seed if its execution were deferred past the next
        # loop iteration (fork-based Process.start() happens to capture early,
        # but this makes the intent explicit and robust).
        def train_func(seed=seed):
            train_PG(
                exp_name=args.exp_name,
                env_name=args.env_name,
                n_iter=args.n_iter,
                gamma=args.discount,
                min_timesteps_per_batch=args.batch_size,
                max_path_length=max_path_length,
                learning_rate=args.learning_rate,
                reward_to_go=args.reward_to_go,
                animate=args.render,
                logdir=os.path.join(logdir,'%d'%seed),
                normalize_advantages=not(args.dont_normalize_advantages),
                nn_baseline=args.nn_baseline,
                seed=seed,
                n_layers=args.n_layers,
                size=args.size
                )
        # # Awkward hacky process runs, because Tensorflow does not like
        # # repeatedly calling train_PG in the same thread.
        p = Process(target=train_func, args=tuple())
        p.start()
        processes.append(p)
        # if you comment in the line below, then the loop will block
        # until this process finishes
        # p.join()

    for p in processes:
        p.join()

if __name__ == "__main__":
    main()
|
ui_utils.py | # -*- coding: utf-8 -*-
from logging import getLogger
import os
import platform
import re
import subprocess
import sys
import textwrap
import threading
import time
import tkinter as tk
import tkinter.font
import traceback
from tkinter import filedialog, messagebox, ttk
from typing import Callable, List, Optional, Tuple, Union # @UnusedImport
from _tkinter import TclError
from thonny import get_workbench, misc_utils, tktextext
from thonny.common import TextRange
from thonny.languages import get_button_padding, tr
from thonny.misc_utils import (
running_on_linux,
running_on_mac_os,
running_on_rpi,
running_on_windows,
)
from thonny.tktextext import TweakableText
# Matches any single round/curly/square bracket character; used to decide
# whether a one-character edit could affect paren matching.
PARENS_REGEX = re.compile(r"[\(\)\{\}\[\]]")

logger = getLogger(__name__)
class CommonDialog(tk.Toplevel):
    """Base Toplevel for Thonny dialogs.

    Deiconifies itself (and restores keyboard focus) if it receives focus
    while not mapped, and provides the standard padding amounts used by
    subclasses when laying out their widgets.
    """

    def __init__(self, master=None, cnf={}, **kw):
        super().__init__(master=master, cnf=cnf, **kw)
        self.bind("<FocusIn>", self._unlock_on_focus_in, True)

    def _unlock_on_focus_in(self, event):
        # Already visible — nothing to restore.
        if self.winfo_ismapped():
            return

        focussed_widget = self.focus_get()
        self.deiconify()
        if focussed_widget:
            focussed_widget.focus_set()

    def get_padding(self):
        """Outer padding (pixels) for dialog layout."""
        return ems_to_pixels(2)

    def get_internal_padding(self):
        """Smaller padding (a quarter of the outer one) for inner spacing."""
        return self.get_padding() // 4
class CommonDialogEx(CommonDialog):
    """CommonDialog pre-filled with a themed main frame and close handling.

    Subclasses should place their widgets into self.main_frame.  Pressing
    Escape or using the window manager's close button destroys the dialog.
    """

    def __init__(self, master=None, cnf={}, **kw):
        super().__init__(master=master, cnf=cnf, **kw)

        # Need to fill the dialog with a frame to gain theme support
        self.main_frame = ttk.Frame(self)
        self.main_frame.grid(row=0, column=0, sticky="nsew")
        self.rowconfigure(0, weight=1)
        self.columnconfigure(0, weight=1)

        self.protocol("WM_DELETE_WINDOW", self.on_close)
        self.bind("<Escape>", self.on_close, True)

    def on_close(self, event=None):
        self.destroy()
class QueryDialog(CommonDialogEx):
    """Dialog asking the user for a single string value.

    If *options* is given, an editable combobox with those choices is shown
    instead of a plain entry.  After the dialog closes, get_result() returns
    the entered string, or None if the dialog was cancelled.
    """

    def __init__(
        self,
        master,
        title: str,
        prompt: str,
        initial_value: str = "",
        # None instead of [] — a mutable default argument would be shared
        # between calls; semantics are unchanged because the empty list and
        # None are handled identically below.
        options: Optional[List[str]] = None,
        entry_width: Optional[int] = None,
    ):
        super().__init__(master)

        self.var = tk.StringVar(value=initial_value)
        self.result = None

        margin = self.get_padding()
        spacing = margin // 2

        self.title(title)

        self.prompt_label = ttk.Label(self.main_frame, text=prompt)
        self.prompt_label.grid(row=1, column=1, columnspan=2, padx=margin, pady=(margin, spacing))

        if options:
            self.entry_widget = ttk.Combobox(
                self.main_frame, textvariable=self.var, values=options, height=15, width=entry_width
            )
        else:
            self.entry_widget = ttk.Entry(self.main_frame, textvariable=self.var, width=entry_width)
        # Both Return and keypad Enter accept the value
        self.entry_widget.bind("<Return>", self.on_ok, True)
        self.entry_widget.bind("<KP_Enter>", self.on_ok, True)
        self.entry_widget.grid(
            row=3, column=1, columnspan=2, sticky="we", padx=margin, pady=(0, margin)
        )

        self.ok_button = ttk.Button(
            self.main_frame, text=tr("OK"), command=self.on_ok, default="active"
        )
        self.ok_button.grid(row=5, column=1, padx=(margin, spacing), pady=(0, margin), sticky="e")
        self.cancel_button = ttk.Button(self.main_frame, text=tr("Cancel"), command=self.on_cancel)
        self.cancel_button.grid(row=5, column=2, padx=(0, margin), pady=(0, margin), sticky="e")

        self.main_frame.columnconfigure(1, weight=1)
        self.entry_widget.focus_set()

    def on_ok(self, event=None):
        self.result = self.var.get()
        self.destroy()

    def on_cancel(self, event=None):
        self.result = None
        self.destroy()

    def get_result(self) -> Optional[str]:
        """Return the accepted string, or None when the dialog was cancelled."""
        return self.result
def ask_string(
    title: str,
    prompt: str,
    initial_value: str = "",
    # None instead of [] avoids the mutable-default-argument pitfall;
    # downstream treats None and the empty list identically.
    options: Optional[List[str]] = None,
    entry_width: Optional[int] = None,
    master=None,
) -> Optional[str]:
    """Show a modal QueryDialog and return the entered string.

    Returns None if the user cancelled the dialog.
    """
    dlg = QueryDialog(
        master, title, prompt, initial_value=initial_value, options=options, entry_width=entry_width
    )
    show_dialog(dlg, master)
    return dlg.get_result()
class CustomMenubar(ttk.Frame):
    """Frame-based replacement for a native menubar.

    Menu titles are ttk Labels styled via "CustomMenubarLabel.TLabel";
    clicking a label (or hovering it while another menu is open) pops up
    the corresponding tk.Menu just below the label.
    """

    def __init__(self, master):
        ttk.Frame.__init__(self, master, style="CustomMenubar.TFrame")
        self._menus = []
        # Currently posted tk.Menu, or None; used for hover-switching below.
        self._opened_menu = None

        ttk.Style().map(
            "CustomMenubarLabel.TLabel",
            background=[
                ("!active", lookup_style_option("Menubar", "background", "gray")),
                ("active", lookup_style_option("Menubar", "activebackground", "LightYellow")),
            ],
            foreground=[
                ("!active", lookup_style_option("Menubar", "foreground", "black")),
                ("active", lookup_style_option("Menubar", "activeforeground", "black")),
            ],
        )

    def add_cascade(self, label, menu):
        """Add a top-level menu titled *label* that pops up *menu*."""
        label_widget = ttk.Label(
            self,
            style="CustomMenubarLabel.TLabel",
            text=label,
            padding=[6, 3, 6, 2],
            font="TkDefaultFont",
        )

        # Extra left margin only before the first menu title.
        if len(self._menus) == 0:
            padx = (6, 0)
        else:
            padx = 0

        label_widget.grid(row=0, column=len(self._menus), padx=padx)

        def enter(event):
            label_widget.state(("active",))

            # Don't know how to open this menu when another menu is open
            # another tk_popup just doesn't work unless old menu is closed by click or Esc
            # https://stackoverflow.com/questions/38081470/is-there-a-way-to-know-if-tkinter-optionmenu-dropdown-is-active
            # unpost doesn't work in Win and Mac: https://www.tcl.tk/man/tcl8.5/TkCmd/menu.htm#M62
            # print("ENTER", menu, self._opened_menu)
            if self._opened_menu is not None:
                self._opened_menu.unpost()
                click(event)

        def leave(event):
            label_widget.state(("!active",))

        def click(event):
            try:
                # print("Before")
                self._opened_menu = menu
                menu.tk_popup(
                    label_widget.winfo_rootx(),
                    label_widget.winfo_rooty() + label_widget.winfo_height(),
                )
            finally:
                # print("After")
                self._opened_menu = None

        label_widget.bind("<Enter>", enter, True)
        label_widget.bind("<Leave>", leave, True)
        label_widget.bind("<1>", click, True)
        self._menus.append(menu)
class AutomaticPanedWindow(tk.PanedWindow):
    """
    Enables inserting panes according to their position_key-s.
    Automatically adds/removes itself to/from its master AutomaticPanedWindow.
    Fixes some style glitches.
    """

    def __init__(self, master, position_key=None, preferred_size_in_pw=None, **kwargs):
        tk.PanedWindow.__init__(self, master, border=0, **kwargs)
        self._pane_minsize = 100
        self.position_key = position_key
        # True while sashes are being repositioned programmatically, so the
        # resize/drag handlers don't echo back into size restoration.
        self._restoring_pane_sizes = False
        self._last_window_size = (0, 0)
        self._full_size_not_final = True
        self._configure_binding = self.bind("<Configure>", self._on_window_resize, True)
        self._update_appearance_binding = self.bind(
            "<<ThemeChanged>>", self._update_appearance, True
        )
        self.bind("<B1-Motion>", self._on_mouse_dragged, True)
        self._update_appearance()

        # should be in the end, so that it can be detected when
        # constructor hasn't completed yet
        self.preferred_size_in_pw = preferred_size_in_pw

    def insert(self, pos, child, **kw):
        """Insert *child*; pos may be a widget, index, "end" or "auto"
        (= find position by comparing position_key attributes)."""
        kw.setdefault("minsize", self._pane_minsize)

        if pos == "auto":
            # According to documentation I should use self.panes()
            # but this doesn't return expected widgets
            for sibling in sorted(
                self.pane_widgets(),
                key=lambda p: p.position_key if hasattr(p, "position_key") else 0,
            ):
                if (
                    not hasattr(sibling, "position_key")
                    or sibling.position_key == None
                    or sibling.position_key > child.position_key
                ):
                    pos = sibling
                    break
            else:
                pos = "end"

        if isinstance(pos, tk.Widget):
            kw["before"] = pos

        self.add(child, **kw)

    def add(self, child, **kw):
        kw.setdefault("minsize", self._pane_minsize)
        tk.PanedWindow.add(self, child, **kw)
        self._update_visibility()
        self._check_restore_preferred_sizes()

    def remove(self, child):
        tk.PanedWindow.remove(self, child)
        self._update_visibility()
        self._check_restore_preferred_sizes()

    def forget(self, child):
        tk.PanedWindow.forget(self, child)
        self._update_visibility()
        self._check_restore_preferred_sizes()

    def destroy(self):
        # Unbind explicitly so late events don't hit a half-destroyed widget.
        self.unbind("<Configure>", self._configure_binding)
        self.unbind("<<ThemeChanged>>", self._update_appearance_binding)
        tk.PanedWindow.destroy(self)

    def is_visible(self):
        if not isinstance(self.master, AutomaticPanedWindow):
            return self.winfo_ismapped()
        else:
            return self in self.master.pane_widgets()

    def pane_widgets(self):
        """Return the panes as actual widgets (self.panes() gives references)."""
        result = []
        for pane in self.panes():
            # pane is not the widget but some kind of reference object
            assert not isinstance(pane, tk.Widget)
            result.append(self.nametowidget(str(pane)))
        return result

    def _on_window_resize(self, event):
        # Ignore degenerate geometry reported during early setup.
        if event.width < 10 or event.height < 10:
            return

        window = self.winfo_toplevel()
        window_size = (window.winfo_width(), window.winfo_height())
        initializing = hasattr(window, "initializing") and window.initializing
        if (
            not initializing
            and not self._restoring_pane_sizes
            and (window_size != self._last_window_size or self._full_size_not_final)
        ):
            self._check_restore_preferred_sizes()
            self._last_window_size = window_size

    def _on_mouse_dragged(self, event):
        # The user moved a sash: remember current pane sizes as preferred.
        if event.widget == self and not self._restoring_pane_sizes:
            self._update_preferred_sizes()

    def _update_preferred_sizes(self):
        for pane in self.pane_widgets():
            if getattr(pane, "preferred_size_in_pw", None) is not None:
                if self.cget("orient") == "horizontal":
                    current_size = pane.winfo_width()
                else:
                    current_size = pane.winfo_height()

                # Only sensible sizes are remembered.
                if current_size > 20:
                    pane.preferred_size_in_pw = current_size

                    # paneconfig width/height effectively puts
                    # unexplainable maxsize to some panes
                    # if self.cget("orient") == "horizontal":
                    #     self.paneconfig(pane, width=current_size)
                    # else:
                    #     self.paneconfig(pane, height=current_size)
                    #
            # else:
            #     self.paneconfig(pane, width=1000, height=1000)

    def _check_restore_preferred_sizes(self):
        window = self.winfo_toplevel()
        if getattr(window, "initializing", False):
            return

        try:
            self._restoring_pane_sizes = True
            self._restore_preferred_sizes()
        finally:
            self._restoring_pane_sizes = False

    def _restore_preferred_sizes(self):
        """Reposition the sashes so each pane gets its preferred size;
        the (at most one) pane without a preference takes the leftover."""
        total_preferred_size = 0
        panes_without_preferred_size = []

        panes = self.pane_widgets()
        for pane in panes:
            if not hasattr(pane, "preferred_size_in_pw"):
                # child isn't fully constructed yet
                return

            if pane.preferred_size_in_pw is None:
                panes_without_preferred_size.append(pane)
                # self.paneconfig(pane, width=1000, height=1000)
            else:
                total_preferred_size += pane.preferred_size_in_pw

                # Without updating pane width/height attribute
                # the preferred size may lose effect when squeezing
                # non-preferred panes too small. Also zooming/unzooming
                # changes the supposedly fixed panes ...
                #
                # but
                # paneconfig width/height effectively puts
                # unexplainable maxsize to some panes
                # if self.cget("orient") == "horizontal":
                #     self.paneconfig(pane, width=pane.preferred_size_in_pw)
                # else:
                #     self.paneconfig(pane, height=pane.preferred_size_in_pw)

        assert len(panes_without_preferred_size) <= 1

        size = self._get_size()
        if size is None:
            return

        leftover_size = self._get_size() - total_preferred_size
        used_size = 0
        for i, pane in enumerate(panes[:-1]):
            used_size += pane.preferred_size_in_pw or leftover_size
            self._place_sash(i, used_size)
            used_size += int(str(self.cget("sashwidth")))

    def _get_size(self):
        if self.cget("orient") == tk.HORIZONTAL:
            result = self.winfo_width()
        else:
            result = self.winfo_height()

        if result < 20:
            # Not ready yet
            return None
        else:
            return result

    def _place_sash(self, i, distance):
        # distance is measured along the orientation axis
        if self.cget("orient") == tk.HORIZONTAL:
            self.sash_place(i, distance, 0)
        else:
            self.sash_place(i, 0, distance)

    def _update_visibility(self):
        if not isinstance(self.master, AutomaticPanedWindow):
            return

        if len(self.panes()) == 0 and self.is_visible():
            self.master.forget(self)

        if len(self.panes()) > 0 and not self.is_visible():
            self.master.insert("auto", self)

    def _update_appearance(self, event=None):
        self.configure(sashwidth=lookup_style_option("Sash", "sashthickness", ems_to_pixels(0.6)))
        self.configure(background=lookup_style_option("TPanedWindow", "background"))
class ClosableNotebook(ttk.Notebook):
    """Notebook whose tabs have close buttons and a right-click context
    menu (Close / Close others / Close all)."""

    def __init__(self, master, style="ButtonNotebook.TNotebook", **kw):
        super().__init__(master, style=style, **kw)

        self.tab_menu = self.create_tab_menu()
        # index of the tab the context menu was opened on
        self._popup_index = None
        # index of the tab whose close button was pressed (None between clicks)
        self.pressed_index = None
        # NOTE(review): "_letf_" looks like a typo for "_left_"; name kept
        # unchanged in this documentation-only pass.
        self.bind("<ButtonPress-1>", self._letf_btn_press, True)
        self.bind("<ButtonRelease-1>", self._left_btn_release, True)
        if running_on_mac_os():
            # Mac: secondary button or Ctrl+click opens the context menu
            self.bind("<ButtonPress-2>", self._right_btn_press, True)
            self.bind("<Control-Button-1>", self._right_btn_press, True)
        else:
            self.bind("<ButtonPress-3>", self._right_btn_press, True)

        # self._check_update_style()

    def create_tab_menu(self):
        """Create the context menu shown on right-clicking a tab."""
        menu = tk.Menu(self.winfo_toplevel(), tearoff=False, **get_style_configuration("Menu"))
        menu.add_command(label=tr("Close"), command=self._close_tab_from_menu)
        menu.add_command(label=tr("Close others"), command=self._close_other_tabs)
        menu.add_command(label=tr("Close all"), command=self.close_tabs)
        return menu

    def _letf_btn_press(self, event):
        # Arm the close action when the press lands on a tab's close button.
        try:
            elem = self.identify(event.x, event.y)
            index = self.index("@%d,%d" % (event.x, event.y))

            if "closebutton" in elem:
                self.state(["pressed"])
                self.pressed_index = index
        except Exception:
            # may fail, if clicked outside of tab
            return

    def _left_btn_release(self, event):
        # Close the tab only if the release is on the same close button
        # that was pressed.
        if not self.instate(["pressed"]):
            return
        try:
            elem = self.identify(event.x, event.y)
            index = self.index("@%d,%d" % (event.x, event.y))
        except Exception:
            # may fail, when mouse is dragged
            return
        else:
            if "closebutton" in elem and self.pressed_index == index:
                self.close_tab(index)

            self.state(["!pressed"])
        finally:
            self.pressed_index = None

    def _right_btn_press(self, event):
        try:
            index = self.index("@%d,%d" % (event.x, event.y))
            self._popup_index = index
            self.tab_menu.tk_popup(*self.winfo_toplevel().winfo_pointerxy())
        except Exception:
            logger.exception("Opening tab menu")

    def _close_tab_from_menu(self):
        self.close_tab(self._popup_index)

    def _close_other_tabs(self):
        self.close_tabs(self._popup_index)

    def close_tabs(self, except_index=None):
        """Close all tabs, optionally keeping the one at *except_index*."""
        # iterate in reverse so indices stay valid while tabs disappear
        for tab_index in reversed(range(len(self.winfo_children()))):
            if except_index is not None and tab_index == except_index:
                continue
            else:
                self.close_tab(tab_index)

    def close_tab(self, index):
        child = self.get_child_by_index(index)
        if hasattr(child, "close"):
            # let the child run its own close logic (e.g. "save changes?")
            child.close()
        else:
            self.forget(index)
            child.destroy()

    def get_child_by_index(self, index):
        tab_id = self.tabs()[index]
        if tab_id:
            return self.nametowidget(tab_id)
        else:
            return None

    def get_current_child(self):
        child_id = self.select()
        if child_id:
            return self.nametowidget(child_id)
        else:
            return None

    def focus_set(self):
        # delegate focus to the selected child when there is one
        editor = self.get_current_child()
        if editor:
            editor.focus_set()
        else:
            super().focus_set()

    def _check_update_style(self):
        """Lazily create the close-button element and tab layout for the
        "ButtonNotebook" style (done once per interpreter)."""
        style = ttk.Style()
        if "closebutton" in style.element_names():
            # It's done already
            return

        # respect if required images have been defined already
        if "img_close" not in self.image_names():
            img_dir = os.path.join(os.path.dirname(__file__), "res")
            # stored on the class to keep the PhotoImages referenced
            ClosableNotebook._close_img = tk.PhotoImage(
                "img_tab_close", file=os.path.join(img_dir, "tab_close.gif")
            )
            ClosableNotebook._close_active_img = tk.PhotoImage(
                "img_tab_close_active", file=os.path.join(img_dir, "tab_close_active.gif")
            )

        style.element_create(
            "closebutton",
            "image",
            "img_tab_close",
            ("active", "pressed", "!disabled", "img_tab_close_active"),
            ("active", "!disabled", "img_tab_close_active"),
            border=8,
            sticky="",
        )

        style.layout(
            "ButtonNotebook.TNotebook.Tab",
            [
                (
                    "Notebook.tab",
                    {
                        "sticky": "nswe",
                        "children": [
                            (
                                "Notebook.padding",
                                {
                                    "side": "top",
                                    "sticky": "nswe",
                                    "children": [
                                        (
                                            "Notebook.focus",
                                            {
                                                "side": "top",
                                                "sticky": "nswe",
                                                "children": [
                                                    (
                                                        "Notebook.label",
                                                        {"side": "left", "sticky": ""},
                                                    ),
                                                    (
                                                        "Notebook.closebutton",
                                                        {"side": "left", "sticky": ""},
                                                    ),
                                                ],
                                            },
                                        )
                                    ],
                                },
                            )
                        ],
                    },
                )
            ],
        )

    def _check_remove_padding(self, kw):
        # Windows themes produce 1-pixel padding to the bottom of the pane
        # Don't know how to get rid of it using themes
        if "padding" not in kw and ttk.Style().theme_use().lower() in (
            "windows",
            "xpnative",
            "vista",
        ):
            kw["padding"] = (0, 0, 0, -1)

    def add(self, child, **kw):
        self._check_remove_padding(kw)
        super().add(child, **kw)

    def insert(self, pos, child, **kw):
        self._check_remove_padding(kw)
        super().insert(pos, child, **kw)
class AutomaticNotebook(ClosableNotebook):
    """
    Enables inserting views according to their position keys.
    Remember its own position key. Automatically updates its visibility.
    """

    def __init__(self, master, position_key, preferred_size_in_pw=None):
        if get_workbench().in_simple_mode():
            style = "TNotebook"
        else:
            style = "ButtonNotebook.TNotebook"
        super().__init__(master, style=style, padding=0)
        self.position_key = position_key

        # should be in the end, so that it can be detected when
        # constructor hasn't completed yet
        self.preferred_size_in_pw = preferred_size_in_pw

    def add(self, child, **kw):
        super().add(child, **kw)
        self._update_visibility()

    def insert(self, pos, child, **kw):
        # "auto" means: find the slot by comparing position_key attributes
        if pos == "auto":
            for sibling in map(self.nametowidget, self.tabs()):
                if (
                    not hasattr(sibling, "position_key")
                    or sibling.position_key == None
                    or sibling.position_key > child.position_key
                ):
                    pos = sibling
                    break
            else:
                pos = "end"

        super().insert(pos, child, **kw)
        self._update_visibility()

    def hide(self, tab_id):
        super().hide(tab_id)
        self._update_visibility()

    def forget(self, tab_id):
        if tab_id in self.tabs() or tab_id in self.winfo_children():
            super().forget(tab_id)
        self._update_visibility()

    def is_visible(self):
        return self in self.master.pane_widgets()

    def get_visible_child(self):
        """Return the currently selected tab's widget, or None."""
        for child in self.winfo_children():
            if str(child) == str(self.select()):
                return child

        return None

    def _update_visibility(self):
        # Add/remove self from the parent AutomaticPanedWindow depending on
        # whether there are tabs to show.
        if not isinstance(self.master, AutomaticPanedWindow):
            return

        if len(self.tabs()) == 0 and self.is_visible():
            self.master.remove(self)

        if len(self.tabs()) > 0 and not self.is_visible():
            self.master.insert("auto", self)
class TreeFrame(ttk.Frame):
    """A ttk.Treeview with a vertical scrollbar, optional status bar and
    an overlay label for error messages.  on_select/on_double_click are
    hooks for subclasses."""

    def __init__(
        self,
        master,
        columns,
        displaycolumns="#all",
        show_scrollbar=True,
        show_statusbar=False,
        borderwidth=0,
        relief="flat",
        **tree_kw,
    ):
        ttk.Frame.__init__(self, master, borderwidth=borderwidth, relief=relief)
        # http://wiki.tcl.tk/44444#pagetoc50f90d9a
        self.vert_scrollbar = ttk.Scrollbar(
            self, orient=tk.VERTICAL, style=scrollbar_style("Vertical")
        )
        if show_scrollbar:
            self.vert_scrollbar.grid(
                row=0, column=1, sticky=tk.NSEW, rowspan=2 if show_statusbar else 1
            )

        self.tree = ttk.Treeview(
            self,
            columns=columns,
            displaycolumns=displaycolumns,
            yscrollcommand=self.vert_scrollbar.set,
            **tree_kw,
        )
        self.tree["show"] = "headings"
        self.tree.grid(row=0, column=0, sticky=tk.NSEW)
        self.vert_scrollbar["command"] = self.tree.yview
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        self.tree.bind("<<TreeviewSelect>>", self.on_select, "+")
        self.tree.bind("<Double-Button-1>", self.on_double_click, "+")

        # shown over the tree by show_error(); hidden by clear_error()
        self.error_label = ttk.Label(self.tree)

        if show_statusbar:
            self.statusbar = ttk.Frame(self)
            self.statusbar.grid(row=1, column=0, sticky="nswe")
        else:
            self.statusbar = None

    def _clear_tree(self):
        for child_id in self.tree.get_children():
            self.tree.delete(child_id)

    def clear(self):
        self._clear_tree()

    def on_select(self, event):
        # hook for subclasses
        pass

    def on_double_click(self, event):
        # hook for subclasses
        pass

    def show_error(self, error_text):
        self.error_label.configure(text=error_text)
        self.error_label.grid()

    def clear_error(self):
        self.error_label.grid_remove()
def scrollbar_style(orientation):
    """Return a ttk style name for a scrollbar of given orientation,
    or None to keep the default rendering.

    On macOS ttk.Scrollbar uses native rendering unless the style
    attribute is set (see http://wiki.tcl.tk/44444#pagetoc50f90d9a),
    and native rendering doesn't look good in dark themes.
    """
    needs_themed_look = running_on_mac_os() and get_workbench().uses_dark_ui_theme()
    return orientation + ".TScrollbar" if needs_themed_look else None
def sequence_to_accelerator(sequence):
    """Translate a Tk event sequence into the customary shortcut string
    for showing in the menu (e.g. "<Control-s>" -> "Ctrl+S")."""
    if not sequence:
        return ""

    if not sequence.startswith("<"):
        # not a Tk sequence — show as-is
        return sequence

    normalized = sequence.strip("<>")
    for old, new in (("Key-", ""), ("KeyPress-", ""), ("Control", "Ctrl")):
        normalized = normalized.replace(old, new)

    # Tweaking individual parts
    parts = normalized.split("-")

    # tkinter shows shift with capital letter, but in shortcuts it's
    # customary to include it explicitly
    if len(parts[-1]) == 1 and parts[-1].isupper() and "Shift" not in parts:
        parts.insert(-1, "Shift")

    # even when shift is not required, it's customary to show shortcut
    # with capital letter
    if len(parts[-1]) == 1:
        parts[-1] = parts[-1].upper()

    result = "+".join(parts)

    # Post processing: spell out minus/plus key names as symbols
    for old, new in (("Minus", "-"), ("minus", "-"), ("Plus", "+"), ("plus", "+")):
        result = result.replace(old, new)

    return result
def get_zoomed(toplevel):
    """Return True when *toplevel* is maximized ("zoomed"), portably."""
    if "-zoomed" in toplevel.wm_attributes():
        # Linux exposes the zoomed state as a window attribute
        return bool(toplevel.wm_attributes("-zoomed"))
    # Windows and macOS report it via the window state instead
    return toplevel.wm_state() == "zoomed"
def set_zoomed(toplevel, value):
    """Maximize ("zoom") or restore *toplevel*, portably."""
    if "-zoomed" in toplevel.wm_attributes():
        # Linux: the zoomed state is a window attribute ("1"/"0")
        toplevel.wm_attributes("-zoomed", str(int(value)))
        return
    # Windows and macOS use the window state instead
    toplevel.wm_state("zoomed" if value else "normal")
class EnhancedTextWithLogging(tktextext.EnhancedText):
    """EnhancedText which publishes "TextInsert"/"TextDelete" workbench
    events for every programmatic edit, together with hints about whether
    the edit can affect syntax coloring or paren matching."""

    def __init__(self, master=None, style="Text", tag_current_line=False, cnf={}, **kw):
        super().__init__(
            master=master, style=style, tag_current_line=tag_current_line, cnf=cnf, **kw
        )
        self._last_event_changed_line_count = False

    def direct_insert(self, index, chars, tags=None, **kw):
        # try removing line numbers
        # TODO: shouldn't it take place only on paste?
        # TODO: does it occur when opening a file with line numbers in it?
        # if self._propose_remove_line_numbers and isinstance(chars, str):
        #     chars = try_remove_linenumbers(chars, self)
        # resolve symbolic index ("insert", "sel.first", ...) before the edit
        concrete_index = self.index(index)
        line_before = self.get(concrete_index + " linestart", concrete_index + " lineend")
        self._last_event_changed_line_count = "\n" in chars
        result = tktextext.EnhancedText.direct_insert(self, index, chars, tags=tags, **kw)
        line_after = self.get(concrete_index + " linestart", concrete_index + " lineend")
        trivial_for_coloring, trivial_for_parens = self._is_trivial_edit(
            chars, line_before, line_after
        )
        get_workbench().event_generate(
            "TextInsert",
            index=concrete_index,
            text=chars,
            tags=tags,
            text_widget=self,
            trivial_for_coloring=trivial_for_coloring,
            trivial_for_parens=trivial_for_parens,
        )
        return result

    def direct_delete(self, index1, index2=None, **kw):
        try:
            # index1 may be eg "sel.first" and it doesn't make sense *after* deletion
            concrete_index1 = self.index(index1)
            if index2 is not None:
                concrete_index2 = self.index(index2)
            else:
                concrete_index2 = None

            chars = self.get(index1, index2)
            self._last_event_changed_line_count = "\n" in chars
            line_before = self.get(
                concrete_index1 + " linestart",
                (concrete_index1 if concrete_index2 is None else concrete_index2) + " lineend",
            )
            return tktextext.EnhancedText.direct_delete(self, index1, index2=index2, **kw)
        finally:
            # the event is generated even if deletion raised, using the
            # names bound above (they exist whenever get/index succeeded)
            line_after = self.get(
                concrete_index1 + " linestart",
                (concrete_index1 if concrete_index2 is None else concrete_index2) + " lineend",
            )
            trivial_for_coloring, trivial_for_parens = self._is_trivial_edit(
                chars, line_before, line_after
            )
            get_workbench().event_generate(
                "TextDelete",
                index1=concrete_index1,
                index2=concrete_index2,
                text_widget=self,
                trivial_for_coloring=trivial_for_coloring,
                trivial_for_parens=trivial_for_parens,
            )

    def _is_trivial_edit(self, chars, line_before, line_after):
        """Classify an edit: does it possibly affect (a) syntax coloring
        and (b) paren matching beyond the current line?"""
        # line is taken after edit for insertion and before edit for deletion
        if not chars.strip():
            # linebreaks, including with automatic indent
            # check it doesn't break a triple-quote
            trivial_for_coloring = line_before.count("'''") == line_after.count(
                "'''"
            ) and line_before.count('"""') == line_after.count('"""')
            trivial_for_parens = trivial_for_coloring
        elif len(chars) > 1:
            # paste, cut, load or something like this
            trivial_for_coloring = False
            trivial_for_parens = False
        elif chars == "#":
            # BUG FIX: this previously tested for "''''" (four quotes);
            # consistent with every other branch, the intended check is for
            # the triple-quote "'''".
            trivial_for_coloring = "'''" not in line_before and '"""' not in line_before
            trivial_for_parens = trivial_for_coloring and not re.search(PARENS_REGEX, line_before)
        elif chars in "()[]{}":
            trivial_for_coloring = line_before.count("'''") == line_after.count(
                "'''"
            ) and line_before.count('"""') == line_after.count('"""')
            trivial_for_parens = False
        elif chars == "'":
            trivial_for_coloring = "'''" not in line_before and "'''" not in line_after
            trivial_for_parens = False  # can put parens into open string
        elif chars == '"':
            trivial_for_coloring = '"""' not in line_before and '"""' not in line_after
            trivial_for_parens = False  # can put parens into open string
        elif chars == "\\":
            # can shorten closing quote
            trivial_for_coloring = '"""' not in line_before and '"""' not in line_after
            trivial_for_parens = False
        else:
            trivial_for_coloring = line_before.count("'''") == line_after.count(
                "'''"
            ) and line_before.count('"""') == line_after.count('"""')
            trivial_for_parens = trivial_for_coloring
        return trivial_for_coloring, trivial_for_parens
class SafeScrollbar(ttk.Scrollbar):
    """Scrollbar whose set() never propagates exceptions; failures are
    printed to stderr instead of crashing the caller."""

    def __init__(self, master=None, **kw):
        super().__init__(master=master, **kw)

    def set(self, first, last):
        try:
            ttk.Scrollbar.set(self, first, last)
        except Exception:
            # swallow deliberately — a broken scrollbar update must not
            # break the widget that triggered it
            traceback.print_exc()
class AutoScrollbar(SafeScrollbar):
    # http://effbot.org/zone/tkinter-autoscrollbar.htm
    # a vert_scrollbar that hides itself if it's not needed. only
    # works if you use the grid geometry manager.
    def __init__(self, master=None, **kw):
        super().__init__(master=master, **kw)

    def set(self, first, last):
        if float(first) <= 0.0 and float(last) >= 1.0:
            # everything is visible — hide the scrollbar
            self.grid_remove()
        elif float(first) > 0.001 or float(last) < 0.009:
            # with >0 and <1 it occasionally made scrollbar wobble back and forth
            # NOTE(review): the 0.009 threshold for `last` is surprising (one
            # might expect a value near 1.0) — confirm it is intentional.
            self.grid()
        ttk.Scrollbar.set(self, first, last)

    def pack(self, **kw):
        # visibility is managed via grid/grid_remove, so pack is unsupported
        raise tk.TclError("cannot use pack with this widget")

    def place(self, **kw):
        raise tk.TclError("cannot use place with this widget")
def update_entry_text(entry, text):
    """Replace the content of *entry* with *text*, preserving its state.

    Works also for disabled/readonly entries: the widget is temporarily
    switched to "normal" state for the edit and then restored.
    """
    saved_state = entry.cget("state")
    entry.config(state="normal")
    entry.delete(0, "end")
    entry.insert(0, text)
    entry.config(state=saved_state)
class VerticallyScrollableFrame(ttk.Frame):
    """Frame with a vertical scrollbar; place content into its .interior
    frame (hosted in a canvas window item)."""

    # http://tkinter.unpythonic.net/wiki/VerticalScrolledFrame
    def __init__(self, master):
        ttk.Frame.__init__(self, master)

        # set up scrolling with canvas
        vscrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
        self.canvas = tk.Canvas(self, bd=0, highlightthickness=0, yscrollcommand=vscrollbar.set)
        vscrollbar.config(command=self.canvas.yview)
        self.canvas.xview_moveto(0)
        self.canvas.yview_moveto(0)
        self.canvas.grid(row=0, column=0, sticky=tk.NSEW)
        vscrollbar.grid(row=0, column=1, sticky=tk.NSEW)
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        self.interior = ttk.Frame(self.canvas)
        self.interior_id = self.canvas.create_window(0, 0, window=self.interior, anchor=tk.NW)
        self.bind("<Configure>", self._configure_interior, "+")
        self.bind("<Expose>", self._expose, "+")

    def _expose(self, event):
        self.update_idletasks()
        self.update_scrollbars()

    def _configure_interior(self, event):
        self.update_scrollbars()

    def update_scrollbars(self):
        # update the scrollbars to match the size of the inner frame
        size = (self.canvas.winfo_width(), self.interior.winfo_reqheight())
        self.canvas.config(scrollregion="0 0 %s %s" % size)

        if (
            self.interior.winfo_reqwidth() != self.canvas.winfo_width()
            and self.canvas.winfo_width() > 10
        ):
            # update the interior's width to fit canvas
            # print("CAWI", self.canvas.winfo_width())
            self.canvas.itemconfigure(self.interior_id, width=self.canvas.winfo_width())
class ScrollableFrame(ttk.Frame):
    """Frame with both vertical and horizontal scrollbars; place content
    into its .interior frame (hosted in a canvas window item)."""

    # http://tkinter.unpythonic.net/wiki/VerticalScrolledFrame
    def __init__(self, master):
        ttk.Frame.__init__(self, master)

        # set up scrolling with canvas
        vscrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
        hscrollbar = ttk.Scrollbar(self, orient=tk.HORIZONTAL)
        self.canvas = tk.Canvas(self, bd=0, highlightthickness=0, yscrollcommand=vscrollbar.set)
        vscrollbar.config(command=self.canvas.yview)
        hscrollbar.config(command=self.canvas.xview)
        self.canvas.xview_moveto(0)
        self.canvas.yview_moveto(0)
        self.canvas.grid(row=0, column=0, sticky=tk.NSEW)
        vscrollbar.grid(row=0, column=1, sticky=tk.NSEW)
        hscrollbar.grid(row=1, column=0, sticky=tk.NSEW)
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        self.interior = ttk.Frame(self.canvas)
        self.interior.columnconfigure(0, weight=1)
        self.interior.rowconfigure(0, weight=1)
        self.interior_id = self.canvas.create_window(0, 0, window=self.interior, anchor=tk.NW)
        self.bind("<Configure>", self._configure_interior, "+")
        self.bind("<Expose>", self._expose, "+")

    def _expose(self, event):
        self.update_idletasks()
        self._configure_interior(event)

    def _configure_interior(self, event):
        # update the scrollbars to match the size of the inner frame
        size = (self.canvas.winfo_reqwidth(), self.interior.winfo_reqheight())
        self.canvas.config(scrollregion="0 0 %s %s" % size)
class ThemedListbox(tk.Listbox):
    """tk.Listbox which re-reads its colors from the current ttk theme
    whenever the theme changes."""

    def __init__(self, master=None, cnf={}, **kw):
        super().__init__(master=master, cnf=cnf, **kw)

        self._ui_theme_change_binding = self.bind(
            "<<ThemeChanged>>", self._reload_theme_options, True
        )
        self._reload_theme_options()

    def _reload_theme_options(self, event=None):
        style = ttk.Style()

        states = []
        if self["state"] == "disabled":
            states.append("disabled")

        # Following crashes when a combobox is focused
        # if self.focus_get() == self:
        #     states.append("focus")

        # copy only options the style actually defines
        opts = {}
        for key in [
            "background",
            "foreground",
            "highlightthickness",
            "highlightcolor",
            "highlightbackground",
        ]:
            value = style.lookup(self.get_style_name(), key, states)
            if value:
                opts[key] = value

        self.configure(opts)

    def get_style_name(self):
        # subclasses may override to use another ttk style name
        return "Listbox"

    def destroy(self):
        self.unbind("<<ThemeChanged>>", self._ui_theme_change_binding)
        super().destroy()
class ToolTip:
    """Taken from http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml"""

    def __init__(self, widget, options):
        self.widget = widget
        # the Toplevel showing the tip, or None when hidden
        self.tipwindow = None
        self.id = None
        self.x = self.y = 0
        # keyword options passed to the tk.Label showing the text
        self.options = options

    def showtip(self, text):
        "Display text in tooltip window"
        self.text = text
        if self.tipwindow or not self.text:
            return
        # position the tip slightly below and to the right of the widget
        x, y, _, cy = self.widget.bbox("insert")
        x = x + self.widget.winfo_rootx() + 27
        y = y + cy + self.widget.winfo_rooty() + self.widget.winfo_height() + 2
        self.tipwindow = tw = tk.Toplevel(self.widget)
        if running_on_mac_os():
            try:
                # Must be the first thing to do after creating window
                # https://wiki.tcl-lang.org/page/MacWindowStyle
                tw.tk.call(
                    "::tk::unsupported::MacWindowStyle", "style", tw._w, "help", "noActivates"
                )
                if get_tk_version_info() >= (8, 6, 10) and running_on_mac_os():
                    tw.wm_overrideredirect(1)
            except tk.TclError:
                pass
        else:
            tw.wm_overrideredirect(1)
        tw.wm_geometry("+%d+%d" % (x, y))
        if running_on_mac_os():
            # TODO: maybe it's because of Tk 8.5, not because of Mac
            tw.wm_transient(self.widget)
        label = tk.Label(tw, text=self.text, **self.options)
        label.pack()
        # get_workbench().bind("WindowFocusOut", self.hidetip, True)

    def hidetip(self, event=None):
        tw = self.tipwindow
        self.tipwindow = None
        if tw:
            tw.destroy()
        # get_workbench().unbind("WindowFocusOut", self.hidetip)
def create_tooltip(widget, text, **kw):
    """Attach a hover tooltip showing *text* to *widget*.

    Label options come from the "Tooltip" style configuration, then
    built-in defaults, then *kw* (highest priority).
    """
    opts = get_style_configuration("Tooltip").copy()
    defaults = {
        "background": "#ffffe0",
        "foreground": "#000000",
        "relief": "solid",
        "borderwidth": 1,
        "padx": 1,
        "pady": 0,
    }
    for key, value in defaults.items():
        opts.setdefault(key, value)
    opts.update(kw)
    tip = ToolTip(widget, opts)
    widget.bind("<Enter>", lambda event: tip.showtip(text))
    widget.bind("<Leave>", lambda event: tip.hidetip())
class NoteBox(CommonDialog):
    """Borderless tooltip-like pop-up for showing (optionally clickable) notes
    near a target widget or text range."""
    def __init__(self, master=None, max_default_width=300, **kw):
        super().__init__(master=master, highlightthickness=0, **kw)
        self._max_default_width = max_default_width
        # no window decorations, like a tooltip
        self.wm_overrideredirect(True)
        if running_on_mac_os():
            # TODO: maybe it's because of Tk 8.5, not because of Mac
            self.wm_transient(master)
        try:
            # For Mac OS
            self.tk.call(
                "::tk::unsupported::MacWindowStyle", "style", self._w, "help", "noActivates"
            )
        except tk.TclError:
            pass
        self._current_chars = ""
        self._click_bindings = {}
        self.padx = 5
        self.pady = 5
        self.text = TweakableText(
            self,
            background="#ffffe0",
            borderwidth=1,
            relief="solid",
            undo=False,
            read_only=True,
            font="TkDefaultFont",
            highlightthickness=0,
            padx=self.padx,
            pady=self.pady,
            wrap="word",
        )
        self.text.grid(row=0, column=0, sticky="nsew")
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        self.text.bind("<Escape>", self.close, True)
        # tk._default_root.bind_all("<1>", self._close_maybe, True)
        # tk._default_root.bind_all("<Key>", self.close, True)
        self.withdraw()
    def clear(self):
        """Remove all content and associated click bindings."""
        for tag in self._click_bindings:
            self.text.tag_unbind(tag, "<1>", self._click_bindings[tag])
            self.text.tag_remove(tag, "1.0", "end")
        self.text.direct_delete("1.0", "end")
        self._current_chars = ""
        self._click_bindings.clear()
    def set_content(self, *items):
        """Replace content. Each item is a plain string or a
        (chars, *tags[, click_handler]) list/tuple."""
        self.clear()
        for item in items:
            if isinstance(item, str):
                self.text.direct_insert("1.0", item)
                self._current_chars = item
            else:
                assert isinstance(item, (list, tuple))
                chars, *props = item
                if len(props) > 0 and callable(props[-1]):
                    # trailing callable is the click handler, the rest are tags
                    tags = tuple(props[:-1])
                    click_handler = props[-1]
                else:
                    tags = tuple(props)
                    click_handler = None
                self.append_text(chars, tags, click_handler)
        self.text.see("1.0")
    def append_text(self, chars, tags=(), click_handler=None):
        """Insert *chars* at the end, optionally binding *click_handler* to them."""
        tags = tuple(tags)
        if click_handler is not None:
            # unique tag per handler so clear() can unbind it later
            click_tag = "click_%d" % len(self._click_bindings)
            tags = tags + (click_tag,)
            binding = self.text.tag_bind(click_tag, "<1>", click_handler, True)
            self._click_bindings[click_tag] = binding
        self.text.direct_insert("end", chars, tags)
        self._current_chars += chars
    def place(self, target, focus=None):
        """Size the note to fit its text and show it below *target* (or below
        *focus*, which may be a TextRange in a Text widget or an (x, y, w, h) box)."""
        # Compute the area that will be described by this Note
        focus_x = target.winfo_rootx()
        focus_y = target.winfo_rooty()
        focus_height = target.winfo_height()
        if isinstance(focus, TextRange):
            assert isinstance(target, tk.Text)
            topleft = target.bbox("%d.%d" % (focus.lineno, focus.col_offset))
            if focus.end_col_offset == 0:
                botright = target.bbox(
                    "%d.%d lineend" % (focus.end_lineno - 1, focus.end_lineno - 1)
                )
            else:
                botright = target.bbox("%d.%d" % (focus.end_lineno, focus.end_col_offset))
            if topleft and botright:
                focus_x += topleft[0]
                focus_y += topleft[1]
                focus_height = botright[1] - topleft[1] + botright[3]
        elif isinstance(focus, (list, tuple)):
            focus_x += focus[0]
            focus_y += focus[1]
            focus_height = focus[3]
        elif focus is None:
            pass
        else:
            raise TypeError("Unsupported focus")
        # Compute dimensions of the note
        font = self.text["font"]
        if isinstance(font, str):
            font = tk.font.nametofont(font)
        lines = self._current_chars.splitlines()
        max_line_width = 0
        for line in lines:
            max_line_width = max(max_line_width, font.measure(line))
        width = min(max_line_width, self._max_default_width) + self.padx * 2 + 2
        # preliminary geometry so that the wrapped line count can be measured
        self.wm_geometry("%dx%d+%d+%d" % (width, 100, focus_x, focus_y + focus_height))
        self.update_idletasks()
        line_count = int(float(self.text.index("end")))
        line_height = font.metrics()["linespace"]
        self.wm_geometry(
            "%dx%d+%d+%d" % (width, line_count * line_height, focus_x, focus_y + focus_height)
        )
        # TODO: detect the situation when note doesn't fit under
        # the focus box and should be placed above
        self.deiconify()
    def show_note(self, *content_items: Union[str, List], target=None, focus=None) -> None:
        """Convenience wrapper: set_content(...) followed by place(...)."""
        self.set_content(*content_items)
        self.place(target, focus)
    def _close_maybe(self, event):
        # close only for clicks outside the note itself
        if event.widget not in [self, self.text]:
            self.close(event)
    def close(self, event=None):
        self.withdraw()
def get_widget_offset_from_toplevel(widget):
    """Return (x, y) offset of *widget* relative to its toplevel window.

    Walks up the master chain, accumulating each ancestor's position.
    """
    offset_x = offset_y = 0
    toplevel = widget.winfo_toplevel()
    current = widget
    while current != toplevel:
        offset_x += current.winfo_x()
        offset_y += current.winfo_y()
        current = current.master
    return offset_x, offset_y
class EnhancedVar(tk.Variable):
    """tk.Variable which records whether it has been written to ("modified")
    and can notify a listener callable on each write."""
    def __init__(self, master=None, value=None, name=None, modification_listener=None):
        # guard against accidentally passing something else as positional master
        if master is not None and not isinstance(master, (tk.Widget, tk.Wm)):
            raise TypeError("First positional argument 'master' must be None, Widget or Wm")
        super().__init__(master=master, value=value, name=name)
        self.modified = False
        self.modification_listener = modification_listener
        if sys.version_info < (3, 6):
            # trace() is the deprecated spelling; trace_add is not available here
            self.trace("w", self._on_write)
        else:
            self.trace_add("write", self._on_write)
    def _on_write(self, *args):
        """Trace callback: mark as modified and notify the listener, if any."""
        self.modified = True
        if self.modification_listener:
            try:
                self.modification_listener()
            except Exception:
                # Otherwise whole process will be brought down
                # because for some reason Tk tries to call non-existing method
                # on variable
                get_workbench().report_exception()
class EnhancedStringVar(EnhancedVar, tk.StringVar):
    """StringVar with modification tracking (see EnhancedVar)."""
    pass
class EnhancedIntVar(EnhancedVar, tk.IntVar):
    """IntVar with modification tracking (see EnhancedVar)."""
    pass
class EnhancedBooleanVar(EnhancedVar, tk.BooleanVar):
    """BooleanVar with modification tracking (see EnhancedVar)."""
    pass
class EnhancedDoubleVar(EnhancedVar, tk.DoubleVar):
    """DoubleVar with modification tracking (see EnhancedVar)."""
    pass
def create_string_var(value, modification_listener=None) -> EnhancedStringVar:
    """Creates a tk.StringVar with "modified" attribute
    showing whether the variable has been modified after creation"""
    return EnhancedStringVar(value=value, modification_listener=modification_listener)
def create_int_var(value, modification_listener=None) -> EnhancedIntVar:
    """See create_string_var"""
    return EnhancedIntVar(value=value, modification_listener=modification_listener)
def create_double_var(value, modification_listener=None) -> EnhancedDoubleVar:
    """See create_string_var"""
    return EnhancedDoubleVar(value=value, modification_listener=modification_listener)
def create_boolean_var(value, modification_listener=None) -> EnhancedBooleanVar:
    """See create_string_var"""
    return EnhancedBooleanVar(value=value, modification_listener=modification_listener)
def shift_is_pressed(event_state):
    """True if the Shift modifier bit (0x0001) is set in a Tk event state.
    http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
    http://stackoverflow.com/q/32426250/261181
    """
    # bool() for an explicit boolean instead of a raw masked int
    return bool(event_state & 0x0001)
def caps_lock_is_on(event_state):
    """True if the Caps Lock bit (0x0002) is set in a Tk event state."""
    return bool(event_state & 0x0002)
def control_is_pressed(event_state):
    """True if the Control modifier bit (0x0004) is set in a Tk event state."""
    return bool(event_state & 0x0004)
def modifier_is_pressed(event_state: int) -> bool:
    """True unless the state is empty or contains only bit 0b10000
    (presumably a lock/Mod bit which should not count as a modifier -- TODO confirm)."""
    return event_state != 0 and event_state != 0b10000
def sequence_to_event_state_and_keycode(sequence: str) -> Optional[Tuple[int, int]]:
    """Parse a Tk binding sequence like "<Control-c>" into (event_state, keycode).

    Only Control and Control-Shift combinations with a single latin letter
    are supported; anything else yields None. Needed for shortcuts which
    require special treatment on non-latin keyboards.
    """
    if sequence[0] != "<":
        return None
    parts = sequence.strip("<").strip(">").split("-")
    # support only latin letters for now
    if parts[-1].lower() not in set("abcdefghijklmnopqrstuvwxyz"):
        return None
    letter = parts.pop(-1)
    # "Key"/"key" is mere noise in a binding sequence
    for noise in ("Key", "key"):
        if noise in parts:
            parts.remove(noise)
    modifiers = {part.lower() for part in parts}
    if letter.isupper():
        modifiers.add("shift")
    # http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
    # https://stackoverflow.com/questions/32426250/python-documentation-and-or-lack-thereof-e-g-keyboard-event-state
    if modifiers == {"control"}:
        event_state = 0x0004
    elif modifiers == {"control", "shift"}:
        event_state = 0x0004 | 0x0001
    else:
        # don't support others for now
        return None
    # for latin letters keycode is same as its ascii code
    return (event_state, ord(letter.upper()))
def select_sequence(win_version, mac_version, linux_version=None):
    """Pick the key sequence appropriate for the current platform.

    Falls back to *win_version* when no platform-specific value applies.
    """
    if running_on_windows():
        return win_version
    if running_on_mac_os():
        return mac_version
    if running_on_linux() and linux_version:
        return linux_version
    return win_version
def try_remove_linenumbers(text, master):
    """Offer to strip line numbers from pasted *text*.

    Returns cleaned text if the user agrees, otherwise the original text.
    Any error is printed and the original text is returned unchanged.
    """
    try:
        if not has_line_numbers(text):
            return text
        answer = messagebox.askyesno(
            title="Remove linenumbers",
            message="Do you want to remove linenumbers from pasted text?",
            default=messagebox.YES,
            master=master,
        )
        return remove_line_numbers(text) if answer else text
    except Exception:
        traceback.print_exc()
        return text
def has_line_numbers(text):
    """True if *text* has more than two lines and each starts with a line number."""
    lines = text.splitlines()
    if len(lines) <= 2:
        return False
    return all(len(split_after_line_number(line)) == 2 for line in lines)
def split_after_line_number(s):
    """Split *s* into [number_prefix, rest]; returns [s] when there is no number."""
    pieces = re.split(r"(^\s*\d+\.?)", s)
    if len(pieces) == 1:
        return pieces
    # a leading match always yields ["", prefix, rest]
    assert len(pieces) == 3 and pieces[0] == ""
    return pieces[1:]
def remove_line_numbers(s):
    """Strip the leading line number from every line and dedent the result.

    Returns *s* unchanged if some line carries no line number.
    """
    cleaned = []
    for line in s.splitlines():
        pieces = split_after_line_number(line)
        if len(pieces) != 2:
            return s
        cleaned.append(pieces[1])
    return textwrap.dedent("\n".join(cleaned) + "\n")
def center_window(win, master=None):
    """Deprecated alias for assign_geometry."""
    # for backward compat
    return assign_geometry(win, master)
def assign_geometry(win, master=None, min_left=0, min_top=0):
    """Size *win* (using its saved size option, if present) and center it over *master*."""
    if master is None:
        master = tk._default_root
    size = get_workbench().get_option(get_size_option_name(win))
    if size:
        width, height = size
        saved_size = True
    else:
        fallback_width = 600
        fallback_height = 400
        # need to wait until size is computed
        # (unfortunately this causes dialog to jump)
        if getattr(master, "initializing", False):
            # can't get reliable positions when main window is not in mainloop yet
            width = fallback_width
            height = fallback_height
        else:
            if not running_on_linux():
                # better to avoid in Linux because it causes ugly jump
                win.update_idletasks()
            # looks like it doesn't take window border into account
            width = win.winfo_width()
            height = win.winfo_height()
            if width < 10:
                # ie. size measurement is not correct
                width = fallback_width
                height = fallback_height
        saved_size = False
    # center over master, but respect the requested minimum margins
    left = master.winfo_rootx() + master.winfo_width() // 2 - width // 2
    top = master.winfo_rooty() + master.winfo_height() // 2 - height // 2
    left = max(left, min_left)
    top = max(top, min_top)
    if saved_size:
        win.geometry("%dx%d+%d+%d" % (width, height, left, top))
    else:
        # only position; let the window keep its natural size
        win.geometry("+%d+%d" % (left, top))
class WaitingDialog(CommonDialog):
    """Modal dialog which polls *async_result* twice a second and closes
    itself once the result is ready."""
    def __init__(self, master, async_result, description, title="Please wait!", timeout=None):
        self._async_result = async_result
        super().__init__(master)
        if misc_utils.running_on_mac_os():
            self.configure(background="systemSheetBackground")
        self.title(title)
        self.resizable(height=tk.FALSE, width=tk.FALSE)
        # self.protocol("WM_DELETE_WINDOW", self._close)
        self.desc_label = ttk.Label(self, text=description, wraplength=300)
        self.desc_label.grid(padx=20, pady=20)
        self.update_idletasks()
        self.timeout = timeout
        self.start_time = time.time()
        self.after(500, self._poll)
    def _poll(self):
        if self._async_result.ready():
            self._close()
        elif self.timeout and time.time() - self.start_time > self.timeout:
            # NOTE(review): raised inside a Tk after-callback, so it reaches
            # Tk's callback error handling rather than the caller -- confirm intent
            raise TimeoutError()
        else:
            self.after(500, self._poll)
            # simple "animation": append one dot per poll
            self.desc_label["text"] = self.desc_label["text"] + "."
    def _close(self):
        self.destroy()
def run_with_waiting_dialog(master, action, args=(), description="Working"):
    """Run action(*args) on a worker thread while showing a modal WaitingDialog.

    Returns the action's result; any exception raised by the action is
    re-raised by async_result.get().
    http://stackoverflow.com/a/14299004/261181
    """
    from multiprocessing.pool import ThreadPool
    pool = ThreadPool(processes=1)
    try:
        async_result = pool.apply_async(action, args)
        dlg = WaitingDialog(master, async_result, description=description)
        show_dialog(dlg, master)
        return async_result.get()
    finally:
        # the pool was previously leaked; close() lets the worker thread exit
        pool.close()
class FileCopyDialog(CommonDialog):
    """Modal dialog which copies *source* to *destination* in a background
    thread while showing a progress bar.

    Cancelling (button, Escape or window close) now also stops the worker
    thread; previously the copy kept running in the background.
    """
    def __init__(self, master, source, destination, description=None, fsync=True):
        self._source = source
        self._destination = destination
        self._old_bytes_copied = 0
        self._bytes_copied = 0
        self._fsync = fsync  # whether to fsync destination after each chunk (see _start)
        self._done = False
        self._cancelled = False
        self._closed = False
        super().__init__(master)
        main_frame = ttk.Frame(self)  # To get styled background
        main_frame.grid(row=0, column=0, sticky="nsew")
        self.rowconfigure(0, weight=1)
        self.columnconfigure(0, weight=1)
        self.title(tr("Copying"))
        if description is None:
            description = tr("Copying\n %s\nto\n %s") % (source, destination)
        label = ttk.Label(main_frame, text=description)
        label.grid(row=0, column=0, columnspan=2, sticky="nw", padx=15, pady=15)
        self._bar = ttk.Progressbar(main_frame, maximum=os.path.getsize(source), length=200)
        self._bar.grid(row=1, column=0, columnspan=2, sticky="nsew", padx=15, pady=0)
        self._cancel_button = ttk.Button(main_frame, text=tr("Cancel"), command=self._cancel)
        self._cancel_button.grid(row=2, column=1, sticky="ne", padx=15, pady=15)
        self._bar.focus_set()
        main_frame.columnconfigure(0, weight=1)
        self._update_progress()
        self.bind("<Escape>", self._cancel, True)  # escape-close only if process has completed
        self.protocol("WM_DELETE_WINDOW", self._cancel)
        self._start()
    def _start(self):
        """Start the copy in a daemon thread; the GUI polls progress separately."""
        def work():
            with open(self._source, "rb") as fsrc:
                with open(self._destination, "wb") as fdst:
                    # stop early when the user cancels (was ignored before)
                    while not self._cancelled:
                        buf = fsrc.read(16 * 1024)
                        if not buf:
                            break
                        fdst.write(buf)
                        fdst.flush()
                        if self._fsync:
                            os.fsync(fdst)
                        self._bytes_copied += len(buf)
            self._done = True
        threading.Thread(target=work, daemon=True).start()
    def _update_progress(self):
        """Advance the progress bar every 100 ms until the worker finishes."""
        if self._done:
            if not self._closed:
                self._close()
            return
        self._bar.step(self._bytes_copied - self._old_bytes_copied)
        self._old_bytes_copied = self._bytes_copied
        self.after(100, self._update_progress)
    def _close(self):
        self.destroy()
        self._closed = True
    def _cancel(self, event=None):
        self._cancelled = True
        self._close()
class ChoiceDialog(CommonDialogEx):
    """Modal radio-button chooser.

    The selection ends up in self.result (None when cancelled or when
    nothing was selected).
    """
    def __init__(
        self,
        master=None,
        title="Choose one",
        question: str = "Choose one:",
        choices=(),  # was a mutable default ([])
        initial_choice_index=None,
    ) -> None:
        super().__init__(master=master)
        self.title(title)
        self.resizable(False, False)
        self.main_frame.columnconfigure(0, weight=1)
        row = 0
        question_label = ttk.Label(self.main_frame, text=question)
        question_label.grid(row=row, column=0, columnspan=2, sticky="w", padx=20, pady=20)
        row += 1
        # NB! the initial value must be passed as keyword;
        # StringVar's first positional parameter is "master", not the value
        self.var = tk.StringVar(value="")
        if initial_choice_index is not None:
            self.var.set(choices[initial_choice_index])
        for choice in choices:
            rb = ttk.Radiobutton(self.main_frame, text=choice, variable=self.var, value=choice)
            rb.grid(row=row, column=0, columnspan=2, sticky="w", padx=20)
            row += 1
        ok_button = ttk.Button(self.main_frame, text=tr("OK"), command=self._ok, default="active")
        ok_button.grid(row=row, column=0, sticky="e", pady=20)
        cancel_button = ttk.Button(self.main_frame, text=tr("Cancel"), command=self._cancel)
        cancel_button.grid(row=row, column=1, sticky="e", padx=20, pady=20)
        self.bind("<Escape>", self._cancel, True)
        self.bind("<Return>", self._ok, True)
        self.protocol("WM_DELETE_WINDOW", self._cancel)
    def _ok(self, event=None):
        # event parameter added: bound key events pass an Event argument
        self.result = self.var.get() or None
        self.destroy()
    def _cancel(self, event=None):
        self.result = None
        self.destroy()
class LongTextDialog(CommonDialog):
    """Dialog showing a long read-only text with a "Copy to clipboard" button."""
    def __init__(self, title, text_content, parent=None):
        if parent is None:
            parent = tk._default_root
        super().__init__(master=parent)
        self.title(title)
        main_frame = ttk.Frame(self)
        main_frame.grid(row=0, column=0, sticky="nsew")
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        default_font = tk.font.nametofont("TkDefaultFont")
        self._text = tktextext.TextFrame(
            main_frame,
            read_only=True,
            wrap="none",
            font=default_font,
            width=80,
            height=10,
            relief="sunken",
            borderwidth=1,
        )
        self._text.grid(row=1, column=0, columnspan=2, sticky="nsew", padx=20, pady=20)
        self._text.text.direct_insert("1.0", text_content)
        self._text.text.see("1.0")
        copy_button = ttk.Button(
            main_frame, command=self._copy, text=tr("Copy to clipboard"), width=20
        )
        copy_button.grid(row=2, column=0, sticky="w", padx=20, pady=(0, 20))
        close_button = ttk.Button(
            main_frame, command=self._close, text=tr("Close"), default="active"
        )
        close_button.grid(row=2, column=1, sticky="w", padx=20, pady=(0, 20))
        close_button.focus_set()
        main_frame.columnconfigure(0, weight=1)
        main_frame.rowconfigure(1, weight=1)
        self.protocol("WM_DELETE_WINDOW", self._close)
        self.bind("<Escape>", self._close, True)
    def _copy(self, event=None):
        # copy the whole content to the clipboard
        self.clipboard_clear()
        self.clipboard_append(self._text.text.get("1.0", "end"))
    def _close(self, event=None):
        self.destroy()
def ask_one_from_choices(
    master=None,
    title="Choose one",
    question: str = "Choose one:",
    choices=(),  # was a mutable default ([])
    initial_choice_index=None,
):
    """Show a modal ChoiceDialog; return the chosen string, or None if cancelled."""
    dlg = ChoiceDialog(master, title, question, choices, initial_choice_index)
    show_dialog(dlg, master)
    return dlg.result
def get_busy_cursor():
    """Name of the platform's busy-pointer cursor."""
    if running_on_windows():
        return "wait"
    if running_on_mac_os():
        return "spinning"
    return "watch"
def get_tk_version_str():
    """Full Tk patchlevel string, e.g. "8.6.12"."""
    return tk._default_root.tk.call("info", "patchlevel")
def get_tk_version_info():
    """Tk version as a tuple of ints; non-numeric components become 0."""
    def _as_int(part):
        try:
            return int(part)
        except Exception:
            return 0
    return tuple(_as_int(part) for part in get_tk_version_str().split("."))
def get_style_configuration(style_name, default=None):
    """Return the ttk configuration dict for *style_name*, or *default*
    (a fresh empty dict if not given) when the style defines nothing.

    The original signature used a shared mutable default (default={});
    mutating the returned fallback would have leaked between calls.
    """
    if default is None:
        default = {}
    style = ttk.Style()
    # NB! style.configure seems to reuse the returned dict
    # Don't change it without copying first
    result = style.configure(style_name)
    if result is None:
        return default
    return result
def lookup_style_option(style_name, option_name, default=None):
    """Look up a ttk style option, converting "True"/"False" strings to bools.

    Returns *default* when the style defines no value.
    """
    setting = ttk.Style().lookup(style_name, option_name)
    if setting in (None, ""):
        return default
    if setting == "True":
        return True
    if setting == "False":
        return False
    return setting
def scale(value):
    """Delegate scaling of *value* to the workbench."""
    return get_workbench().scale(value)
def open_path_in_system_file_manager(path):
    """Open *path* with the platform's file manager (non-blocking)."""
    if running_on_mac_os():
        # http://stackoverflow.com/a/3520693/261181
        # -R doesn't allow showing hidden folders
        command = ["open", path]
    elif running_on_linux():
        command = ["xdg-open", path]
    else:
        assert running_on_windows()
        command = ["explorer", path]
    subprocess.Popen(command)
def _get_dialog_provider():
    """Choose between Tk's filedialog and Zenity-based dialogs (Linux only)."""
    if platform.system() != "Linux" or get_workbench().get_option("file.avoid_zenity"):
        return filedialog
    import shutil
    if shutil.which("zenity"):
        return _ZenityDialogProvider
    # fallback
    return filedialog
def asksaveasfilename(**options):
    """Save-file dialog via the active provider (Tk or Zenity)."""
    # https://tcl.tk/man/tcl8.6/TkCmd/getSaveFile.htm
    _check_dialog_parent(options)
    return _get_dialog_provider().asksaveasfilename(**options)
def askopenfilename(**options):
    """Single-file open dialog via the active provider."""
    # https://tcl.tk/man/tcl8.6/TkCmd/getOpenFile.htm
    _check_dialog_parent(options)
    return _get_dialog_provider().askopenfilename(**options)
def askopenfilenames(**options):
    """Multi-file open dialog via the active provider."""
    # https://tcl.tk/man/tcl8.6/TkCmd/getOpenFile.htm
    _check_dialog_parent(options)
    return _get_dialog_provider().askopenfilenames(**options)
def askdirectory(**options):
    """Directory chooser via the active provider."""
    # https://tcl.tk/man/tcl8.6/TkCmd/chooseDirectory.htm
    _check_dialog_parent(options)
    return _get_dialog_provider().askdirectory(**options)
def _check_dialog_parent(options):
    """Normalize the parent/master entries of dialog *options* in place,
    logging a warning when they are missing or point to different toplevels."""
    if options.get("parent") and options.get("master"):
        parent = options["parent"].winfo_toplevel()
        master = options["master"].winfo_toplevel()
        if parent is not master:
            logger.warning(
                "Dialog with different parent/master toplevels:\n%s",
                "".join(traceback.format_stack()),
            )
    elif options.get("parent"):
        parent = options["parent"].winfo_toplevel()
        master = options["parent"].winfo_toplevel()
    elif options.get("master"):
        parent = options["master"].winfo_toplevel()
        master = options["master"].winfo_toplevel()
    else:
        logger.warning("Dialog without parent:\n%s", "".join(traceback.format_stack()))
        parent = tk._default_root
        master = tk._default_root
    options["parent"] = parent
    options["master"] = master
    if running_on_mac_os():
        # used to require master/parent (https://bugs.python.org/issue34927)
        # but this is deprecated in Catalina (https://github.com/thonny/thonny/issues/840)
        # TODO: Consider removing this when upgrading from Tk 8.6.8
        del options["master"]
        del options["parent"]
class _ZenityDialogProvider:
    """File dialogs implemented via the external `zenity` program.

    https://www.writebash.com/bash-gui/zenity-create-file-selection-dialog-224.html
    http://linux.byexamples.com/archives/259/a-complete-zenity-dialog-examples-1/
    http://linux.byexamples.com/archives/265/a-complete-zenity-dialog-examples-2/
    another possibility is to use PyGobject: https://github.com/poulp/zenipy
    """
    @classmethod
    def askopenfilename(cls, **options):
        args = cls._convert_common_options("Open file", **options)
        return cls._call(args)
    @classmethod
    def askopenfilenames(cls, **options):
        args = cls._convert_common_options("Open files", **options)
        result = cls._call(args + ["--multiple"])
        if result is None:
            # dialog was cancelled or zenity failed; previously this crashed
            # with AttributeError because None has no split()
            return []
        return result.split("|")
    @classmethod
    def asksaveasfilename(cls, **options):
        args = cls._convert_common_options("Save as", **options)
        args.append("--save")
        if options.get("confirmoverwrite", True):
            args.append("--confirm-overwrite")
        filename = cls._call(args)
        if not filename:
            return None
        if "defaultextension" in options and "." not in os.path.basename(filename):
            filename += options["defaultextension"]
        return filename
    @classmethod
    def askdirectory(cls, **options):
        args = cls._convert_common_options("Select directory", **options)
        args.append("--directory")
        return cls._call(args)
    @classmethod
    def _convert_common_options(cls, default_title, **options):
        """Translate Tk-style dialog options into zenity command-line arguments."""
        args = ["--file-selection", "--title=%s" % options.get("title", default_title)]
        filename = _options_to_zenity_filename(options)
        if filename:
            args.append("--filename=%s" % filename)
        parent = options.get("parent", options.get("master", None))
        if parent is not None:
            args.append("--modal")
            args.append("--attach=%s" % hex(parent.winfo_id()))
        for desc, pattern in options.get("filetypes", ()):
            # zenity requires star before extension
            pattern = pattern.replace(" .", " *.")
            if pattern.startswith("."):
                pattern = "*" + pattern
            if pattern == "*.*":
                # ".*" was provided to make the pattern safe for Tk dialog
                # not required with Zenity
                pattern = "*"
            args.append("--file-filter=%s | %s" % (desc, pattern))
        return args
    @classmethod
    def _call(cls, args):
        """Run zenity; return stripped stdout, or None on non-zero exit (e.g. cancel)."""
        args = ["zenity", "--name=Thonny", "--class=Thonny"] + args
        result = subprocess.run(
            args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
        )
        if result.returncode == 0:
            return result.stdout.strip()
        else:
            # TODO: log problems
            print(result.stderr, file=sys.stderr)
            # could check stderr, but it may contain irrelevant warnings
            return None
def _options_to_zenity_filename(options):
    """Build zenity's --filename value from Tk-style initialdir/initialfile options."""
    initialdir = options.get("initialdir")
    if not initialdir:
        return None
    initialfile = options.get("initialfile")
    if initialfile:
        return os.path.join(initialdir, initialfile)
    # trailing separator makes zenity treat it as a directory
    return initialdir + os.path.sep
def register_latin_shortcut(
    registry, sequence: str, handler: Callable, tester: Optional[Callable]
) -> None:
    """Register (handler, tester) under the (event_state, keycode) key derived
    from *sequence*; does nothing for unsupported sequences."""
    key = sequence_to_event_state_and_keycode(sequence)
    if key is None:
        return
    registry.setdefault(key, []).append((handler, tester))
def handle_mistreated_latin_shortcuts(registry, event):
    """Invoke registered handlers for Ctrl+LatinLetter shortcuts which Tk
    mis-delivers on non-latin keyboards."""
    # tries to handle Ctrl+LatinLetter shortcuts
    # given from non-Latin keyboards
    # See: https://bitbucket.org/plas/thonny/issues/422/edit-keyboard-shortcuts-ctrl-c-ctrl-v-etc
    # only consider events with Control held
    if not event.state & 0x04:
        return
    if running_on_mac_os():
        return
    # consider only part of the state,
    # because at least on Windows, Ctrl-shortcuts' state
    # has something extra
    simplified_state = 0x04
    if shift_is_pressed(event.state):
        simplified_state |= 0x01
    # print(simplified_state, event.keycode)
    if (simplified_state, event.keycode) in registry:
        if event.keycode != ord(event.char) and event.keysym in (None, "??"):
            # keycode and char doesn't match,
            # this means non-latin keyboard
            for handler, tester in registry[(simplified_state, event.keycode)]:
                if tester is None or tester():
                    handler()
def show_dialog(dlg, master=None, geometry=True, min_left=0, min_top=0):
    """Show *dlg* modally over *master* and wait until it is closed.

    *geometry* may be True (auto-assign via assign_geometry), a Tk geometry
    string, or falsy (leave as-is).
    """
    if getattr(dlg, "closed", False):
        return
    if master is None:
        master = getattr(dlg, "parent", None) or getattr(dlg, "master", None) or tk._default_root
    master = master.winfo_toplevel()
    get_workbench().event_generate("WindowFocusOut")
    # following order seems to give most smooth appearance
    focused_widget = master.focus_get()
    dlg.transient(master.winfo_toplevel())
    if geometry:
        # dlg.withdraw() # unfortunately inhibits size calculations in assign_geometry
        if isinstance(geometry, str):
            dlg.geometry(geometry)
        else:
            assign_geometry(dlg, master, min_left, min_top)
        # dlg.wm_deiconify()
    dlg.lift()
    dlg.focus_set()
    try:
        dlg.grab_set()
    except TclError as e:
        print("Can't grab:", e, file=sys.stderr)
    # block here until the dialog window is destroyed
    master.winfo_toplevel().wait_window(dlg)
    dlg.grab_release()
    master.winfo_toplevel().lift()
    master.winfo_toplevel().focus_force()
    master.winfo_toplevel().grab_set()
    if running_on_mac_os():
        master.winfo_toplevel().grab_release()
    if focused_widget is not None:
        try:
            # restore focus to where it was before the dialog
            focused_widget.focus_force()
        except TclError:
            pass
def popen_with_ui_thread_callback(*Popen_args, on_completion, poll_delay=0.1, **Popen_kwargs):
    """Start a subprocess, collect its stdout/stderr in reader threads and call
    on_completion(proc, out_lines, err_lines) from the Tk event loop after exit."""
    if "encoding" not in Popen_kwargs:
        if "env" not in Popen_kwargs:
            Popen_kwargs["env"] = os.environ.copy()
        Popen_kwargs["env"]["PYTHONIOENCODING"] = "utf-8"
        if sys.version_info >= (3, 6):
            Popen_kwargs["encoding"] = "utf-8"
    proc = subprocess.Popen(*Popen_args, **Popen_kwargs)
    # Need to read in thread in order to avoid blocking because
    # of full pipe buffer (see https://bugs.python.org/issue1256)
    out_lines = []
    err_lines = []
    def read_stream(stream, target_list):
        while True:
            line = stream.readline()
            if line:
                target_list.append(line)
            else:
                # EOF
                break
    t_out = threading.Thread(target=read_stream, daemon=True, args=(proc.stdout, out_lines))
    t_err = threading.Thread(target=read_stream, daemon=True, args=(proc.stderr, err_lines))
    t_out.start()
    t_err.start()
    def poll():
        if proc.poll() is not None:
            # process exited; give readers a moment to drain remaining output
            t_out.join(3)
            t_err.join(3)
            on_completion(proc, out_lines, err_lines)
            return
        tk._default_root.after(int(poll_delay * 1000), poll)
    poll()
    return proc
class MenuEx(tk.Menu):
    """tk.Menu whose items may carry a "tester" callable which decides,
    at post time, whether the item is enabled."""
    def __init__(self, target):
        self._testers = {}  # item label -> tester callable (or None)
        super().__init__(
            target, tearoff=False, postcommand=self.on_post, **get_style_configuration("Menu")
        )
    def on_post(self, *args):
        self.update_item_availability()
    def update_item_availability(self):
        """Enable/disable each labeled item according to its tester."""
        last_index = self.index("end")
        if last_index is None:
            # index("end") is None for an empty menu; previously this crashed
            return
        for i in range(last_index + 1):
            item_data = self.entryconfigure(i)
            if "label" in item_data:
                tester = self._testers.get(item_data["label"])
                if tester and not tester():
                    self.entryconfigure(i, state=tk.DISABLED)
                else:
                    self.entryconfigure(i, state=tk.NORMAL)
    def add(self, itemType, cnf={}, **kw):
        """Like tk.Menu.add, but accepts an extra "tester" option.

        NB: kw is ignored when a non-empty cnf is given (original behavior).
        """
        # copy, so the caller's dict is not mutated when "tester" is removed
        options = dict(cnf) if cnf else dict(kw)
        tester = options.pop("tester", None)
        super().add(itemType, options)
        itemdata = self.entryconfigure(self.index("end"))
        labeldata = itemdata.get("label")
        if labeldata:
            self._testers[labeldata] = tester
class TextMenu(MenuEx):
    """Context menu for a text widget with Cut/Copy/Paste/Select All."""
    def __init__(self, target):
        self.text = target
        MenuEx.__init__(self, target)
        self.add_basic_items()
        self.add_extra_items()
    def add_basic_items(self):
        """Cut/Copy/Paste with availability testers (see MenuEx)."""
        self.add_command(label=tr("Cut"), command=self.on_cut, tester=self.can_cut)
        self.add_command(label=tr("Copy"), command=self.on_copy, tester=self.can_copy)
        self.add_command(label=tr("Paste"), command=self.on_paste, tester=self.can_paste)
    def add_extra_items(self):
        """Extension point; by default adds a separator and Select All."""
        self.add_separator()
        self.add_command(label=tr("Select All"), command=self.on_select_all)
    def on_cut(self):
        self.text.event_generate("<<Cut>>")
    def on_copy(self):
        self.text.event_generate("<<Copy>>")
    def on_paste(self):
        self.text.event_generate("<<Paste>>")
    def on_select_all(self):
        self.text.event_generate("<<SelectAll>>")
    def can_cut(self):
        return self.get_selected_text() and not self.selection_is_read_only()
    def can_copy(self):
        return self.get_selected_text()
    def can_paste(self):
        return not self.selection_is_read_only()
    def get_selected_text(self):
        try:
            return self.text.get("sel.first", "sel.last")
        except TclError:
            # no selection
            return ""
    def selection_is_read_only(self):
        if hasattr(self.text, "is_read_only"):
            return self.text.is_read_only()
        return False
def create_url_label(master, url, text=None):
    """Clickable label which opens *url* in the default browser."""
    import webbrowser
    def open_in_browser(_event):
        webbrowser.open(url)
    return create_action_label(master, text or url, open_in_browser)
def create_action_label(master, text, click_handler, **kw):
    """Underlined, hyperlink-styled ttk.Label; left-click invokes *click_handler*."""
    link_font = tkinter.font.nametofont("TkDefaultFont").copy()
    link_font.configure(underline=1)
    label = ttk.Label(
        master, text=text, style="Url.TLabel", cursor="hand2", font=link_font, **kw
    )
    label.bind("<Button-1>", click_handler)
    return label
def get_size_option_name(window):
    """Option name used for persisting the size of *window*'s class."""
    return "layout.{}_size".format(type(window).__name__)
def get_default_theme():
    """Name of the default UI theme for the current platform."""
    if running_on_windows():
        return "Windows"
    if running_on_rpi():
        return "Raspberry Pi"
    return "Enhanced Clam"
def get_default_basic_theme():
    """Name of the default basic ttk theme for the current platform."""
    return "vista" if running_on_windows() else "clam"
EM_WIDTH = None
def ems_to_pixels(x):
    """Convert *x* em units to pixels, based on the default font's "m" width."""
    global EM_WIDTH
    if EM_WIDTH is None:
        # measure once and cache at module level
        EM_WIDTH = tkinter.font.nametofont("TkDefaultFont").measure("m")
    return int(x * EM_WIDTH)
# lazily initialized padding cache, used by tr_btn below
_btn_padding = None
def set_text_if_different(widget, text) -> bool:
    """Assign widget["text"] only when it differs; return whether it changed."""
    if widget["text"] == text:
        return False
    widget["text"] = text
    return True
def tr_btn(s):
    """Translates button caption, adds padding to make sure text fits"""
    global _btn_padding
    if _btn_padding is None:
        # compute the padding once and cache it
        _btn_padding = get_button_padding()
    return "%s%s%s" % (_btn_padding, tr(s), _btn_padding)
def add_messagebox_parent_checker():
    """Monkey-patch tkinter.messagebox functions so each call gets its
    parent/master options validated via _check_dialog_parent."""
    def checked(original):
        def call_with_check(*args, **options):
            _check_dialog_parent(options)
            return original(*args, **options)
        return call_with_check
    from tkinter import messagebox
    names = (
        "showinfo",
        "showwarning",
        "showerror",
        "askquestion",
        "askokcancel",
        "askyesno",
        "askyesnocancel",
        "askretrycancel",
    )
    for name in names:
        setattr(messagebox, name, checked(getattr(messagebox, name)))
if __name__ == "__main__":
root = tk.Tk()
|
electronic_control_unit.py | import logging
import can
from can import Listener
import time
import threading
try:
# Python27
import Queue as queue
except ImportError:
# Python35
import queue
import j1939
logger = logging.getLogger(__name__)
class ElectronicControlUnit(object):
"""ElectronicControlUnit (ECU) holding one or more ControllerApplications (CAs)."""
    class ConnectionMode(object):
        """Connection management (TP.CM) control values used by this ECU."""
        RTS = 16
        CTS = 17
        EOM_ACK = 19
        BAM = 32
        ABORT = 255
    class ConnectionAbortReason(object):
        """Reasons for aborting a connection managed session."""
        BUSY = 1 # Already in one or more connection managed sessions and cannot support another
        RESOURCES = 2 # System resources were needed for another task so this connection managed session was terminated
        TIMEOUT = 3 # A timeout occurred
        # 4..250 Reserved by SAE
        CTS_WHILE_DT = 4 # according AUTOSAR: CTS messages received when data transfer is in progress
        # 251..255 Per J1939/71 definitions - but there are none?
    class Timeout(object):
        """Timeouts according SAE J1939/21 (all values in seconds)"""
        Tr = 0.200 # Response Time
        Th = 0.500 # Holding Time
        T1 = 0.750
        T2 = 1.250
        T3 = 1.250
        T4 = 1.050
        # timeout for multi packet broadcast messages 50..200ms
        Tb = 0.050
    class SendBufferState(object):
        """States of an entry in the ECU's send buffer (see _snd_buffer)."""
        WAITING_CTS = 0 # waiting for CTS
        SENDING_IN_CTS = 1 # sending packages (temporary state)
        SENDING_BM = 2 # sending broadcast packages
    def __init__(self, bus=None):
        """
        :param can.BusABC bus:
            A python-can bus instance to re-use.
        """
        #: A python-can :class:`can.BusABC` instance
        self._bus = bus
        # Locking object for send
        self._send_lock = threading.Lock()
        #: Includes at least MessageListener.
        self._listeners = [MessageListener(self)]
        self._notifier = None
        self._subscribers = []
        # List of ControllerApplication
        self._cas = []
        # Receive buffers
        self._rcv_buffer = {}
        # Send buffers
        self._snd_buffer = {}
        # List of timer events the job thread should care of
        self._timer_events = []
        # set to stop the async job thread
        self._job_thread_end = threading.Event()
        logger.info("Starting ECU async thread")
        # the job thread blocks on this queue; putting to it wakes the thread
        # early so it can recalculate its sleep time (see _async_job_thread)
        self._job_thread_wakeup_queue = queue.Queue()
        self._job_thread = threading.Thread(target=self._async_job_thread, name='j1939.ecu job_thread')
        # A thread can be flagged as a "daemon thread". The significance of
        # this flag is that the entire Python program exits when only daemon
        # threads are left.
        self._job_thread.daemon = True
        self._job_thread.start()
        # TODO: do we have to stop the tread somehow?
def _async_job_thread(self):
    """Asynchronous thread for handling various jobs

    This Thread handles various tasks:
    - Event trigger for associated CAs
    - Timeout monitoring of communication objects

    To construct a blocking wait with timeout the task waits on a
    queue-object. When other tasks are adding timer-events they can
    wakeup the timeout handler to recalculate the new sleep-time
    to awake at the new events.
    """
    # is_set() replaces the deprecated camelCase alias isSet()
    while not self._job_thread_end.is_set():
        now = time.time()
        next_wakeup = time.time() + 5.0  # wakeup in 5 seconds

        # check receive buffers for timeout
        # using "list(x)" to prevent "RuntimeError: dictionary changed size during iteration"
        for bufid in list(self._rcv_buffer):
            buf = self._rcv_buffer[bufid]
            if buf['deadline'] != 0:
                if buf['deadline'] > now:
                    if next_wakeup > buf['deadline']:
                        next_wakeup = buf['deadline']
                else:
                    # deadline reached
                    logger.info("Deadline reached for rcv_buffer src 0x%02X dst 0x%02X", buf['src_address'], buf['dest_address'])
                    if buf['dest_address'] != j1939.ParameterGroupNumber.Address.GLOBAL:
                        # TODO: should we handle retries?
                        self.send_tp_abort(buf['dest_address'], buf['src_address'], ElectronicControlUnit.ConnectionAbortReason.TIMEOUT, buf['pgn'])
                    # TODO: should we notify our CAs about the cancelled transfer?
                    del self._rcv_buffer[bufid]

        # check send buffers
        # using "list(x)" to prevent "RuntimeError: dictionary changed size during iteration"
        for bufid in list(self._snd_buffer):
            buf = self._snd_buffer[bufid]
            if buf['deadline'] != 0:
                if buf['deadline'] > now:
                    if next_wakeup > buf['deadline']:
                        next_wakeup = buf['deadline']
                else:
                    # deadline reached
                    if buf['state'] == ElectronicControlUnit.SendBufferState.WAITING_CTS:
                        logger.info("Deadline WAITING_CTS reached for snd_buffer src 0x%02X dst 0x%02X", buf['src_address'], buf['dest_address'])
                        self.send_tp_abort(buf['src_address'], buf['dest_address'], ElectronicControlUnit.ConnectionAbortReason.TIMEOUT, buf['pgn'])
                        # TODO: should we notify our CAs about the cancelled transfer?
                        del self._snd_buffer[bufid]
                    elif buf['state'] == ElectronicControlUnit.SendBufferState.SENDING_IN_CTS:
                        # do not care about deadlines while sending (from within other function)
                        # TODO: maybe we can implement an asynchronous send queue here?
                        pass
                    elif buf['state'] == ElectronicControlUnit.SendBufferState.SENDING_BM:
                        # send next broadcast message...
                        offset = buf['next_packet_to_send'] * 7
                        data = buf['data'][offset:]
                        if len(data) > 7:
                            data = data[:7]
                        else:
                            # pad the final packet with 0xFF filler bytes
                            while len(data) < 7:
                                data.append(255)
                        data.insert(0, buf['next_packet_to_send'] + 1)
                        self.send_tp_dt(buf['src_address'], buf['dest_address'], data)
                        buf['next_packet_to_send'] += 1
                        if buf['next_packet_to_send'] < buf['num_packages']:
                            buf['deadline'] = time.time() + ElectronicControlUnit.Timeout.Tb
                            # recalc next wakeup
                            if next_wakeup > buf['deadline']:
                                next_wakeup = buf['deadline']
                        else:
                            # done
                            del self._snd_buffer[bufid]
                    else:
                        logger.critical("unknown SendBufferState %d", buf['state'])
                        del self._snd_buffer[bufid]

        # check timer events; iterate a snapshot because expired one-shot
        # events are removed from the list while we walk it (the original
        # iterated the live list, skipping the event after each removal)
        for event in list(self._timer_events):
            if event['deadline'] > now:
                if next_wakeup > event['deadline']:
                    next_wakeup = event['deadline']
            else:
                # deadline reached
                logger.debug("Deadline for event reached")
                # a truthy return value means the callback wants to be called
                # again (the original compared '== True', which dropped
                # callbacks returning non-bool truthy values)
                if event['callback'](event['cookie']):
                    while event['deadline'] < now:
                        # just to take care of overruns
                        event['deadline'] += event['delta_time']
                    # recalc next wakeup
                    if next_wakeup > event['deadline']:
                        next_wakeup = event['deadline']
                else:
                    # remove from list
                    self._timer_events.remove(event)

        time_to_sleep = next_wakeup - time.time()
        if time_to_sleep > 0:
            try:
                # blocking wait that other threads can interrupt via the queue
                self._job_thread_wakeup_queue.get(True, time_to_sleep)
            except queue.Empty:
                # do nothing
                pass
def stop(self):
    """Stops the ECU background handling

    This Function explicitly stops the background handling of the ECU.
    """
    self._job_thread_end.set()   # request termination of the job thread
    self._job_thread_wakeup()    # interrupt its blocking queue wait
    self._job_thread.join()      # wait until the thread has finished
def _job_thread_wakeup(self):
    """Wakeup the async job thread

    By calling this function we wakeup the asynchronous job thread to
    force a recalculation of his next wakeup event. The payload value
    is irrelevant; only the queue activity matters.
    """
    self._job_thread_wakeup_queue.put(1)
def add_timer(self, delta_time, callback, cookie=None):
    """Register a recurring timer event with the job thread.

    :param delta_time:
        Interval in seconds after which *callback* fires.
    :param callback:
        Callable invoked on expiry; receives *cookie* as its only argument.
    :param cookie:
        Opaque value handed back to *callback* (optional).
    """
    self._timer_events.append({
        'delta_time': delta_time,
        'callback': callback,
        'deadline': time.time() + delta_time,
        'cookie': cookie,
    })
    # force the job thread to recalculate its next wakeup time
    self._job_thread_wakeup()
def remove_timer(self, callback):
    """Removes ALL entries from the timer event list for the given callback

    :param callback:
        The callback to be removed from the timer event list
    """
    # Rebuild the list instead of removing while iterating: the original
    # mutated the list inside the for-loop, which skips the element that
    # follows each removal and could leave matching entries behind.
    self._timer_events = [
        event for event in self._timer_events
        if event['callback'] != callback
    ]
    self._job_thread_wakeup()
def connect(self, *args, **kwargs):
    """Connect to CAN bus using python-can.

    Arguments are passed directly to :class:`can.BusABC`. Typically these
    may include:

    :param channel:
        Backend specific channel for the CAN interface.
    :param str bustype:
        Name of the interface. See
        `python-can manual <https://python-can.readthedocs.io/en/latest/configuration.html#interface-names>`__
        for full list of supported interfaces.
    :param int bitrate:
        Bitrate in bit/s.

    :raises can.CanError:
        When connection fails.
    """
    self._bus = can.interface.Bus(*args, **kwargs)
    logger.info("Connected to '%s'", self._bus.channel_info)
    # dispatch received frames to our listeners; 1 second shutdown timeout
    self._notifier = can.Notifier(self._bus, self._listeners, 1)
def disconnect(self):
    """Disconnect from the CAN bus.

    Must be overridden in a subclass if a custom interface is used.
    """
    self._notifier.stop()  # stop dispatching received frames
    self._bus.shutdown()   # release the underlying python-can bus
    self._bus = None
def subscribe(self, callback):
    """Add the given callback to the message notification stream.

    :param callback:
        Function to call when message is received.
        Invoked as ``callback(pgn, data)`` (see ``notify_subscribers``).
    """
    self._subscribers.append(callback)
def unsubscribe(self, callback):
    """Stop listening for message.

    :param callback:
        Function to call when message is received.
    :raises ValueError:
        If *callback* was never subscribed (``list.remove`` semantics).
    """
    self._subscribers.remove(callback)
def _buffer_hash(self, src_address, dest_address):
"""Calcluates a hash value for the given address pair
:param src_address:
The Source-Address the connection should bound to.
:param dest_address:
The Destination-Address the connection should bound to.
:return:
The calculated hash value.
:rtype: int
"""
return ((src_address & 0xFF) << 8) | (dest_address & 0xFF)
def _process_tp_cm(self, mid, dest_address, data, timestamp):
    """Processes a Transport Protocol Connection Management (TP.CM) message

    :param j1939.MessageId mid:
        A MessageId object holding the information extracted from the can_id.
    :param int dest_address:
        The destination address of the message
    :param bytearray data:
        The data contained in the can-message.
    :param float timestamp:
        The timestamp the message was received (mostly) in fractions of Epoch-Seconds.
    """
    control_byte = data[0]
    # bytes 5..7 carry the PGN of the transported message (little endian)
    pgn = data[5] | (data[6] << 8) | (data[7] << 16)
    src_address = mid.source_address

    if control_byte == ElectronicControlUnit.ConnectionMode.RTS:
        # a peer opens a destination-specific (RTS/CTS) transfer towards us
        message_size = data[1] | (data[2] << 8)
        num_packages = data[3]
        buffer_hash = self._buffer_hash(src_address, dest_address)
        if buffer_hash in self._rcv_buffer:
            # according SAE J1939-21 we have to send an ABORT if an active
            # transmission is already established
            self.send_tp_abort(dest_address, src_address, ElectronicControlUnit.ConnectionAbortReason.BUSY, pgn)
            return

        # open new buffer for this connection
        self._rcv_buffer[buffer_hash] = {
            "pgn": pgn,
            "message_size": message_size,
            "num_packages": num_packages,
            "next_packet": 1,
            "data": [],
            "deadline": time.time() + ElectronicControlUnit.Timeout.T2,
            'src_address' : src_address,
            'dest_address' : dest_address,
        }

        # grant one packet, starting at packet number 1
        self.send_tp_cts(dest_address, src_address, 1, 1, pgn)
        self._job_thread_wakeup()
    elif control_byte == ElectronicControlUnit.ConnectionMode.CTS:
        # the peer grants us permission to send num_packages packets
        num_packages = data[1]
        next_package_number = data[2] - 1
        buffer_hash = self._buffer_hash(dest_address, src_address)
        if buffer_hash not in self._snd_buffer:
            # no matching send session - abort
            self.send_tp_abort(dest_address, src_address, ElectronicControlUnit.ConnectionAbortReason.RESOURCES, pgn)
            return
        if num_packages == 0:
            # SAE J1939/21
            # receiver requests a pause
            self._snd_buffer[buffer_hash]['deadline'] = time.time() + ElectronicControlUnit.Timeout.Th
            self._job_thread_wakeup()
            return

        self._snd_buffer[buffer_hash]['deadline'] = time.time() + 10.0 # do not monitor deadlines while sending
        self._snd_buffer[buffer_hash]['state'] = ElectronicControlUnit.SendBufferState.SENDING_IN_CTS
        self._job_thread_wakeup()

        # TODO: should we send the answer packets asynchronously
        #       maybe in our _job_thread?
        for package in range(next_package_number, next_package_number + num_packages):
            offset = package * 7
            data = self._snd_buffer[buffer_hash]['data'][offset:]
            if len(data)>7:
                data = data[:7]
            else:
                # pad the last packet with 0xFF filler bytes
                while len(data)<7:
                    data.append(255)
            data.insert(0, package+1)
            self.send_tp_dt(dest_address, src_address, data)

        # now wait for the next CTS (or EOM-ACK) from the receiver
        self._snd_buffer[buffer_hash]['deadline'] = time.time() + ElectronicControlUnit.Timeout.T3
        self._snd_buffer[buffer_hash]['state'] = ElectronicControlUnit.SendBufferState.WAITING_CTS
        self._job_thread_wakeup()
    elif control_byte == ElectronicControlUnit.ConnectionMode.EOM_ACK:
        # the receiver confirms a completed transfer
        buffer_hash = self._buffer_hash(dest_address, src_address)
        if buffer_hash not in self._snd_buffer:
            self.send_tp_abort(dest_address, src_address, ElectronicControlUnit.ConnectionAbortReason.RESOURCES, pgn)
            return
        # TODO: should we inform the application about the successful transmission?
        del self._snd_buffer[buffer_hash]
        self._job_thread_wakeup()
    elif control_byte == ElectronicControlUnit.ConnectionMode.BAM:
        # a peer announces a broadcast transfer; no flow control follows
        message_size = data[1] | (data[2] << 8)
        num_packages = data[3]
        buffer_hash = self._buffer_hash(src_address, dest_address)
        if buffer_hash in self._rcv_buffer:
            # TODO: should we deliver the partly received message to our CAs?
            del self._rcv_buffer[buffer_hash]
            self._job_thread_wakeup()

        # init new buffer for this connection
        self._rcv_buffer[buffer_hash] = {
            "pgn": pgn,
            "message_size": message_size,
            "num_packages": num_packages,
            "next_packet": 1,
            "data": [],
            "deadline": timestamp + ElectronicControlUnit.Timeout.T1,
            'src_address' : src_address,
            'dest_address' : dest_address,
        }
        self._job_thread_wakeup()
    elif control_byte == ElectronicControlUnit.ConnectionMode.ABORT:
        # TODO
        pass
    else:
        # NOTE(review): the extra argument is not %-formatted into the
        # message; RuntimeError receives a tuple here.
        raise RuntimeError("Received TP.CM with unknown control_byte %d", control_byte)
def _process_tp_dt(self, mid, dest_address, data, timestamp):
    """Processes a Transport Protocol Data Transfer (TP.DT) message

    Appends the packet payload to the matching receive buffer, drives the
    CTS flow control for peer-to-peer transfers, and delivers the message
    to subscribers once it is complete.

    :param j1939.MessageId mid:
        A MessageId object holding the information extracted from the can_id.
    :param int dest_address:
        The destination address of the message
    :param bytearray data:
        The data contained in the can-message; byte 0 is the sequence number.
    :param float timestamp:
        The timestamp the message was received.
    """
    sequence_number = data[0]
    src_address = mid.source_address
    buffer_hash = self._buffer_hash(src_address, dest_address)
    if buffer_hash not in self._rcv_buffer:
        # no active session for this address pair - silently dropped
        # TODO: LOG/TRACE/EXCEPTION?
        return

    if sequence_number != self._rcv_buffer[buffer_hash]['next_packet']:
        # out-of-sequence packet
        if dest_address == j1939.ParameterGroupNumber.Address.GLOBAL:
            # broadcast transfers have no retransmission mechanism
            # TODO:
            return
        # request retransmission starting at the expected packet
        self.send_tp_cts(dest_address, src_address, 1, self._rcv_buffer[buffer_hash]['next_packet'], self._rcv_buffer[buffer_hash]['pgn'])
        self._rcv_buffer[buffer_hash]['deadline'] = time.time() + ElectronicControlUnit.Timeout.T2
        self._job_thread_wakeup()
        return

    self._rcv_buffer[buffer_hash]['next_packet'] += 1
    self._rcv_buffer[buffer_hash]['deadline'] = time.time() + ElectronicControlUnit.Timeout.T1
    self._job_thread_wakeup()
    self._rcv_buffer[buffer_hash]['data'].extend(data[1:])

    # TODO: should we check the number of received messages instead?
    if len(self._rcv_buffer[buffer_hash]['data']) >= self._rcv_buffer[buffer_hash]['message_size']:
        logger.info("finished RCV of PGN {} with size {}".format(self._rcv_buffer[buffer_hash]['pgn'], self._rcv_buffer[buffer_hash]['message_size']))
        # shorten data to message_size
        self._rcv_buffer[buffer_hash]['data'] = self._rcv_buffer[buffer_hash]['data'][:self._rcv_buffer[buffer_hash]['message_size']]
        # finished reassembly
        if dest_address != j1939.ParameterGroupNumber.Address.GLOBAL:
            self.send_tp_eom_ack(dest_address, src_address, self._rcv_buffer[buffer_hash]['message_size'], self._rcv_buffer[buffer_hash]['num_packages'], self._rcv_buffer[buffer_hash]['pgn'])
        self.notify_subscribers(self._rcv_buffer[buffer_hash]['pgn'], self._rcv_buffer[buffer_hash]['data'])
        del self._rcv_buffer[buffer_hash]
        self._job_thread_wakeup()
        return

    if dest_address != j1939.ParameterGroupNumber.Address.GLOBAL:
        # grant the next packet of this peer-to-peer transfer
        self.send_tp_cts(dest_address, src_address, 1, self._rcv_buffer[buffer_hash]['next_packet'], self._rcv_buffer[buffer_hash]['pgn'])
        self._rcv_buffer[buffer_hash]['deadline'] = time.time() + ElectronicControlUnit.Timeout.T2
        self._job_thread_wakeup()
def notify(self, can_id, data, timestamp):
    """Feed incoming CAN message into this ecu.

    If a custom interface is used, this function must be called for each
    29-bit standard message read from the CAN bus.

    :param int can_id:
        CAN-ID of the message (always 29-bit)
    :param bytearray data:
        Data part of the message (0 - 8 bytes)
    :param float timestamp:
        The timestamp field in a CAN message is a floating point number
        representing when the message was received since the epoch in
        seconds.
        Where possible this will be timestamped in hardware.
    """
    mid = j1939.MessageId(can_id=can_id)
    pgn = j1939.ParameterGroupNumber()
    pgn.from_message_id(mid)

    if pgn.is_pdu2_format:
        # direct broadcast
        self.notify_subscribers(pgn.value, data)
        return

    # peer to peer
    # pdu_specific is destination Address
    pgn_value = pgn.value & 0x1FF00
    dest_address = pgn.pdu_specific # may be Address.GLOBAL

    # TODO: iterate all CAs to check if we have to handle this destination address!
    if dest_address != j1939.ParameterGroupNumber.Address.GLOBAL:
        # drop destination-specific messages that none of our CAs accepts
        reject = True
        for ca in self._cas:
            if ca.message_acceptable(dest_address):
                reject = False
                break
        if reject == True:
            return

    # dispatch by PGN: address management and transport protocol are
    # handled internally, everything else is forwarded to subscribers
    if pgn_value == j1939.ParameterGroupNumber.PGN.ADDRESSCLAIM:
        for ca in self._cas:
            ca._process_addressclaim(mid, data, timestamp)
    elif pgn_value == j1939.ParameterGroupNumber.PGN.REQUEST:
        for ca in self._cas:
            if ca.message_acceptable(dest_address):
                ca._process_request(mid, dest_address, data, timestamp)
    elif pgn_value == j1939.ParameterGroupNumber.PGN.TP_CM:
        self._process_tp_cm(mid, dest_address, data, timestamp)
    elif pgn_value == j1939.ParameterGroupNumber.PGN.DATATRANSFER:
        self._process_tp_dt(mid, dest_address, data, timestamp)
    else:
        self.notify_subscribers(pgn_value, data)

    return
def notify_subscribers(self, pgn, data):
    """Feed incoming message to subscribers.

    :param int pgn:
        Parameter Group Number of the message
    :param bytearray data:
        Data of the PDU
    """
    logger.debug("notify subscribers for PGN {}".format(pgn))
    # TODO: we have to filter the dest_address here!
    for subscriber in self._subscribers:
        subscriber(pgn, data)
def add_ca(self, **kwargs):
    """Add a ControllerApplication to the ECU.

    Either pass an existing CA object or the data to construct one:

    :param controller_application:
        A :class:`j1939.ControllerApplication` object.
    :param name:
        A :class:`j1939.Name` object (used when no CA object is given).
    :param device_address:
        An integer representing the device address to announce to the bus.

    :return:
        The CA object that was added.
    :rtype: j1939.ControllerApplication
    :raises ValueError:
        If neither ``controller_application`` nor ``name`` is provided.
    """
    if 'controller_application' in kwargs:
        ca = kwargs['controller_application']
    else:
        if 'name' not in kwargs:
            raise ValueError("either 'controller_application' or 'name' must be provided")
        ca = j1939.ControllerApplication(kwargs['name'], kwargs.get('device_address', None))
    self._cas.append(ca)
    ca.associate_ecu(self)
    return ca
class Acknowledgement(object):
    """Control-byte values for the acknowledgement message (see send_acknowledgement)."""
    ACK = 0
    NACK = 1
    AccessDenied = 2
    CannotRespond = 3
def send_message(self, can_id, data):
    """Send a raw CAN message to the bus.

    This method may be overridden in a subclass if you need to integrate
    this library with a custom backend.
    It is safe to call this from multiple threads.

    :param int can_id:
        CAN-ID of the message (always 29-bit)
    :param data:
        Data to be transmitted (anything that can be converted to bytes)

    :raises can.CanError:
        When the message fails to be transmitted
    :raises RuntimeError:
        When not connected to a CAN bus.
    """
    if not self._bus:
        raise RuntimeError("Not connected to CAN bus")
    # NOTE(review): 'extended_id' is a deprecated alias of 'is_extended_id'
    # in newer python-can releases - confirm against the pinned version.
    msg = can.Message(extended_id=True,
                      arbitration_id=can_id,
                      data=data
                      )
    # serialize bus access across threads
    with self._send_lock:
        self._bus.send(msg)
        # TODO: check error receivement
def send_tp_dt(self, src_address, dest_address, data):
    """Send one TP.DT (data transfer) frame of a transport-protocol session."""
    dt_pgn = j1939.ParameterGroupNumber(0, 235, dest_address)
    message_id = j1939.MessageId(priority=7, parameter_group_number=dt_pgn.value, source_address=src_address)
    self.send_message(message_id.can_id, data)
def send_tp_abort(self, src_address, dest_address, reason, pgn_value):
    """Send a TP.CM connection abort for *pgn_value* with the given *reason*."""
    pgn_lo = pgn_value & 0xFF
    pgn_mid = (pgn_value >> 8) & 0xFF
    pgn_hi = (pgn_value >> 16) & 0xFF
    cm_pgn = j1939.ParameterGroupNumber(0, 236, dest_address)
    message_id = j1939.MessageId(priority=7, parameter_group_number=cm_pgn.value, source_address=src_address)
    payload = [ElectronicControlUnit.ConnectionMode.ABORT, reason, 0xFF, 0xFF, 0xFF, pgn_lo, pgn_mid, pgn_hi]
    self.send_message(message_id.can_id, payload)
def send_tp_cts(self, src_address, dest_address, num_packets, next_packet, pgn_value):
    """Send a TP.CM Clear-To-Send granting *num_packets* starting at *next_packet*."""
    pgn_lo = pgn_value & 0xFF
    pgn_mid = (pgn_value >> 8) & 0xFF
    pgn_hi = (pgn_value >> 16) & 0xFF
    cm_pgn = j1939.ParameterGroupNumber(0, 236, dest_address)
    message_id = j1939.MessageId(priority=7, parameter_group_number=cm_pgn.value, source_address=src_address)
    payload = [ElectronicControlUnit.ConnectionMode.CTS, num_packets, next_packet, 0xFF, 0xFF, pgn_lo, pgn_mid, pgn_hi]
    self.send_message(message_id.can_id, payload)
def send_tp_eom_ack(self, src_address, dest_address, message_size, num_packets, pgn_value):
    """Send a TP.CM End-Of-Message acknowledgment for a completed transfer."""
    pgn_lo = pgn_value & 0xFF
    pgn_mid = (pgn_value >> 8) & 0xFF
    pgn_hi = (pgn_value >> 16) & 0xFF
    cm_pgn = j1939.ParameterGroupNumber(0, 236, dest_address)
    message_id = j1939.MessageId(priority=7, parameter_group_number=cm_pgn.value, source_address=src_address)
    payload = [ElectronicControlUnit.ConnectionMode.EOM_ACK, message_size & 0xFF, (message_size >> 8) & 0xFF, num_packets, 0xFF, pgn_lo, pgn_mid, pgn_hi]
    self.send_message(message_id.can_id, payload)
def send_tp_rts(self, src_address, dest_address, priority, pgn_value, message_size, num_packets):
    """Send a TP.CM Request-To-Send announcing a destination-specific transfer."""
    pgn_lo = pgn_value & 0xFF
    pgn_mid = (pgn_value >> 8) & 0xFF
    pgn_hi = (pgn_value >> 16) & 0xFF
    cm_pgn = j1939.ParameterGroupNumber(0, 236, dest_address)
    message_id = j1939.MessageId(priority=priority, parameter_group_number=cm_pgn.value, source_address=src_address)
    payload = [ElectronicControlUnit.ConnectionMode.RTS, message_size & 0xFF, (message_size >> 8) & 0xFF, num_packets, 0xFF, pgn_lo, pgn_mid, pgn_hi]
    self.send_message(message_id.can_id, payload)
def send_acknowledgement(self, control_byte, group_function_value, address_acknowledged, pgn):
    """Send an acknowledgement message (PGN 0x00E800) from the global source address."""
    payload = [control_byte, group_function_value, 0xFF, 0xFF, address_acknowledged,
               (pgn & 0xFF), ((pgn >> 8) & 0xFF), ((pgn >> 16) & 0xFF)]
    message_id = j1939.MessageId(priority=6, parameter_group_number=0x00E800, source_address=255)
    self.send_message(message_id.can_id, payload)
def send_tp_bam(self, src_address, priority, pgn_value, message_size, num_packets):
    """Send a TP.CM Broadcast-Announce-Message to the global address."""
    pgn_lo = pgn_value & 0xFF
    pgn_mid = (pgn_value >> 8) & 0xFF
    pgn_hi = (pgn_value >> 16) & 0xFF
    cm_pgn = j1939.ParameterGroupNumber(0, 236, j1939.ParameterGroupNumber.Address.GLOBAL)
    message_id = j1939.MessageId(priority=priority, parameter_group_number=cm_pgn.value, source_address=src_address)
    payload = [ElectronicControlUnit.ConnectionMode.BAM, message_size & 0xFF, (message_size >> 8) & 0xFF, num_packets, 0xFF, pgn_lo, pgn_mid, pgn_hi]
    self.send_message(message_id.can_id, payload)
def send_pgn(self, data_page, pdu_format, pdu_specific, priority, src_address, data):
    """Send a PGN, transparently choosing single frame or transport protocol.

    Payloads up to 8 bytes go out as a single frame. Longer payloads start
    a TP session: BAM for the global address, RTS/CTS otherwise; the
    remaining packets are sent by the job thread / CTS handler.

    :param int data_page: data page bit of the PGN
    :param int pdu_format: PDU format byte of the PGN
    :param int pdu_specific: PDU specific byte (destination address for PDU1)
    :param int priority: message priority
    :param int src_address: source address to send from
    :param data: payload bytes
    :return:
        ``False`` if a TP session for this address pair is already active,
        ``True`` when a TP session was started, ``None`` for single frames.
    """
    pgn = j1939.ParameterGroupNumber(data_page, pdu_format, pdu_specific)
    if len(data) <= 8:
        # send normal message
        mid = j1939.MessageId(priority=priority, parameter_group_number=pgn.value, source_address=src_address)
        self.send_message(mid.can_id, data)
    else:
        # init sequence
        buffer_hash = self._buffer_hash(src_address, pdu_specific)
        if buffer_hash in self._snd_buffer:
            # There is already a sequence active for this pair
            return False
        message_size = len(data)
        # ceiling division in pure integer arithmetic; the original used
        # float division (int(n / 7)), which is only exact up to 2**53
        num_packets = (message_size + 6) // 7

        if pdu_specific == j1939.ParameterGroupNumber.Address.GLOBAL:
            # send BAM
            self.send_tp_bam(src_address, priority, pgn.value, message_size, num_packets)

            # init new buffer for this connection
            self._snd_buffer[buffer_hash] = {
                "pgn": pgn.value,
                "priority": priority,
                "message_size": message_size,
                "num_packages": num_packets,
                "data": data,
                "state": ElectronicControlUnit.SendBufferState.SENDING_BM,
                "deadline": time.time() + ElectronicControlUnit.Timeout.Tb,
                'src_address' : src_address,
                'dest_address' : pdu_specific,
                'next_packet_to_send' : 0,
            }
        else:
            # send RTS/CTS
            # init new buffer for this connection
            self._snd_buffer[buffer_hash] = {
                "pgn": pgn.value,
                "priority": priority,
                "message_size": message_size,
                "num_packages": num_packets,
                "data": data,
                "state": ElectronicControlUnit.SendBufferState.WAITING_CTS,
                "deadline": time.time() + ElectronicControlUnit.Timeout.T3,
                'src_address' : src_address,
                'dest_address' : pdu_specific,
            }
            self.send_tp_rts(src_address, pdu_specific, priority, pgn.value, message_size, num_packets)

        self._job_thread_wakeup()
        return True
class MessageListener(Listener):
    """Listens for messages on CAN bus and feeds them to an ECU instance.

    :param j1939.ElectronicControlUnit ecu:
        The ECU to notify on new messages.
    """

    def __init__(self, ecu):
        # the ECU that receives every valid frame via notify()
        self.ecu = ecu

    def on_message_received(self, msg):
        # skip error frames and remote frames
        if msg.is_error_frame or msg.is_remote_frame:
            return

        try:
            self.ecu.notify(msg.arbitration_id, msg.data, msg.timestamp)
        except Exception as e:
            # Exceptions in any callbacks should not affect CAN processing
            logger.error(str(e))
|
mqtt_wss_example_test.py | from __future__ import unicode_literals
import os
import re
import ssl
import sys
from builtins import str
from threading import Event, Thread
import paho.mqtt.client as mqtt
import ttfw_idf
from tiny_test_fw import DUT
event_client_connected = Event()         # set once the py-client has connected to the broker
event_stop_client = Event()              # signals the client loop thread to exit
event_client_received_correct = Event()  # set when the expected qos0 payload arrives
message_log = ''                         # accumulated log of received messages (for error reporting)
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    """Record the broker connection and subscribe to the test topic."""
    print('Connected with result code {}'.format(rc))
    event_client_connected.set()
    client.subscribe('/topic/qos0')
def mqtt_client_task(client):
    """Drive the paho-mqtt network loop until event_stop_client is set."""
    while not event_stop_client.is_set():
        client.loop()
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    """Check a received payload, echo the reply the DUT waits for, and log it."""
    global message_log
    payload = msg.payload.decode()
    # only reply once: the first matching 'data' payload triggers the
    # publish that the ESP32 side expects to receive
    if not event_client_received_correct.is_set() and payload == 'data':
        client.publish('/topic/qos0', 'data_to_esp32')
    if msg.topic == '/topic/qos0' and payload == 'data':
        event_client_received_correct.set()
    message_log += 'Received data:' + msg.topic + ' ' + payload + '\n'
@ttfw_idf.idf_example_test(env_tag='Example_WIFI_Protocols')
def test_examples_protocol_mqtt_wss(env, extra_data):
    """
    steps: |
      1. join AP and connects to wss broker
      2. Test connects a client to the same broker
      3. Test evaluates it received correct qos0 message
      4. Test ESP32 client received correct qos0 message
    """
    # (the steps string above was originally placed after these two
    # assignments, where it was a no-op statement instead of the docstring)
    broker_url = ''
    broker_port = 0
    dut1 = env.get_dut('mqtt_websocket_secure', 'examples/protocols/mqtt/wss', dut_class=ttfw_idf.ESP32DUT)
    # check and log bin size
    binary_file = os.path.join(dut1.app.binary_path, 'mqtt_websocket_secure.bin')
    bin_size = os.path.getsize(binary_file)
    ttfw_idf.log_performance('mqtt_websocket_secure_bin_size', '{}KB'.format(bin_size // 1024))
    # Look for host:port in sdkconfig
    try:
        value = re.search(r'\:\/\/([^:]+)\:([0-9]+)', dut1.app.get_sdkconfig()['CONFIG_BROKER_URI'])
        broker_url = value.group(1)
        broker_port = int(value.group(2))
    except Exception:
        print('ENV_TEST_FAILURE: Cannot find broker url in sdkconfig')
        raise
    client = None
    # 1. Test connects to a broker
    try:
        client = mqtt.Client(transport='websockets')
        client.on_connect = on_connect
        client.on_message = on_message
        # the broker certificate is deliberately not verified in this test
        client.tls_set(None,
                       None,
                       None, cert_reqs=ssl.CERT_NONE, tls_version=ssl.PROTOCOL_TLSv1_2, ciphers=None)
        print('Connecting...')
        client.connect(broker_url, broker_port, 60)
    except Exception:
        print('ENV_TEST_FAILURE: Unexpected error while connecting to broker {}: {}:'.format(broker_url, sys.exc_info()[0]))
        raise
    # Starting a py-client in a separate thread
    thread1 = Thread(target=mqtt_client_task, args=(client,))
    thread1.start()
    try:
        print('Connecting py-client to broker {}:{}...'.format(broker_url, broker_port))
        if not event_client_connected.wait(timeout=30):
            raise ValueError('ENV_TEST_FAILURE: Test script cannot connect to broker: {}'.format(broker_url))
        dut1.start_app()
        try:
            ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
            print('Connected to AP with IP: {}'.format(ip_address))
        except DUT.ExpectTimeout:
            print('ENV_TEST_FAILURE: Cannot connect to AP')
            raise
        print('Checking py-client received msg published from esp...')
        if not event_client_received_correct.wait(timeout=30):
            raise ValueError('Wrong data received, msg log: {}'.format(message_log))
        print('Checking esp-client received msg published from py-client...')
        dut1.expect(re.compile(r'DATA=data_to_esp32'), timeout=30)
    finally:
        # always stop the py-client loop thread, even on test failure
        event_stop_client.set()
        thread1.join()
if __name__ == '__main__':
    # NOTE(review): called without env/extra_data - presumably the ttfw_idf
    # decorator supplies them when run standalone; confirm against ttfw_idf.
    test_examples_protocol_mqtt_wss()
|
14_edf_wait_die_Speak.py | # Author Emeka Ugwuanyi Emmanuel
from functools import reduce
from sys import *
import numpy as np
import random as r
import ping_code as pc
import socket
import struct
import subprocess as sp
from threading import Thread
import threading
import ast
import time
import os
import psutil
import datetime as dt
import getpass as gp
import paho.mqtt.client as mqtt
from netifaces import interfaces, ifaddresses, AF_INET
import smtplib
import config
import paramiko
# --- module-wide state shared by the scheduler, MQTT callbacks and worker threads ---
hosts = {} # {hostname: ip}

# task set for EDF scheduling: worst-case execution time, period, deadline
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
          't2': {'wcet': 1, 'period': 5, 'deadline': 4},
          't3': {'wcet': 2, 'period': 10, 'deadline': 8},
          't4': {'wcet': 1, 'period': 10, 'deadline': 9},
          't5': {'wcet': 3, 'period': 15, 'deadline': 12}
          }

# mat = {'p0': ['cpu', 'mem', 'storage']}
# resource need vector per task [cpu, mem, storage]
_need = {
    't1': [7, 4, 3],
    't2': [1, 2, 2],
    't3': [6, 0, 0],
    't4': [0, 1, 1],
    't5': [4, 3, 1]
}
# resources currently allocated per task [cpu, mem, storage]
allocation = {
    't1': [0, 1, 0],
    't2': [2, 0, 0],
    't3': [3, 0, 2],
    't4': [2, 1, 1],
    't5': [0, 0, 2]
}
_cpu = [] # cpu plot list
prev_t = 0 # variable for cpu util
_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec
_off_cloud = 0 # used to keep a count of tasks offloaded to cloud
_loc = 0 # used to keep a count of tasks executed locally
_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec
deadlock = [1] # keeps count of how many deadlock is resolved
mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}
memory = []
mec_rtt = {} # {ip: [RTT]}
offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload
reoffload_list = [[], {}] # [[task_list],{wait_time}] => records that’s re-offloaded to mec to execute.
discovering = 0 # if discovering == 0 update host
test = []
_time = []
_pos = 0
# received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]
received_task_queue = [] # [(task_list,wait_time), ....]
thread_record = []
port = 65000
_port_ = 64000
cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud
cloud_port = 63000
stop = 0
t_track = 1
shared_resource_lock = threading.Lock()
def discovering_group():
    """Create global sock1 bound to port 10000 and join the discovery multicast group."""
    global sock1
    group_ip = '224.3.29.71'
    # Create the socket and bind to the wildcard server address
    sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock1.bind(('', 10000))
    # Tell the operating system to add the socket to the multicast group
    # on all interfaces.
    membership = struct.pack('4sL', socket.inet_aton(group_ip), socket.INADDR_ANY)
    sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, membership)
def offloading_group():
    """Create global sock2 bound to port 20000 and join the offloading multicast group."""
    global sock2
    group_ip = '224.5.5.55'
    # Create the socket and bind to the wildcard server address
    sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock2.bind(('', 20000))
    # Tell the operating system to add the socket to the multicast group
    # on all interfaces.
    membership = struct.pack('4sL', socket.inet_aton(group_ip), socket.INADDR_ANY)
    sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, membership)
def ip_address():
    """Best-effort discovery of this host's IPv4 address.

    Tries the eth1 interface address via ifconfig first and falls back to
    the local address of a UDP socket "connected" to a public resolver
    (no packets are actually sent for a UDP connect).

    :return: the IPv4 address as a string
    """
    def _default_route_ip():
        # The original duplicated this fallback in two places and leaked
        # the socket; it is now a single helper that closes the socket.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect(("8.8.8.8", 80))
            return s.getsockname()[0]
        finally:
            s.close()

    try:
        cmd = ['ifconfig eth1 | grep inet | cut -d ":" -f 2 | cut -d " " -f 1']
        address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
        if len(address.strip().split('.')) == 4:
            return address.strip()
        return _default_route_ip()
    except Exception:
        return _default_route_ip()
def _memory():
    """Append the current memory utilisation sample to the global list.

    NOTE(review): 'algo' is a module global not defined in this chunk -
    presumably a psutil.Process handle created elsewhere; confirm.
    """
    global memory
    memory.append(round(algo.memory_percent(), 4))
def get_mec_rtts():
    """Append a fresh RTT sample for every known MEC host in mec_rtt."""
    for host in mec_rtt:
        mec_rtt[host].append(get_rtt(host))
def m_cpu():
    """Record the absolute change in CPU utilisation since the previous sample."""
    global prev_t
    current = psutil.cpu_percent(percpu=False)
    sample = round(abs(prev_t - current), 4)
    prev_t = current
    _cpu.append(sample)
def generate_results():
    """Collect one sample of each monitored metric (memory, cpu, MEC RTTs)."""
    _memory()
    m_cpu()
    get_mec_rtts()
def host_ip_set():
    """Collect the IPv4 address strings of all local interfaces into global ip_set."""
    global ip_set
    ip_set = set()
    for iface in interfaces():
        iface_addrs = ifaddresses(iface).setdefault(AF_INET, [{'addr': 'No IP addr'}])
        ip_set.add(', '.join(entry['addr'] for entry in iface_addrs))
def get_time():
    """Return the current UTC time split into string components:
    [YYYY, MM, DD, hh, mm, ss, microseconds].
    """
    # datetime.utcnow() is deprecated; take an aware UTC timestamp and drop
    # the tzinfo so str() produces the same layout as before.
    now = dt.datetime.now(dt.timezone.utc).replace(tzinfo=None)
    _time_ = []
    d = str(now).split()
    _time_ += d[0].split('-')
    g = d[1].split('.')
    _time_ += g[0].split(':')
    _time_.append(g[1])
    return _time_
def get_rtt(host):
    """Ping *host* and return the round-trip time rounded to 4 decimals."""
    return round(pc.verbose_ping(host), 4)
def gcd(a, b):
    """Greatest common divisor via Euclid's algorithm (iterative form)."""
    while b != 0:
        a, b = b, a % b
    return a
def _lcm(a, b):
return int(a * b / gcd(a, b))
def lcm(_list):
    """Least common multiple of all values in *_list*.

    Raises TypeError on an empty list (``reduce`` with no initializer).
    """
    return reduce(_lcm, _list)
def gosh_dist(_range):
    """Pseudo-random draw in [0, _range) from a modular power chain."""
    power = 23 ** r.randrange(1, 1331)
    modulus = r.randrange(1, 1777)
    return (power % modulus) % _range
def on_connect(connect_client, userdata, flags, rc):
    """On CONNACK: subscribe to this node's own topic and the shared 'mec' topic."""
    for topic_name in (node_id, 'mec'):
        connect_client.subscribe(topic_name)
# Callback Function on Receiving the Subscribed Topic/Message
def on_message(message_client, userdata, msg):
    """Dispatch an incoming MQTT payload by its one-character type prefix.

    'c' = result coming back from the cloud (forward to the client topic),
    't' = task submitted by a client (queue for local scheduling).
    """
    data = str(msg.payload, 'utf-8')
    if data[0] == 'c':  # receive from cloud
        data = data[2:]
        received_task = ast.literal_eval(data)
        # send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])
        _client.publish(received_task.split('.')[2], str({received_task: get_time()}))
    elif data[0] == 't':  # receive from client
        received_task = ast.literal_eval(data[2:])
        received_task_queue.append(received_task)
    # commented-out alternative branches kept from the original for reference:
    '''
    else:
        print('data: ', data)
    elif data[0] == 't':
        print('send: ', data[2:])
    '''
def connect_to_broker():
    """Connect to the local MQTT broker and block in its network loop.

    Sets the globals _client, broker_ip and topic. loop_forever() never
    returns, so this function is intended to run on its own thread.
    """
    global _client
    global broker_ip
    global topic
    username = 'mec'
    password = 'password'
    broker_ip = 'localhost'
    broker_port_no = 1883
    topic = 'mec'  # topic used to exchange mec details to clients
    _client = mqtt.Client()
    _client.on_connect = on_connect
    _client.on_message = on_message
    _client.username_pw_set(username, password)
    _client.connect(broker_ip, broker_port_no, 60)
    _client.loop_forever()
def edf():
    """Earliest-Deadline-First schedule over one hyperperiod.

    Expands every job release of each task up to the LCM of the periods,
    sorts the jobs by absolute deadline, simulates execution, and hands
    any jobs that would miss their deadline to cooperative_mec().

    NOTE(review): reads the module global 'tasks' (not defined in this
    chunk - presumably assigned elsewhere before scheduling starts).

    :return: list of task ids in execution order
    """
    t_lcm = lcm([tasks[i]['period'] for i in tasks])

    t_dead = {i: tasks[i]['deadline'] for i in tasks}

    sorted_dead = sorted(t_dead.items(), key=lambda kv: (kv[1], kv[0]))
    # print(sorted_dead)

    # build (task, absolute_deadline) for every release within the hyperperiod
    ready_task = []
    for i in sorted_dead:
        period = tasks[i[0]]['period']
        # print('lcm: ', t_lcm, ' period: ', period)
        t_range = int(t_lcm/period)
        last_dead = 0
        for j in range(t_range):
            ready_task.append((i[0], last_dead+tasks[i[0]]['deadline']))
            last_dead += period

    ready_task = sorted(ready_task, key=lambda t: t[1])
    print(ready_task)

    t_time_ = 0   # simulated clock
    schedule = []
    missed = []
    register = {i: 0 for i in tasks.keys()} # {ti : amount executed}
    for i in ready_task:
        # all releases up to the current period window already executed:
        # advance the clock until the next window opens
        if (t_time_//tasks[i[0]]['period'])+1 <= register[i[0]]:
            while (t_time_//tasks[i[0]]['period'])+1 <= register[i[0]]:
                t_time_ += 1
                # schedule.append(('idle', t_time))
        if (t_time_//tasks[i[0]]['period'])+1 > register[i[0]]:
            if t_time_ + tasks[i[0]]['wcet'] <= i[1]:
                register[i[0]] += 1
                t_time_ += tasks[i[0]]['wcet']
                schedule.append(i[0])
            else:
                print('Deadline missed: ', i)
                missed.append(i[0])

    # print('s (task, execution_time): ', schedule)
    # print('r: ', register)
    if len(missed) > 0:
        print('missed deadline: ', missed)
        cooperative_mec(missed)

    return schedule
# generate execution sequence
def wait_die(processes, avail, n_need, allocat):
    """Run the wait-die deadlock-avoidance scheme over the given processes.

    Args:
        processes: ordered task-instance ids; list position encodes age
            (lower index = older process).
        avail: available instances of each resource type.
        n_need: mapping task id -> resource vector still needed.
        allocat: mapping task id -> resource vector currently held.

    Returns:
        The safe execution sequence. Processes that would deadlock are
        offloaded to the cooperative platform via cooperative_mec().

    Side effects:
        Increments the global ``deadlock`` counter when anything is offloaded.
    """
    global deadlock
    offload = []
    # To store execution sequence
    exec_seq = []
    # Per-process state: 0 = pending, 'w' = waiting, 1 = finished/offloaded.
    work = [0] * len(processes)
    # Loop while any process is still pending or waiting.
    # BUGFIX: the original `while 'w' or 0 in work:` parsed as
    # `'w' or (0 in work)`, which is always true (non-empty string), so
    # termination relied solely on an inner break. This states the invariant.
    while ('w' in work) or (0 in work):
        # Prefer a pending process; otherwise retry a waiting one.
        if 0 in work:
            ind = work.index(0)
            i = processes[ind]
        else:
            # print('wk: ', work)
            ind = work.index('w')
            i = processes[ind]
        # print('comparing| process: ', i, _need[i], 'work: ', avail)
        if not (False in list(np.greater_equal(avail, n_need[i]))):
            # Enough resources: run i and release its allocation back.
            exec_seq.append(i)
            avail = np.add(avail, allocat[i])
            work[ind] = 1
            # print('added: ', exec_seq)
        else:
            # Find the unfinished process holding the most resources.
            a = list(set(processes) - set(exec_seq) - set(offload))
            n = {}
            for j in a:
                n[j] = sum(allocat[j])
            _max = max(n, key=n.get)
            # print('work: ', work, 'need: ', _need[_max])
            if processes.index(_max) > processes.index(i):  # if true, i is older
                # if process is already waiting then offload process
                if work[ind] == 'w':
                    offload.append(i)
                    avail = np.array(avail) + np.array(allocat[i])
                    work[processes.index(i)] = 1
                    # print('offload reentry: ', i, offload)
                else:
                    # Older process waits for the younger resource holder.
                    work[processes.index(i)] = 'w'
                    # print('waiting: ', i)
            else:
                # i is younger: abort (offload) it, per wait-die.
                offload.append(i)
                avail = np.array(avail) + np.array(allocat[i])
                work[processes.index(i)] = 1
                # print('offload: ', i)
    if len(offload) > 0:
        print('offloading tasks: ', offload)
        cooperative_mec(offload)
        deadlock[0] += 1
    print('Execution seq: ', exec_seq)
    return exec_seq
def get_exec_seq(pro):
    """Build per-instance process ids and run wait-die over them.

    Args:
        pro: list of task ids (may contain duplicates).

    Returns:
        Safe execution sequence from wait_die().
    """
    # Suffix each task with its list position so duplicates stay distinct.
    processes = ['{}_{}'.format(pro[i], i) for i in range(len(pro))]
    # Available instances of resources
    avail = [6, 5, 5]
    # i[:2] strips back to the 2-character task key used by _need/allocation.
    n_need = {i: _need[i[:2]] for i in processes}
    # print('need', n_need)
    # Resources allocated to processes
    allot = {i: allocation[i[:2]] for i in processes}
    # return execution sequence
    return wait_die(processes, avail, n_need, allot)
def calc_wait_time(list_seq):
    """Compute each task's cumulative wait-until-completion time.

    Args:
        list_seq: execution sequence of task-instance ids ('task_idx').

    Returns:
        Dict task-instance id -> completion time (exec time + queueing ahead
        of it), rounded to 3 decimals.

    Side effects:
        Multicasts half of the final (largest) completion time as this MEC's
        advertised waiting time.
    """
    pre = 0
    time_dic = {}
    for i in list_seq:
        j = i.split('_')[0]
        # t_time[j][0] is the task's execution time; pre accumulates queueing.
        time_dic[i] = round(t_time[j][0] + pre, 3)
        pre += t_time[j][0]
    # waiting time = total waiting time ÷ 2 average waiting time might be too tight
    w_send = round(time_dic[list(time_dic.keys())[-1]]/2, 3)
    send_message('wt {} {}'.format(ip_address(), str(w_send)))  # multi-casting waiting time to cooperative MECs
    return time_dic
def compare_local_mec(list_seq):
    """Partition scheduled tasks into MEC-bound and locally-executed lists.

    A task is kept local when its latency bound (t_time[id][1]) exceeds its
    computed waiting time; otherwise it is routed to the MEC platform.

    Returns:
        (execute_mec, execute_locally) lists of task-instance ids.
    """
    verdicts = {task: t_time[task.split('_')[0]][1] > wait
                for task, wait in list_seq.items()}
    print('local vs MEC comparison: ', verdicts)
    run_on_mec = []
    run_locally = []
    for task, local_wins in verdicts.items():
        (run_locally if local_wins else run_on_mec).append(task)
    return run_on_mec, run_locally
def calculate_mov_avg(ma1, a1):
    """Fold sample ``a1`` into the running mean tracked for key ``ma1``.

    Uses the cumulative-average recurrence mu_n = ((n-1)*mu_(n-1) + x_n) / n,
    seeding from the last value stored in mec_waiting_time for ma1 (count and
    mean start at zero for an unseen key).

    Returns:
        The updated mean, rounded to 4 decimals.
    """
    if ma1 in mec_waiting_time:
        count = len(mec_waiting_time[ma1]) + 1
        prev_mean = mec_waiting_time[ma1][-1]
    else:
        count = 1
        prev_mean = 0
    return round(((count - 1) * prev_mean + a1) / count, 4)
def algo_id():
    """Map the numeric prefix of this script's filename to an algorithm id.

    Assumes the file is named '<number>_...'; e.g. a prefix of 5 falls in the
    (4, 7] bucket and yields id 7.
    """
    prefix = int(os.path.basename(__file__).split('_')[0])
    # (upper bound, algorithm id) pairs, checked in ascending order.
    for bound, ident in ((2, 2), (4, 3), (7, 7), (10, 10), (13, 12)):
        if prefix <= bound:
            return ident
    return 16
def send_message(mg):
    """Broadcast a control message to the discovery multicast group.

    Recognised values of ``mg``:
      'hello'  -> announce ourselves as the speaker node.
      'update' -> multicast the full host table (plus our own entry).
      'client' -> publish host table + algorithm id to clients over MQTT
                  (retained so late subscribers still receive it).
      anything else -> sent verbatim to the multicast group.

    Errors are printed, never raised (best-effort messaging).
    """
    _multicast_group = ('224.3.29.71', 10000)
    try:
        # Send data to the multicast group
        if mg == 'hello':
            smg = mg + ' ' + str(['speaker', ip_address()])
            sock1.sendto(str.encode(smg), _multicast_group)
            print('\nHello message sent')
        elif mg == 'update':
            ho = hosts.copy()
            ho[get_hostname()] = host_ip  # include ourselves in the table
            smg = mg + ' ' + str(ho)
            sock1.sendto(str.encode(smg), _multicast_group)
            # print('\n===**====**==update message sent===**======**=========')
        elif mg == 'client':
            ho = hosts.copy()
            ho[get_hostname()] = host_ip
            smg = f'm {ho}_{algo_id()}'
            _client.publish(topic, smg, retain=True)
        else:
            sock1.sendto(str.encode(mg), _multicast_group)
    except Exception as e:
        print(e)
def get_hostname():
    """Read this machine's hostname from /etc/hostname (newline stripped)."""
    raw = sp.check_output(['cat /etc/hostname'], shell=True)
    # decode then drop the trailing newline emitted by cat
    return raw.decode('utf-8')[:-1]
def receive_message():
    """Multicast listener that maintains peer tables (run as a daemon thread).

    Message formats handled until the global ``stop`` flag is raised:
      'hello <[name, ip]>' -> register one peer host.
      'update <dict>'      -> replace the whole host table (only while
                              discovery is still in progress).
      'wt <ip> <seconds>'  -> record a peer MEC's advertised waiting time.
      'user'               -> a client requests a fresh 'update' broadcast.
    """
    global hosts
    while True:
        if stop == 1:
            print('Stopped : receive_message()')
            break
        else:
            data, address = sock1.recvfrom(1024)
            _d = data.decode()
            if _d[:5] == 'hello':
                _data = ast.literal_eval(_d[6:])
                hosts[_data[0]] = _data[1]
                # print('received: ', hosts)
                if _data[1] != host_ip:
                    # start an RTT history for every peer other than ourselves
                    mec_rtt[_data[1]] = []
            elif (_d[:6] == 'update') and (discovering == 0):
                hosts = ast.literal_eval(_d[7:])
                # print('received: ', hosts)
            elif _d[:2] == 'wt':
                split_data = _d.split()
                if split_data[1] != host_ip:
                    # peer wait time penalised by our RTT to it, folded into
                    # that peer's moving average
                    w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(
                        address[0]))  # calcuate moving average of mec wait time => w_time = wait time + rtt
                    if split_data[1] in mec_waiting_time:
                        mec_waiting_time[split_data[1]].append(w_time)
                    else:
                        mec_waiting_time[split_data[1]] = [w_time]
            elif data.decode().strip() == 'user':
                send_message('update')
def mec_comparison():
    """Return the peer MEC with the smallest last-reported wait time.

    Returns 0 when no peer wait-time data has been collected yet.
    """
    if not mec_waiting_time:
        return 0
    latest = {host: history[-1] for host, history in mec_waiting_time.items()}
    return min(latest, key=latest.get)
def cooperative_mec(mec_list):
    """Offload each task either to the best peer MEC or to the cloud.

    For every task, picks the peer with the lowest advertised wait time; falls
    back to publishing the task to the cloud when no peer data exists, the
    peer could never satisfy the task's resource need, or the peer's wait
    time exceeds the task's latency bound.

    Side effects:
        Increments the global _off_mec / _off_cloud counters and appends an
        estimated new wait time to the chosen peer's history.
    """
    global _off_cloud
    global _off_mec
    for i in mec_list:
        _host = mec_comparison()
        if _host == 0:
            # No peer wait-time data yet -> cloud.
            # send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]])  # [task_id,exec_time]
            _client.publish(cloud_ip, str([i.split('_')[0], t_time[i.split('_')[0]][0]]))
            _off_cloud += 1
            # cloud_register[i.split('_')[0].split('.')[2]] = send_back_host
            print('\n=========SENDING {} TO CLOUD==========='.format(i))
        else:
            j = i.split('_')[0]
            # Total capacity of a MEC — must cover the task's full need.
            _max = np.array([6, 5, 5])
            send = 'false'
            if not (False in list(np.greater_equal(_max, _need[j[:2]]))):
                send = 'true'
            # CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY
            if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':
                send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [j, t_time[j][0]]))
                _off_mec += 1
                # SENDS TASK TO MEC FOR EXECUTION
                mec_waiting_time[_host].append(
                    round(mec_waiting_time[_host][-1] + (t_time[j][0]) / 2, 3))  # adds a new average waiting time
                print('\n======SENDING {} TO MEC {}========='.format(i, _host))
            else:
                _client.publish(cloud_ip, str([j, t_time[j][0]]))
                _off_cloud += 1
                # send_cloud([j, t_time[j][0]])  # # [task_id,exec_time]
                # cloud_register[j.split('.')[2]] = send_back_host
                print('\n=========SENDING {} TO CLOUD==========='.format(i))
def execute_re_offloaded_task(offloaded_task):
    """Execute a batch of re-offloaded tasks in a deadlock-safe order.

    Args:
        offloaded_task: pair (task_list, {task_id: exec_time}).

    Each completed task's id is multicast back to its origin MEC (the node id
    embedded in the task id).
    """
    exec_list = get_exec_seq(offloaded_task[0])
    for i in exec_list:
        j = i.split('_')[0]
        # Halved sleep models the task's execution time — same convention as
        # execute(); TODO confirm the 1/2 scaling is intentional everywhere.
        time.sleep(offloaded_task[1][j]/2)
        send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))
def execute(local):
    """Execute locally-assigned tasks and report each completion.

    Tasks whose embedded node id differs from ours are reported back to the
    owning MEC over multicast; our own tasks are published straight to the
    client's MQTT topic.
    Task id layout appears to be '<task>.<node_id>.<client_topic>_<idx>' —
    TODO confirm against the task producer.
    """
    print('\nExecuting :', local)
    for i in local:
        j = i.split('_')[0]
        # Halved sleep stands in for the task's execution time.
        time.sleep(t_time[j][0]/2)
        print('#' * ((local.index(i) + 1) * 3), ' Executed: ', i)
        if j.split('.')[1] != node_id:
            send_offloaded_task_mec('{} {}'.format(j.split('.')[1], j))
        elif j.split('.')[1] == node_id:
            # send_client({j: get_time()}, send_back_host)
            _client.publish(j.split('.')[2], str({j: get_time()}))
    print('============== EXECUTION DONE ===============')
def receive_offloaded_task_mec():  # run as a thread
    """Listen on the offloading multicast group for peer task traffic.

    Two message kinds (our own multicast echoes are filtered via ip_set):
      '<node_id> <task>'          -> a task we offloaded finished elsewhere;
                                     forward the result to the client topic.
      'ex <node_id> [task, time]' -> a peer asks us to execute a task; queue
                                     it on reoffload_list under the lock.
    Runs until the global ``stop`` flag is raised.
    """
    global _inward_mec
    global t_track
    while True:
        if stop == 1:
            print('Stopped: receive_offloaded_task_mec()')
            break
        else:
            data, address = sock2.recvfrom(1024)
            if len(data.decode()) > 0:
                da = data.decode().split(' ')
                if (address[0] not in ip_set) and da[0] == node_id:  # send back to client
                    # send_client({da[1]: get_time()}, offload_register[da[1]])  # send back to client
                    _client.publish(da[1].split('.')[2], str({da[1]: get_time()}))
                elif (address[0] not in ip_set) and da[0] == 'ex' and da[1] == node_id:
                    # payload was split on spaces; rejoin the '[task,' 'time]'
                    # halves before literal_eval
                    _received = ast.literal_eval(da[2] + da[3])
                    shared_resource_lock.acquire()
                    # '*<t_track>' suffix keeps repeated offloads of the same
                    # task unique in the queue
                    task = _received[0] + '*{}'.format(t_track)
                    reoffload_list[0].append(task)
                    reoffload_list[1][task] = _received[1]
                    shared_resource_lock.release()
                    t_track += 1
                    _inward_mec += 1
def call_execute_re_offload():
    """Worker loop executing tasks re-offloaded to this MEC (daemon thread).

    Single queued task: sleep out its (halved) execution time and report it.
    Multiple tasks: snapshot the queue and run the batch through the
    deadlock-safe executor, then dequeue each finished task.
    Runs until the global ``stop`` flag is raised.
    """
    global reoffload_list
    while True:
        if stop == 1:
            print('Stopped: call_execute_re_offload()')
            break
        else:
            if len(reoffload_list[0]) == 1:
                t = reoffload_list[0][-1]
                time.sleep(reoffload_list[1][t]/2)
                shared_resource_lock.acquire()
                reoffload_list[0].remove(t)
                del reoffload_list[1][t]
                shared_resource_lock.release()
                send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))
            elif len(reoffload_list[0]) > 1:
                # BUGFIX: the original `reoffload_list.copy()` was a SHALLOW
                # copy, so o[0] was the same list object as reoffload_list[0];
                # the remove() calls below then mutated the list being
                # iterated and skipped every other task. Copy the inner
                # containers (under the lock) so the snapshot is independent.
                shared_resource_lock.acquire()
                o = [reoffload_list[0].copy(), reoffload_list[1].copy()]
                shared_resource_lock.release()
                execute_re_offloaded_task(o)
                for i in o[0]:
                    shared_resource_lock.acquire()
                    reoffload_list[0].remove(i)
                    del reoffload_list[1][i]
                    shared_resource_lock.release()
        time.sleep(1)
def send_email(msg):
    """Email the experiment results via Gmail SMTP-over-SSL (best effort).

    Credentials and recipient come from the external ``config`` module.
    Errors are printed, never raised, so a mail failure cannot abort the
    shutdown path that calls this.
    """
    try:
        server = smtplib.SMTP_SSL('smtp.gmail.com')
        server.ehlo()
        server.login(config.email_address, config.password)
        subject = 'Deadlock results {}'.format(get_hostname())
        # msg = 'Attendance done for {}'.format(_timer)
        _message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
        server.sendmail(config.email_address, config.send_email, _message)
        server.quit()
        print("Email sent!")
    except Exception as e:
        print(e)
def send_offloaded_task_mec(msg):
    """Multicast ``msg`` to the MEC offloading group (best effort).

    Send errors are printed rather than raised so callers in worker loops
    are never interrupted.
    """
    _multicast_group = ('224.5.5.55', 20000)
    try:
        sock2.sendto(str.encode(msg), _multicast_group)
    except Exception as e:
        print(e)
def mec_id(client_ip):
    """Derive a fixed-width 3-character MEC node id from an IPv4 address.

    Args:
        client_ip: dotted IPv4 address string, e.g. '192.168.1.5'.

    Returns:
        The final octet left-padded with zeros to width 3 ('005', '042',
        '255'), matching the original manual-padding behaviour.
    """
    # str.zfill replaces the hand-rolled length-1/length-2 padding branches.
    return client_ip.split('.')[-1].zfill(3)
def run_me():
    """Top-level driver: discover peers, start the speaker thread, main loop."""
    global discovering
    initialization()
    while True:
        # Wait until the expected number of MECs has said hello.
        if len(hosts) == mec_no:
            print('MEC Details: ', hosts)
            del hosts['speaker']   # drop the discovery pseudo-entry
            discovering = 1        # freezes 'update' handling in receive_message()
            break
        time.sleep(2)
    speak = Thread(target=speaking_node)
    thread_record.append(speak)
    speak.daemon = True
    speak.start()
    start_loop()
def send_result(host_, data):
    """Append each result line to /home/mec/result/data.py on host_ via SSH.

    NOTE(review): credentials are hard-coded and unknown host keys are
    auto-accepted — acceptable only inside the closed testbed. The SSH
    connection is never explicitly closed.
    """
    try:
        c = paramiko.SSHClient()
        un = 'mec'
        pw = 'password'
        s_port = 22
        c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        c.connect(host_, s_port, un, pw)
        for i in data:
            cmd = ('echo "{}" >> /home/mec/result/data.py'.format(i))  # task share : host ip task
            stdin, stdout, stderr = c.exec_command(cmd)
    except Exception as e:
        print(e)
def save_and_abort():
    """Persist experiment metrics locally, remotely and by email, then exit.

    Metrics are written as Python assignment statements so the resulting
    data.py can simply be imported for analysis. The process is force-killed
    at the end; daemon threads would otherwise linger in blocking recvfrom().
    """
    global stop
    _id_ = get_hostname()[-1]
    result = f"wt{_id_}_16_{mec_no} = {mec_waiting_time} " \
             f"\nrtt{_id_}_16_{mec_no} = {mec_rtt} \ncpu{_id_}_16_{mec_no} = {_cpu} " \
             f"\noff_mec{_id_}_16_{mec_no} = {_off_mec} " \
             f"\noff_cloud{_id_}_16_{mec_no} = {_off_cloud} " \
             f"\ninward_mec{_id_}_16_{mec_no} = {_inward_mec}" \
             f"\nloc{_id_}_16_{mec_no} = {_loc} " \
             f"\ndeadlock{_id_}_16_{mec_no} = {deadlock} \nmemory{_id_}_16_{mec_no} = {memory}"
    list_result = [
        f"wt{_id_}_16_{mec_no} = {mec_waiting_time} ",
        f"\nrtt{_id_}_16_{mec_no} = {mec_rtt} \ncpu{_id_}_16_{mec_no} = {_cpu} ",
        f"\noff_mec{_id_}_16_{mec_no} = {_off_mec} \noff_cloud{_id_}_16_{mec_no} = {_off_cloud} ",
        f"\ninward_mec{_id_}_16_{mec_no} = {_inward_mec}",
        f"\nloc{_id_}_16_{mec_no} = {_loc} ",
        f"\ndeadlock{_id_}_16_{mec_no} = {deadlock} \nmemory{_id_}_16_{mec_no} = {memory}"
    ]
    for i in list_result:
        cmd = 'echo "{}" >> data.py'.format(i)
        os.system(cmd)
    # collect results on the designated collector host
    send_result(hosts['osboxes-0'], list_result)
    send_email(result)
    stop += 1  # signals every worker-thread loop to exit
    '''
    for i in thread_record:
        i.join()
    '''
    _client.loop_stop()
    time.sleep(1)
    print('done')
    os.system('kill -9 {}'.format(os.getpid()))
def start_loop():
    """Main emulation loop: start workers, then schedule/execute tasks.

    Pops task batches from received_task_queue, runs EDF + the deadlock
    algorithm, splits work between local execution and the cooperative
    platform, and aborts cleanly after 3 idle minutes or Ctrl-C.
    """
    global _loc
    global tasks
    global t_time
    global node_id
    global stop
    print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')
    node_id = mec_id(ip_address())
    # print('node id: ', node_id)
    _threads_ = [receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]
    for i in _threads_:
        # BUGFIX: the original created TWO Thread objects per target —
        # `Thread(target=i).daemon = True` set the flag on a thread that was
        # immediately discarded, and `Thread(target=i).start()` started a
        # separate NON-daemon thread, which could keep the process alive.
        worker = Thread(target=i)
        worker.daemon = True
        worker.start()
    time.sleep(2)
    send_message('client')  # send mec details to clients
    x = gp.getpass('Press any key to Start...').lower()
    if x != 'exit':
        print('========= Waiting for tasks ==========')
        _time_ = dt.datetime.now()
        while True:
            try:
                if len(received_task_queue) > 0:
                    info = received_task_queue.pop(0)
                    tasks, t_time = info
                    print('EDF List of Processes: ', tasks, '\n')
                    print('\n========= Running Deadlock Algorithm ===========')
                    list_seq = get_exec_seq(edf())
                    if len(list_seq) > 0:  # do only when there is a task in safe sequence
                        wait_list = calc_wait_time(list_seq)
                        print('\nWaiting Time List: ', wait_list)
                        compare_result = compare_local_mec(wait_list)
                        print('\nExecute Locally: ', compare_result[1])
                        _loc += len(compare_result[1])  # total number of tasks to be executed locally
                        print('\nExecute in MEC: ', compare_result[0])
                        print('\nSending to cooperative platform')
                        if len(compare_result[0]) > 0:
                            cooperative_mec(compare_result[0])
                        execute(compare_result[1])
                        generate_results()
                        # reset the idle timer after a productive round
                        _time_ = dt.datetime.now()
                else:
                    # idle: advertise zero wait time and check the 3-minute
                    # inactivity shutdown window
                    send_message(str('wt {} 0.0'.format(ip_address())))
                    time.sleep(.5)
                    now = dt.datetime.now()
                    delta = now - _time_
                    if delta > dt.timedelta(minutes=3):
                        print('terminating programme 3 mins elapsed')
                        save_and_abort()
                        break
            except KeyboardInterrupt:
                print('\nProgramme Terminated')
                save_and_abort()
                break
def speaking_node():
    """Re-broadcast the host table whenever a new peer joins (daemon thread).

    ``hosts`` still contains the 'speaker' pseudo-entry here, hence the
    (mec_no - 1) comparison and the +1 when updating mec_no.
    """
    global mec_no
    while True:
        if len(hosts) > (mec_no - 1):
            send_message('update')
            mec_no = len(hosts) + 1
        time.sleep(2)
def initialization():
    """Interactive setup: record addresses, start listeners, send hello.

    Prompts for the expected MEC count and the cloud server IP, starts the
    discovery and offloading listener threads, then loops until the operator
    confirms the hello broadcast.

    NOTE(review): receive_offloaded_task_mec is also started by start_loop(),
    so two copies of that listener end up running — confirm whether that is
    intentional before changing it.
    """
    global mec_no
    global host_ip
    global cloud_ip
    host_ip = ip_address()
    # Probe socket used only to learn our outward-facing address; close it
    # afterwards (the original leaked this file descriptor).
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("8.8.8.8", 80))
    print('Broker IP: ', s.getsockname()[0])
    s.close()
    try:
        mec_no = int(input('Number of MECs: ').strip())
        cloud_ip = input('Cloud Server IP: ').strip()
        print('\nCompiling MEC Details')
        h1 = Thread(target=receive_message)
        h2 = Thread(target=receive_offloaded_task_mec)
        h1.daemon = True
        h2.daemon = True
        h1.start()
        h2.start()
        time.sleep(1.5)
        while True:
            b = input('Send Hello Message (Y/N): ').strip().lower()
            if b == 'y':
                send_message('hello')
                break
            else:
                print('\nPlease Type "y" to send Hello message\n')
    except KeyboardInterrupt:
        print('\nProgramme Terminated')
        exit(0)
def main():
    """Program entry point: set up process handle, multicast groups, then run."""
    global algo
    os.system('clear')
    print('mec ip: ', ip_address())
    # psutil handle for sampling this process's CPU/memory during the run
    algo = psutil.Process()
    discovering_group()
    offloading_group()
    host_ip_set()
    run_me()
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind node over HTTP (Python 2)."""
    # Request id counter; `self.OBJID += 1` turns it into an instance
    # attribute on first use, so each client counts independently.
    OBJID = 1

    def __init__(self, host, port, username, password):
        # Pre-build the HTTP Basic auth header used on every request.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC call; return its 'result' or None on failure.

        NOTE(review): when the server reports an RPC-level error, the *error
        object* itself is returned — callers cannot distinguish it from a
        successful result.
        """
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        """Return the node's current block height."""
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        """Fetch new work (data=None) or submit a solved block (data given)."""
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate x to an unsigned 32-bit value."""
    return x & 0xffffffffL
def bytereverse(x):
    """Reverse the byte order of a 32-bit word (endianness swap)."""
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
            (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
    """Byte-swap each 32-bit word of a buffer.

    The buffer length is assumed to be a multiple of 4 (struct.unpack would
    raise otherwise).
    """
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 32-bit words in a buffer.

    Bytes within each word are left untouched (compare bufreverse).
    """
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i+4])
    out_words.reverse()
    return ''.join(out_words)
class Miner:
    """Single-threaded getwork miner: fetch work, scan nonces, submit solutions."""
    def __init__(self, id):
        # id: worker index, used only in log output.
        self.id = id
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces over the given block header.

        Returns (hashes_done, nonce_bin) where nonce_bin is the packed
        winning nonce, or None if no solution was found within
        self.max_nonce attempts.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header (reused for every nonce)
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
            # return (nonce + 1, nonce_bin)
        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the work data and send it upstream."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # the nonce occupies header bytes 76..80 -> hex chars 152..160
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One round: fetch work, scan, retune scan size, submit if solved."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                          work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # scale the next scan so it lasts roughly settings['scantime'] seconds
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Mine forever against the configured RPC endpoint."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                 settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Process entry point: run one Miner's fetch/scan/submit loop forever."""
    miner = Miner(id)
    miner.loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)
    # Parse the key=value config file; '#' lines are comments, anything that
    # doesn't match key=value is silently ignored.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()
    # Defaults for anything not supplied in the config file.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 9732
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)
    # All config values arrive as strings; normalise the numeric ones.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])
    # One OS process per "thread" (true parallelism despite the name).
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)      # stagger threads
    print settings['threads'], "mining threads started"
    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
kaldi.py | # -*- coding: UTF-8 -*-
from __future__ import absolute_import
from gi.repository import GObject
from threading import Thread
from .decoder import DecoderPipeline
import time
import pyaudio # Provides Python bindings for PortAudio, the cross platform audio API
import audioop # Operates on sound fragments consisting of signed integer samples 8, 16 or 32 bits wide, stored in Python strings.
from dragonfire import VirtualAssistant
import os
from ctypes import *
from contextlib import contextmanager
CHUNK = 8000 # Smallest unit of audio. 1024 bytes
FORMAT = pyaudio.paInt16 # Data format
CHANNELS = 1 # Number of channels
RATE = 16000 # Bit Rate of audio stream / Frame Rate
THRESHOLD = 1000 # Threshhold value for detecting stimulant
SILENCE_DETECTION = 5 # Wait number of frames to decide whether it fell silent or not
LISTENING = False
ENGLISH_MODEL_PATH = os.path.dirname(os.path.realpath(__file__)) + "/models/english/"
class KaldiRecognizer():
    """Microphone speech recognizer built on a Kaldi/GStreamer decoder.

    Listens to the default input device, starts a decode request whenever
    the RMS level crosses THRESHOLD, and dispatches each recognized
    utterance to VirtualAssistant.command on a worker thread.

    Note: ``words`` and ``finished`` are CLASS-level state (set via
    @classmethod handlers), so all instances share one transcript buffer.
    """
    def __init__(self):
        #logging.basicConfig(level=logging.INFO)
        # voxforge/tri2b_mmi_b0.05 model:
        decoder_conf = {"model" : ENGLISH_MODEL_PATH + "final.mdl",
                        "lda-mat" : ENGLISH_MODEL_PATH + "final.mat",
                        "word-syms" : ENGLISH_MODEL_PATH + "words.txt",
                        "fst" : ENGLISH_MODEL_PATH + "HCLG.fst",
                        "silence-phones" : "6"}
        self.decoder_pipeline = DecoderPipeline({"decoder" : decoder_conf})
        self.__class__.words = []
        self.__class__.finished = False

        self.decoder_pipeline.set_word_handler(self.word_getter)
        self.decoder_pipeline.set_eos_handler(self.set_finished, self.finished)

        GObject.threads_init()
        self.loop = GObject.MainLoop()
        # run the GLib main loop on its own thread so recognize() can block
        self.gi_thread = Thread(target=self.loop.run, args=())
        self.gi_thread.start()

    @classmethod
    def word_getter(self, word):
        # Decoder callback: accumulate words of the current utterance.
        # (first parameter is actually the class despite the 'self' name)
        self.words.append(word)

    @classmethod
    def set_finished(self, finished):
        # End-of-stream callback; the argument is ignored and the flag is
        # simply latched True.
        self.finished = True

    def reset(self):
        """Clear the shared transcript buffer and end-of-stream flag."""
        self.__class__.words = []
        self.__class__.finished = False

    def recognize(self, args):
        """Blocking microphone loop; ``args`` is forwarded to VirtualAssistant.command."""
        with noalsaerr():
            p = pyaudio.PyAudio()  # Create a PyAudio session
            # Create a stream
            stream = p.open(format=FORMAT,
                            channels=CHANNELS,
                            rate=RATE,
                            input=True,
                            output=True,
                            frames_per_buffer=CHUNK)
        try:
            data = stream.read(CHUNK)  # Get first data frame from the microphone
            # Loop over the frames of the audio / data chunks
            while data != '':
                rms = audioop.rms(data, 2)  # Calculate Root Mean Square of current chunk
                if rms >= THRESHOLD:  # If Root Mean Square value is greater than THRESHOLD constant
                    self.decoder_pipeline.init_request("recognize", "audio/x-raw, layout=(string)interleaved, rate=(int)16000, format=(string)S16LE, channels=(int)1")
                    self.decoder_pipeline.process_data(data)
                    silence_counter = 0  # Define silence counter
                    # Keep feeding audio until SILENCE_DETECTION consecutive
                    # quiet chunks mark the end of the utterance.
                    while silence_counter < SILENCE_DETECTION:  # While silence counter value less than SILENCE_DETECTION constant
                        data = stream.read(CHUNK)  # Read a new chunk from the stream
                        if LISTENING: stream.write(data, CHUNK)
                        self.decoder_pipeline.process_data(data)
                        rms = audioop.rms(data, 2)  # Calculate Root Mean Square of current chunk again
                        if rms < THRESHOLD:  # If Root Mean Square value is less than THRESHOLD constant
                            silence_counter += 1  # Then increase silence counter
                        else:  # Else
                            silence_counter = 0  # Assign zero value to silence counter
                    stream.stop_stream()
                    self.decoder_pipeline.end_request()
                    # wait for the decoder's end-of-stream callback
                    while not self.finished:
                        time.sleep(0.1)
                    stream.start_stream()
                    words = self.words
                    words = [x for x in words if x != '<#s>']  # drop sentence markers
                    com = ' '.join(words)
                    # hand the command off so recognition keeps running
                    t = Thread(target=VirtualAssistant.command, args=(com, args))
                    t.start()
                    self.reset()
                data = stream.read(CHUNK)  # Read a new chunk from the stream
                if LISTENING: stream.write(data, CHUNK)
        except KeyboardInterrupt:
            stream.stop_stream()
            stream.close()
            p.terminate()
            self.loop.quit()
            raise KeyboardInterrupt
ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p)
def py_error_handler(filename, line, function, err, fmt):
    """ALSA error callback that swallows every message (keeps stderr clean)."""
    pass
c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)
@contextmanager
def noalsaerr():
    """Silence ALSA's stderr chatter for the duration of the with-block.

    NOTE(review): on exit the handler is reset to None rather than restored
    to whatever was installed before — fine for this script's single use.
    """
    asound = cdll.LoadLibrary('libasound.so')
    asound.snd_lib_error_set_handler(c_error_handler)
    yield
    asound.snd_lib_error_set_handler(None)
if __name__ == '__main__':
    recognizer = KaldiRecognizer()
    # BUGFIX: recognize() requires an `args` parameter (forwarded to
    # VirtualAssistant.command); the original bare call raised TypeError
    # before any audio was processed. Pass None for a standalone run.
    recognizer.recognize(None)
|
pydoc.py | #!/usr/bin/env python
# -*- coding: latin-1 -*-
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages. Port number 0 can be
used to get an arbitrary unused port.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://docs.python.org/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__version__ = "$Revision: 88564 $"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import sys, imp, os, re, types, inspect, __builtin__, pkgutil, warnings
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
from traceback import extract_tb
try:
    from collections import deque
except ImportError:
    # Python 2.3 compatibility
    # Minimal stand-in supporting only the popleft() used by this module.
    class deque(list):
        def popleft(self):
            return self.pop(0)
# --------------------------------------------------------- common routines
def pathdirs():
    """Convert sys.path into a list of absolute, existing, unique paths."""
    dirs = []
    seen = set()
    for entry in sys.path:
        path = os.path.abspath(entry or '.')
        key = os.path.normcase(path)   # dedupe case-insensitively on e.g. Windows
        if key not in seen and os.path.isdir(path):
            dirs.append(path)
            seen.add(key)
    return dirs
def getdoc(object):
    """Get the doc string or comments for an object.

    Falls back from the docstring to preceding comments; the result is
    encoded via _encode and stripped of leading blank lines and trailing
    whitespace. Returns '' when neither exists.
    """
    result = inspect.getdoc(object) or inspect.getcomments(object)
    result = _encode(result)
    return result and re.sub('^ *\n', '', rstrip(result)) or ''
def splitdoc(doc):
    """Split a doc string into a synopsis line (if any) and the rest.

    The synopsis is recognised either as the only line, or as a first line
    followed by a blank separator line; otherwise the synopsis is ''.
    (join/split/strip/rstrip are the Python 2 string-module functions.)
    """
    lines = split(strip(doc), '\n')
    if len(lines) == 1:
        return lines[0], ''
    elif len(lines) >= 2 and not rstrip(lines[1]):
        return lines[0], join(lines[2:], '\n')
    return '', join(lines, '\n')
def classname(object, modname):
    """Get a class name and qualify it with a module name if necessary."""
    name = object.__name__
    # Qualify only when the class comes from a different module than the
    # one currently being documented.
    if object.__module__ == modname:
        return name
    return object.__module__ + '.' + name
def isdata(object):
    """Check if an object is of a type that probably means it's data."""
    # Anything that is not a module, class, routine, frame, traceback or
    # code object is treated as plain data.
    non_data_checks = (inspect.ismodule, inspect.isclass, inspect.isroutine,
                       inspect.isframe, inspect.istraceback, inspect.iscode)
    return not any(check(object) for check in non_data_checks)
def replace(text, *pairs):
    """Do a series of global replacements on a string.

    ``pairs`` is a flat sequence (old1, new1, old2, new2, ...); each pair is
    applied in order via the Python 2 string-module split/join functions.
    """
    while pairs:
        text = join(split(text, pairs[0]), pairs[1])
        pairs = pairs[2:]
    return text
def cram(text, maxlen):
    """Omit part of a string if needed to make it fit in a maximum length."""
    if len(text) <= maxlen:
        return text
    # keep a prefix and suffix around a '...' marker, splitting the budget
    pre = max(0, (maxlen - 3) // 2)
    post = max(0, maxlen - 3 - pre)
    return text[:pre] + '...' + text[len(text) - post:]
# Matches the trailing " at 0x<address>" of a default repr, capturing the
# closing '>'s so they can be kept.
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
    """Remove the hexadecimal id from a Python object representation."""
    # The behaviour of %p is implementation-dependent in terms of case.
    return _re_stripid.sub(r'\1', text)
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
    """Return {name: method} for all methods of cl, including inherited ones."""
    methods = {}
    for key, value in inspect.getmembers(cl, _is_some_method):
        methods[key] = 1
    for base in cl.__bases__:
        methods.update(allmethods(base)) # all your base are belong to us
    for key in methods.keys():
        # Re-fetch through getattr so each name maps to the attribute as seen
        # on cl itself (honouring overrides), not the base-class version
        # collected during the recursive walk.
        methods[key] = getattr(cl, key)
    return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None, obj=None):
    """Decide whether to show documentation on a variable."""
    # Certain special names are redundant.
    if name in ('__builtins__', '__doc__', '__file__', '__path__',
                '__module__', '__name__', '__slots__', '__package__'):
        return 0
    # Private names are hidden, but special (dunder) names are displayed.
    if name.startswith('__') and name.endswith('__'):
        return 1
    # Namedtuples have public fields and methods with a single leading underscore.
    if name.startswith('_') and hasattr(obj, '_fields'):
        return 1
    if all is not None:
        # Only document that which the programmer exported in __all__.
        return name in all
    return not name.startswith('_')
def classify_class_attrs(object):
    """Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
    def fixup(data):
        # Re-tag data descriptors, which inspect reports under other kinds.
        name, kind, cls, value = data
        if inspect.isdatadescriptor(value):
            kind = 'data descriptor'
        return name, kind, cls, value
    # Python 2 map: returns a list of fixed-up (name, kind, class, value) tuples.
    return map(fixup, inspect.classify_class_attrs(object))
# ----------------------------------------------------- Unicode support helpers
try:
    _unicode = unicode
except NameError:
    # If Python is built without Unicode support, the unicode type
    # will not exist. Fake one that nothing will match, and make
    # the _encode function that do nothing.
    class _unicode(object):
        pass
    _encoding = 'ascii'
    def _encode(text, encoding='ascii'):
        # No-op: without the unicode type there is nothing to encode.
        return text
else:
    import locale
    _encoding = locale.getpreferredencoding()
    def _encode(text, encoding=None):
        # Encode unicode text (default: the locale's preferred encoding),
        # escaping unrepresentable characters as XML character references;
        # byte strings pass through untouched.
        if isinstance(text, unicode):
            return text.encode(encoding or _encoding, 'xmlcharrefreplace')
        else:
            return text
def _binstr(obj):
    # Ensure that we have an encoded (binary) string representation of obj,
    # even if it is a unicode string.
    if isinstance(obj, _unicode):
        return obj.encode(_encoding, 'xmlcharrefreplace')
    return str(obj)
# ----------------------------------------------------- module manipulation
def ispackage(path):
    """Guess whether a path refers to a package directory."""
    # A package is a directory containing an __init__ module in any
    # source/compiled form.
    return os.path.isdir(path) and any(
        os.path.isfile(os.path.join(path, '__init__' + ext))
        for ext in ('.py', '.pyc', '.pyo'))
def source_synopsis(file):
    """Return the first docstring line of an open module file, or None.

    Skips leading comment and blank lines; only recognises docstrings that
    use double-quote tripling (plain or raw).
    """
    line = file.readline()
    while line[:1] == '#' or not strip(line):
        line = file.readline()
        if not line: break
    line = strip(line)
    if line[:4] == 'r"""': line = line[1:]
    if line[:3] == '"""':
        line = line[3:]
        if line[-1:] == '\\': line = line[:-1]
        # skip blank lines inside the docstring until the synopsis line
        while not strip(line):
            line = file.readline()
            if not line: break
        result = strip(split(line, '"""')[0])
    else: result = None
    return result
def synopsis(filename, cache={}):
    """Get the one-line summary out of a module file.

    Results are memoized in the (deliberately shared) mutable default
    ``cache``, keyed by filename and invalidated via the file's mtime.
    """
    mtime = os.stat(filename).st_mtime
    lastupdate, result = cache.get(filename, (None, None))
    if lastupdate is None or lastupdate < mtime:
        info = inspect.getmoduleinfo(filename)
        try:
            file = open(filename)
        except IOError:
            # module can't be opened, so skip it
            return None
        if info and 'b' in info[2]: # binary modules have to be imported
            try: module = imp.load_module('__temp__', file, filename, info[1:])
            except: return None
            result = (module.__doc__ or '').splitlines()[0]
            # drop the throwaway module so repeated calls do not collide
            del sys.modules['__temp__']
        else: # text modules can be directly examined
            result = source_synopsis(file)
            file.close()
        cache[filename] = (mtime, result)
    return result
class ErrorDuringImport(Exception):
    """Errors that occurred while trying to import something to document it."""
    def __init__(self, filename, exc_info):
        # exc_info is a (type, value, traceback) triple as returned by
        # sys.exc_info().
        exc, value, tb = exc_info
        self.filename = filename  # path of the file whose import failed
        self.exc = exc            # exception type
        self.value = value        # exception instance
        self.tb = tb              # traceback object
    def __str__(self):
        exc = self.exc
        if type(exc) is types.ClassType:
            # Old-style (classic) exception class: show its name rather
            # than the class repr.  Python 2 only.
            exc = exc.__name__
        return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
    """Import a Python source file or compiled file given its path.

    Any exception raised during the import is wrapped in
    ErrorDuringImport and re-raised.
    """
    magic = imp.get_magic()
    # Sniff the leading bytes to decide whether this is a compiled file.
    file = open(path, 'r')
    try:
        is_compiled = file.read(len(magic)) == magic
    finally:
        file.close()
    kind = imp.PY_COMPILED if is_compiled else imp.PY_SOURCE
    filename = os.path.basename(path)
    name, ext = os.path.splitext(filename)
    file = open(path, 'r')
    try:
        module = imp.load_module(name, file, path, (ext, 'r', kind))
    except:
        # Deliberately broad: wrap any failure (SyntaxError, ImportError,
        # errors in module code) for the caller to report.
        raise ErrorDuringImport(path, sys.exc_info())
    finally:
        # Previously the handle leaked whenever load_module raised,
        # because close() only ran on the success path.
        file.close()
    return module
def safeimport(path, forceload=0, cache={}):
    """Import a module; handle errors; return None if the module isn't found.
    If the module *is* found but an exception occurs, it's wrapped in an
    ErrorDuringImport exception and reraised. Unlike __import__, if a
    package path is specified, the module at the end of the path is returned,
    not the package at the beginning. If the optional 'forceload' argument
    is 1, we reload the module from disk (unless it's a dynamic extension).

    'cache' is a deliberate mutable default: it pins removed modules so
    they aren't garbage-collected while being re-imported.
    """
    try:
        # If forceload is 1 and the module has been previously loaded from
        # disk, we always have to reload the module. Checking the file's
        # mtime isn't good enough (e.g. the module could contain a class
        # that inherits from another module that has changed).
        if forceload and path in sys.modules:
            if path not in sys.builtin_module_names:
                # Avoid simply calling reload() because it leaves names in
                # the currently loaded module lying around if they're not
                # defined in the new source file. Instead, remove the
                # module from sys.modules and re-import. Also remove any
                # submodules because they won't appear in the newly loaded
                # module's namespace if they're already in sys.modules.
                subs = [m for m in sys.modules if m.startswith(path + '.')]
                for key in [path] + subs:
                    # Prevent garbage collection.
                    cache[key] = sys.modules[key]
                    del sys.modules[key]
        module = __import__(path)
    except:
        # Did the error occur before or after the module was found?
        (exc, value, tb) = info = sys.exc_info()
        if path in sys.modules:
            # An error occurred while executing the imported module.
            raise ErrorDuringImport(sys.modules[path].__file__, info)
        elif exc is SyntaxError:
            # A SyntaxError occurred before we could execute the module.
            raise ErrorDuringImport(value.filename, info)
        elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport':
            # The import error occurred directly in this function,
            # which means there is no such module in the path.
            return None
        else:
            # Some other error occurred during the importing process.
            raise ErrorDuringImport(path, sys.exc_info())
    # __import__ returns the top-level package; walk down the dotted
    # path to reach the requested submodule.
    for part in split(path, '.')[1:]:
        try: module = getattr(module, part)
        except AttributeError: return None
    return module
# ---------------------------------------------------- formatter base class
class Doc:
    """Base class for documentation formatters.

    Subclasses override the doc* methods (docmodule, docclass, docroutine,
    docother, docproperty, docdata); anything not overridden falls through
    to fail(), which raises TypeError.
    """
    def document(self, object, name=None, *args):
        """Generate documentation for an object."""
        args = (object, name) + args
        # 'try' clause is to attempt to handle the possibility that inspect
        # identifies something in a way that pydoc itself has issues handling;
        # think 'super' and how it is a descriptor (which raises the exception
        # by lacking a __name__ attribute) and an instance.
        if inspect.isgetsetdescriptor(object): return self.docdata(*args)
        if inspect.ismemberdescriptor(object): return self.docdata(*args)
        try:
            if inspect.ismodule(object): return self.docmodule(*args)
            if inspect.isclass(object): return self.docclass(*args)
            if inspect.isroutine(object): return self.docroutine(*args)
        except AttributeError:
            pass
        if isinstance(object, property): return self.docproperty(*args)
        return self.docother(*args)
    def fail(self, object, name=None, *args):
        """Raise an exception for unimplemented types."""
        message = "don't know how to document object%s of type %s" % (
            name and ' ' + repr(name), type(object).__name__)
        raise TypeError, message
    # Default every formatter entry point to fail(); subclasses override.
    docmodule = docclass = docroutine = docother = docproperty = docdata = fail
    def getdocloc(self, object):
        """Return the location of module docs or None"""
        try:
            file = inspect.getabsfile(object)
        except TypeError:
            file = '(built-in)'
        # PYTHONDOCS may point at a local directory or an alternate URL.
        docloc = os.environ.get("PYTHONDOCS",
                                "http://docs.python.org/library")
        basedir = os.path.join(sys.exec_prefix, "lib",
                               "python"+sys.version[0:3])
        # Only standard-library modules (by name, or by living under the
        # stdlib directory but not site-packages) get a docs link.
        if (isinstance(object, type(os)) and
            (object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
                                 'marshal', 'posix', 'signal', 'sys',
                                 'thread', 'zipimport') or
             (file.startswith(basedir) and
              not file.startswith(os.path.join(basedir, 'site-packages')))) and
            object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
            if docloc.startswith("http://"):
                docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
            else:
                docloc = os.path.join(docloc, object.__name__ + ".html")
        else:
            docloc = None
        return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
    """Class for safely making an HTML representation of a Python object."""
    def __init__(self):
        Repr.__init__(self)
        # Allow a few more items/characters than the Repr defaults.
        self.maxlist = self.maxtuple = 20
        self.maxdict = 10
        self.maxstring = self.maxother = 100
    def escape(self, text):
        """Escape HTML metacharacters in text.

        '&' must be replaced first so the ampersands introduced by
        '&lt;'/'&gt;' are not themselves re-escaped.  (The previous code
        replaced each character with itself, i.e. escaped nothing.)
        """
        return text.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
    def repr(self, object):
        return Repr.repr(self, object)
    def repr1(self, x, level):
        # Dispatch to a type-specific repr_* method when one exists.
        if hasattr(type(x), '__name__'):
            methodname = 'repr_' + join(split(type(x).__name__), '_')
            if hasattr(self, methodname):
                return getattr(self, methodname)(x, level)
        return self.escape(cram(stripid(repr(x)), self.maxother))
    def repr_string(self, x, level):
        test = cram(x, self.maxstring)
        testrepr = repr(test)
        if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
            # Backslashes are only literal in the string and are never
            # needed to make any special characters, so show a raw string.
            return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
        # Colorize escape sequences within the repr.
        return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
                      r'<font color="#c040c0">\1</font>',
                      self.escape(testrepr))
    repr_str = repr_string
    def repr_instance(self, x, level):
        try:
            return self.escape(cram(stripid(repr(x)), self.maxstring))
        except:
            # repr() of the instance itself may raise; fall back to a
            # generic placeholder.
            return self.escape('<%s instance>' % x.__class__.__name__)
    repr_unicode = repr_string
class HTMLDoc(Doc):
    """Formatter class for HTML documentation."""
    # ------------------------------------------- HTML formatting utilities
    _repr_instance = HTMLRepr()
    repr = _repr_instance.repr
    escape = _repr_instance.escape
    def page(self, title, contents):
        """Format an HTML page."""
        return _encode('''
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
<meta charset="utf-8">
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents), 'ascii')
    def heading(self, title, fgcol, bgcol, extras=''):
        """Format a page heading."""
        # &nbsp; placeholders keep empty table cells from collapsing.
        # (These entity literals had been corrupted into plain spaces.)
        return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
    ''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')
    def section(self, title, fgcol, bgcol, contents, width=6,
                prelude='', marginalia=None, gap='&nbsp;'):
        """Format a section with a heading."""
        if marginalia is None:
            marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
        result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
    ''' % (bgcol, fgcol, title)
        if prelude:
            result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
        else:
            result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
        return result + '\n<td width="100%%">%s</td></tr></table>' % contents
    def bigsection(self, title, *args):
        """Format a section with a big heading."""
        title = '<big><strong>%s</strong></big>' % title
        return self.section(title, *args)
    def preformat(self, text):
        """Format literal preformatted text."""
        text = self.escape(expandtabs(text))
        # Blank lines get a single-space line so they survive HTML
        # whitespace collapsing; remaining spaces become &nbsp;.
        # (The space->&nbsp; pair had been corrupted into a no-op.)
        return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
                             ' ', '&nbsp;', '\n', '<br>\n')
    def multicolumn(self, list, format, cols=4):
        """Format a list of items into a multi-column list."""
        result = ''
        rows = (len(list)+cols-1)//cols
        for col in range(cols):
            result = result + '<td width="%d%%" valign=top>' % (100//cols)
            for i in range(rows*col, rows*col+rows):
                if i < len(list):
                    result = result + format(list[i]) + '<br>\n'
            result = result + '</td>'
        return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
    def grey(self, text): return '<font color="#909090">%s</font>' % text
    def namelink(self, name, *dicts):
        """Make a link for an identifier, given name-to-URL mappings."""
        for dict in dicts:
            if name in dict:
                return '<a href="%s">%s</a>' % (dict[name], name)
        return name
    def classlink(self, object, modname):
        """Make a link for a class."""
        name, module = object.__name__, sys.modules.get(object.__module__)
        if hasattr(module, name) and getattr(module, name) is object:
            return '<a href="%s.html#%s">%s</a>' % (
                module.__name__, name, classname(object, modname))
        return classname(object, modname)
    def modulelink(self, object):
        """Make a link for a module."""
        return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
    def modpkglink(self, data):
        """Make a link for a module or package to display in an index."""
        name, path, ispackage, shadowed = data
        if shadowed:
            return self.grey(name)
        if path:
            url = '%s.%s.html' % (path, name)
        else:
            url = '%s.html' % name
        if ispackage:
            text = '<strong>%s</strong> (package)' % name
        else:
            text = name
        return '<a href="%s">%s</a>' % (url, text)
    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
        """Mark up some plain text, given a context of symbols to look for.
        Each context dictionary maps object names to anchor names."""
        escape = escape or self.escape
        results = []
        here = 0
        pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
                                r'RFC[- ]?(\d+)|'
                                r'PEP[- ]?(\d+)|'
                                r'(self\.)?(\w+))')
        while True:
            match = pattern.search(text, here)
            if not match: break
            start, end = match.span()
            results.append(escape(text[here:start]))
            all, scheme, rfc, pep, selfdot, name = match.groups()
            if scheme:
                # Quote double quotes so the URL can sit inside href="...".
                # (This replace had been corrupted into a no-op '"' -> '"'.)
                url = escape(all).replace('"', '&quot;')
                results.append('<a href="%s">%s</a>' % (url, url))
            elif rfc:
                url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif pep:
                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif selfdot:
                # Create a link for methods like 'self.method(...)'
                # and use <strong> for attributes like 'self.attr'
                if text[end:end+1] == '(':
                    results.append('self.' + self.namelink(name, methods))
                else:
                    results.append('self.<strong>%s</strong>' % name)
            elif text[end:end+1] == '(':
                results.append(self.namelink(name, methods, funcs, classes))
            else:
                results.append(self.namelink(name, classes))
            here = end
        results.append(escape(text[here:]))
        return join(results, '')
    # ---------------------------------------------- type-specific routines
    def formattree(self, tree, modname, parent=None):
        """Produce HTML for a class tree as given by inspect.getclasstree()."""
        result = ''
        for entry in tree:
            if type(entry) is type(()):
                c, bases = entry
                result = result + '<dt><font face="helvetica, arial">'
                result = result + self.classlink(c, modname)
                if bases and bases != (parent,):
                    parents = []
                    for base in bases:
                        parents.append(self.classlink(base, modname))
                    result = result + '(' + join(parents, ', ') + ')'
                result = result + '\n</font></dt>'
            elif type(entry) is type([]):
                result = result + '<dd>\n%s</dd>\n' % self.formattree(
                    entry, modname, c)
        return '<dl>\n%s</dl>\n' % result
    def docmodule(self, object, name=None, mod=None, *ignored):
        """Produce HTML documentation for a module object."""
        name = object.__name__ # ignore the passed-in name
        try:
            all = object.__all__
        except AttributeError:
            all = None
        parts = split(name, '.')
        links = []
        for i in range(len(parts)-1):
            links.append(
                '<a href="%s.html"><font color="#ffffff">%s</font></a>' %
                (join(parts[:i+1], '.'), parts[i]))
        linkedname = join(links + parts[-1:], '.')
        head = '<big><big><strong>%s</strong></big></big>' % linkedname
        try:
            path = inspect.getabsfile(object)
            url = path
            if sys.platform == 'win32':
                import nturl2path
                url = nturl2path.pathname2url(path)
            filelink = '<a href="file:%s">%s</a>' % (url, path)
        except TypeError:
            filelink = '(built-in)'
        info = []
        if hasattr(object, '__version__'):
            version = _binstr(object.__version__)
            # Unwrap an RCS '$Revision: ... $' keyword if present.
            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
                version = strip(version[11:-1])
            info.append('version %s' % self.escape(version))
        if hasattr(object, '__date__'):
            info.append(self.escape(_binstr(object.__date__)))
        if info:
            head = head + ' (%s)' % join(info, ', ')
        docloc = self.getdocloc(object)
        if docloc is not None:
            docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
        else:
            docloc = ''
        result = self.heading(
            head, '#ffffff', '#7799ee',
            '<a href=".">index</a><br>' + filelink + docloc)
        modules = inspect.getmembers(object, inspect.ismodule)
        classes, cdict = [], {}
        for key, value in inspect.getmembers(object, inspect.isclass):
            # if __all__ exists, believe it. Otherwise use old heuristic.
            if (all is not None or
                (inspect.getmodule(value) or object) is object):
                if visiblename(key, all, object):
                    classes.append((key, value))
                    cdict[key] = cdict[value] = '#' + key
        for key, value in classes:
            for base in value.__bases__:
                key, modname = base.__name__, base.__module__
                module = sys.modules.get(modname)
                if modname != name and module and hasattr(module, key):
                    if getattr(module, key) is base:
                        if not key in cdict:
                            cdict[key] = cdict[base] = modname + '.html#' + key
        funcs, fdict = [], {}
        for key, value in inspect.getmembers(object, inspect.isroutine):
            # if __all__ exists, believe it. Otherwise use old heuristic.
            if (all is not None or
                inspect.isbuiltin(value) or inspect.getmodule(value) is object):
                if visiblename(key, all, object):
                    funcs.append((key, value))
                    fdict[key] = '#-' + key
                    if inspect.isfunction(value): fdict[value] = fdict[key]
        data = []
        for key, value in inspect.getmembers(object, isdata):
            if visiblename(key, all, object):
                data.append((key, value))
        doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
        doc = doc and '<tt>%s</tt>' % doc
        result = result + '<p>%s</p>\n' % doc
        if hasattr(object, '__path__'):
            modpkgs = []
            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
                modpkgs.append((modname, name, ispkg, 0))
            modpkgs.sort()
            contents = self.multicolumn(modpkgs, self.modpkglink)
            result = result + self.bigsection(
                'Package Contents', '#ffffff', '#aa55cc', contents)
        elif modules:
            contents = self.multicolumn(
                modules, lambda key_value, s=self: s.modulelink(key_value[1]))
            result = result + self.bigsection(
                'Modules', '#ffffff', '#aa55cc', contents)
        if classes:
            classlist = map(lambda key_value: key_value[1], classes)
            contents = [
                self.formattree(inspect.getclasstree(classlist, 1), name)]
            for key, value in classes:
                contents.append(self.document(value, key, name, fdict, cdict))
            result = result + self.bigsection(
                'Classes', '#ffffff', '#ee77aa', join(contents))
        if funcs:
            contents = []
            for key, value in funcs:
                contents.append(self.document(value, key, name, fdict, cdict))
            result = result + self.bigsection(
                'Functions', '#ffffff', '#eeaa77', join(contents))
        if data:
            contents = []
            for key, value in data:
                contents.append(self.document(value, key))
            result = result + self.bigsection(
                'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
        if hasattr(object, '__author__'):
            contents = self.markup(_binstr(object.__author__), self.preformat)
            result = result + self.bigsection(
                'Author', '#ffffff', '#7799ee', contents)
        if hasattr(object, '__credits__'):
            contents = self.markup(_binstr(object.__credits__), self.preformat)
            result = result + self.bigsection(
                'Credits', '#ffffff', '#7799ee', contents)
        return result
    def docclass(self, object, name=None, mod=None, funcs={}, classes={},
                 *ignored):
        """Produce HTML documentation for a class object."""
        realname = object.__name__
        name = name or realname
        bases = object.__bases__
        contents = []
        push = contents.append
        # Cute little class to pump out a horizontal rule between sections.
        class HorizontalRule:
            def __init__(self):
                self.needone = 0
            def maybe(self):
                if self.needone:
                    push('<hr>\n')
                self.needone = 1
        hr = HorizontalRule()
        # List the mro, if non-trivial.
        mro = deque(inspect.getmro(object))
        if len(mro) > 2:
            hr.maybe()
            push('<dl><dt>Method resolution order:</dt>\n')
            for base in mro:
                push('<dd>%s</dd>\n' % self.classlink(base,
                                                      object.__module__))
            push('</dl>\n')
        def spill(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    try:
                        value = getattr(object, name)
                    except Exception:
                        # Some descriptors may meet a failure in their __get__.
                        # (bug #1785)
                        push(self._docdescriptor(name, value, mod))
                    else:
                        push(self.document(value, name, mod,
                                           funcs, classes, mdict, object))
                    push('\n')
            return attrs
        def spilldescriptors(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    push(self._docdescriptor(name, value, mod))
            return attrs
        def spilldata(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    base = self.docother(getattr(object, name), name, mod)
                    if (hasattr(value, '__call__') or
                            inspect.isdatadescriptor(value)):
                        doc = getattr(value, "__doc__", None)
                    else:
                        doc = None
                    if doc is None:
                        push('<dl><dt>%s</dl>\n' % base)
                    else:
                        doc = self.markup(getdoc(value), self.preformat,
                                          funcs, classes, mdict)
                        doc = '<dd><tt>%s</tt>' % doc
                        push('<dl><dt>%s%s</dl>\n' % (base, doc))
                    push('\n')
            return attrs
        attrs = filter(lambda data: visiblename(data[0], obj=object),
                       classify_class_attrs(object))
        mdict = {}
        for key, kind, homecls, value in attrs:
            mdict[key] = anchor = '#' + name + '-' + key
            try:
                # NOTE(review): looks up 'name' (the class's display name),
                # not 'key' -- matches upstream pydoc, so preserved as-is;
                # failures are swallowed below anyway.
                value = getattr(object, name)
            except Exception:
                # Some descriptors may meet a failure in their __get__.
                # (bug #1785)
                pass
            try:
                # The value may not be hashable (e.g., a data attr with
                # a dict or list value).
                mdict[value] = anchor
            except TypeError:
                pass
        while attrs:
            if mro:
                thisclass = mro.popleft()
            else:
                thisclass = attrs[0][2]
            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
            if thisclass is __builtin__.object:
                attrs = inherited
                continue
            elif thisclass is object:
                tag = 'defined here'
            else:
                tag = 'inherited from %s' % self.classlink(thisclass,
                                                           object.__module__)
            tag += ':<br>\n'
            # Sort attrs by name.
            try:
                attrs.sort(key=lambda t: t[0])
            except TypeError:
                attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat
            # Pump out the attrs, segregated by kind.
            attrs = spill('Methods %s' % tag, attrs,
                          lambda t: t[1] == 'method')
            attrs = spill('Class methods %s' % tag, attrs,
                          lambda t: t[1] == 'class method')
            attrs = spill('Static methods %s' % tag, attrs,
                          lambda t: t[1] == 'static method')
            attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
                                     lambda t: t[1] == 'data descriptor')
            attrs = spilldata('Data and other attributes %s' % tag, attrs,
                              lambda t: t[1] == 'data')
            assert attrs == []
            attrs = inherited
        contents = ''.join(contents)
        if name == realname:
            title = '<a name="%s">class <strong>%s</strong></a>' % (
                name, realname)
        else:
            title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
                name, name, realname)
        if bases:
            parents = []
            for base in bases:
                parents.append(self.classlink(base, object.__module__))
            title = title + '(%s)' % join(parents, ', ')
        doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
        doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc
        return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
    def formatvalue(self, object):
        """Format an argument default value as text."""
        return self.grey('=' + self.repr(object))
    def docroutine(self, object, name=None, mod=None,
                   funcs={}, classes={}, methods={}, cl=None):
        """Produce HTML documentation for a function or method object."""
        realname = object.__name__
        name = name or realname
        anchor = (cl and cl.__name__ or '') + '-' + name
        note = ''
        skipdocs = 0
        if inspect.ismethod(object):
            imclass = object.im_class
            if cl:
                if imclass is not cl:
                    note = ' from ' + self.classlink(imclass, mod)
            else:
                if object.im_self is not None:
                    note = ' method of %s instance' % self.classlink(
                        object.im_self.__class__, mod)
                else:
                    note = ' unbound %s method' % self.classlink(imclass,mod)
            object = object.im_func
        if name == realname:
            title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
        else:
            if (cl and realname in cl.__dict__ and
                cl.__dict__[realname] is object):
                reallink = '<a href="#%s">%s</a>' % (
                    cl.__name__ + '-' + realname, realname)
                skipdocs = 1
            else:
                reallink = realname
            title = '<a name="%s"><strong>%s</strong></a> = %s' % (
                anchor, name, reallink)
        if inspect.isfunction(object):
            args, varargs, varkw, defaults = inspect.getargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, formatvalue=self.formatvalue)
            if realname == '<lambda>':
                title = '<strong>%s</strong> <em>lambda</em> ' % name
                argspec = argspec[1:-1] # remove parentheses
        else:
            argspec = '(...)'
        decl = title + argspec + (note and self.grey(
            '<font face="helvetica, arial">%s</font>' % note))
        if skipdocs:
            return '<dl><dt>%s</dt></dl>\n' % decl
        else:
            doc = self.markup(
                getdoc(object), self.preformat, funcs, classes, methods)
            doc = doc and '<dd><tt>%s</tt></dd>' % doc
            return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
    def _docdescriptor(self, name, value, mod):
        """Render a data descriptor: bolded name plus its docstring."""
        results = []
        push = results.append
        if name:
            push('<dl><dt><strong>%s</strong></dt>\n' % name)
        if value.__doc__ is not None:
            doc = self.markup(getdoc(value), self.preformat)
            push('<dd><tt>%s</tt></dd>\n' % doc)
        push('</dl>\n')
        return ''.join(results)
    def docproperty(self, object, name=None, mod=None, cl=None):
        """Produce html documentation for a property."""
        return self._docdescriptor(name, object, mod)
    def docother(self, object, name=None, mod=None, *ignored):
        """Produce HTML documentation for a data object."""
        lhs = name and '<strong>%s</strong> = ' % name or ''
        return lhs + self.repr(object)
    def docdata(self, object, name=None, mod=None, cl=None):
        """Produce html documentation for a data descriptor."""
        return self._docdescriptor(name, object, mod)
    def index(self, dir, shadowed=None):
        """Generate an HTML index for a directory of modules."""
        modpkgs = []
        if shadowed is None: shadowed = {}
        for importer, name, ispkg in pkgutil.iter_modules([dir]):
            modpkgs.append((name, '', ispkg, name in shadowed))
            shadowed[name] = 1
        modpkgs.sort()
        contents = self.multicolumn(modpkgs, self.modpkglink)
        return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
    """Class for safely making a text representation of a Python object."""
    def __init__(self):
        Repr.__init__(self)
        # Allow a few more items/characters than the Repr defaults.
        self.maxlist = self.maxtuple = 20
        self.maxdict = 10
        self.maxstring = self.maxother = 100
    def repr1(self, x, level):
        # Dispatch to a type-specific repr_* method when one exists.
        if hasattr(type(x), '__name__'):
            methodname = 'repr_' + join(split(type(x).__name__), '_')
            if hasattr(self, methodname):
                return getattr(self, methodname)(x, level)
        return cram(stripid(repr(x)), self.maxother)
    def repr_string(self, x, level):
        test = cram(x, self.maxstring)
        testrepr = repr(test)
        if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
            # Backslashes are only literal in the string and are never
            # needed to make any special characters, so show a raw string.
            return 'r' + testrepr[0] + test + testrepr[0]
        return testrepr
    repr_str = repr_string
    def repr_instance(self, x, level):
        try:
            return cram(stripid(repr(x)), self.maxstring)
        except:
            # repr() of the instance itself may raise; fall back to a
            # generic placeholder.
            return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
    def bold(self, text):
        """Format a string in bold by overstriking."""
        # Emit each character as "c\bc" -- the backspace-overstrike
        # convention recognized by terminal pagers such as less(1).
        return join(map(lambda ch: ch + '\b' + ch, text), '')
    def indent(self, text, prefix=' '):
        """Indent text by prepending a given prefix to each line."""
        if not text: return ''
        lines = split(text, '\n')
        lines = map(lambda line, prefix=prefix: prefix + line, lines)
        # Strip trailing whitespace from the final line only.
        if lines: lines[-1] = rstrip(lines[-1])
        return join(lines, '\n')
    def section(self, title, contents):
        """Format a section with a given heading.

        The heading is bolded (overstruck) and the contents indented.
        """
        return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'
# ---------------------------------------------- type-specific routines
    def formattree(self, tree, modname, parent=None, prefix=''):
        """Render in text a class tree as returned by inspect.getclasstree()."""
        result = ''
        for entry in tree:
            if type(entry) is type(()):
                # (class, bases) tuple: one line for the class itself.
                c, bases = entry
                result = result + prefix + classname(c, modname)
                if bases and bases != (parent,):
                    parents = map(lambda c, m=modname: classname(c, m), bases)
                    result = result + '(%s)' % join(parents, ', ')
                result = result + '\n'
            elif type(entry) is type([]):
                # Nested list: subclasses, rendered with extra indentation.
                result = result + self.formattree(
                    entry, modname, c, prefix + ' ')
        return result
    def docmodule(self, object, name=None, mod=None):
        """Produce text documentation for a given module object."""
        name = object.__name__ # ignore the passed-in name
        synop, desc = splitdoc(getdoc(object))
        result = self.section('NAME', name + (synop and ' - ' + synop))
        try:
            all = object.__all__
        except AttributeError:
            all = None
        try:
            file = inspect.getabsfile(object)
        except TypeError:
            file = '(built-in)'
        result = result + self.section('FILE', file)
        docloc = self.getdocloc(object)
        if docloc is not None:
            result = result + self.section('MODULE DOCS', docloc)
        if desc:
            result = result + self.section('DESCRIPTION', desc)
        classes = []
        for key, value in inspect.getmembers(object, inspect.isclass):
            # if __all__ exists, believe it. Otherwise use old heuristic.
            if (all is not None
                or (inspect.getmodule(value) or object) is object):
                if visiblename(key, all, object):
                    classes.append((key, value))
        funcs = []
        for key, value in inspect.getmembers(object, inspect.isroutine):
            # if __all__ exists, believe it. Otherwise use old heuristic.
            if (all is not None or
                inspect.isbuiltin(value) or inspect.getmodule(value) is object):
                if visiblename(key, all, object):
                    funcs.append((key, value))
        data = []
        for key, value in inspect.getmembers(object, isdata):
            if visiblename(key, all, object):
                data.append((key, value))
        modpkgs = []
        modpkgs_names = set()
        if hasattr(object, '__path__'):
            # Packages: list the modules/subpackages they contain.
            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
                modpkgs_names.add(modname)
                if ispkg:
                    modpkgs.append(modname + ' (package)')
                else:
                    modpkgs.append(modname)
            modpkgs.sort()
            result = result + self.section(
                'PACKAGE CONTENTS', join(modpkgs, '\n'))
        # Detect submodules as sometimes created by C extensions
        submodules = []
        for key, value in inspect.getmembers(object, inspect.ismodule):
            if value.__name__.startswith(name + '.') and key not in modpkgs_names:
                submodules.append(key)
        if submodules:
            submodules.sort()
            result = result + self.section(
                'SUBMODULES', join(submodules, '\n'))
        if classes:
            classlist = map(lambda key_value: key_value[1], classes)
            contents = [self.formattree(
                inspect.getclasstree(classlist, 1), name)]
            for key, value in classes:
                contents.append(self.document(value, key, name))
            result = result + self.section('CLASSES', join(contents, '\n'))
        if funcs:
            contents = []
            for key, value in funcs:
                contents.append(self.document(value, key, name))
            result = result + self.section('FUNCTIONS', join(contents, '\n'))
        if data:
            contents = []
            for key, value in data:
                contents.append(self.docother(value, key, name, maxlen=70))
            result = result + self.section('DATA', join(contents, '\n'))
        if hasattr(object, '__version__'):
            version = _binstr(object.__version__)
            # Unwrap an RCS '$Revision: ... $' keyword if present.
            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
                version = strip(version[11:-1])
            result = result + self.section('VERSION', version)
        if hasattr(object, '__date__'):
            result = result + self.section('DATE', _binstr(object.__date__))
        if hasattr(object, '__author__'):
            result = result + self.section('AUTHOR', _binstr(object.__author__))
        if hasattr(object, '__credits__'):
            result = result + self.section('CREDITS', _binstr(object.__credits__))
        return result
    def docclass(self, object, name=None, mod=None, *ignored):
        """Produce text documentation for a given class object."""
        realname = object.__name__
        name = name or realname
        bases = object.__bases__
        def makename(c, m=object.__module__):
            # Qualify class names relative to this class's module.
            return classname(c, m)
        if name == realname:
            title = 'class ' + self.bold(realname)
        else:
            title = self.bold(name) + ' = class ' + realname
        if bases:
            parents = map(makename, bases)
            title = title + '(%s)' % join(parents, ', ')
        doc = getdoc(object)
        contents = doc and [doc + '\n'] or []
        push = contents.append
        # List the mro, if non-trivial.
        mro = deque(inspect.getmro(object))
        if len(mro) > 2:
            push("Method resolution order:")
            for base in mro:
                push(' ' + makename(base))
            push('')
        # Cute little class to pump out a horizontal rule between sections.
        class HorizontalRule:
            def __init__(self):
                self.needone = 0
            def maybe(self):
                if self.needone:
                    push('-' * 70)
                self.needone = 1
        hr = HorizontalRule()
        def spill(msg, attrs, predicate):
            # Emit all attrs matching predicate; return the rest.
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    try:
                        value = getattr(object, name)
                    except Exception:
                        # Some descriptors may meet a failure in their __get__.
                        # (bug #1785)
                        push(self._docdescriptor(name, value, mod))
                    else:
                        push(self.document(value,
                                        name, mod, object))
            return attrs
        def spilldescriptors(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    push(self._docdescriptor(name, value, mod))
            return attrs
        def spilldata(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    if (hasattr(value, '__call__') or
                            inspect.isdatadescriptor(value)):
                        doc = getdoc(value)
                    else:
                        doc = None
                    push(self.docother(getattr(object, name),
                                       name, mod, maxlen=70, doc=doc) + '\n')
            return attrs
        attrs = filter(lambda data: visiblename(data[0], obj=object),
                       classify_class_attrs(object))
        # Walk the mro, emitting each class's own attributes in turn.
        while attrs:
            if mro:
                thisclass = mro.popleft()
            else:
                thisclass = attrs[0][2]
            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
            if thisclass is __builtin__.object:
                # Suppress the noise inherited from the object base class.
                attrs = inherited
                continue
            elif thisclass is object:
                tag = "defined here"
            else:
                tag = "inherited from %s" % classname(thisclass,
                                                      object.__module__)
            # Sort attrs by name.
            attrs.sort()
            # Pump out the attrs, segregated by kind.
            attrs = spill("Methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'method')
            attrs = spill("Class methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'class method')
            attrs = spill("Static methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'static method')
            attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
                                     lambda t: t[1] == 'data descriptor')
            attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
                              lambda t: t[1] == 'data')
            assert attrs == []
            attrs = inherited
        contents = '\n'.join(contents)
        if not contents:
            return title + '\n'
        return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n'
    def formatvalue(self, object):
        """Format an argument default value as text."""
        return '=' + self.repr(object)
    def docroutine(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a function or method object."""
        realname = object.__name__
        name = name or realname
        note = ''
        skipdocs = 0
        if inspect.ismethod(object):
            # Python 2 bound/unbound method: record where it comes from,
            # then unwrap to the underlying function via im_func.
            imclass = object.im_class
            if cl:
                if imclass is not cl:
                    note = ' from ' + classname(imclass, mod)
            else:
                if object.im_self is not None:
                    note = ' method of %s instance' % classname(
                        object.im_self.__class__, mod)
                else:
                    note = ' unbound %s method' % classname(imclass,mod)
            object = object.im_func
        if name == realname:
            title = self.bold(realname)
        else:
            # Documented under an alias: if the class dict holds the same
            # function object under its real name, skip the duplicate doc.
            if (cl and realname in cl.__dict__ and
                cl.__dict__[realname] is object):
                skipdocs = 1
            title = self.bold(name) + ' = ' + realname
        if inspect.isfunction(object):
            args, varargs, varkw, defaults = inspect.getargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, formatvalue=self.formatvalue)
            if realname == '<lambda>':
                title = self.bold(name) + ' lambda '
                argspec = argspec[1:-1] # remove parentheses
        else:
            # Built-ins and other non-Python callables have no argspec.
            argspec = '(...)'
        decl = title + argspec + note
        if skipdocs:
            return decl + '\n'
        else:
            doc = getdoc(object) or ''
            return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
    def docproperty(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a property."""
        # Properties render exactly like any other data descriptor.
        return self._docdescriptor(name, object, mod)
    def docdata(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a data descriptor."""
        # Same rendering path as docproperty.
        return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
# --------------------------------------------------------- user interfaces
def pager(text):
    """The first time this is called, determine what kind of pager to use."""
    # Self-replacing stub: rebind the module-level name `pager` to the
    # concrete pager chosen by getpager(), so detection runs only once.
    global pager
    pager = getpager()
    pager(text)
def getpager():
    """Decide what method to use for paging through text."""
    # Fall back to plain output whenever stdout is not a real file or
    # stdin/stdout are not attached to an interactive terminal.
    if type(sys.stdout) is not types.FileType:
        return plainpager
    if not hasattr(sys.stdin, "isatty"):
        return plainpager
    if not sys.stdin.isatty() or not sys.stdout.isatty():
        return plainpager
    if 'PAGER' in os.environ:
        if sys.platform == 'win32': # pipes completely broken in Windows
            return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
        elif os.environ.get('TERM') in ('dumb', 'emacs'):
            # Dumb terminals cannot render overstruck bold; strip it first.
            return lambda text: pipepager(plain(text), os.environ['PAGER'])
        else:
            return lambda text: pipepager(text, os.environ['PAGER'])
    if os.environ.get('TERM') in ('dumb', 'emacs'):
        return plainpager
    if sys.platform == 'win32' or sys.platform.startswith('os2'):
        return lambda text: tempfilepager(plain(text), 'more <')
    # Probe for 'less'; exit status 0 means it exists and ran.
    if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
        return lambda text: pipepager(text, 'less')
    # Last resort: probe for a working 'more' against an empty temp file.
    import tempfile
    (fd, filename) = tempfile.mkstemp()
    os.close(fd)
    try:
        if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
            return lambda text: pipepager(text, 'more')
        else:
            return ttypager
    finally:
        os.unlink(filename)
def plain(text):
    """Remove boldface formatting (overstruck ``ch\\bch`` pairs) from text."""
    # Each bold character is rendered as "X<backspace>X"; dropping any
    # character followed by a backspace leaves the plain text.
    return re.compile('.\b').sub('', text)
def pipepager(text, cmd):
    """Page through text by feeding it to another program."""
    # Python 2 os.popen: cmd runs through the shell with our text on its stdin.
    pipe = os.popen(cmd, 'w')
    try:
        pipe.write(_encode(text))
        pipe.close()
    except IOError:
        pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
    """Page through text by invoking a program on a temporary file."""
    import tempfile
    # mkstemp creates the file securely, unlike the race-prone mktemp()
    # (an attacker could pre-create the predicted mktemp path).
    fd, filename = tempfile.mkstemp()
    try:
        file = os.fdopen(fd, 'w')
        try:
            file.write(_encode(text))
        finally:
            # Close before handing the file to the pager command.
            file.close()
        os.system(cmd + ' "' + filename + '"')
    finally:
        os.unlink(filename)
def ttypager(text):
    """Page through text on a text terminal."""
    lines = plain(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding))).split('\n')
    try:
        # Prefer single-keypress input: put the tty in cbreak mode and
        # remember the old settings so they can be restored on exit.
        import tty
        fd = sys.stdin.fileno()
        old = tty.tcgetattr(fd)
        tty.setcbreak(fd)
        getchar = lambda: sys.stdin.read(1)
    except (ImportError, AttributeError):
        # No tty module (or stdin is not a real tty): read whole lines and
        # use the first character typed.
        tty = None
        getchar = lambda: sys.stdin.readline()[:-1][:1]
    try:
        try:
            h = int(os.environ.get('LINES', 0))
        except ValueError:
            h = 0
        if h <= 1:
            h = 25
        # r is the index of the next unshown line; inc is one screenful.
        r = inc = h - 1
        sys.stdout.write(join(lines[:inc], '\n') + '\n')
        while lines[r:]:
            sys.stdout.write('-- more --')
            sys.stdout.flush()
            c = getchar()
            if c in ('q', 'Q'):
                sys.stdout.write('\r          \r')
                break
            elif c in ('\r', '\n'):
                # Return: advance by a single line.
                sys.stdout.write('\r          \r' + lines[r] + '\n')
                r = r + 1
                continue
            if c in ('b', 'B', '\x1b'):
                # 'b' or ESC: go back one screenful (re-showing then paging
                # forward below nets one screen backwards).
                r = r - inc - inc
                if r < 0: r = 0
            sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
            r = r + inc
    finally:
        if tty:
            # Restore the terminal settings saved above.
            tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
    """Simply print unformatted text. This is the ultimate fallback."""
    # _encode/_encoding are module-level helpers defined elsewhere in this
    # file; they encode for the current stdout encoding.
    sys.stdout.write(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding)))
def describe(thing):
    """Produce a short description of the given thing."""
    if inspect.ismodule(thing):
        modname = thing.__name__
        if modname in sys.builtin_module_names:
            return 'built-in module ' + modname
        # Packages carry a __path__ attribute; plain modules do not.
        return ('package ' if hasattr(thing, '__path__') else 'module ') + modname
    if inspect.isbuiltin(thing):
        return 'built-in function ' + thing.__name__
    # getset/member descriptors are named by their owning class.
    for check, kind in ((inspect.isgetsetdescriptor, 'getset descriptor %s.%s.%s'),
                        (inspect.ismemberdescriptor, 'member descriptor %s.%s.%s')):
        if check(thing):
            owner = thing.__objclass__
            return kind % (owner.__module__, owner.__name__, thing.__name__)
    for check, prefix in ((inspect.isclass, 'class '),
                          (inspect.isfunction, 'function '),
                          (inspect.ismethod, 'method ')):
        if check(thing):
            return prefix + thing.__name__
    if type(thing) is types.InstanceType:
        return 'instance of ' + thing.__class__.__name__
    return type(thing).__name__
def locate(path, forceload=0):
    """Locate an object by name or dotted path, importing as necessary."""
    parts = [piece for piece in split(path, '.') if piece]
    # Import the longest importable prefix of the dotted path.
    module, n = None, 0
    while n < len(parts):
        candidate = safeimport(join(parts[:n+1], '.'), forceload)
        if not candidate:
            break
        module, n = candidate, n + 1
    # Resolve the remaining components as attributes, starting from the
    # deepest imported module (or builtins when nothing imported).
    object = module if module else __builtin__
    for part in parts[n:]:
        try:
            object = getattr(object, part)
        except AttributeError:
            return None
    return object
# --------------------------------------- interactive interpreter interface
text = TextDoc()  # shared plain-text renderer used by render_doc()
html = HTMLDoc()  # shared HTML renderer used by writedoc()/serve()
# Capture the type of an old-style (classic) class instance, so render_doc
# can special-case classic instances below.
class _OldStyleClass: pass
_OLD_INSTANCE_TYPE = type(_OldStyleClass())
def resolve(thing, forceload=0):
    """Given an object or a path to an object, get the object and its name."""
    if isinstance(thing, str):
        # Dotted-path string: locate (importing as needed) the named object.
        object = locate(thing, forceload)
        if not object:
            raise ImportError, 'no Python documentation found for %r' % thing
        return object, thing
    else:
        # Already an object: use its __name__ only when it is a real string.
        name = getattr(thing, '__name__', None)
        return thing, name if isinstance(name, str) else None
def render_doc(thing, title='Python Library Documentation: %s', forceload=0):
    """Render text documentation, given an object or a path to an object."""
    object, name = resolve(thing, forceload)
    desc = describe(object)
    module = inspect.getmodule(object)
    if name and '.' in name:
        # Qualify with the containing dotted prefix of the requested name.
        desc += ' in ' + name[:name.rfind('.')]
    elif module and module is not object:
        desc += ' in module ' + module.__name__
    if type(object) is _OLD_INSTANCE_TYPE:
        # If the passed object is an instance of an old-style class,
        # document its available methods instead of its value.
        object = object.__class__
    elif not (inspect.ismodule(object) or
              inspect.isclass(object) or
              inspect.isroutine(object) or
              inspect.isgetsetdescriptor(object) or
              inspect.ismemberdescriptor(object) or
              isinstance(object, property)):
        # If the passed object is a piece of data or an instance,
        # document its available methods instead of its value.
        object = type(object)
        desc += ' object'
    return title % desc + '\n\n' + text.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0):
    """Display text documentation, given an object or a path to an object."""
    try:
        pager(render_doc(thing, title, forceload))
    except (ImportError, ErrorDuringImport), value:
        # Lookup/import failures are reported, not raised, at this UI level.
        print value
def writedoc(thing, forceload=0):
    """Write HTML documentation to a file in the current directory."""
    try:
        object, name = resolve(thing, forceload)
        page = html.page(describe(object), html.document(object, name))
        # Output file is named after the resolved object, e.g. "json.html".
        file = open(name + '.html', 'w')
        file.write(page)
        file.close()
        print 'wrote', name + '.html'
    except (ImportError, ErrorDuringImport), value:
        print value
def writedocs(dir, pkgpath='', done=None):
    """Write out HTML documentation for all modules in a directory tree."""
    # `done` is kept for backward compatibility with older callers; it is
    # not consulted here (pkgutil.walk_packages already de-duplicates).
    if done is None: done = {}
    for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
        writedoc(modname)
    return
class Helper:
    """Interactive help session: the object behind the builtin help().

    Calling the instance with no argument starts an interactive "help>"
    loop; calling it with an argument documents that argument directly.
    """

    # These dictionaries map a topic name to either an alias, or a tuple
    # (label, seealso-items). The "label" is the label of the corresponding
    # section in the .rst file under Doc/ and an index into the dictionary
    # in pydoc_data/topics.py.
    #
    # CAUTION: if you change one of these dictionaries, be sure to adapt the
    # list of needed labels in Doc/tools/pyspecific.py and
    # regenerate the pydoc_data/topics.py file by running
    # make pydoc-topics
    # in Doc/ and copying the output file into the Lib/ directory.

    keywords = {
        'and': 'BOOLEAN',
        'as': 'with',
        'assert': ('assert', ''),
        'break': ('break', 'while for'),
        'class': ('class', 'CLASSES SPECIALMETHODS'),
        'continue': ('continue', 'while for'),
        'def': ('function', ''),
        'del': ('del', 'BASICMETHODS'),
        'elif': 'if',
        'else': ('else', 'while for'),
        'except': 'try',
        'exec': ('exec', ''),
        'finally': 'try',
        'for': ('for', 'break continue while'),
        'from': 'import',
        'global': ('global', 'NAMESPACES'),
        'if': ('if', 'TRUTHVALUE'),
        'import': ('import', 'MODULES'),
        'in': ('in', 'SEQUENCEMETHODS2'),
        'is': 'COMPARISON',
        'lambda': ('lambda', 'FUNCTIONS'),
        'not': 'BOOLEAN',
        'or': 'BOOLEAN',
        'pass': ('pass', ''),
        'print': ('print', ''),
        'raise': ('raise', 'EXCEPTIONS'),
        'return': ('return', 'FUNCTIONS'),
        'try': ('try', 'EXCEPTIONS'),
        'while': ('while', 'break continue if TRUTHVALUE'),
        'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
        'yield': ('yield', ''),
    }

    # Either add symbols to this dictionary or to the symbols dictionary
    # directly: Whichever is easier. They are merged later.
    _symbols_inverse = {
        'STRINGS' : ("'", "'''", "r'", "u'", '"""', '"', 'r"', 'u"'),
        'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
                       '|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
        'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
        'UNARY' : ('-', '~'),
        'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
                                 '^=', '<<=', '>>=', '**=', '//='),
        'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
        'COMPLEX' : ('j', 'J')
    }

    symbols = {
        '%': 'OPERATORS FORMATTING',
        '**': 'POWER',
        ',': 'TUPLES LISTS FUNCTIONS',
        '.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
        '...': 'ELLIPSIS',
        ':': 'SLICINGS DICTIONARYLITERALS',
        '@': 'def class',
        '\\': 'STRINGS',
        '_': 'PRIVATENAMES',
        '__': 'PRIVATENAMES SPECIALMETHODS',
        '`': 'BACKQUOTES',
        '(': 'TUPLES FUNCTIONS CALLS',
        ')': 'TUPLES FUNCTIONS CALLS',
        '[': 'LISTS SUBSCRIPTS SLICINGS',
        ']': 'LISTS SUBSCRIPTS SLICINGS'
    }
    # Merge _symbols_inverse into symbols, appending topic names to any
    # symbol that already has an entry. (Runs at class-definition time.)
    for topic, symbols_ in _symbols_inverse.iteritems():
        for symbol in symbols_:
            topics = symbols.get(symbol, topic)
            if topic not in topics:
                topics = topics + ' ' + topic
            symbols[symbol] = topics

    topics = {
        'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
                  'FUNCTIONS CLASSES MODULES FILES inspect'),
        'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING '
                    'TYPES'),
        'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
        'FORMATTING': ('formatstrings', 'OPERATORS'),
        'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
                    'FORMATTING TYPES'),
        'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
        'INTEGER': ('integers', 'int range'),
        'FLOAT': ('floating', 'float math'),
        'COMPLEX': ('imaginary', 'complex cmath'),
        'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
        'MAPPINGS': 'DICTIONARIES',
        'FUNCTIONS': ('typesfunctions', 'def TYPES'),
        'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
        'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
        'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
        'FRAMEOBJECTS': 'TYPES',
        'TRACEBACKS': 'TYPES',
        'NONE': ('bltin-null-object', ''),
        'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
        'FILES': ('bltin-file-objects', ''),
        'SPECIALATTRIBUTES': ('specialattrs', ''),
        'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
        'MODULES': ('typesmodules', 'import'),
        'PACKAGES': 'import',
        'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
                        'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
                        'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
                        'LISTS DICTIONARIES BACKQUOTES'),
        'OPERATORS': 'EXPRESSIONS',
        'PRECEDENCE': 'EXPRESSIONS',
        'OBJECTS': ('objects', 'TYPES'),
        'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
                           'CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS '
                           'SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
        'BASICMETHODS': ('customization', 'cmp hash repr str SPECIALMETHODS'),
        'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
        'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
        'SEQUENCEMETHODS1': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS2 '
                             'SPECIALMETHODS'),
        'SEQUENCEMETHODS2': ('sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 '
                             'SPECIALMETHODS'),
        'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
        'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
                          'SPECIALMETHODS'),
        'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
        'NAMESPACES': ('naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
        'DYNAMICFEATURES': ('dynamic-features', ''),
        'SCOPING': 'NAMESPACES',
        'FRAMES': 'NAMESPACES',
        'EXCEPTIONS': ('exceptions', 'try except finally raise'),
        'COERCIONS': ('coercion-rules','CONVERSIONS'),
        'CONVERSIONS': ('conversions', 'COERCIONS'),
        'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
        'SPECIALIDENTIFIERS': ('id-classes', ''),
        'PRIVATENAMES': ('atom-identifiers', ''),
        'LITERALS': ('atom-literals', 'STRINGS BACKQUOTES NUMBERS '
                     'TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
        'TUPLES': 'SEQUENCES',
        'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
        'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
        'LISTLITERALS': ('lists', 'LISTS LITERALS'),
        'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
        'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
        'BACKQUOTES': ('string-conversions', 'repr str STRINGS LITERALS'),
        'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr '
                       'ATTRIBUTEMETHODS'),
        'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS1'),
        'SLICINGS': ('slicings', 'SEQUENCEMETHODS2'),
        'CALLS': ('calls', 'EXPRESSIONS'),
        'POWER': ('power', 'EXPRESSIONS'),
        'UNARY': ('unary', 'EXPRESSIONS'),
        'BINARY': ('binary', 'EXPRESSIONS'),
        'SHIFTING': ('shifting', 'EXPRESSIONS'),
        'BITWISE': ('bitwise', 'EXPRESSIONS'),
        'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
        'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
        'ASSERTION': 'assert',
        'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
        'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
        'DELETION': 'del',
        'PRINTING': 'print',
        'RETURNING': 'return',
        'IMPORTING': 'import',
        'CONDITIONAL': 'if',
        'LOOPING': ('compound', 'for while break continue'),
        'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
        'DEBUGGING': ('debugger', 'pdb'),
        'CONTEXTMANAGERS': ('context-managers', 'with'),
    }

    def __init__(self, input=None, output=None):
        # Streams default lazily (via the properties below) to the *current*
        # sys.stdin/sys.stdout, so later stream replacement is honoured.
        self._input = input
        self._output = output

    input  = property(lambda self: self._input or sys.stdin)
    output = property(lambda self: self._output or sys.stdout)

    def __repr__(self):
        # Typing "help" alone at the interactive prompt reprs this object;
        # frame name '?' means top-level interactive code, so start a session.
        if inspect.stack()[1][3] == '?':
            self()
            return ''
        return '<pydoc.Helper instance>'

    # Unique sentinel distinguishing "no argument" from help(None).
    _GoInteractive = object()
    def __call__(self, request=_GoInteractive):
        if request is not self._GoInteractive:
            self.help(request)
        else:
            self.intro()
            self.interact()
            self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')

    def interact(self):
        # Read-eval loop for the "help>" prompt; empty line or q/quit exits.
        self.output.write('\n')
        while True:
            try:
                request = self.getline('help> ')
                if not request: break
            except (KeyboardInterrupt, EOFError):
                break
            request = strip(replace(request, '"', '', "'", ''))
            if lower(request) in ('q', 'quit'): break
            self.help(request)

    def getline(self, prompt):
        """Read one line, using raw_input when available."""
        if self.input is sys.stdin:
            return raw_input(prompt)
        else:
            self.output.write(prompt)
            self.output.flush()
            return self.input.readline()

    def help(self, request):
        # Dispatch a single request: special listing commands first, then
        # symbols/keywords/topics tables, then general object documentation.
        if type(request) is type(''):
            request = request.strip()
            if request == 'help': self.intro()
            elif request == 'keywords': self.listkeywords()
            elif request == 'symbols': self.listsymbols()
            elif request == 'topics': self.listtopics()
            elif request == 'modules': self.listmodules()
            elif request[:8] == 'modules ':
                self.listmodules(split(request)[1])
            elif request in self.symbols: self.showsymbol(request)
            elif request in self.keywords: self.showtopic(request)
            elif request in self.topics: self.showtopic(request)
            elif request: doc(request, 'Help on %s:')
        elif isinstance(request, Helper): self()
        else: doc(request, 'Help on %s:')
        self.output.write('\n')

    def intro(self):
        self.output.write('''
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/%s/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % tuple([sys.version[:3]]*2))

    def list(self, items, columns=4, width=80):
        # Write `items` sorted, column-major, in `columns` columns padded to
        # equal width. (Python 2 integer division intended in colw/rows.)
        items = items[:]
        items.sort()
        colw = width / columns
        rows = (len(items) + columns - 1) / columns
        for row in range(rows):
            for col in range(columns):
                i = col * rows + row
                if i < len(items):
                    self.output.write(items[i])
                    if col < columns - 1:
                        self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
            self.output.write('\n')

    def listkeywords(self):
        self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
        self.list(self.keywords.keys())

    def listsymbols(self):
        self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
        self.list(self.symbols.keys())

    def listtopics(self):
        self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
        self.list(self.topics.keys())

    def showtopic(self, topic, more_xrefs=''):
        try:
            import pydoc_data.topics
        except ImportError:
            self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
            return
        target = self.topics.get(topic, self.keywords.get(topic))
        if not target:
            self.output.write('no documentation found for %s\n' % repr(topic))
            return
        if type(target) is type(''):
            # A bare-string target is an alias for another topic.
            return self.showtopic(target, more_xrefs)

        label, xrefs = target
        try:
            doc = pydoc_data.topics.topics[label]
        except KeyError:
            self.output.write('no documentation found for %s\n' % repr(topic))
            return
        pager(strip(doc) + '\n')
        if more_xrefs:
            xrefs = (xrefs or '') + ' ' + more_xrefs
        if xrefs:
            # Word-wrap the cross-reference list via DumbWriter.
            import StringIO, formatter
            buffer = StringIO.StringIO()
            formatter.DumbWriter(buffer).send_flowing_data(
                'Related help topics: ' + join(split(xrefs), ', ') + '\n')
            self.output.write('\n%s\n' % buffer.getvalue())

    def showsymbol(self, symbol):
        # A symbol entry is "topic xref xref ..."; first word is the topic.
        target = self.symbols[symbol]
        topic, _, xrefs = target.partition(' ')
        self.showtopic(topic, xrefs)

    def listmodules(self, key=''):
        if key:
            self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
            apropos(key)
        else:
            self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
            modules = {}
            def callback(path, modname, desc, modules=modules):
                # Collect only top-level module/package names.
                if modname and modname[-9:] == '.__init__':
                    modname = modname[:-9] + ' (package)'
                if find(modname, '.') < 0:
                    modules[modname] = 1
            def onerror(modname):
                callback(None, modname, None)
            ModuleScanner().run(callback, onerror=onerror)
            self.list(modules.keys())
            self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper()  # module-level instance; deliberately shadows builtin help()
class Scanner:
    """A generic tree iterator.

    next() yields descendants of the given roots in preorder (the roots
    themselves are not yielded) and returns None when exhausted.
    """
    def __init__(self, roots, children, descendp):
        # Copy roots so caller's list is never mutated by pop(0) below.
        self.roots = roots[:]
        self.state = []          # stack of (node, remaining-children) pairs
        self.children = children # node -> list of child nodes
        self.descendp = descendp # node -> whether to descend into it

    def next(self):
        # Iterative form of the traversal: loop instead of tail-recursing
        # when the top of the stack runs out of children.
        while True:
            if not self.state:
                if not self.roots:
                    return None
                root = self.roots.pop(0)
                self.state = [(root, self.children(root))]
            node, pending = self.state[-1]
            if not pending:
                self.state.pop()
                continue
            child = pending.pop(0)
            if self.descendp(child):
                self.state.append((child, self.children(child)))
            return child
class ModuleScanner:
    """An interruptible scanner that searches module synopses."""
    def run(self, callback, key=None, completer=None, onerror=None):
        # key: optional substring matched (case-insensitively) against
        # "modname - description"; when None every module is reported.
        # Setting self.quit from another thread stops the package walk.
        if key: key = lower(key)
        self.quit = False
        seen = {}
        # Built-in modules first: no source file, so import for the docstring.
        for modname in sys.builtin_module_names:
            if modname != '__main__':
                seen[modname] = 1
                if key is None:
                    callback(None, modname, '')
                else:
                    desc = split(__import__(modname).__doc__ or '', '\n')[0]
                    if find(lower(modname + ' - ' + desc), key) >= 0:
                        callback(None, modname, desc)
        for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
            if self.quit:
                break
            if key is None:
                callback(None, modname, '')
            else:
                loader = importer.find_module(modname)
                if hasattr(loader,'get_source'):
                    # Read the synopsis from source without importing the
                    # module (avoids import side effects).
                    import StringIO
                    desc = source_synopsis(
                        StringIO.StringIO(loader.get_source(modname))
                    ) or ''
                    if hasattr(loader,'get_filename'):
                        path = loader.get_filename(modname)
                    else:
                        path = None
                else:
                    # No source available: import and use the docstring.
                    module = loader.load_module(modname)
                    desc = (module.__doc__ or '').splitlines()[0]
                    path = getattr(module,'__file__',None)
                if find(lower(modname + ' - ' + desc), key) >= 0:
                    callback(path, modname, desc)
        if completer:
            completer()
def apropos(key):
    """Print all the one-line module summaries that contain a substring."""
    def callback(path, modname, desc):
        # Show package __init__ modules under their package name.
        if modname[-9:] == '.__init__':
            modname = modname[:-9] + ' (package)'
        print modname, desc and '- ' + desc
    def onerror(modname):
        # Best-effort scan: skip modules that fail to import.
        pass
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore') # ignore problems during import
        ModuleScanner().run(callback, key, onerror=onerror)
# --------------------------------------------------- web browser interface
def serve(port, callback=None, completer=None):
    """Serve pydoc HTML pages over HTTP on localhost:port.

    callback(server) fires once the server is ready; completer() fires
    when serving stops (even on interrupt).
    """
    import BaseHTTPServer, mimetools, select

    # Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
    class Message(mimetools.Message):
        def __init__(self, fp, seekable=1):
            Message = self.__class__
            Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
            self.encodingheader = self.getheader('content-transfer-encoding')
            self.typeheader = self.getheader('content-type')
            self.parsetype()
            self.parseplist()

    class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        def send_document(self, title, contents):
            # Write one complete HTML page; ignore clients that disconnect.
            try:
                self.send_response(200)
                self.send_header('Content-Type', 'text/html')
                self.end_headers()
                self.wfile.write(html.page(title, contents))
            except IOError: pass

        def do_GET(self):
            # URL "/name.html" (or "/name") documents the named object;
            # the root URL renders an index of all modules.
            path = self.path
            if path[-5:] == '.html': path = path[:-5]
            if path[:1] == '/': path = path[1:]
            if path and path != '.':
                try:
                    obj = locate(path, forceload=1)
                except ErrorDuringImport, value:
                    self.send_document(path, html.escape(str(value)))
                    return
                if obj:
                    self.send_document(describe(obj), html.document(obj, path))
                else:
                    self.send_document(path,
                        'no Python documentation found for %s' % repr(path))
            else:
                heading = html.heading(
                    '<big><big><strong>Python: Index of Modules</strong></big></big>',
                    '#ffffff', '#7799ee')
                def bltinlink(name):
                    return '<a href="%s.html">%s</a>' % (name, name)
                names = filter(lambda x: x != '__main__',
                               sys.builtin_module_names)
                contents = html.multicolumn(names, bltinlink)
                indices = ['<p>' + html.bigsection(
                    'Built-in Modules', '#ffffff', '#ee77aa', contents)]
                seen = {}
                for dir in sys.path:
                    indices.append(html.index(dir, seen))
                contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee <ping@lfw.org></font>'''
                self.send_document('Index of Modules', contents)

        def log_message(self, *args): pass

    class DocServer(BaseHTTPServer.HTTPServer):
        def __init__(self, port, callback):
            host = 'localhost'
            self.address = (host, port)
            self.callback = callback
            self.base.__init__(self, self.address, self.handler)

        def serve_until_quit(self):
            import select
            self.quit = False
            while not self.quit:
                # Poll with a 1-second timeout so self.quit (set from another
                # thread, e.g. the GUI) is honoured promptly.
                rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
                if rd: self.handle_request()

        def server_activate(self):
            self.base.server_activate(self)
            # server_port may differ from the requested port when port 0
            # (arbitrary free port) was requested.
            self.url = 'http://%s:%d/' % (self.address[0], self.server_port)
            if self.callback: self.callback(self)

    DocServer.base = BaseHTTPServer.HTTPServer
    DocServer.handler = DocHandler
    DocHandler.MessageClass = Message
    try:
        try:
            DocServer(port, callback).serve_until_quit()
        except (KeyboardInterrupt, select.error):
            pass
    finally:
        if completer: completer()
# ----------------------------------------------------- graphical interface
def gui():
    """Graphical interface (starts web server and pops up a control window)."""
    class GUI:
        def __init__(self, window, port=7464):
            self.window = window
            self.server = None
            self.scanner = None
            import Tkinter
            # --- server status row: title, open-browser and quit buttons ---
            self.server_frm = Tkinter.Frame(window)
            self.title_lbl = Tkinter.Label(self.server_frm,
                text='Starting server...\n ')
            self.open_btn = Tkinter.Button(self.server_frm,
                text='open browser', command=self.open, state='disabled')
            self.quit_btn = Tkinter.Button(self.server_frm,
                text='quit serving', command=self.quit, state='disabled')

            # --- search row: label, entry and (platform-dependent) stop ---
            self.search_frm = Tkinter.Frame(window)
            self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
            self.search_ent = Tkinter.Entry(self.search_frm)
            self.search_ent.bind('<Return>', self.search)
            self.stop_btn = Tkinter.Button(self.search_frm,
                text='stop', pady=0, command=self.stop, state='disabled')
            if sys.platform == 'win32':
                # Trying to hide and show this button crashes under Windows.
                self.stop_btn.pack(side='right')

            self.window.title('pydoc')
            self.window.protocol('WM_DELETE_WINDOW', self.quit)
            self.title_lbl.pack(side='top', fill='x')
            self.open_btn.pack(side='left', fill='x', expand=1)
            self.quit_btn.pack(side='right', fill='x', expand=1)
            self.server_frm.pack(side='top', fill='x')

            self.search_lbl.pack(side='left')
            self.search_ent.pack(side='right', fill='x', expand=1)
            self.search_frm.pack(side='top', fill='x')
            self.search_ent.focus_set()

            # --- results list (shown only while a search is expanded) ---
            font = ('helvetica', sys.platform == 'win32' and 8 or 10)
            self.result_lst = Tkinter.Listbox(window, font=font, height=6)
            self.result_lst.bind('<Button-1>', self.select)
            self.result_lst.bind('<Double-Button-1>', self.goto)
            self.result_scr = Tkinter.Scrollbar(window,
                orient='vertical', command=self.result_lst.yview)
            self.result_lst.config(yscrollcommand=self.result_scr.set)

            self.result_frm = Tkinter.Frame(window)
            self.goto_btn = Tkinter.Button(self.result_frm,
                text='go to selected', command=self.goto)
            self.hide_btn = Tkinter.Button(self.result_frm,
                text='hide results', command=self.hide)
            self.goto_btn.pack(side='left', fill='x', expand=1)
            self.hide_btn.pack(side='right', fill='x', expand=1)

            # Remember collapsed/expanded window geometries.
            self.window.update()
            self.minwidth = self.window.winfo_width()
            self.minheight = self.window.winfo_height()
            self.bigminheight = (self.server_frm.winfo_reqheight() +
                                 self.search_frm.winfo_reqheight() +
                                 self.result_lst.winfo_reqheight() +
                                 self.result_frm.winfo_reqheight())
            self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
            self.expanded = 0
            self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
            self.window.wm_minsize(self.minwidth, self.minheight)
            self.window.tk.willdispatch()

            # Run the HTTP server in a background thread; ready()/quit()
            # are its start/stop callbacks.
            import threading
            threading.Thread(
                target=serve, args=(port, self.ready, self.quit)).start()

        def ready(self, server):
            self.server = server
            self.title_lbl.config(
                text='Python documentation server at\n' + server.url)
            self.open_btn.config(state='normal')
            self.quit_btn.config(state='normal')

        def open(self, event=None, url=None):
            url = url or self.server.url
            try:
                import webbrowser
                webbrowser.open(url)
            except ImportError: # pre-webbrowser.py compatibility
                if sys.platform == 'win32':
                    os.system('start "%s"' % url)
                else:
                    rc = os.system('netscape -remote "openURL(%s)" &' % url)
                    if rc: os.system('netscape "%s" &' % url)

        def quit(self, event=None):
            if self.server:
                self.server.quit = 1
            self.window.quit()

        def search(self, event=None):
            key = self.search_ent.get()
            self.stop_btn.pack(side='right')
            self.stop_btn.config(state='normal')
            self.search_lbl.config(text='Searching for "%s"...' % key)
            self.search_ent.forget()
            self.search_lbl.pack(side='left')
            self.result_lst.delete(0, 'end')
            self.goto_btn.config(state='disabled')
            self.expand()

            # Cancel any scan in progress, then start a fresh one.
            import threading
            if self.scanner:
                self.scanner.quit = 1
            self.scanner = ModuleScanner()
            threading.Thread(target=self.scanner.run,
                             args=(self.update, key, self.done)).start()

        def update(self, path, modname, desc):
            # Called from the scanner thread for each matching module.
            if modname[-9:] == '.__init__':
                modname = modname[:-9] + ' (package)'
            self.result_lst.insert('end',
                modname + ' - ' + (desc or '(no description)'))

        def stop(self, event=None):
            if self.scanner:
                self.scanner.quit = 1
                self.scanner = None

        def done(self):
            # Scan finished: restore the idle search row.
            self.scanner = None
            self.search_lbl.config(text='Search for')
            self.search_lbl.pack(side='left')
            self.search_ent.pack(side='right', fill='x', expand=1)
            if sys.platform != 'win32': self.stop_btn.forget()
            self.stop_btn.config(state='disabled')

        def select(self, event=None):
            self.goto_btn.config(state='normal')

        def goto(self, event=None):
            selection = self.result_lst.curselection()
            if selection:
                modname = split(self.result_lst.get(selection[0]))[0]
                self.open(url=self.server.url + modname + '.html')

        def collapse(self):
            if not self.expanded: return
            self.result_frm.forget()
            self.result_scr.forget()
            self.result_lst.forget()
            self.bigwidth = self.window.winfo_width()
            self.bigheight = self.window.winfo_height()
            self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
            self.window.wm_minsize(self.minwidth, self.minheight)
            self.expanded = 0

        def expand(self):
            if self.expanded: return
            self.result_frm.pack(side='bottom', fill='x')
            self.result_scr.pack(side='right', fill='y')
            self.result_lst.pack(side='top', fill='both', expand=1)
            self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
            self.window.wm_minsize(self.minwidth, self.bigminheight)
            self.expanded = 1

        def hide(self, event=None):
            self.stop()
            self.collapse()

    import Tkinter
    try:
        root = Tkinter.Tk()
        # Tk will crash if pythonw.exe has an XP .manifest
        # file and the root is not destroyed explicitly.
        # If the problem is ever fixed in Tk, the explicit
        # destroy can go.
        try:
            gui = GUI(root)
            root.mainloop()
        finally:
            root.destroy()
    except KeyboardInterrupt:
        pass
# -------------------------------------------------- command-line interface
def ispath(x):
    """Return whether x is a string containing the OS path separator."""
    if not isinstance(x, str):
        return False
    return find(x, os.sep) >= 0
def cli():
    """Command-line interface (looks at sys.argv to decide what to do)."""
    import getopt
    # Sentinel exception used to fall through to the usage message.
    class BadUsage: pass

    # Scripts don't get the current directory in their path by default
    # unless they are run with the '-m' switch
    if '' not in sys.path:
        scriptdir = os.path.dirname(sys.argv[0])
        if scriptdir in sys.path:
            sys.path.remove(scriptdir)
        sys.path.insert(0, '.')

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
        writing = 0

        for opt, val in opts:
            if opt == '-g':
                gui()
                return
            if opt == '-k':
                apropos(val)
                return
            if opt == '-p':
                try:
                    port = int(val)
                except ValueError:
                    raise BadUsage
                def ready(server):
                    print 'pydoc server ready at %s' % server.url
                def stopped():
                    print 'pydoc server stopped'
                serve(port, ready, stopped)
                return
            if opt == '-w':
                writing = 1

        if not args: raise BadUsage
        for arg in args:
            if ispath(arg) and not os.path.exists(arg):
                print 'file %r does not exist' % arg
                break
            try:
                # A path to an existing file is imported and documented
                # as a module; anything else goes through help()/writedoc().
                if ispath(arg) and os.path.isfile(arg):
                    arg = importfile(arg)
                if writing:
                    if ispath(arg) and os.path.isdir(arg):
                        writedocs(arg)
                    else:
                        writedoc(arg)
                else:
                    help.help(arg)
            except ErrorDuringImport, value:
                print value

    except (getopt.error, BadUsage):
        cmd = os.path.basename(sys.argv[0])
        print """pydoc - the Python documentation tool

%s <name> ...
    Show text documentation on something. <name> may be the name of a
    Python keyword, topic, function, module, or package, or a dotted
    reference to a class or function within a module or module in a
    package. If <name> contains a '%s', it is used as the path to a
    Python source file to document. If name is 'keywords', 'topics',
    or 'modules', a listing of these things is displayed.

%s -k <keyword>
    Search for a keyword in the synopsis lines of all available modules.

%s -p <port>
    Start an HTTP server on the given port on the local machine. Port
    number 0 can be used to get an arbitrary unused port.

%s -g
    Pop up a graphical interface for finding and serving documentation.

%s -w <name> ...
    Write out the HTML documentation for a module to a file in the current
    directory. If <name> contains a '%s', it is treated as a filename; if
    it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
# Run the command-line interface when executed as a script.
if __name__ == '__main__': cli()
#!/usr/pkg/bin/python2.7
# -*- coding: latin-1 -*-
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://docs.python.org/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
# Module metadata.
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__version__ = "$Revision$"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import sys, imp, os, re, types, inspect, __builtin__, pkgutil
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
from traceback import extract_tb
try:
    from collections import deque
except ImportError:
    # Python 2.3 compatibility: minimal stand-in supporting popleft(),
    # the only deque operation this module uses.
    class deque(list):
        def popleft(self):
            return self.pop(0)
# --------------------------------------------------------- common routines
def pathdirs():
    """Convert sys.path into a list of absolute, existing, unique paths."""
    unique_dirs = []
    seen = []
    for entry in sys.path:
        # An empty entry means the current directory.
        abs_entry = os.path.abspath(entry or '.')
        canonical = os.path.normcase(abs_entry)
        # Keep only directories that exist, deduplicated case-insensitively
        # on platforms where that matters.
        if canonical not in seen and os.path.isdir(abs_entry):
            unique_dirs.append(abs_entry)
            seen.append(canonical)
    return unique_dirs
def getdoc(object):
    """Get the doc string or comments for an object.

    Returns '' when the object has neither a docstring nor comments.
    """
    result = inspect.getdoc(object) or inspect.getcomments(object)
    if not result:
        return ''
    # str.rstrip replaces the deprecated string-module rstrip; also drop a
    # leading blank (space-only) line so output starts with real text.
    return re.sub('^ *\n', '', result.rstrip()) or ''
def splitdoc(doc):
    """Split a doc string into a synopsis line (if any) and the rest.

    A synopsis is recognized when the first line is followed by a blank
    line; otherwise the whole string is treated as the body.
    """
    # str methods replace the deprecated string-module split/strip/join.
    lines = doc.strip().split('\n')
    if len(lines) == 1:
        return lines[0], ''
    elif len(lines) >= 2 and not lines[1].rstrip():
        return lines[0], '\n'.join(lines[2:])
    return '', '\n'.join(lines)
def classname(object, modname):
    """Get a class name, qualified with its module unless it lives in modname."""
    qualified = object.__name__
    if object.__module__ != modname:
        # Foreign class: prefix with its defining module.
        qualified = '%s.%s' % (object.__module__, qualified)
    return qualified
def isdata(object):
    """Check if an object is of a type that probably means it's data."""
    # Anything that is not executable or interpreter plumbing counts as data.
    non_data_checks = (inspect.ismodule, inspect.isclass, inspect.isroutine,
                       inspect.isframe, inspect.istraceback, inspect.iscode)
    return not any(check(object) for check in non_data_checks)
def replace(text, *pairs):
    """Do a series of global replacements on a string.

    pairs is a flat sequence (old1, new1, old2, new2, ...); each old
    substring is replaced by its new counterpart, left to right.
    """
    # str.replace does the work directly; the original round-tripped
    # through the deprecated string-module split/join pair.
    for i in range(0, len(pairs), 2):
        text = text.replace(pairs[i], pairs[i + 1])
    return text
def cram(text, maxlen):
    """Omit the middle of a string if needed to fit within maxlen characters."""
    if len(text) <= maxlen:
        return text
    # Split the budget (minus 3 for the ellipsis) between the two ends.
    head = max(0, (maxlen - 3) // 2)
    tail = max(0, maxlen - 3 - head)
    return text[:head] + '...' + text[len(text) - tail:]
# Matches the ' at 0x...' id suffix in default object reprs.
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)

def stripid(text):
    """Remove the hexadecimal id from a Python object representation."""
    # The behaviour of %p is implementation-dependent in terms of case.
    return _re_stripid.sub(r'\1', text)
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
    """Map every method name reachable from class cl to getattr(cl, name)."""
    found = {}
    for name, _value in inspect.getmembers(cl, _is_some_method):
        found[name] = 1
    for base in cl.__bases__:
        found.update(allmethods(base)) # all your base are belong to us
    # Resolve every collected name against cl so overrides win.
    for name in found.keys():
        found[name] = getattr(cl, name)
    return found
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None, obj=None):
    """Decide whether to show documentation on a variable."""
    # Certain special names are redundant.
    if name in ('__builtins__', '__doc__', '__file__', '__path__',
                '__module__', '__name__', '__slots__', '__package__'):
        return 0
    # Private names are hidden, but special names are displayed.
    if name.startswith('__') and name.endswith('__'):
        return 1
    # Namedtuples have public fields and methods with a single leading underscore
    if name.startswith('_') and hasattr(obj, '_fields'):
        return 1
    if all is not None:
        # only document that which the programmer exported in __all__
        return name in all
    return not name.startswith('_')
def classify_class_attrs(object):
    """Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
    def fixup(entry):
        name, kind, cls, value = entry
        # inspect reports properties and other data descriptors as plain
        # 'data'; give them a kind of their own so formatters can segregate.
        if inspect.isdatadescriptor(value):
            kind = 'data descriptor'
        return name, kind, cls, value
    return [fixup(entry) for entry in inspect.classify_class_attrs(object)]
# ----------------------------------------------------- module manipulation
def ispackage(path):
    """Guess whether a path refers to a package directory."""
    if not os.path.isdir(path):
        return False
    # A directory is a package when it holds an __init__ in any format.
    for ext in ('.py', '.pyc', '.pyo'):
        if os.path.isfile(os.path.join(path, '__init__' + ext)):
            return True
    return False
def source_synopsis(file):
    """Return the first line of the module docstring found at the top of an
    already-open source file, or None if there is no docstring."""
    line = file.readline()
    # Skip leading comment lines and blank lines.
    while line[:1] == '#' or not strip(line):
        line = file.readline()
        if not line: break
    line = strip(line)
    # Treat a raw docstring the same as a plain one.
    if line[:4] == 'r"""': line = line[1:]
    if line[:3] == '"""':
        line = line[3:]
        if line[-1:] == '\\': line = line[:-1]
        # Advance to the first non-blank line of the docstring.
        while not strip(line):
            line = file.readline()
            if not line: break
        result = strip(split(line, '"""')[0])
    else: result = None
    return result
def synopsis(filename, cache={}):
    """Get the one-line summary out of a module file."""
    # cache maps filename -> (mtime, synopsis); the mutable default argument
    # is an intentional cross-call memoization idiom.
    mtime = os.stat(filename).st_mtime
    lastupdate, result = cache.get(filename, (0, None))
    if lastupdate < mtime:
        info = inspect.getmoduleinfo(filename)
        try:
            file = open(filename)
        except IOError:
            # module can't be opened, so skip it
            return None
        if info and 'b' in info[2]: # binary modules have to be imported
            try: module = imp.load_module('__temp__', file, filename, info[1:])
            except: return None
            result = (module.__doc__ or '').splitlines()[0]
            # Drop the throwaway module so it can't shadow anything.
            del sys.modules['__temp__']
        else: # text modules can be directly examined
            result = source_synopsis(file)
            file.close()
        cache[filename] = (mtime, result)
    return result
class ErrorDuringImport(Exception):
    """Errors that occurred while trying to import something to document it."""
    def __init__(self, filename, exc_info):
        # exc_info is a (type, value, traceback) triple as returned by
        # sys.exc_info().
        exc, value, tb = exc_info
        self.filename = filename
        self.exc = exc
        self.value = value
        self.tb = tb

    def __str__(self):
        exc = self.exc
        # Old-style (Python 2) exception classes are shown by name.
        if type(exc) is types.ClassType:
            exc = exc.__name__
        return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
    """Import a Python source file or compiled file given its path.

    Raises ErrorDuringImport if executing the module fails.
    """
    # Peek at the magic number to decide between source and compiled form.
    magic = imp.get_magic()
    file = open(path, 'r')
    try:
        is_compiled = file.read(len(magic)) == magic
    finally:
        file.close()
    kind = imp.PY_COMPILED if is_compiled else imp.PY_SOURCE
    filename = os.path.basename(path)
    name, ext = os.path.splitext(filename)
    file = open(path, 'r')
    # try/finally guarantees the handle is closed even when load_module
    # raises; the original leaked the open file on the error path.
    try:
        try:
            module = imp.load_module(name, file, path, (ext, 'r', kind))
        except:
            raise ErrorDuringImport(path, sys.exc_info())
    finally:
        file.close()
    return module
def safeimport(path, forceload=0, cache={}):
    """Import a module; handle errors; return None if the module isn't found.

    If the module *is* found but an exception occurs, it's wrapped in an
    ErrorDuringImport exception and reraised.  Unlike __import__, if a
    package path is specified, the module at the end of the path is returned,
    not the package at the beginning.  If the optional 'forceload' argument
    is 1, we reload the module from disk (unless it's a dynamic extension)."""
    try:
        # If forceload is 1 and the module has been previously loaded from
        # disk, we always have to reload the module.  Checking the file's
        # mtime isn't good enough (e.g. the module could contain a class
        # that inherits from another module that has changed).
        if forceload and path in sys.modules:
            if path not in sys.builtin_module_names:
                # Avoid simply calling reload() because it leaves names in
                # the currently loaded module lying around if they're not
                # defined in the new source file.  Instead, remove the
                # module from sys.modules and re-import.  Also remove any
                # submodules because they won't appear in the newly loaded
                # module's namespace if they're already in sys.modules.
                subs = [m for m in sys.modules if m.startswith(path + '.')]
                for key in [path] + subs:
                    # Prevent garbage collection.
                    cache[key] = sys.modules[key]
                    del sys.modules[key]
        module = __import__(path)
    except:
        # Did the error occur before or after the module was found?
        (exc, value, tb) = info = sys.exc_info()
        if path in sys.modules:
            # An error occurred while executing the imported module.
            raise ErrorDuringImport(sys.modules[path].__file__, info)
        elif exc is SyntaxError:
            # A SyntaxError occurred before we could execute the module.
            raise ErrorDuringImport(value.filename, info)
        elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport':
            # The import error occurred directly in this function,
            # which means there is no such module in the path.
            return None
        else:
            # Some other error occurred during the importing process.
            raise ErrorDuringImport(path, sys.exc_info())
    # Walk down dotted components to return the leaf module.
    for part in split(path, '.')[1:]:
        try: module = getattr(module, part)
        except AttributeError: return None
    return module
# ---------------------------------------------------- formatter base class
class Doc:
    """Base formatter: dispatches an object to the appropriate doc* method.

    Subclasses override docmodule/docclass/docroutine/docother/docproperty/
    docdata; any they do not override raises TypeError via fail()."""

    def document(self, object, name=None, *args):
        """Generate documentation for an object."""
        args = (object, name) + args
        # 'try' clause is to attempt to handle the possibility that inspect
        # identifies something in a way that pydoc itself has issues handling;
        # think 'super' and how it is a descriptor (which raises the exception
        # by lacking a __name__ attribute) and an instance.
        if inspect.isgetsetdescriptor(object): return self.docdata(*args)
        if inspect.ismemberdescriptor(object): return self.docdata(*args)
        try:
            if inspect.ismodule(object): return self.docmodule(*args)
            if inspect.isclass(object): return self.docclass(*args)
            if inspect.isroutine(object): return self.docroutine(*args)
        except AttributeError:
            pass
        if isinstance(object, property): return self.docproperty(*args)
        return self.docother(*args)

    def fail(self, object, name=None, *args):
        """Raise an exception for unimplemented types."""
        message = "don't know how to document object%s of type %s" % (
            name and ' ' + repr(name), type(object).__name__)
        raise TypeError, message

    # Default every type-specific formatter to fail(); subclasses override.
    docmodule = docclass = docroutine = docother = docproperty = docdata = fail

    def getdocloc(self, object):
        """Return the location of module docs or None"""
        try:
            file = inspect.getabsfile(object)
        except TypeError:
            file = '(built-in)'

        docloc = os.environ.get("PYTHONDOCS",
                                "http://docs.python.org/library")
        basedir = os.path.join(sys.exec_prefix, "lib",
                               "python"+sys.version[0:3])
        # Only core/standard-library modules (not site-packages) get a
        # pointer into the Library Reference.
        if (isinstance(object, type(os)) and
            (object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
                                 'marshal', 'posix', 'signal', 'sys',
                                 'thread', 'zipimport') or
             (file.startswith(basedir) and
              not file.startswith(os.path.join(basedir, 'site-packages')))) and
            object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
            if docloc.startswith("http://"):
                docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
            else:
                docloc = os.path.join(docloc, object.__name__ + ".html")
        else:
            docloc = None
        return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
    """Class for safely making an HTML representation of a Python object."""
    def __init__(self):
        Repr.__init__(self)
        self.maxlist = self.maxtuple = 20
        self.maxdict = 10
        self.maxstring = self.maxother = 100

    def escape(self, text):
        """HTML-escape text.  '&' must be replaced first so the entities
        produced for '<' and '>' are not themselves re-escaped.  (Restored:
        the entity names had been un-escaped into literal characters,
        turning this method into an identity function.)"""
        return text.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')

    def repr(self, object):
        return Repr.repr(self, object)

    def repr1(self, x, level):
        # Dispatch to a type-specific repr_* method when one exists.
        if hasattr(type(x), '__name__'):
            methodname = 'repr_' + join(split(type(x).__name__), '_')
            if hasattr(self, methodname):
                return getattr(self, methodname)(x, level)
        return self.escape(cram(stripid(repr(x)), self.maxother))

    def repr_string(self, x, level):
        test = cram(x, self.maxstring)
        testrepr = repr(test)
        if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
            # Backslashes are only literal in the string and are never
            # needed to make any special characters, so show a raw string.
            return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
        # Highlight escape sequences inside the repr.
        return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
                      r'<font color="#c040c0">\1</font>',
                      self.escape(testrepr))

    repr_str = repr_string

    def repr_instance(self, x, level):
        try:
            return self.escape(cram(stripid(repr(x)), self.maxstring))
        except:
            return self.escape('<%s instance>' % x.__class__.__name__)

    repr_unicode = repr_string
class HTMLDoc(Doc):
    """Formatter class for HTML documentation."""

    # ------------------------------------------- HTML formatting utilities

    # Shared repr helper; expose its bound methods on the class.
    _repr_instance = HTMLRepr()
    repr = _repr_instance.repr
    escape = _repr_instance.escape
    def page(self, title, contents):
        """Format an HTML page."""
        # contents is pre-rendered HTML; only the skeleton is added here.
        return '''
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom> <br>
<font color="%s" face="helvetica, arial"> <br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, fgcol, title, fgcol, extras or ' ')
def section(self, title, fgcol, bgcol, contents, width=6,
prelude='', marginalia=None, gap=' '):
"""Format a section with a heading."""
if marginalia is None:
marginalia = '<tt>' + ' ' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom> <br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
    def bigsection(self, title, *args):
        """Format a section with a big heading."""
        # Same as section(), with an enlarged, bolded title.
        title = '<big><strong>%s</strong></big>' % title
        return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(expandtabs(text))
return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
' ', ' ', '\n', '<br>\n')
    def multicolumn(self, list, format, cols=4):
        """Format a list of items into a multi-column list."""
        result = ''
        # Number of rows needed so cols columns cover the whole list.
        rows = (len(list)+cols-1)//cols
        for col in range(cols):
            result = result + '<td width="%d%%" valign=top>' % (100//cols)
            for i in range(rows*col, rows*col+rows):
                if i < len(list):
                    result = result + format(list[i]) + '<br>\n'
            result = result + '</td>'
        return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
    def namelink(self, name, *dicts):
        """Make a link for an identifier, given name-to-URL mappings."""
        for dict in dicts:
            if name in dict:
                return '<a href="%s">%s</a>' % (dict[name], name)
        # No known anchor: emit the bare name.
        return name
    def classlink(self, object, modname):
        """Make a link for a class."""
        name, module = object.__name__, sys.modules.get(object.__module__)
        # Only link when the class is reachable as a top-level attribute of
        # its defining module; otherwise fall back to plain text.
        if hasattr(module, name) and getattr(module, name) is object:
            return '<a href="%s.html#%s">%s</a>' % (
                module.__name__, name, classname(object, modname))
        return classname(object, modname)
    def modulelink(self, object):
        """Make a link for a module."""
        return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
    def modpkglink(self, data):
        """Make a link for a module or package to display in an index."""
        name, path, ispackage, shadowed = data
        if shadowed:
            # A same-named module earlier on the path hides this one.
            return self.grey(name)
        if path:
            url = '%s.%s.html' % (path, name)
        else:
            url = '%s.html' % name
        if ispackage:
            text = '<strong>%s</strong> (package)' % name
        else:
            text = name
        return '<a href="%s">%s</a>' % (url, text)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
url = escape(all).replace('"', '"')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return join(results, '')
# ---------------------------------------------- type-specific routines
    def formattree(self, tree, modname, parent=None):
        """Produce HTML for a class tree as given by inspect.getclasstree()."""
        result = ''
        for entry in tree:
            if type(entry) is type(()):
                # (class, bases) tuple: emit the class and its base list.
                c, bases = entry
                result = result + '<dt><font face="helvetica, arial">'
                result = result + self.classlink(c, modname)
                if bases and bases != (parent,):
                    parents = []
                    for base in bases:
                        parents.append(self.classlink(base, modname))
                    result = result + '(' + join(parents, ', ') + ')'
                result = result + '\n</font></dt>'
            elif type(entry) is type([]):
                # Nested list: subclasses of the class that precedes it.
                result = result + '<dd>\n%s</dd>\n' % self.formattree(
                    entry, modname, c)
        return '<dl>\n%s</dl>\n' % result
    def docmodule(self, object, name=None, mod=None, *ignored):
        """Produce HTML documentation for a module object."""
        name = object.__name__ # ignore the passed-in name
        try:
            all = object.__all__
        except AttributeError:
            all = None
        # Build the breadcrumb-style linked module name from dotted parts.
        parts = split(name, '.')
        links = []
        for i in range(len(parts)-1):
            links.append(
                '<a href="%s.html"><font color="#ffffff">%s</font></a>' %
                (join(parts[:i+1], '.'), parts[i]))
        linkedname = join(links + parts[-1:], '.')
        head = '<big><big><strong>%s</strong></big></big>' % linkedname
        try:
            path = inspect.getabsfile(object)
            url = path
            if sys.platform == 'win32':
                import nturl2path
                url = nturl2path.pathname2url(path)
            filelink = '<a href="file:%s">%s</a>' % (url, path)
        except TypeError:
            filelink = '(built-in)'
        info = []
        if hasattr(object, '__version__'):
            version = str(object.__version__)
            # Strip CVS '$Revision: ... $' markers down to the number.
            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
                version = strip(version[11:-1])
            info.append('version %s' % self.escape(version))
        if hasattr(object, '__date__'):
            info.append(self.escape(str(object.__date__)))
        if info:
            head = head + ' (%s)' % join(info, ', ')
        docloc = self.getdocloc(object)
        if docloc is not None:
            docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
        else:
            docloc = ''
        result = self.heading(
            head, '#ffffff', '#7799ee',
            '<a href=".">index</a><br>' + filelink + docloc)

        modules = inspect.getmembers(object, inspect.ismodule)

        # Collect visible classes and build anchors for cross-linking.
        classes, cdict = [], {}
        for key, value in inspect.getmembers(object, inspect.isclass):
            # if __all__ exists, believe it.  Otherwise use old heuristic.
            if (all is not None or
                (inspect.getmodule(value) or object) is object):
                if visiblename(key, all, object):
                    classes.append((key, value))
                    cdict[key] = cdict[value] = '#' + key
        # Also link base classes that live in other documented modules.
        for key, value in classes:
            for base in value.__bases__:
                key, modname = base.__name__, base.__module__
                module = sys.modules.get(modname)
                if modname != name and module and hasattr(module, key):
                    if getattr(module, key) is base:
                        if not key in cdict:
                            cdict[key] = cdict[base] = modname + '.html#' + key
        # Collect visible functions/routines with their anchors.
        funcs, fdict = [], {}
        for key, value in inspect.getmembers(object, inspect.isroutine):
            # if __all__ exists, believe it.  Otherwise use old heuristic.
            if (all is not None or
                inspect.isbuiltin(value) or inspect.getmodule(value) is object):
                if visiblename(key, all, object):
                    funcs.append((key, value))
                    fdict[key] = '#-' + key
                    if inspect.isfunction(value): fdict[value] = fdict[key]
        data = []
        for key, value in inspect.getmembers(object, isdata):
            if visiblename(key, all, object):
                data.append((key, value))

        doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
        doc = doc and '<tt>%s</tt>' % doc
        result = result + '<p>%s</p>\n' % doc

        if hasattr(object, '__path__'):
            # Package: list its submodules instead of imported modules.
            modpkgs = []
            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
                modpkgs.append((modname, name, ispkg, 0))
            modpkgs.sort()
            contents = self.multicolumn(modpkgs, self.modpkglink)
            result = result + self.bigsection(
                'Package Contents', '#ffffff', '#aa55cc', contents)
        elif modules:
            contents = self.multicolumn(
                modules, lambda key_value, s=self: s.modulelink(key_value[1]))
            result = result + self.bigsection(
                'Modules', '#ffffff', '#aa55cc', contents)

        if classes:
            classlist = map(lambda key_value: key_value[1], classes)
            contents = [
                self.formattree(inspect.getclasstree(classlist, 1), name)]
            for key, value in classes:
                contents.append(self.document(value, key, name, fdict, cdict))
            result = result + self.bigsection(
                'Classes', '#ffffff', '#ee77aa', join(contents))
        if funcs:
            contents = []
            for key, value in funcs:
                contents.append(self.document(value, key, name, fdict, cdict))
            result = result + self.bigsection(
                'Functions', '#ffffff', '#eeaa77', join(contents))
        if data:
            contents = []
            for key, value in data:
                contents.append(self.document(value, key))
            result = result + self.bigsection(
                'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
        if hasattr(object, '__author__'):
            contents = self.markup(str(object.__author__), self.preformat)
            result = result + self.bigsection(
                'Author', '#ffffff', '#7799ee', contents)
        if hasattr(object, '__credits__'):
            contents = self.markup(str(object.__credits__), self.preformat)
            result = result + self.bigsection(
                'Credits', '#ffffff', '#7799ee', contents)

        return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self.document(getattr(object, name), name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = filter(lambda data: visiblename(data[0], obj=object),
classify_class_attrs(object))
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
value = getattr(object, key)
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
try:
attrs.sort(key=lambda t: t[0])
except TypeError:
attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % join(parents, ', ')
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
doc = doc and '<tt>%s<br> </tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
    def formatvalue(self, object):
        """Format an argument default value as text."""
        return self.grey('=' + self.repr(object))
    def docroutine(self, object, name=None, mod=None,
                   funcs={}, classes={}, methods={}, cl=None):
        """Produce HTML documentation for a function or method object."""
        realname = object.__name__
        name = name or realname
        anchor = (cl and cl.__name__ or '') + '-' + name
        note = ''
        skipdocs = 0
        if inspect.ismethod(object):
            # Python 2 bound/unbound method: note its origin, then document
            # the underlying function.
            imclass = object.im_class
            if cl:
                if imclass is not cl:
                    note = ' from ' + self.classlink(imclass, mod)
            else:
                if object.im_self is not None:
                    note = ' method of %s instance' % self.classlink(
                        object.im_self.__class__, mod)
                else:
                    note = ' unbound %s method' % self.classlink(imclass,mod)
            object = object.im_func

        if name == realname:
            title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
        else:
            # Aliased name: link back to the real definition when possible.
            if (cl and realname in cl.__dict__ and
                cl.__dict__[realname] is object):
                reallink = '<a href="#%s">%s</a>' % (
                    cl.__name__ + '-' + realname, realname)
                skipdocs = 1
            else:
                reallink = realname
            title = '<a name="%s"><strong>%s</strong></a> = %s' % (
                anchor, name, reallink)
        if inspect.isfunction(object):
            args, varargs, varkw, defaults = inspect.getargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, formatvalue=self.formatvalue)
            if realname == '<lambda>':
                title = '<strong>%s</strong> <em>lambda</em> ' % name
                argspec = argspec[1:-1] # remove parentheses
        else:
            # Signature unavailable for built-ins.
            argspec = '(...)'

        decl = title + argspec + (note and self.grey(
            '<font face="helvetica, arial">%s</font>' % note))

        if skipdocs:
            return '<dl><dt>%s</dt></dl>\n' % decl
        else:
            doc = self.markup(
                getdoc(object), self.preformat, funcs, classes, methods)
            doc = doc and '<dd><tt>%s</tt></dd>' % doc
            return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
    def _docdescriptor(self, name, value, mod):
        # Shared helper for docproperty/docdata: name plus optional doc text.
        results = []
        push = results.append

        if name:
            push('<dl><dt><strong>%s</strong></dt>\n' % name)
        if value.__doc__ is not None:
            doc = self.markup(getdoc(value), self.preformat)
            push('<dd><tt>%s</tt></dd>\n' % doc)
        push('</dl>\n')

        return ''.join(results)
    def docproperty(self, object, name=None, mod=None, cl=None):
        """Produce html documentation for a property."""
        return self._docdescriptor(name, object, mod)
    def docother(self, object, name=None, mod=None, *ignored):
        """Produce HTML documentation for a data object."""
        lhs = name and '<strong>%s</strong> = ' % name or ''
        return lhs + self.repr(object)
    def docdata(self, object, name=None, mod=None, cl=None):
        """Produce html documentation for a data descriptor."""
        return self._docdescriptor(name, object, mod)
    def index(self, dir, shadowed=None):
        """Generate an HTML index for a directory of modules."""
        modpkgs = []
        if shadowed is None: shadowed = {}
        for importer, name, ispkg in pkgutil.iter_modules([dir]):
            # Names seen in an earlier directory shadow this one.
            modpkgs.append((name, '', ispkg, name in shadowed))
            shadowed[name] = 1

        modpkgs.sort()
        contents = self.multicolumn(modpkgs, self.modpkglink)
        return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
    """Class for safely making a text representation of a Python object."""
    def __init__(self):
        Repr.__init__(self)
        # Tighter limits than Repr's defaults, tuned for one-line summaries.
        self.maxlist = self.maxtuple = 20
        self.maxdict = 10
        self.maxstring = self.maxother = 100

    def repr1(self, x, level):
        """Dispatch to a repr_<typename> method when one exists."""
        if hasattr(type(x), '__name__'):
            # Type names may contain spaces ("instance method"); map them
            # to underscores to form a valid method name.
            methodname = 'repr_' + join(split(type(x).__name__), '_')
            if hasattr(self, methodname):
                return getattr(self, methodname)(x, level)
        return cram(stripid(repr(x)), self.maxother)

    def repr_string(self, x, level):
        test = cram(x, self.maxstring)
        testrepr = repr(test)
        if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
            # Backslashes are only literal in the string and are never
            # needed to make any special characters, so show a raw string.
            return 'r' + testrepr[0] + test + testrepr[0]
        return testrepr

    repr_str = repr_string

    def repr_instance(self, x, level):
        try:
            return cram(stripid(repr(x)), self.maxstring)
        # Bug fix: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt raised from a broken __repr__.  Catch only
        # ordinary exceptions and fall back to a generic description.
        except Exception:
            return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
    """Formatter class for text documentation."""

    # ------------------------------------------- text formatting utilities

    # Shared Repr instance; its bound repr() is used by every routine below.
    _repr_instance = TextRepr()
    repr = _repr_instance.repr

    def bold(self, text):
        """Format a string in bold by overstriking."""
        # "c\bc" is rendered as bold by terminals and pagers such as less.
        return join(map(lambda ch: ch + '\b' + ch, text), '')

    def indent(self, text, prefix='    '):
        """Indent text by prepending a given prefix to each line."""
        if not text: return ''
        lines = split(text, '\n')
        lines = map(lambda line, prefix=prefix: prefix + line, lines)
        # Strip trailing whitespace from the last line only.
        if lines: lines[-1] = rstrip(lines[-1])
        return join(lines, '\n')

    def section(self, title, contents):
        """Format a section with a given heading."""
        return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'

    # ---------------------------------------------- type-specific routines

    def formattree(self, tree, modname, parent=None, prefix=''):
        """Render in text a class tree as returned by inspect.getclasstree()."""
        result = ''
        for entry in tree:
            if type(entry) is type(()):
                # A (class, bases) leaf entry.
                c, bases = entry
                result = result + prefix + classname(c, modname)
                if bases and bases != (parent,):
                    parents = map(lambda c, m=modname: classname(c, m), bases)
                    result = result + '(%s)' % join(parents, ', ')
                result = result + '\n'
            elif type(entry) is type([]):
                # A nested list holds the subclasses of the preceding entry.
                result = result + self.formattree(
                    entry, modname, c, prefix + '    ')
        return result

    def docmodule(self, object, name=None, mod=None):
        """Produce text documentation for a given module object."""
        name = object.__name__ # ignore the passed-in name
        synop, desc = splitdoc(getdoc(object))
        result = self.section('NAME', name + (synop and ' - ' + synop))

        try:
            all = object.__all__
        except AttributeError:
            all = None

        try:
            file = inspect.getabsfile(object)
        except TypeError:
            # Built-in modules have no source file.
            file = '(built-in)'
        result = result + self.section('FILE', file)

        docloc = self.getdocloc(object)
        if docloc is not None:
            result = result + self.section('MODULE DOCS', docloc)

        if desc:
            result = result + self.section('DESCRIPTION', desc)

        classes = []
        for key, value in inspect.getmembers(object, inspect.isclass):
            # if __all__ exists, believe it.  Otherwise use old heuristic.
            if (all is not None
                or (inspect.getmodule(value) or object) is object):
                if visiblename(key, all, object):
                    classes.append((key, value))
        funcs = []
        for key, value in inspect.getmembers(object, inspect.isroutine):
            # if __all__ exists, believe it.  Otherwise use old heuristic.
            if (all is not None or
                inspect.isbuiltin(value) or inspect.getmodule(value) is object):
                if visiblename(key, all, object):
                    funcs.append((key, value))
        data = []
        for key, value in inspect.getmembers(object, isdata):
            if visiblename(key, all, object):
                data.append((key, value))

        modpkgs = []
        modpkgs_names = set()
        if hasattr(object, '__path__'):
            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
                modpkgs_names.add(modname)
                if ispkg:
                    modpkgs.append(modname + ' (package)')
                else:
                    modpkgs.append(modname)
            modpkgs.sort()
            result = result + self.section(
                'PACKAGE CONTENTS', join(modpkgs, '\n'))

        # Detect submodules as sometimes created by C extensions
        submodules = []
        for key, value in inspect.getmembers(object, inspect.ismodule):
            if value.__name__.startswith(name + '.') and key not in modpkgs_names:
                submodules.append(key)
        if submodules:
            submodules.sort()
            result = result + self.section(
                'SUBMODULES', join(submodules, '\n'))

        if classes:
            classlist = map(lambda key_value: key_value[1], classes)
            contents = [self.formattree(
                inspect.getclasstree(classlist, 1), name)]
            for key, value in classes:
                contents.append(self.document(value, key, name))
            result = result + self.section('CLASSES', join(contents, '\n'))

        if funcs:
            contents = []
            for key, value in funcs:
                contents.append(self.document(value, key, name))
            result = result + self.section('FUNCTIONS', join(contents, '\n'))

        if data:
            contents = []
            for key, value in data:
                contents.append(self.docother(value, key, name, maxlen=70))
            result = result + self.section('DATA', join(contents, '\n'))

        if hasattr(object, '__version__'):
            version = str(object.__version__)
            # Strip RCS "$Revision: ... $" keyword expansion; the leading '$'
            # is concatenated so this file's own checkout does not expand it.
            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
                version = strip(version[11:-1])
            result = result + self.section('VERSION', version)
        if hasattr(object, '__date__'):
            result = result + self.section('DATE', str(object.__date__))
        if hasattr(object, '__author__'):
            result = result + self.section('AUTHOR', str(object.__author__))
        if hasattr(object, '__credits__'):
            result = result + self.section('CREDITS', str(object.__credits__))
        return result

    def docclass(self, object, name=None, mod=None, *ignored):
        """Produce text documentation for a given class object."""
        realname = object.__name__
        name = name or realname
        bases = object.__bases__

        def makename(c, m=object.__module__):
            return classname(c, m)

        if name == realname:
            title = 'class ' + self.bold(realname)
        else:
            # Documented under an alias: "alias = class realname".
            title = self.bold(name) + ' = class ' + realname
        if bases:
            parents = map(makename, bases)
            title = title + '(%s)' % join(parents, ', ')

        doc = getdoc(object)
        contents = doc and [doc + '\n'] or []
        push = contents.append

        # List the mro, if non-trivial.
        mro = deque(inspect.getmro(object))
        if len(mro) > 2:
            push("Method resolution order:")
            for base in mro:
                push('    ' + makename(base))
            push('')

        # Cute little class to pump out a horizontal rule between sections.
        class HorizontalRule:
            def __init__(self):
                self.needone = 0
            def maybe(self):
                if self.needone:
                    push('-' * 70)
                self.needone = 1
        hr = HorizontalRule()

        def spill(msg, attrs, predicate):
            # Emit the attributes that satisfy 'predicate' via self.document,
            # returning the attributes that are left over.
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    push(self.document(getattr(object, name),
                                       name, mod, object))
            return attrs

        def spilldescriptors(msg, attrs, predicate):
            # Same as spill(), but for data descriptors.
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    push(self._docdescriptor(name, value, mod))
            return attrs

        def spilldata(msg, attrs, predicate):
            # Same as spill(), but for plain data attributes.
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    if (hasattr(value, '__call__') or
                            inspect.isdatadescriptor(value)):
                        doc = getdoc(value)
                    else:
                        doc = None
                    push(self.docother(getattr(object, name),
                                       name, mod, maxlen=70, doc=doc) + '\n')
            return attrs

        attrs = filter(lambda data: visiblename(data[0], obj=object),
                       classify_class_attrs(object))

        # Walk the mro, documenting the attributes each class contributes.
        while attrs:
            if mro:
                thisclass = mro.popleft()
            else:
                thisclass = attrs[0][2]
            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)

            if thisclass is __builtin__.object:
                # Skip the attributes everything inherits from object.
                attrs = inherited
                continue
            elif thisclass is object:
                tag = "defined here"
            else:
                tag = "inherited from %s" % classname(thisclass,
                                                      object.__module__)

            # Sort attrs by name.
            attrs.sort()

            # Pump out the attrs, segregated by kind.
            attrs = spill("Methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'method')
            attrs = spill("Class methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'class method')
            attrs = spill("Static methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'static method')
            attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
                                     lambda t: t[1] == 'data descriptor')
            attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
                              lambda t: t[1] == 'data')
            assert attrs == []
            attrs = inherited

        contents = '\n'.join(contents)
        if not contents:
            return title + '\n'
        return title + '\n' + self.indent(rstrip(contents), ' |  ') + '\n'

    def formatvalue(self, object):
        """Format an argument default value as text."""
        return '=' + self.repr(object)

    def docroutine(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a function or method object."""
        realname = object.__name__
        name = name or realname
        note = ''
        skipdocs = 0
        if inspect.ismethod(object):
            imclass = object.im_class
            if cl:
                if imclass is not cl:
                    note = ' from ' + classname(imclass, mod)
            else:
                if object.im_self is not None:
                    note = ' method of %s instance' % classname(
                        object.im_self.__class__, mod)
                else:
                    note = ' unbound %s method' % classname(imclass,mod)
            # Document the underlying function from here on.
            object = object.im_func

        if name == realname:
            title = self.bold(realname)
        else:
            # An alias: "alias = realname"; skip the doc body when the real
            # definition is documented in the same class anyway.
            if (cl and realname in cl.__dict__ and
                cl.__dict__[realname] is object):
                skipdocs = 1
            title = self.bold(name) + ' = ' + realname
        if inspect.isfunction(object):
            args, varargs, varkw, defaults = inspect.getargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, formatvalue=self.formatvalue)
            if realname == '<lambda>':
                title = self.bold(name) + ' lambda '
                argspec = argspec[1:-1] # remove parentheses
        else:
            # Built-ins and other non-functions have no introspectable args.
            argspec = '(...)'
        decl = title + argspec + note

        if skipdocs:
            return decl + '\n'
        else:
            doc = getdoc(object) or ''
            return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')

    def _docdescriptor(self, name, value, mod):
        """Produce text for a descriptor: bold name, then its indented doc."""
        results = []
        push = results.append

        if name:
            push(self.bold(name))
            push('\n')
        doc = getdoc(value) or ''
        if doc:
            push(self.indent(doc))
            push('\n')
        return ''.join(results)

    def docproperty(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a property."""
        return self._docdescriptor(name, object, mod)

    def docdata(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a data descriptor."""
        return self._docdescriptor(name, object, mod)

    def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
        """Produce text documentation for a data object."""
        repr = self.repr(object)
        if maxlen:
            # Measure against the unbolded line, then truncate with '...'.
            line = (name and name + ' = ' or '') + repr
            chop = maxlen - len(line)
            if chop < 0: repr = repr[:chop] + '...'
        line = (name and self.bold(name) + ' = ' or '') + repr
        if doc is not None:
            line += '\n' + self.indent(str(doc))
        return line
# --------------------------------------------------------- user interfaces
def pager(text):
    """The first time this is called, determine what kind of pager to use."""
    # Self-replacing stub: rebind the module-level name 'pager' to the
    # concrete pager chosen by getpager(), so detection runs only once.
    global pager
    pager = getpager()
    pager(text)
def getpager():
    """Decide what method to use for paging through text."""
    # No paging unless stdout is a real file object (not a replacement)...
    if type(sys.stdout) is not types.FileType:
        return plainpager
    # ...and both stdin and stdout are attached to a terminal.
    if not sys.stdin.isatty() or not sys.stdout.isatty():
        return plainpager
    if 'PAGER' in os.environ:
        if sys.platform == 'win32': # pipes completely broken in Windows
            return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
        elif os.environ.get('TERM') in ('dumb', 'emacs'):
            # Dumb terminals cannot render overstruck bold; strip it first.
            return lambda text: pipepager(plain(text), os.environ['PAGER'])
        else:
            return lambda text: pipepager(text, os.environ['PAGER'])
    if os.environ.get('TERM') in ('dumb', 'emacs'):
        return plainpager
    if sys.platform == 'win32' or sys.platform.startswith('os2'):
        return lambda text: tempfilepager(plain(text), 'more <')
    if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
        return lambda text: pipepager(text, 'less')

    # Last resort: probe for 'more' by running it on an empty temporary
    # file; fall back to the built-in tty pager if even that fails.
    import tempfile
    (fd, filename) = tempfile.mkstemp()
    os.close(fd)
    try:
        if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
            return lambda text: pipepager(text, 'more')
        else:
            return ttypager
    finally:
        os.unlink(filename)
def plain(text):
    """Remove boldface formatting from text."""
    # Overstruck bold is encoded as "char + backspace + char"; deleting
    # every "any-char + backspace" pair leaves the plain text behind.
    overstrike = re.compile('.\b')
    return overstrike.sub('', text)
def pipepager(text, cmd):
    """Page through text by feeding it to another program."""
    pipe = os.popen(cmd, 'w')
    try:
        pipe.write(text)
        pipe.close()
    except IOError:
        pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
    """Page through text by invoking a program on a temporary file."""
    import tempfile
    # Bug fix: mktemp() only picks a name, leaving a window in which another
    # process can create the file first (symlink attack).  mkstemp() creates
    # the file atomically with a safe mode -- matching what getpager() does.
    fd, filename = tempfile.mkstemp()
    try:
        file = os.fdopen(fd, 'w')
        try:
            file.write(text)
        finally:
            file.close()
        os.system(cmd + ' "' + filename + '"')
    finally:
        # Always remove the temporary file, even if writing or paging fails.
        os.unlink(filename)
def ttypager(text):
    """Page through text on a text terminal."""
    lines = split(plain(text), '\n')
    try:
        # Prefer raw single-character input when a tty is available.
        import tty
        fd = sys.stdin.fileno()
        old = tty.tcgetattr(fd)
        tty.setcbreak(fd)
        getchar = lambda: sys.stdin.read(1)
    except (ImportError, AttributeError):
        tty = None
        # Line-buffered fallback: take the first char of each input line.
        getchar = lambda: sys.stdin.readline()[:-1][:1]

    try:
        # Bug fix: environment values are strings, so the original
        # "os.environ.get('LINES', 25) - 1" raised TypeError whenever the
        # LINES variable was actually set.  Convert explicitly instead.
        try:
            height = int(os.environ.get('LINES', 25))
        except ValueError:
            height = 25
        r = inc = height - 1
        sys.stdout.write(join(lines[:inc], '\n') + '\n')
        while lines[r:]:
            sys.stdout.write('-- more --')
            sys.stdout.flush()
            c = getchar()

            if c in ('q', 'Q'):
                sys.stdout.write('\r          \r')
                break
            elif c in ('\r', '\n'):
                # Advance a single line.
                sys.stdout.write('\r          \r' + lines[r] + '\n')
                r = r + 1
                continue
            if c in ('b', 'B', '\x1b'):
                # Back up one screenful.
                r = r - inc - inc
                if r < 0: r = 0
            sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
            r = r + inc

    finally:
        if tty:
            # Restore the terminal settings saved above.
            tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
    """Simply print unformatted text.  This is the ultimate fallback."""
    stripped = plain(text)
    sys.stdout.write(stripped)
def describe(thing):
    """Produce a short description of the given thing."""
    if inspect.ismodule(thing):
        modname = thing.__name__
        if modname in sys.builtin_module_names:
            return 'built-in module ' + modname
        if hasattr(thing, '__path__'):
            return 'package ' + modname
        return 'module ' + modname
    if inspect.isbuiltin(thing):
        return 'built-in function ' + thing.__name__
    if inspect.isgetsetdescriptor(thing):
        owner = thing.__objclass__
        return 'getset descriptor %s.%s.%s' % (
            owner.__module__, owner.__name__, thing.__name__)
    if inspect.ismemberdescriptor(thing):
        owner = thing.__objclass__
        return 'member descriptor %s.%s.%s' % (
            owner.__module__, owner.__name__, thing.__name__)
    if inspect.isclass(thing):
        return 'class ' + thing.__name__
    if inspect.isfunction(thing):
        return 'function ' + thing.__name__
    if inspect.ismethod(thing):
        return 'method ' + thing.__name__
    if type(thing) is types.InstanceType:
        # Old-style (classic) class instances, Python 2 only.
        return 'instance of ' + thing.__class__.__name__
    return type(thing).__name__
def locate(path, forceload=0):
    """Locate an object by name or dotted path, importing as necessary."""
    parts = [part for part in split(path, '.') if part]
    module, n = None, 0
    # Import the longest importable prefix of the dotted path.
    while n < len(parts):
        nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
        if nextmodule: module, n = nextmodule, n + 1
        else: break
    if module:
        object = module
        # Resolve the remaining components as attributes of that module.
        for part in parts[n:]:
            try: object = getattr(object, part)
            except AttributeError: return None
        return object
    else:
        # Nothing importable matched; the whole path may name a builtin.
        # (Implicitly returns None when it does not.)
        if hasattr(__builtin__, path):
            return getattr(__builtin__, path)
# --------------------------------------- interactive interpreter interface
# Shared formatter singletons used by the top-level entry points below.
text = TextDoc()
html = HTMLDoc()

class _OldStyleClass: pass
# The type of classic (old-style) class instances; render_doc compares
# against this because inspect has no is* predicate for them.
_OLD_INSTANCE_TYPE = type(_OldStyleClass())
def resolve(thing, forceload=0):
    """Given an object or a path to an object, get the object and its name."""
    if isinstance(thing, str):
        # A dotted-path string: import it and look it up.
        object = locate(thing, forceload)
        if not object:
            raise ImportError, 'no Python documentation found for %r' % thing
        return object, thing
    else:
        # Already an object; its __name__ (if any) serves as the name.
        return thing, getattr(thing, '__name__', None)
def render_doc(thing, title='Python Library Documentation: %s', forceload=0):
    """Render text documentation, given an object or a path to an object."""
    object, name = resolve(thing, forceload)
    desc = describe(object)
    module = inspect.getmodule(object)
    # Qualify the description with where the object was found.
    if name and '.' in name:
        desc += ' in ' + name[:name.rfind('.')]
    elif module and module is not object:
        desc += ' in module ' + module.__name__

    if type(object) is _OLD_INSTANCE_TYPE:
        # If the passed object is an instance of an old-style class,
        # document its available methods instead of its value.
        object = object.__class__
    elif not (inspect.ismodule(object) or
              inspect.isclass(object) or
              inspect.isroutine(object) or
              inspect.isgetsetdescriptor(object) or
              inspect.ismemberdescriptor(object) or
              isinstance(object, property)):
        # If the passed object is a piece of data or an instance,
        # document its available methods instead of its value.
        object = type(object)
        desc += ' object'
    return title % desc + '\n\n' + text.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0):
    """Display text documentation, given an object or a path to an object."""
    try:
        pager(render_doc(thing, title, forceload))
    except (ImportError, ErrorDuringImport), value:
        # Import failures are reported to the user rather than raised.
        print value
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w')
file.write(page)
file.close()
print 'wrote', name + '.html'
except (ImportError, ErrorDuringImport), value:
print value
def writedocs(dir, pkgpath='', done=None):
    """Write out HTML documentation for all modules in a directory tree."""
    # NOTE(review): 'done' is accepted for backward compatibility but never
    # consulted below -- walk_packages() already yields each module once.
    if done is None: done = {}
    for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
        writedoc(modname)
    return
class Helper:
    """Interactive help engine behind the help() builtin.

    Maps keyword, symbol and topic requests onto the reference texts in
    pydoc_data.topics, and falls back to doc() for everything else.
    """

    # These dictionaries map a topic name to either an alias, or a tuple
    # (label, seealso-items).  The "label" is the label of the corresponding
    # section in the .rst file under Doc/ and an index into the dictionary
    # in pydoc_data/topics.py.
    #
    # CAUTION: if you change one of these dictionaries, be sure to adapt the
    #          list of needed labels in Doc/tools/sphinxext/pyspecific.py and
    #          regenerate the pydoc_data/topics.py file by running
    #              make pydoc-topics
    #          in Doc/ and copying the output file into the Lib/ directory.

    keywords = {
        'and': 'BOOLEAN',
        'as': 'with',
        'assert': ('assert', ''),
        'break': ('break', 'while for'),
        'class': ('class', 'CLASSES SPECIALMETHODS'),
        'continue': ('continue', 'while for'),
        'def': ('function', ''),
        'del': ('del', 'BASICMETHODS'),
        'elif': 'if',
        'else': ('else', 'while for'),
        'except': 'try',
        'exec': ('exec', ''),
        'finally': 'try',
        'for': ('for', 'break continue while'),
        'from': 'import',
        'global': ('global', 'NAMESPACES'),
        'if': ('if', 'TRUTHVALUE'),
        'import': ('import', 'MODULES'),
        'in': ('in', 'SEQUENCEMETHODS2'),
        'is': 'COMPARISON',
        'lambda': ('lambda', 'FUNCTIONS'),
        'not': 'BOOLEAN',
        'or': 'BOOLEAN',
        'pass': ('pass', ''),
        'print': ('print', ''),
        'raise': ('raise', 'EXCEPTIONS'),
        'return': ('return', 'FUNCTIONS'),
        'try': ('try', 'EXCEPTIONS'),
        'while': ('while', 'break continue if TRUTHVALUE'),
        'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
        'yield': ('yield', ''),
    }

    # Either add symbols to this dictionary or to the symbols dictionary
    # directly: Whichever is easier. They are merged later.
    _symbols_inverse = {
        'STRINGS' : ("'", "'''", "r'", "u'", '"""', '"', 'r"', 'u"'),
        'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
                       '|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
        'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
        'UNARY' : ('-', '~'),
        'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
                                 '^=', '<<=', '>>=', '**=', '//='),
        'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
        'COMPLEX' : ('j', 'J')
    }

    symbols = {
        '%': 'OPERATORS FORMATTING',
        '**': 'POWER',
        ',': 'TUPLES LISTS FUNCTIONS',
        '.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
        '...': 'ELLIPSIS',
        ':': 'SLICINGS DICTIONARYLITERALS',
        '@': 'def class',
        '\\': 'STRINGS',
        '_': 'PRIVATENAMES',
        '__': 'PRIVATENAMES SPECIALMETHODS',
        '`': 'BACKQUOTES',
        '(': 'TUPLES FUNCTIONS CALLS',
        ')': 'TUPLES FUNCTIONS CALLS',
        '[': 'LISTS SUBSCRIPTS SLICINGS',
        ']': 'LISTS SUBSCRIPTS SLICINGS'
    }

    # Merge _symbols_inverse into symbols, appending each topic to any
    # topic list the symbol already has.
    for topic, symbols_ in _symbols_inverse.iteritems():
        for symbol in symbols_:
            topics = symbols.get(symbol, topic)
            if topic not in topics:
                topics = topics + ' ' + topic
            symbols[symbol] = topics

    topics = {
        'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
                  'FUNCTIONS CLASSES MODULES FILES inspect'),
        'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING '
                    'TYPES'),
        'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
        'FORMATTING': ('formatstrings', 'OPERATORS'),
        'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
                    'FORMATTING TYPES'),
        'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
        'INTEGER': ('integers', 'int range'),
        'FLOAT': ('floating', 'float math'),
        'COMPLEX': ('imaginary', 'complex cmath'),
        'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
        'MAPPINGS': 'DICTIONARIES',
        'FUNCTIONS': ('typesfunctions', 'def TYPES'),
        'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
        'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
        'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
        'FRAMEOBJECTS': 'TYPES',
        'TRACEBACKS': 'TYPES',
        'NONE': ('bltin-null-object', ''),
        'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
        'FILES': ('bltin-file-objects', ''),
        'SPECIALATTRIBUTES': ('specialattrs', ''),
        'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
        'MODULES': ('typesmodules', 'import'),
        'PACKAGES': 'import',
        'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
                        'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
                        'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
                        'LISTS DICTIONARIES BACKQUOTES'),
        'OPERATORS': 'EXPRESSIONS',
        'PRECEDENCE': 'EXPRESSIONS',
        'OBJECTS': ('objects', 'TYPES'),
        'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
                           'CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS '
                           'SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
        'BASICMETHODS': ('customization', 'cmp hash repr str SPECIALMETHODS'),
        'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
        'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
        'SEQUENCEMETHODS1': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS2 '
                             'SPECIALMETHODS'),
        'SEQUENCEMETHODS2': ('sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 '
                             'SPECIALMETHODS'),
        'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
        'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
                          'SPECIALMETHODS'),
        'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
        'NAMESPACES': ('naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
        'DYNAMICFEATURES': ('dynamic-features', ''),
        'SCOPING': 'NAMESPACES',
        'FRAMES': 'NAMESPACES',
        'EXCEPTIONS': ('exceptions', 'try except finally raise'),
        'COERCIONS': ('coercion-rules','CONVERSIONS'),
        'CONVERSIONS': ('conversions', 'COERCIONS'),
        'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
        'SPECIALIDENTIFIERS': ('id-classes', ''),
        'PRIVATENAMES': ('atom-identifiers', ''),
        'LITERALS': ('atom-literals', 'STRINGS BACKQUOTES NUMBERS '
                     'TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
        'TUPLES': 'SEQUENCES',
        'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
        'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
        'LISTLITERALS': ('lists', 'LISTS LITERALS'),
        'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
        'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
        'BACKQUOTES': ('string-conversions', 'repr str STRINGS LITERALS'),
        'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr '
                       'ATTRIBUTEMETHODS'),
        'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS1'),
        'SLICINGS': ('slicings', 'SEQUENCEMETHODS2'),
        'CALLS': ('calls', 'EXPRESSIONS'),
        'POWER': ('power', 'EXPRESSIONS'),
        'UNARY': ('unary', 'EXPRESSIONS'),
        'BINARY': ('binary', 'EXPRESSIONS'),
        'SHIFTING': ('shifting', 'EXPRESSIONS'),
        'BITWISE': ('bitwise', 'EXPRESSIONS'),
        'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
        'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
        'ASSERTION': 'assert',
        'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
        'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
        'DELETION': 'del',
        'PRINTING': 'print',
        'RETURNING': 'return',
        'IMPORTING': 'import',
        'CONDITIONAL': 'if',
        'LOOPING': ('compound', 'for while break continue'),
        'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
        'DEBUGGING': ('debugger', 'pdb'),
        'CONTEXTMANAGERS': ('context-managers', 'with'),
    }

    def __init__(self, input=None, output=None):
        self._input = input
        self._output = output

    # Resolve lazily, so redirection of sys.stdin/stdout after construction
    # is still honored.
    input  = property(lambda self: self._input or sys.stdin)
    output = property(lambda self: self._output or sys.stdout)

    def __repr__(self):
        # Typing "help" alone at the interactive prompt triggers __repr__;
        # enter the interactive helper in that case instead of printing.
        if inspect.stack()[1][3] == '?':
            self()
            return ''
        return '<pydoc.Helper instance>'

    # Sentinel default: distinguishes help() from help(None).
    _GoInteractive = object()
    def __call__(self, request=_GoInteractive):
        if request is not self._GoInteractive:
            self.help(request)
        else:
            self.intro()
            self.interact()
            self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')

    def interact(self):
        """Read help> requests in a loop until EOF, interrupt or 'quit'."""
        self.output.write('\n')
        while True:
            try:
                request = self.getline('help> ')
                if not request: break
            except (KeyboardInterrupt, EOFError):
                break
            # Drop surrounding quotes so help('spam') and help(spam) match.
            request = strip(replace(request, '"', '', "'", ''))
            if lower(request) in ('q', 'quit'): break
            self.help(request)

    def getline(self, prompt):
        """Read one line, using raw_input when available."""
        if self.input is sys.stdin:
            return raw_input(prompt)
        else:
            self.output.write(prompt)
            self.output.flush()
            return self.input.readline()

    def help(self, request):
        """Dispatch a single help request to the appropriate handler."""
        if type(request) is type(''):
            request = request.strip()
            if request == 'help': self.intro()
            elif request == 'keywords': self.listkeywords()
            elif request == 'symbols': self.listsymbols()
            elif request == 'topics': self.listtopics()
            elif request == 'modules': self.listmodules()
            elif request[:8] == 'modules ':
                self.listmodules(split(request)[1])
            elif request in self.symbols: self.showsymbol(request)
            elif request in self.keywords: self.showtopic(request)
            elif request in self.topics: self.showtopic(request)
            elif request: doc(request, 'Help on %s:')
        elif isinstance(request, Helper): self()
        else: doc(request, 'Help on %s:')
        self.output.write('\n')

    def intro(self):
        self.output.write('''
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % sys.version[:3])

    def list(self, items, columns=4, width=80):
        """Write 'items' sorted, in column-major order across 'columns'."""
        items = items[:]
        items.sort()
        colw = width / columns          # Python 2 integer division
        rows = (len(items) + columns - 1) / columns
        for row in range(rows):
            for col in range(columns):
                i = col * rows + row
                if i < len(items):
                    self.output.write(items[i])
                    if col < columns - 1:
                        # Pad to the column width.
                        self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
            self.output.write('\n')

    def listkeywords(self):
        self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
        self.list(self.keywords.keys())

    def listsymbols(self):
        self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
        self.list(self.symbols.keys())

    def listtopics(self):
        self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
        self.list(self.topics.keys())

    def showtopic(self, topic, more_xrefs=''):
        """Page the reference text for 'topic', following aliases."""
        try:
            import pydoc_data.topics
        except ImportError:
            self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
            return
        target = self.topics.get(topic, self.keywords.get(topic))
        if not target:
            self.output.write('no documentation found for %s\n' % repr(topic))
            return
        if type(target) is type(''):
            # A plain string is an alias for another topic.
            return self.showtopic(target, more_xrefs)

        label, xrefs = target
        try:
            doc = pydoc_data.topics.topics[label]
        except KeyError:
            self.output.write('no documentation found for %s\n' % repr(topic))
            return
        pager(strip(doc) + '\n')
        if more_xrefs:
            xrefs = (xrefs or '') + ' ' + more_xrefs
        if xrefs:
            # Word-wrap the "Related help topics" line.
            import StringIO, formatter
            buffer = StringIO.StringIO()
            formatter.DumbWriter(buffer).send_flowing_data(
                'Related help topics: ' + join(split(xrefs), ', ') + '\n')
            self.output.write('\n%s\n' % buffer.getvalue())

    def showsymbol(self, symbol):
        # A symbol entry is "topic xref xref ..."; show the first word as
        # the topic and pass the rest along as extra cross-references.
        target = self.symbols[symbol]
        topic, _, xrefs = target.partition(' ')
        self.showtopic(topic, xrefs)

    def listmodules(self, key=''):
        if key:
            self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
            apropos(key)
        else:
            self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
            modules = {}
            def callback(path, modname, desc, modules=modules):
                if modname and modname[-9:] == '.__init__':
                    modname = modname[:-9] + ' (package)'
                # Top-level modules only.
                if find(modname, '.') < 0:
                    modules[modname] = 1
            def onerror(modname):
                callback(None, modname, None)
            ModuleScanner().run(callback, onerror=onerror)
            self.list(modules.keys())
            self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
# Module-level interactive help entry point.
help = Helper()
class Scanner:
    """A generic tree iterator.

    Yields the children of each root (the roots themselves are not
    yielded), descending into a child only when descendp(child) is true.
    """

    def __init__(self, roots, children, descendp):
        # Copy the roots so the caller's list is never mutated.
        self.roots = roots[:]
        self.state = []
        self.children = children
        self.descendp = descendp

    def next(self):
        """Return the next node, or None when the traversal is exhausted."""
        while True:
            if not self.state:
                if not self.roots:
                    return None
                root = self.roots.pop(0)
                self.state = [(root, self.children(root))]
            node, pending = self.state[-1]
            if not pending:
                # This subtree is finished; back up and keep looking.
                self.state.pop()
                continue
            child = pending.pop(0)
            if self.descendp(child):
                self.state.append((child, self.children(child)))
            return child
class ModuleScanner:
    """An interruptible scanner that searches module synopses."""

    def run(self, callback, key=None, completer=None, onerror=None):
        """Invoke callback(path, modname, desc) for each (matching) module.

        With no key, every module is reported; otherwise only those whose
        "name - synopsis" string contains the (case-folded) key.  Setting
        self.quit to True from the callback stops the scan.
        """
        if key: key = lower(key)
        self.quit = False
        seen = {}

        # Built-in modules first; they have no importer or path.
        for modname in sys.builtin_module_names:
            if modname != '__main__':
                seen[modname] = 1
                if key is None:
                    callback(None, modname, '')
                else:
                    desc = split(__import__(modname).__doc__ or '', '\n')[0]
                    if find(lower(modname + ' - ' + desc), key) >= 0:
                        callback(None, modname, desc)

        for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
            if self.quit:
                break
            if key is None:
                callback(None, modname, '')
            else:
                loader = importer.find_module(modname)
                if hasattr(loader, 'get_source'):
                    # Read the synopsis from the source text, avoiding a
                    # full import of the module.
                    import StringIO
                    desc = source_synopsis(
                        StringIO.StringIO(loader.get_source(modname))
                    ) or ''
                    if hasattr(loader, 'get_filename'):
                        path = loader.get_filename(modname)
                    else:
                        path = None
                else:
                    # No source available; import and read __doc__.
                    module = loader.load_module(modname)
                    desc = (module.__doc__ or '').splitlines()[0]
                    path = getattr(module, '__file__', None)
                if find(lower(modname + ' - ' + desc), key) >= 0:
                    callback(path, modname, desc)

        if completer:
            completer()
def apropos(key):
    """Print all the one-line module summaries that contain a substring."""
    def callback(path, modname, desc):
        if modname[-9:] == '.__init__':
            modname = modname[:-9] + ' (package)'
        print modname, desc and '- ' + desc
    try: import warnings
    except ImportError: pass
    else: warnings.filterwarnings('ignore') # ignore problems during import
    ModuleScanner().run(callback, key)
# --------------------------------------------------- web browser interface
def serve(port, callback=None, completer=None):
    """Serve pydoc HTML pages over HTTP on localhost:port until interrupted.

    callback, if given, is invoked with the server object once it is
    activated; completer, if given, is invoked when the server shuts down.
    """
    import BaseHTTPServer, mimetools, select

    # Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
    class Message(mimetools.Message):
        def __init__(self, fp, seekable=1):
            Message = self.__class__
            Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
            self.encodingheader = self.getheader('content-transfer-encoding')
            self.typeheader = self.getheader('content-type')
            self.parsetype()
            self.parseplist()

    class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        def send_document(self, title, contents):
            try:
                self.send_response(200)
                self.send_header('Content-Type', 'text/html')
                self.end_headers()
                self.wfile.write(html.page(title, contents))
            except IOError: pass  # client closed the connection

        def do_GET(self):
            # "/name.html" or "/name" documents 'name'; "/" lists modules.
            path = self.path
            if path[-5:] == '.html': path = path[:-5]
            if path[:1] == '/': path = path[1:]
            if path and path != '.':
                try:
                    obj = locate(path, forceload=1)
                except ErrorDuringImport, value:
                    self.send_document(path, html.escape(str(value)))
                    return
                if obj:
                    self.send_document(describe(obj), html.document(obj, path))
                else:
                    self.send_document(path,
                        'no Python documentation found for %s' % repr(path))
            else:
                heading = html.heading(
                    '<big><big><strong>Python: Index of Modules</strong></big></big>',
                    '#ffffff', '#7799ee')
                def bltinlink(name):
                    return '<a href="%s.html">%s</a>' % (name, name)
                names = filter(lambda x: x != '__main__',
                               sys.builtin_module_names)
                contents = html.multicolumn(names, bltinlink)
                indices = ['<p>' + html.bigsection(
                    'Built-in Modules', '#ffffff', '#ee77aa', contents)]

                # One index section per sys.path entry; 'seen' marks
                # modules shadowed by earlier path entries.
                seen = {}
                for dir in sys.path:
                    indices.append(html.index(dir, seen))
                contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee <ping@lfw.org></font>'''
                self.send_document('Index of Modules', contents)

        def log_message(self, *args): pass  # suppress per-request logging

    class DocServer(BaseHTTPServer.HTTPServer):
        def __init__(self, port, callback):
            host = 'localhost'
            self.address = (host, port)
            self.url = 'http://%s:%d/' % (host, port)
            self.callback = callback
            self.base.__init__(self, self.address, self.handler)

        def serve_until_quit(self):
            # Poll with a 1-second timeout so self.quit is noticed promptly.
            import select
            self.quit = False
            while not self.quit:
                rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
                if rd: self.handle_request()

        def server_activate(self):
            self.base.server_activate(self)
            if self.callback: self.callback(self)

    DocServer.base = BaseHTTPServer.HTTPServer
    DocServer.handler = DocHandler
    DocHandler.MessageClass = Message
    try:
        try:
            DocServer(port, callback).serve_until_quit()
        except (KeyboardInterrupt, select.error):
            pass
    finally:
        if completer: completer()
# ----------------------------------------------------- graphical interface
def gui():
    """Graphical interface (starts web server and pops up a control window)."""
    class GUI:
        def __init__(self, window, port=7464):
            self.window = window
            self.server = None   # DocServer instance, set by ready()
            self.scanner = None  # active ModuleScanner during a search
            import Tkinter
            self.server_frm = Tkinter.Frame(window)
            self.title_lbl = Tkinter.Label(self.server_frm,
                text='Starting server...\n ')
            self.open_btn = Tkinter.Button(self.server_frm,
                text='open browser', command=self.open, state='disabled')
            self.quit_btn = Tkinter.Button(self.server_frm,
                text='quit serving', command=self.quit, state='disabled')

            self.search_frm = Tkinter.Frame(window)
            self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
            self.search_ent = Tkinter.Entry(self.search_frm)
            self.search_ent.bind('<Return>', self.search)
            self.stop_btn = Tkinter.Button(self.search_frm,
                text='stop', pady=0, command=self.stop, state='disabled')
            if sys.platform == 'win32':
                # Trying to hide and show this button crashes under Windows.
                self.stop_btn.pack(side='right')

            self.window.title('pydoc')
            self.window.protocol('WM_DELETE_WINDOW', self.quit)
            self.title_lbl.pack(side='top', fill='x')
            self.open_btn.pack(side='left', fill='x', expand=1)
            self.quit_btn.pack(side='right', fill='x', expand=1)
            self.server_frm.pack(side='top', fill='x')

            self.search_lbl.pack(side='left')
            self.search_ent.pack(side='right', fill='x', expand=1)
            self.search_frm.pack(side='top', fill='x')
            self.search_ent.focus_set()

            font = ('helvetica', sys.platform == 'win32' and 8 or 10)
            self.result_lst = Tkinter.Listbox(window, font=font, height=6)
            self.result_lst.bind('<Button-1>', self.select)
            self.result_lst.bind('<Double-Button-1>', self.goto)
            self.result_scr = Tkinter.Scrollbar(window,
                orient='vertical', command=self.result_lst.yview)
            self.result_lst.config(yscrollcommand=self.result_scr.set)

            self.result_frm = Tkinter.Frame(window)
            self.goto_btn = Tkinter.Button(self.result_frm,
                text='go to selected', command=self.goto)
            self.hide_btn = Tkinter.Button(self.result_frm,
                text='hide results', command=self.hide)
            self.goto_btn.pack(side='left', fill='x', expand=1)
            self.hide_btn.pack(side='right', fill='x', expand=1)

            # Remember the collapsed/expanded window geometries.
            self.window.update()
            self.minwidth = self.window.winfo_width()
            self.minheight = self.window.winfo_height()
            self.bigminheight = (self.server_frm.winfo_reqheight() +
                                 self.search_frm.winfo_reqheight() +
                                 self.result_lst.winfo_reqheight() +
                                 self.result_frm.winfo_reqheight())
            self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
            self.expanded = 0
            self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
            self.window.wm_minsize(self.minwidth, self.minheight)
            self.window.tk.willdispatch()

            # Run the documentation HTTP server in a background thread.
            import threading
            threading.Thread(
                target=serve, args=(port, self.ready, self.quit)).start()

        def ready(self, server):
            # Called by serve() once the server is listening.
            self.server = server
            self.title_lbl.config(
                text='Python documentation server at\n' + server.url)
            self.open_btn.config(state='normal')
            self.quit_btn.config(state='normal')

        def open(self, event=None, url=None):
            url = url or self.server.url
            try:
                import webbrowser
                webbrowser.open(url)
            except ImportError: # pre-webbrowser.py compatibility
                if sys.platform == 'win32':
                    os.system('start "%s"' % url)
                else:
                    rc = os.system('netscape -remote "openURL(%s)" &' % url)
                    if rc: os.system('netscape "%s" &' % url)

        def quit(self, event=None):
            if self.server:
                self.server.quit = 1
            self.window.quit()

        def search(self, event=None):
            # Kick off an asynchronous module scan for the entered key.
            key = self.search_ent.get()
            self.stop_btn.pack(side='right')
            self.stop_btn.config(state='normal')
            self.search_lbl.config(text='Searching for "%s"...' % key)
            self.search_ent.forget()
            self.search_lbl.pack(side='left')
            self.result_lst.delete(0, 'end')
            self.goto_btn.config(state='disabled')
            self.expand()

            import threading
            if self.scanner:
                self.scanner.quit = 1
            self.scanner = ModuleScanner()
            threading.Thread(target=self.scanner.run,
                             args=(self.update, key, self.done)).start()

        def update(self, path, modname, desc):
            # Scanner callback: append one hit to the result listbox.
            if modname[-9:] == '.__init__':
                modname = modname[:-9] + ' (package)'
            self.result_lst.insert('end',
                modname + ' - ' + (desc or '(no description)'))

        def stop(self, event=None):
            if self.scanner:
                self.scanner.quit = 1
            self.scanner = None

        def done(self):
            # Scanner callback: restore the idle search UI.
            self.scanner = None
            self.search_lbl.config(text='Search for')
            self.search_lbl.pack(side='left')
            self.search_ent.pack(side='right', fill='x', expand=1)
            if sys.platform != 'win32': self.stop_btn.forget()
            self.stop_btn.config(state='disabled')

        def select(self, event=None):
            self.goto_btn.config(state='normal')

        def goto(self, event=None):
            # Open the documentation page for the selected result.
            selection = self.result_lst.curselection()
            if selection:
                modname = split(self.result_lst.get(selection[0]))[0]
                self.open(url=self.server.url + modname + '.html')

        def collapse(self):
            # Hide the results area and shrink the window.
            if not self.expanded: return
            self.result_frm.forget()
            self.result_scr.forget()
            self.result_lst.forget()
            self.bigwidth = self.window.winfo_width()
            self.bigheight = self.window.winfo_height()
            self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
            self.window.wm_minsize(self.minwidth, self.minheight)
            self.expanded = 0

        def expand(self):
            # Show the results area and grow the window.
            if self.expanded: return
            self.result_frm.pack(side='bottom', fill='x')
            self.result_scr.pack(side='right', fill='y')
            self.result_lst.pack(side='top', fill='both', expand=1)
            self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
            self.window.wm_minsize(self.minwidth, self.bigminheight)
            self.expanded = 1

        def hide(self, event=None):
            self.stop()
            self.collapse()

    import Tkinter
    try:
        root = Tkinter.Tk()
        # Tk will crash if pythonw.exe has an XP .manifest
        # file and the root has is not destroyed explicitly.
        # If the problem is ever fixed in Tk, the explicit
        # destroy can go.
        try:
            gui = GUI(root)
            root.mainloop()
        finally:
            root.destroy()
    except KeyboardInterrupt:
        pass
# -------------------------------------------------- command-line interface
def ispath(x):
    # True if x is a string containing a path separator, i.e. it looks like
    # a file-system path rather than a dotted module name.
    return isinstance(x, str) and find(x, os.sep) >= 0
def cli():
    """Command-line interface (looks at sys.argv to decide what to do)."""
    import getopt
    class BadUsage: pass  # raised internally to fall through to the usage text

    # Scripts don't get the current directory in their path by default
    # unless they are run with the '-m' switch
    if '' not in sys.path:
        scriptdir = os.path.dirname(sys.argv[0])
        if scriptdir in sys.path:
            sys.path.remove(scriptdir)
        sys.path.insert(0, '.')

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
        writing = 0

        for opt, val in opts:
            if opt == '-g':
                # Launch the Tk GUI and exit when it closes.
                gui()
                return
            if opt == '-k':
                # Keyword search over all module synopses.
                apropos(val)
                return
            if opt == '-p':
                # Run the HTTP documentation server on the given port.
                try:
                    port = int(val)
                except ValueError:
                    raise BadUsage
                def ready(server):
                    print 'pydoc server ready at %s' % server.url
                def stopped():
                    print 'pydoc server stopped'
                serve(port, ready, stopped)
                return
            if opt == '-w':
                writing = 1

        if not args: raise BadUsage
        for arg in args:
            if ispath(arg) and not os.path.exists(arg):
                print 'file %r does not exist' % arg
                break
            try:
                # A path argument is imported as a file; otherwise the
                # argument is treated as a (dotted) name.
                if ispath(arg) and os.path.isfile(arg):
                    arg = importfile(arg)
                if writing:
                    if ispath(arg) and os.path.isdir(arg):
                        writedocs(arg)
                    else:
                        writedoc(arg)
                else:
                    help.help(arg)
            except ErrorDuringImport, value:
                print value

    except (getopt.error, BadUsage):
        cmd = os.path.basename(sys.argv[0])
        print """pydoc - the Python documentation tool

%s <name> ...
    Show text documentation on something. <name> may be the name of a
    Python keyword, topic, function, module, or package, or a dotted
    reference to a class or function within a module or module in a
    package. If <name> contains a '%s', it is used as the path to a
    Python source file to document. If name is 'keywords', 'topics',
    or 'modules', a listing of these things is displayed.

%s -k <keyword>
    Search for a keyword in the synopsis lines of all available modules.

%s -p <port>
    Start an HTTP server on the given port on the local machine.

%s -g
    Pop up a graphical interface for finding and serving documentation.

%s -w <name> ...
    Write out the HTML documentation for a module to a file in the current
    directory. If <name> contains a '%s', it is treated as a filename; if
    it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
if __name__ == '__main__': cli()
|
simulation_2.py | '''
Created on Oct 12, 2016
@author: mwittie
'''
import network_2 as network
import link_2 as link
import threading
from time import sleep
##configuration parameters
router_queue_size = 0 #0 means unlimited
simulation_time = 3 #give the network sufficient time to transfer all packets before quitting
if __name__ == '__main__':
    # Simulation topology: client -> router A -> server, with different MTUs
    # on each link so the router must handle packets larger than the
    # outgoing link allows.
    object_L = [] #keeps track of objects, so we can kill their threads

    # create network nodes
    client = network.Host(1)
    object_L.append(client)
    server = network.Host(2)
    object_L.append(server)
    router_a = network.Router(name='A', intf_count=1, max_queue_size=router_queue_size)
    object_L.append(router_a)

    # create a Link Layer to keep track of links between network nodes
    link_layer = link.LinkLayer()
    object_L.append(link_layer)

    # add all the links
    # link parameters: from_node, from_intf_num, to_node, to_intf_num, mtu
    link_layer.add_link(link.Link(client, 0, router_a, 0, 50))
    link_layer.add_link(link.Link(router_a, 0, server, 0, 30))

    # start all the objects (each node runs in its own thread)
    thread_L = []
    thread_L.append(threading.Thread(name=client.__str__(), target=client.run))
    thread_L.append(threading.Thread(name=server.__str__(), target=server.run))
    thread_L.append(threading.Thread(name=router_a.__str__(), target=router_a.run))
    thread_L.append(threading.Thread(name="Network", target=link_layer.run))
    for t in thread_L:
        t.start()

    # create some send events
    for i in range(3):
        message = 'Sample data yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaay %d' % i
        client.udt_send(2, message)

    # give the network sufficient time to transfer all packets before quitting
    sleep(simulation_time)

    # join all threads: signal each object to stop, then wait for its thread
    for o in object_L:
        o.stop = True
    for t in thread_L:
        t.join()

    print("All simulation threads joined")
# writes to host periodically
|
HelpSearch.py | import Utils
import wx
import os
import io
import sys
import six
import time
import threading
import traceback
import webbrowser
StringIO = six.StringIO
from six.moves.urllib.request import url2pathname
from six.moves.urllib.parse import urlparse
from whoosh.index import open_dir
from whoosh.qparser import QueryParser
import wx.html as html
import wx.lib.wxpTag
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
PORT_NUMBER = 8761
# Pre-load the application icon once at import time so the HTTP handler can
# serve /favicon.ico from memory without hitting the disk on every request.
try:
    with io.open( os.path.join(Utils.getImageFolder(), 'CrossMgr.ico'), 'rb' ) as f:
        favicon = f.read()
except Exception:
    # A missing/unreadable icon is non-fatal: do_GET simply 404s the favicon.
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
    favicon = None
class HelpHandler( BaseHTTPRequestHandler ):
    """Serve local help files (and the favicon) over HTTP for the browser UI."""

    html_content = 'text/html; charset=utf-8'

    def do_GET(self):
        """Answer a GET: favicon from memory, otherwise a help page from disk.

        Only the basename of the request path is used (anchors stripped), so
        the handler cannot be walked outside the help folder.
        """
        up = urlparse( self.path )
        try:
            if up.path=='/favicon.ico' and favicon:
                content = favicon          # raw bytes read at module import
                content_type = 'image/x-icon'
            else:
                file = url2pathname(os.path.basename(up.path.split('#')[0]))
                fname = os.path.join( Utils.getHelpFolder(), file )
                with io.open(fname, 'r', encoding='utf-8') as fp:
                    content = fp.read()
                content_type = self.html_content
        except Exception as e:
            self.send_error(404,'File Not Found: {} {}\n{}'.format(self.path, e, traceback.format_exc()))
            return

        self.send_response( 200 )
        self.send_header('Content-type',content_type)
        if content_type == self.html_content:
            # Help pages change between app versions; force revalidation.
            self.send_header( 'Cache-Control', 'no-cache, no-store, must-revalidate' )
            self.send_header( 'Pragma', 'no-cache' )
            self.send_header( 'Expires', '0' )
        self.end_headers()
        # BUG FIX: the favicon is already bytes; calling .encode() on it
        # raised AttributeError and broke every /favicon.ico request.
        self.wfile.write( content if isinstance(content, bytes) else content.encode() )

    def log_message(self, format, *args):
        # Suppress per-request console logging.
        return
def getHelpURL( fname ):
    """Return the local help-server URL serving the given help file."""
    leaf = os.path.basename( fname )
    return 'http://localhost:{}/{}'.format( PORT_NUMBER, leaf )
def showHelp( url ):
    """Open the given help page (URL or bare file name) in the web browser."""
    target = url if url.startswith('http://') else getHelpURL( url )
    try:
        webbrowser.open( target )
    except Exception:
        # Best-effort: a missing/broken browser must not crash the app.
        pass
class HelpSearch( wx.Panel ):
    """Panel with a search box and an HTML list of matching help sections.

    Queries the Whoosh full-text index of the help files and renders the
    hits as links into the local help HTTP server.
    """

    def __init__( self, parent, id = wx.ID_ANY, style = 0, size=(-1,-1) ):
        # BUG FIX: default was size=(-1-1), i.e. the integer -2 — not the
        # intended wx "default size" tuple (-1,-1).
        wx.Panel.__init__(self, parent, id, style=style, size=size )

        self.searchLabel = wx.StaticText( self, label=_('Search Text:') )
        self.search = wx.SearchCtrl( self, style=wx.TE_PROCESS_ENTER, value='main screen', size=(200,-1) )
        # Re-run the search on Enter, on every keystroke, and on the search button.
        self.Bind( wx.EVT_TEXT_ENTER, self.doSearch, self.search )
        self.Bind( wx.EVT_TEXT, self.doSearch, self.search )
        self.Bind( wx.EVT_SEARCHCTRL_SEARCH_BTN, self.doSearch, self.search )

        hs = wx.BoxSizer( wx.HORIZONTAL )
        hs.Add( self.searchLabel, 0, flag=wx.ALIGN_CENTRE_VERTICAL )
        hs.Add( self.search, 1, flag=wx.EXPAND|wx.LEFT, border=4 )

        self.vbs = wx.BoxSizer(wx.VERTICAL)
        self.html = html.HtmlWindow( self, size=(800,600), style=wx.BORDER_SUNKEN )
        self.Bind( wx.html.EVT_HTML_LINK_CLICKED, self.doLink, self.html )
        self.vbs.Add( hs, 0, flag=wx.BOTTOM|wx.EXPAND, border=4 )
        self.vbs.Add( self.html, 1, flag=wx.EXPAND )
        self.SetSizer(self.vbs)

        self.doSearch()

    def doLink( self, event ):
        """Open a clicked result link in the external browser."""
        info = event.GetLinkInfo()
        href = info.GetHref()
        showHelp( href )

    def doSearch( self, event = None ):
        """Query the help index for the current search text and render hits as HTML."""
        busy = wx.BusyCursor()
        text = self.search.GetValue()
        f = StringIO()
        try:
            ix = open_dir( Utils.getHelpIndexFolder(), readonly=True )
        except Exception as e:
            # No index available: fall through and render an empty page.
            Utils.logException( e, sys.exc_info() )
            ix = None
        f.write( u'<html>\n' )
        if ix is not None:
            with ix.searcher() as searcher:
                query = QueryParser('content', ix.schema).parse(text)
                results = searcher.search(query, limit=20)
                # Allow larger fragments
                results.formatter.maxchars = 300
                # Show more context before and after
                results.formatter.surround = 50
                f.write( u'<table>\n' )
                for i, hit in enumerate(results):
                    file = os.path.splitext(hit['path'].split('#')[0])[0]
                    url = getHelpURL( os.path.basename(hit['path']) )
                    if not file.startswith('Menu'):
                        section = u'{}: {}'.format(file, hit['section'])
                    else:
                        section = u'Menu: {}'.format( hit['section'] )
                    f.write( u'''<tr>
    <td valign="top">
        <font size=+1><a href="{url}">{section}</a></font><br></br>
        {content}
        <font size=+1><br></br></font>
    </td>
</tr>\n'''.format(url=url, section=section, content=hit.highlights('content') ) )
                f.write( u'</table>\n' )
            ix.close()
        f.write( u'</html>\n' )
        self.html.SetPage( f.getvalue() )
class HelpSearchDialog( wx.Dialog ):
    """Resizable dialog that hosts a single HelpSearch panel."""

    def __init__(
            self, parent, ID = wx.ID_ANY, title='Help Search', size=wx.DefaultSize, pos=wx.DefaultPosition,
            style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER ):
        super( HelpSearchDialog, self ).__init__(parent, ID, title, pos, size, style)

        # The search panel is the dialog's only child; let it fill the dialog.
        self.search = HelpSearch( self, size=(600,400) )
        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add(self.search, 1, wx.ALIGN_CENTRE|wx.ALL|wx.EXPAND, 5)
        self.SetSizer(vbox)
        vbox.Fit(self)
server = None  # the running HTTPServer instance, or None while it is down

def HelpServer():
    # Run the help HTTP server forever, restarting after 5 seconds on any
    # failure (e.g. the port is still held by a previous instance).
    global server
    while 1:
        try:
            server = HTTPServer(('localhost', PORT_NUMBER), HelpHandler)
            server.serve_forever( poll_interval = 2 )
        except Exception as e:
            server = None
            time.sleep( 5 )

# Serve help in the background; a daemon thread dies with the process.
webThread = threading.Thread( target=HelpServer, name='HelpServer' )
webThread.daemon = True
webThread.start()
if __name__ == '__main__':
    # Manual test: show the search dialog inside a bare frame.
    app = wx.App(False)
    mainWin = wx.Frame(None,title="CrossMan", size=(600,400))
    mainWin.Show()
    searchDialog = HelpSearchDialog( mainWin, size=(600,400) )
    searchDialog.Show()
    app.MainLoop()
|
User_Interface.py | import tkinter
from tkinter import ttk
import cv2
from PIL import Image, ImageTk
import socket
import json
from vidgear.gears import NetGear
import threading
import os
import webbrowser
HOST = '10.0.0.8'
PORT = 6005
class Menubar(ttk.Frame):
    """Menu bar for the GUI: an 'Info' cascade with Help and About entries."""

    def __init__(self, parent):
        """
        Constructor:
            Initializes the menubar for the GUI.
            Calls the setup (init_menubar) for the rest of the menubar.

        Args:
            parent: Root window for the MenuBar.

        Returns:
            None.
        """
        ttk.Frame.__init__(self, parent)
        self.root = parent
        self.init_menubar()

    def display_help(self):
        """
        Displays the help document (How to use the GUI).

        Args:
            None.

        Returns:
            None.
        """
        # Opens the plain-text help file with the OS default handler.
        webbrowser.open("Help_Document.txt")
        pass

    def display_about(self):
        """
        Displays info about the program (purpose of the system).

        Args:
            None

        Returns:
            None.
        """
        # Not implemented yet.
        pass

    def init_menubar(self):
        """
        Creates the menubar (attached to root) and adds the functionality for the menubar.

        Args:
            None.

        Returns:
            None.
        """
        self.menubar = tkinter.Menu(self.root)
        self.menu_help = tkinter.Menu(self.menubar) #Creates a "Help" menu
        self.menu_help.add_command(label='Help', command=self.display_help)
        self.menu_help.add_command(label='About', command=self.display_about)
        self.menubar.add_cascade(menu=self.menu_help, label='Info')
        self.root.config(menu=self.menubar)
class GUI(ttk.Frame):
    """Main control window for the robot-arm system.

    Provides start/standby/shutdown controls, a settings window (velocity,
    drop location), and a live video panel fed by a NetGear stream.
    """

    def __init__(self, parent):
        """
        Constructor:
            Initializes the GUI with the parameters set by the user.
            Calls the setup (init_gui) for the rest of the GUI.

        Args:
            parent: Root window for the Main Window.

        Returns:
            None.
        """
        ttk.Frame.__init__(self, parent)
        self.root = parent
        # systemStatus is one of "Offline" / "Online" / "Standby".
        self.systemStatus = "Offline"
        self.init_gui()

    def init_gui(self):
        """
        Sets up all the parameters (and their values) needed by the GUI.
        Includes window size, components, background, and more.

        Args:
            None

        Returns:
            None
        """
        self.root.title('Arm Solutions')
        self.root.geometry("1200x800")
        self.background_image = Image.open("Flat_colorful_background.png")
        self.background_image = self.background_image.resize((2000,2000), Image.ANTIALIAS)
        self.background_image = ImageTk.PhotoImage(self.background_image)
        self.label = tkinter.Label(self.root, image=self.background_image)
        # Keep an extra reference so Tk does not garbage-collect the image.
        self.label.image = self.background_image
        self.label.place(x=0,y=0, relwidth=1, relheight=1)
        self.root.grid_columnconfigure(1, weight=1)
        self.root.grid_columnconfigure(0, weight=1)
        self.root.option_add('*tearOff', 'FALSE') # Disables ability to tear menu bar into own window
        #
        self.systemStatusLabelText = tkinter.StringVar()
        self.systemStatusLabel = tkinter.Label(textvariable= self.systemStatusLabelText , bg = '#e81a1a', width = 25)
        self.startup_button = tkinter.Button(self.root, text ="Start Up/Resume System", command = self.startSystem, height=3, width= 35, bg = '#499c5f')
        self.pause_button = tkinter.Button(self.root, text ="Put System in Standby", command = self.pauseSystem, height=3, width= 35, bg ='#f9ff54')
        self.settings_button = tkinter.Button(self.root, text ="System Settings", command = self.systemSettings, height=3, width= 35, bg = '#adaaaa')
        self.exit_button = tkinter.Button(self.root, text ="Shut Down System", command = self.on_exit, height=3, width= 35, bg = '#e81a1a')
        self.imageFrame = tkinter.Frame(self.root)
        self.imageFrame.grid(row=0, column=1, padx=10, pady=50, rowspan=4)
        self.startup_button.grid(row=0,column=0, padx=50, pady=(60,100))
        self.pause_button.grid(row=1,column=0, padx=50, pady=(0,100))
        self.settings_button.grid(row=2, column=0, padx=50, pady=(0,100))
        self.exit_button.grid(row=3, column=0, padx=50)
        self.systemStatusLabel.grid(row=3, column=1, pady=10, padx=240)
        self.systemStatusLabel.config(font=("New Times Roman", 20))
        self.startup_button.config(font=("New Times Roman", 16))
        self.pause_button.config(font=("New Times Roman", 16))
        self.settings_button.config(font=("New Times Roman", 16))
        self.exit_button.config(font=("New Times Roman", 16))

        # Menu Bar
        self.menubar = Menubar(self.root)

        # Padding
        for child in self.winfo_children():
            child.grid_configure(padx=10, pady=5)

        # Video frames from the stream are drawn onto this label.
        self.lmain = tkinter.Label(self.imageFrame)

        #Variables used later on
        self.speed = tkinter.DoubleVar()
        self.systemStatusLabelText.set("System Status - Offline")

    def sendPlaceLocation(self):
        """
        Sends updated drop (place) location of parcel to the server set by the user.

        Args:
            self.place_position (string): Drop location of parcel set by user.

        Returns:
            None
        """
        try:
            data = self.place_position_data.get()
            self.place_position_data.set("")
            # A valid entry is 6 whitespace-separated values — presumably a
            # 6-DOF pose (x y z rx ry rz); verify against the server side.
            data = data.split()
            if(len(data) == 6):
                if(self.systemStatus == "Online"):
                    print()
                    jsonData = {"first": "Client 2", "second": "Drop Location", "third": data[0], "fourth": data[1], "fifth": data[2], "sixth": data[3], "seventh": data[4], "eight": data[5] }
                    self.send(jsonData)
                    self.placeLocationStatus.set("Parcel Drop Location Updated")
                else:
                    self.placeLocationStatus.set("System status: " + self.systemStatus)
            else:
                self.placeLocationStatus.set("Incorrect format")
        except Exception as error:
            print(error)
            self.placeLocationStatus.set("Error")

    #Work the styling for settings window
    def systemSettings(self):
        """
        Settings window for the User Interface.
        Allows the user to edit multiple aspects of the system. Including velocity, drop location, and more.

        Args:
            None

        Returns:
            None.
        """
        self.settingsWindow = tkinter.Toplevel(self.root)
        self.settingsWindow.title("Settings")
        self.settingsWindow.geometry("400x400")
        self.velocity_scale = tkinter.Scale(self.settingsWindow, variable = self.speed, from_ = 1, to = 100, orient= tkinter.HORIZONTAL, length = 250, width=30)
        self.display_velocity = tkinter.Label(self.settingsWindow)
        self.set_velocity = tkinter.Button(self.settingsWindow, text ="Set Velocity",
            command = self.setVelocity)
        self.eco_button = tkinter.Button(self.settingsWindow, text = "ECO Mode", command = self.ecoMode)
        self.place_position_data = tkinter.StringVar()
        self.placeLocationStatus = tkinter.StringVar()
        self.placeLocationStatus.set("")
        # creating a label for
        # name using widget Label
        place_position_label = tkinter.Label(self.settingsWindow, text = 'Parcel Drop Location', font=('calibre',10, 'bold'))
        # creating a entry for input
        # name using widget Entry
        place_position_entry = tkinter.Entry(self.settingsWindow, textvariable = self.place_position_data, font=('calibre',10,'normal'))
        # creating a button using the widget
        # Button that will call the submit function
        sub_btn=tkinter.Button(self.settingsWindow, text = 'Update Location', command = self.sendPlaceLocation)
        notify_label = tkinter.Label(self.settingsWindow, textvariable = self.placeLocationStatus, font=('calibre',10, 'bold'))
        self.eco_button.pack()
        self.velocity_scale.pack(anchor = tkinter.CENTER)
        self.set_velocity.pack()
        self.display_velocity.pack()
        place_position_label.pack()
        place_position_entry.pack()
        sub_btn.pack()
        notify_label.pack()

    # # function for video streaming
    #Used for testing, change to network stream for final version
    def video_stream(self):
        """
        Creates a NetGear socket (TCP for network protocol) to handle video stream from server.

        Args:
            self.systemStatus (string): Used to determine if the system needs to shutdown or go to standby.

        Returns:
            None
        """
        hostname = socket.gethostname()
        IPAddr = socket.gethostbyname(hostname)
        # define tweak flags
        options = {"compression_format": ".jpg", "compression_param": cv2.IMREAD_COLOR}
        # Define Netgear Client at given IP address and define parameters
        # !!! change following IP address '192.168.x.xxx' with yours !!!
        self.NetGearclient = NetGear(
            address=IPAddr,
            port="5454",
            protocol="tcp",
            pattern=0,
            receive_mode=True,
            **options
        )
        self.lmain.grid(row=0, column=1, rowspan=3)
        # loop over
        while True:
            if(self.systemStatus == "Online"):
                # receive frames from network
                frame = self.NetGearclient.recv()
                # check for received frame if Nonetype
                if frame is None:
                    break
                # Hand the frame to a worker thread so receiving is not
                # blocked by the Tk image update.
                thread = threading.Thread(target = self.updateVideoWindow, args = (frame, ))
                thread.start()

    def updateVideoWindow(self, frame):
        """
        Function is called by video_stream() to update the frame (image) on the GUI.
        Runs in a thread to avoid blocking of new frame (update frame as fast as network transmits).

        Args:
            frame (OpenCV frame): image received by the server.
            self.lmain (tkinter Label): Label used to display image on the GUI

        Returns:
            None
        """
        cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
        img = Image.fromarray(cv2image)
        imgtk = ImageTk.PhotoImage(image=img)
        # Keep a reference on the label so the image is not garbage-collected.
        self.lmain.imgtk = imgtk
        self.lmain.configure(image=imgtk)

    #Modify function to return data decoded
    #Not sure if this function is really needed, used mostly for testing purposes
    # def receive(self):
    #     jsonReceived = self.conn.recv(1024)
    #     jsonReceived = json.loads(jsonReceived.decode("utf-8"))
    #     print(jsonReceived["first"])

    def send(self, data):
        """
        Converts data (json dict.) into a string and transmits to connected server socket.

        Args:
            self.conn (socket): Uses socket connection (conn) defined in creation of class.
            data (dictionary): Data to be sent over network in the form of json.

        Returns:
            None
        """
        jsonResult = json.dumps(data)
        self.conn.send(bytes(jsonResult, encoding="utf-8"))

    def on_exit(self):
        """
        Sends shutdown notification over the network to rest of the system (connected to button event).
        Shuts down video stream (NetGear) and the normal socket.
        Finally exits program.

        Args:
            self.conn (socket): Uses socket connection (conn) defined in creation of class
            self.NetGearclient (NetGearSocket): Uses NetGearSocket defined in video_stream()

        Returns:
            None
        """
        try:
            jsonResult = {"first":"Client 2", "second":"Shut Down System"}
            self.send(jsonResult)
        except:
            print("Error sending close command to server")
        try:
            self.root.destroy()
            self.NetGearclient.close()
            self.conn.shutdown(socket.SHUT_RDWR)
            self.conn.close()
        except:
            print("Error closing sockets")
        finally:
            # Hard exit: background threads may still be running.
            os._exit(1)

    def setVelocity(self):
        """
        Transmits new velocity (set by user) over the network to the arm.

        Args:
            self.display_velocity (Tkinter Label): Updates display_velocity to notify user velocity has been updated

        Returns:
            None
        """
        try:
            jsonResult = {"first":"Client 2", "second":"Set Velocity", "third": str(self.speed.get())}
            self.send(jsonResult)
            sel = "New Velocity = " + str(self.speed.get())
            self.display_velocity.config(text = sel, font =("New Times Roman", 14))
        except Exception as e:
            print(e)

    #This is mostly a joke
    #However, it is possible to save alot of energy (up to 21% according to some research papers).
    #It seems as torque increases, running at about half betwen the torque-rpm curve is best
    #It seems the best thing to do is run at 80 to 90% speed with no payload, and reduce as payload increases (torque increases).
    #Can reduce velocity by about 20% with payload vs no payload
    #The e-series robots can measure torque change.
    #No way of knowing power savings (if any) without testing.
    def ecoMode(self):
        """
        Transmits new velocity (ECO) over the network to the arm.

        Args:
            self.display_velocity (Tkinter Label): Updates display_velocity to notify user velocity has been updated

        Returns:
            None
        """
        try:
            jsonResult = {"first":"Client 2", "second":"Set Velocity", "third": "ECO"}
            self.send(jsonResult)
            sel = "New Velocity = ECO "
            self.display_velocity.config(text = sel, font =("New Times Roman", 14))
        except Exception as e:
            print(e)

    #Does not shut down the GUI program, goes into a standby mode, ready to start up again.
    def pauseSystem(self):
        """
        Sends standby notification over the network to the rest of the system.

        Args:
            self.systemStatusLabel (Tkinter Label): Updates systemStatusLabel (displays system status on Interface) text and color.

        Returns:
            None
        """
        print("Putting system in Standby")
        try:
            jsonResult = {"first":"Client 2", "second":"Pause System"}
            self.send(jsonResult)
            self.systemStatusLabelText.set("System Status - Standby")
            self.systemStatusLabel.config(bg = '#f9ff54')
            self.systemStatus = "Standby"
        except Exception as e:
            print(e)

    def startSystem(self):
        """
        Sends startup notification over the network to the rest of the system.
        Starts up the video stream in a separate thread.

        Args:
            self.systemStatusLabel (Tkinter Label): Updates systemStatusLabel (displays system status on Interface) text and color.

        Returns:
            None
        """
        #Create another client (with a different port) to handle the video stream.
        if(self.systemStatus == "Offline"):
            print("Starting up system")
            try:
                self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.conn.connect((HOST, PORT))
                jsonResult = {"first":"Client 2", "second":"Start Up System"}
                self.send(jsonResult)
                self.systemStatusLabelText.set("System Status - Online")
                self.systemStatusLabel.config(bg = '#499c5f')
                self.systemStatus = "Online"
                #Start video stream here
                t1 = threading.Thread(target = self.video_stream)
                t1.start()
            except Exception as e:
                print(e)
        #At this point, if function is called, assuming system is in standby, send message to resume system
        elif(self.systemStatus == "Standby"):
            try:
                jsonResult = {"first":"Client 2", "second":"Resume System"}
                self.send(jsonResult)
                self.systemStatus = "Online"
                self.systemStatusLabelText.set("System Status - Online")
                self.systemStatusLabel.config(bg = '#499c5f')
            except Exception as error:
                print(error)
if __name__ == '__main__':
    # Launch the control window and enter the Tk event loop.
    root = tkinter.Tk()
    GUI(root)
    root.mainloop()
|
base.py | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Distributed remote client and server."""
import threading
class RemoteBase():
    """Client-side proxy for invoking functions on a remote worker.

    Subclasses implement ``rpc``; ``call`` runs it on a background thread,
    and the remote side reports completion through ``on_rpc_done`` /
    ``on_rpc_failed``, which forward to the caller-supplied callbacks.
    """

    def __init__(self):
        super().__init__()
        # Callback slots; (re)populated on every call().
        self.on_done = None
        self.on_failed = None

    def call(self, func, *args, on_done=None, on_failed=None, **kwargs):
        """Invoke *func* remotely without blocking the caller."""
        self.on_done = on_done
        self.on_failed = on_failed
        worker = threading.Thread(
            target=self.rpc, args=(func,) + args, kwargs=kwargs)
        self.th_rpc = worker
        worker.start()

    def rpc(self, func, *args, **kwargs):
        """Perform the actual remote invocation (subclass hook)."""
        raise NotImplementedError

    def on_rpc_done(self, ret):
        """Record the result and notify the success callback."""
        self.ret = ret
        self.on_done(ret)

    def on_rpc_failed(self, ret):
        """Notify the failure callback with the error payload."""
        self.on_failed(ret)
class WorkerBase():
    """Distributed remote worker (server) base class."""

    def run(self, estim):
        """Execute the given estimator (subclass hook)."""
        raise NotImplementedError
|
sl_network_dapp.py | from shadowlands.sl_dapp import SLDapp
from shadowlands.sl_contract.sloader import SLoader, DappNotFound
from pathlib import Path
import threading
import wget, zipfile, zipimport
import sys, textwrap, os, types, importlib, re, shutil
from subprocess import call, DEVNULL
from shadowlands.utils import filehasher
from shadowlands.tui.debug import debug
import pdb
class SLNetworkDapp(SLDapp):
    """Dapp that resolves another dapp on-chain, downloads and verifies its
    zip package, installs its dependencies and launches it."""

    def initialize(self):
        # All real setup happens in __init__ / run_network_dapp.
        pass

    def __init__(self, screen, scene, eth_node, config, block_watcher, dapp_location, destroy_window=None):
        self.dapp_location = dapp_location
        # BUG FIX: destroy_window was previously hard-coded to None here,
        # silently dropping the caller-supplied callback.
        super(SLNetworkDapp, self).__init__(screen, scene, eth_node, config, block_watcher, destroy_window=destroy_window)
        self.show_wait_frame()
        # Fetch and launch off the UI thread so the wait frame stays responsive.
        threading.Thread(target=self.run_network_dapp, args=[self.dapp_location]).start()

    def run_network_dapp(self, dapp_target):
        """Resolve *dapp_target* via the SLoader contract, fetch and verify
        its zip, install requirements, then instantiate its Dapp class."""
        self.sloader_contract = SLoader(self.node)
        try:
            uri, checksum = self.sloader_contract.package(dapp_target)
        except DappNotFound:
            self.hide_wait_frame()
            self.add_message_dialog("Could not find dapp at that address/name.")
            return

        # Check the local cache for a file matching the on-chain checksum.
        shadowlands_cache_dir = Path.home().joinpath(".shadowlands").joinpath("cache")
        app_zipfile = None
        for cached_file in shadowlands_cache_dir.iterdir():
            if checksum == filehasher(cached_file):
                app_zipfile = cached_file
                break

        if app_zipfile is None:
            try:
                app_zipfile = wget.download(uri, out=str(shadowlands_cache_dir), bar=None)
            except Exception:
                # Narrowed from a bare except so SystemExit/KeyboardInterrupt
                # still propagate.
                self.hide_wait_frame()
                self.add_message_dialog("Could not download dapp URI. Aborting.")
                return

        # Verify the file we are about to run against the on-chain checksum.
        if checksum != filehasher(str(app_zipfile)):
            self.hide_wait_frame()
            self.add_message_dialog("Checksum did not match dapp. Aborting.")
            return

        archive = zipfile.ZipFile(str(app_zipfile), 'r')
        # Assumes only one directory in top of zip, containing dapp.
        top_level = archive.namelist()[0]
        try:
            requirements = archive.read(top_level + 'requirements.txt')
            # BUG FIX: archive.read() returns bytes; decode before splitting so
            # pip receives str arguments on every platform.
            reqs = requirements.decode('utf-8').split()
        except KeyError:
            # No requirements.txt shipped with the dapp.
            reqs = []
        try:
            pipbin = Path.home().joinpath('.shadowlands').joinpath('bin').joinpath('pip')
            call([str(pipbin), 'install'] + reqs, stderr=DEVNULL, stdout=DEVNULL)
        except Exception:
            # Dependencies failed to install; drop the cached zip so the next
            # attempt starts fresh.
            os.remove(str(app_zipfile))
            self.hide_wait_frame()
            self.add_message_dialog("Error while gathering dependencies.")
            return
        archive.close()

        # app_zipfile exists and all reqs are installed; import and launch.
        importer = zipimport.zipimporter(str(app_zipfile))
        archive = zipfile.ZipFile(str(app_zipfile), 'r')
        module_name = archive.namelist()[0].replace('/', '')
        dapp_module = importer.load_module(module_name)
        try:
            Dapp = getattr(dapp_module, 'Dapp')
        except AttributeError:
            self.add_message_dialog("Possible module name conflict.")
            self.hide_wait_frame()
            return

        self.hide_wait_frame()
        Dapp(
            self._screen,
            self._scene,
            self._node,
            self._config,
            self._block_callback_watcher
        )
|
test_create_collection.py | import pdb
import copy
import logging
import itertools
import time
import threading
from multiprocessing import Process
import sklearn.preprocessing
import pytest
from utils.utils import *
from common.constants import *
uid = "create_collection"
class TestCreateCollection:
    """
    ******************************************************************
    The following cases are used to test `create_collection` function
    ******************************************************************
    """
    # NOTE(review): these cases depend on external fixtures (`connect`,
    # `collection`, `dis_connect`) and helpers from utils/common — they run
    # against a live Milvus-style server; confirm fixture wiring in conftest.

    # Parametrized fixtures: each test using them runs once per generated value.
    @pytest.fixture(
        scope="function",
        params=gen_single_filter_fields()
    )
    def get_filter_field(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_single_vector_fields()
    )
    def get_vector_field(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_segment_row_limits()
    )
    def get_segment_row_limit(self, request):
        yield request.param

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_create_collection_fields(self, connect, get_filter_field, get_vector_field):
        '''
        target: test create normal collection with different fields
        method: create collection with diff fields: metric/field_type/...
        expected: no exception raised
        '''
        filter_field = get_filter_field
        logging.getLogger().info(filter_field)
        vector_field = get_vector_field
        collection_name = gen_unique_str(uid)
        fields = {
            "fields": [gen_primary_field(), filter_field, vector_field],
            # "segment_row_limit": default_segment_row_limit
        }
        logging.getLogger().info(fields)
        connect.create_collection(collection_name, fields)
        assert connect.has_collection(collection_name)

    # NOTE: leading underscore disables this case (pytest will not collect it).
    @pytest.mark.tags(CaseLabel.L2)
    def _test_create_collection_segment_row_limit(self, connect, get_segment_row_limit):
        '''
        target: test create normal collection with different fields
        method: create collection with diff segment_row_limit
        expected: no exception raised
        '''
        collection_name = gen_unique_str(uid)
        fields = copy.deepcopy(default_fields)
        # fields["segment_row_limit"] = get_segment_row_limit
        connect.create_collection(collection_name, fields)
        assert connect.has_collection(collection_name)

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_create_collection_after_insert(self, connect, collection):
        '''
        target: test insert vector, then create collection again
        method: insert vector and create collection
        expected: error raised
        '''
        # pdb.set_trace()
        connect.insert(collection, default_entity)

        try:
            connect.create_collection(collection, default_fields)
        except Exception as e:
            # Server error objects expose `code`/`message`; fall back to a
            # sentinel string so the assertion message is self-describing.
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "Create collection failed: meta table add collection failed,error = collection %s exist" % collection

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_create_collection_after_insert_flush(self, connect, collection):
        '''
        target: test insert vector, then create collection again
        method: insert vector and create collection
        expected: error raised
        '''
        connect.insert(collection, default_entity)
        # connect.flush([collection])
        try:
            connect.create_collection(collection, default_fields)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "Create collection failed: meta table add collection failed,error = collection %s exist" % collection

    # TODO: assert exception
    @pytest.mark.tags(CaseLabel.L2)
    def test_create_collection_without_connection(self, dis_connect):
        '''
        target: test create collection, without connection
        method: create collection with correct params, with a disconnected instance
        expected: error raised
        '''
        collection_name = gen_unique_str(uid)
        with pytest.raises(Exception) as e:
            dis_connect.create_collection(collection_name, default_fields)

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_create_collection_existed(self, connect):
        '''
        target: test create collection but the collection name have already existed
        method: create collection with the same collection_name
        expected: error raised
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        try:
            connect.create_collection(collection_name, default_fields)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "Create collection failed: meta table add collection failed,error = collection %s exist" % collection_name

    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_create_after_drop_collection(self, connect, collection):
        '''
        target: create with the same collection name after collection dropped
        method: delete, then create
        expected: create success
        '''
        connect.drop_collection(collection)
        # Give the server time to finish the drop before re-creating.
        time.sleep(2)
        connect.create_collection(collection, default_fields)

    @pytest.mark.tags(CaseLabel.L2)
    def test_create_collection_multithread(self, connect):
        '''
        target: test create collection with multithread
        method: create collection using multithread,
        expected: collections are created
        '''
        threads_num = 8
        threads = []
        collection_names = []

        def create():
            # Worker body: each thread creates one uniquely named collection.
            collection_name = gen_unique_str(uid)
            collection_names.append(collection_name)
            connect.create_collection(collection_name, default_fields)
        for i in range(threads_num):
            t = MyThread(target=create, args=())
            threads.append(t)
            t.start()
            # Stagger thread starts slightly to avoid a thundering herd.
            time.sleep(0.2)
        for t in threads:
            t.join()

        for item in collection_names:
            assert item in connect.list_collections()
            connect.drop_collection(item)
class TestCreateCollectionInvalid(object):
    """
    Test creating collections with invalid params
    """
    # Parametrized fixtures yielding invalid inputs from the shared generators.
    @pytest.fixture(
        scope="function",
        params=gen_invalid_metric_types()
    )
    def get_metric_type(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_ints()
    )
    def get_segment_row_limit(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_ints()
    )
    def get_dim(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_invalid_string(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_field_types()
    )
    def get_field_type(self, request):
        yield request.param

    # NOTE: leading underscore disables this case (pytest will not collect it).
    @pytest.mark.tags(CaseLabel.L2)
    def _test_create_collection_with_invalid_segment_row_limit(self, connect, get_segment_row_limit):
        collection_name = gen_unique_str()
        fields = copy.deepcopy(default_fields)
        fields["segment_row_limit"] = get_segment_row_limit
        with pytest.raises(Exception) as e:
            connect.create_collection(collection_name, fields)

    @pytest.mark.tags(CaseLabel.L2)
    def test_create_collection_with_invalid_dimension(self, connect, get_dim):
        # An invalid `dim` in the vector field params must be rejected.
        dimension = get_dim
        collection_name = gen_unique_str()
        fields = copy.deepcopy(default_fields)
        fields["fields"][-1]["params"]["dim"] = dimension
        with pytest.raises(Exception) as e:
            connect.create_collection(collection_name, fields)

    @pytest.mark.tags(CaseLabel.L2)
    def test_create_collection_with_invalid_collection_name(self, connect, get_invalid_string):
        collection_name = get_invalid_string
        with pytest.raises(Exception) as e:
            connect.create_collection(collection_name, default_fields)

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("collection_name", ('', None))
    def test_create_collection_with_empty_or_None_collection_name(self, connect, collection_name):
        # collection_name = ''
        try:
            connect.create_collection(collection_name, default_fields)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "Collection name should not be empty"

    @pytest.mark.tags(CaseLabel.L2)
    def test_create_collection_no_dimension(self, connect):
        '''
        target: test create collection with no dimension params
        method: create collection with correct params
        expected: create status return ok
        '''
        collection_name = gen_unique_str(uid)
        fields = copy.deepcopy(default_fields)
        # Remove `dim` from the vector field; the server must reject this.
        fields["fields"][-1]["params"].pop("dim")
        try:
            connect.create_collection(collection_name, fields)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "dimension is not defined in field type params"

    # NOTE: leading underscore disables this case (pytest will not collect it).
    @pytest.mark.tags(CaseLabel.L2)
    def _test_create_collection_no_segment_row_limit(self, connect):
        '''
        target: test create collection with no segment_row_limit params
        method: create collection with correct params
        expected: use default default_segment_row_limit
        '''
        collection_name = gen_unique_str(uid)
        fields = copy.deepcopy(default_fields)
        fields.pop("segment_row_limit")
        connect.create_collection(collection_name, fields)
        res = connect.get_collection_info(collection_name)
        logging.getLogger().info(res)
        assert res["segment_row_limit"] == default_server_segment_row_limit

    # TODO: assert exception
    @pytest.mark.tags(CaseLabel.L2)
    def test_create_collection_limit_fields(self, connect):
        # Exceeding the server-side field-count limit must be rejected.
        collection_name = gen_unique_str(uid)
        limit_num = 64
        fields = copy.deepcopy(default_fields)
        for i in range(limit_num):
            field_name = gen_unique_str("field_name")
            field = {"name": field_name, "type": DataType.INT64}
            fields["fields"].append(field)

        try:
            connect.create_collection(collection_name, fields)
        except Exception as e:
            code = getattr(e, 'code', "The exception does not contain the field of code.")
            assert code == 1
            message = getattr(e, 'message', "The exception does not contain the field of message.")
            assert message == "maximum field's number should be limited to 64"

    # TODO: assert exception
    @pytest.mark.tags(CaseLabel.L2)
    def test_create_collection_invalid_field_name(self, connect, get_invalid_string):
        collection_name = gen_unique_str(uid)
        fields = copy.deepcopy(default_fields)
        field_name = get_invalid_string
        field = {"name": field_name, "type": DataType.INT64}
        fields["fields"].append(field)
        with pytest.raises(Exception) as e:
            connect.create_collection(collection_name, fields)

    # TODO: assert exception
    @pytest.mark.tags(CaseLabel.L2)
    def test_create_collection_invalid_field_type(self, connect, get_field_type):
        collection_name = gen_unique_str(uid)
        fields = copy.deepcopy(default_fields)
        field_type = get_field_type
        field = {"name": "test_field", "type": field_type}
        fields["fields"].append(field)
        with pytest.raises(Exception) as e:
            connect.create_collection(collection_name, fields)
|
q12_Queue.py | import multiprocessing
# Child-process worker. Processes have separate memory, so the queue is the
# channel that carries data back to the parent.
def foo(subQueue):
    """Put a sample value onto *subQueue* from the child process."""
    subQueue.put('zhangsan')
if __name__ == "__main__":
    # Create the queue shared between parent and child process.
    q = multiprocessing.Queue()
    # Create the process object; the queue is handed over as an argument.
    p = multiprocessing.Process(target=foo, args=(q, ))
    # Start the child process.
    p.start()
    # Fetch the item the child put on the queue (blocks until available).
    print(q.get())
objective.py | import pika
import pickle
from multiprocessing import Process
class Objective:
    """Base class for creating objective function objects.

    It provides functionality for evaluating the objective function via a
    message broker (RabbitMQ through pika).

    Parameters
    ----------
    num : int
        The ordinal number of the objective function.
        It is used by the main algorithm and players to identify the objective function.
        Must be a unique value from the range $[0, n-1]$, where $n$ is the number of all objective functions.
    qname : str
        The queue name
    host : str
        Hostname or IP Address to connect to message broker
    port : int
        TCP port to connect to message broker
    """

    def __init__(self, num, qname, host, port):
        """Constructor method"""
        self.num = num
        self.qname = qname
        self.host = host
        self.port = port
        self._p = None  # consumer subprocess handle; set by run()

    def call(self, x):
        """Abstract method. Override this method with the objective function formula.

        Parameters
        ----------
        x : numpy.ndarray
            A 2-d numpy array of solutions.

        Returns
        -------
        y : numpy.ndarray
            A 1-d numpy array of values of objective function for given x.
        """
        raise NotImplementedError('You must override this method in your class!')

    def run(self):
        """Start consuming evaluation requests in a daemon subprocess."""
        p = Process(target=self._consumer, args=(self,))
        p.daemon = True
        p.start()
        self._p = p

    def is_alive(self):
        """Return True while the consumer subprocess is running."""
        if self._p is not None:
            return self._p.is_alive()
        return False

    def close(self):
        """Terminate the consumer and delete its queue from the broker.

        BUG FIX: previously this always opened a broker connection — even when
        run() was never called — failing when no broker was reachable, and the
        cleanup connection was never closed.
        """
        if self._p is None:
            return
        self._p.terminate()
        connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.host, port=self.port))
        channel = connection.channel()
        channel.queue_delete(queue=self.qname)
        connection.close()

    def __del__(self):
        # A finalizer must never raise; the broker (or the interpreter itself)
        # may already be shutting down when this runs.
        try:
            self.close()
        except Exception:
            pass

    @staticmethod
    def _consumer(self):
        # Runs in the subprocess. Declared @staticmethod with an explicit
        # `self` so multiprocessing passes the instance via args=(self,).
        def on_request(ch, method, props, body):
            # NOTE(review): pickle.loads on broker payloads is only safe when
            # the broker/producers are trusted — do not expose the queue.
            x = pickle.loads(body, encoding='bytes')
            response = pickle.dumps(self.call(x))
            # RPC-style reply: publish back to the caller's reply_to queue.
            ch.basic_publish(exchange='', routing_key=props.reply_to, properties=pika.BasicProperties(
                correlation_id=props.correlation_id), body=response)
            ch.basic_ack(delivery_tag=method.delivery_tag)

        connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.host, port=self.port))
        channel = connection.channel()
        channel.queue_declare(queue=self.qname)
        # Hand one message at a time to this worker.
        channel.basic_qos(prefetch_count=1)
        channel.basic_consume(queue=self.qname, on_message_callback=on_request)
        channel.start_consuming()
|
sending_multiple_requests.py | # python3 send_tons_of_requests.py
import requests
import threading
url = 'https://www.google.com'
data = {}
def send_a_ton(url, data):
    """Hammer *url* with GET requests forever, printing each response body.

    Never returns; intended to be run on daemon threads for load testing.
    """
    while True:
        # response = requests.post(url, data=data).text
        # BUG FIX: a GET request carries query parameters, not a body; `data=`
        # sent `data` as a request body, which servers ignore for GET.
        response = requests.get(url, params=data).text
        print(response)
threads = []
# Fill the threads array with threads
for i in range(50):
    t = threading.Thread(target=send_a_ton, args=[url, data])
    # NOTE(review): daemon=True lets the process exit without waiting on the
    # workers, but the join() loop below waits on them anyway — since each
    # worker loops forever, the joins never return. Confirm this is intended.
    t.daemon = True
    threads.append(t)
# Start each of the threads
for i in range(50):
    threads[i].start()
# Join each of the threads
for i in range(50):
    threads[i].join()
|
threads.py | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 20 17:58:47 2021
@author: Clauber
"""
#Com este exemplo, podemos criar um teste de stress, scan simultaneo, ou seja,
#envio de requisições
#biblioteca para fazer o multiThread
from threading import Thread
import time
#função para colocar dois carros de corrida que competirão entre si.
def carro(velocidade, piloto):
    """Simulate a race car: advance `velocidade` km every half second until
    the 100 km mark is passed, printing progress for `piloto`."""
    percorrido = 0
    while percorrido <= 100:
        percorrido += velocidade
        time.sleep(0.5)
        print('Piloto: {} km: {} \n '.format(piloto, percorrido))
# Race the two drivers on separate threads so they advance concurrently.
t_carro1 = Thread(target = carro, args = [1, 'Clauber'])
t_carro2 = Thread(target = carro, args = [2, 'Bot'])
t_carro1.start()
t_carro2.start()
|
bilkent.py |
from selenium import webdriver
import time
from selenium.webdriver.chrome.options import Options
from colorama import init
from termcolor import colored
import json
import os
import threading
import sys
import itertools
init()
main_url = "https://stars.bilkent.edu.tr/"
tab_url = "https://webmail.bilkent.edu.tr/"
version = 2.0
done = False  # flipped to True to stop the animate() spinner
mode = sys.argv[1]
# Build the Chrome options BEFORE constructing the driver.
# BUG FIX: previously the driver was created first and `options` was never
# passed to it, so the notification prefs and 'eager' page-load strategy
# silently had no effect.
options = Options()
preferences = {"profile.default_content_setting_values.notifications": 2}
options.add_experimental_option("prefs", preferences)
options.page_load_strategy = 'eager'
driver = webdriver.Chrome(
    executable_path=f'{os.getcwd()}/chromedriver',
    options=options)
def getCode(mb, pb):
    """Fetch the 2FA verification code from Bilkent webmail.

    Opens webmail in a new browser tab, logs in with mail address *mb* and
    password *pb*, opens the newest message, extracts the code from its body,
    then closes the tab and returns the code string.
    """
    # Open webmail in a second tab and switch to it.
    driver.execute_script("window.open('');")
    driver.switch_to.window(driver.window_handles[1])
    driver.get(tab_url)
    # Mail login form: username, password, submit.
    driver.find_element_by_xpath(
        '/html/body/div[1]/div/form/table/tbody/tr[1]/td[2]/input').send_keys(mb)
    time.sleep(0.2)
    driver.find_element_by_xpath(
        '/html/body/div[1]/div/form/table/tbody/tr[2]/td[2]/input').send_keys(pb)
    time.sleep(0.1)
    driver.find_element_by_xpath('/html/body/div[1]/div/form/p/button').click()
    time.sleep(1)
    # Open the newest message; its body lives in a preview iframe.
    driver.find_elements_by_class_name('message')[0].click()
    frame = driver.find_element_by_tag_name("iframe")
    driver.switch_to.default_content()
    driver.switch_to.frame(frame)
    time.sleep(1)
    text = driver.find_element_by_xpath(
        '/html/body/div[1]/div/div[2]/div[2]/div[2]/div/div').text
    driver.switch_to.window(driver.window_handles[1])
    # Click a toolbar button by id — presumably delete/archive the mail;
    # TODO(review): confirm what 'rcmbtn123' maps to in this Roundcube build.
    driver.find_element_by_id(
        'rcmbtn123').click()
    # Assumes the code is the third whitespace-separated token of the body —
    # TODO(review): confirm against the actual mail format.
    code = text.split()[2]
    driver.close()
    driver.switch_to.window(driver.window_handles[0])
    return code
def join():
    """Log in to Bilkent STARS (with mail-based 2FA) and open the target page.

    Credentials are read from credentials.json; the 2FA code is fetched from
    webmail via getCode(). In "zoom" mode, also clicks the meeting link and
    counts down before closing the windows.
    """
    # NOTE(review): indentation was reconstructed from a flattened source;
    # the countdown/close sequence is assumed to belong to the "zoom" branch —
    # confirm against the original file.
    with open("credentials.json") as cred:
        data = json.load(cred)
        uid = data["bilkent_id"]
        upw = data["stars_password"]
        mb = data["bilkent_mail"]
        pb = data["mail_password"]
    driver.get(main_url)
    # Navigate to the SRS login and submit id/password.
    driver.find_element_by_xpath('//*[@id="services"]/li[3]/a').click()
    driver.find_element_by_xpath(
        '//*[@id="LoginForm_username"]').send_keys(uid)
    driver.find_element_by_xpath(
        '/html/body/div/div[1]/div[2]/div[1]/div/section/form/fieldset/div/div[1]/div[2]/div/div/input').send_keys(upw)
    driver.find_element_by_xpath(
        '/html/body/div/div[1]/div[2]/div[1]/div/section/form/fieldset/div/div[1]/div[3]/button').click()
    time.sleep(0.5)
    time.sleep(0.5)
    # Enter the emailed 2FA code and submit.
    driver.find_element_by_xpath(
        '/html/body/div/div[1]/div[2]/div[1]/div/section/form/fieldset/div/div[1]/div[1]/div/div/input').send_keys(getCode(mb, pb))
    driver.find_element_by_xpath(
        '/html/body/div/div[1]/div[2]/div[1]/div/section/form/fieldset/div/div[1]/div[2]/button').click()
    time.sleep(1)
    try:
        # Dismiss the post-login popup if present.
        driver.execute_script("closeMessage();")
    except:
        pass
    if (mode == "zoom"):
        global done
        # Stop the spinner before printing the countdown.
        done = True
        time.sleep(1.2)
        driver.find_element_by_xpath(
            '/html/body/div[13]/div[2]/div/div[7]/div[2]/div[2]/a[1]').click()
        for i in range(10, 0, -1):
            # print(colored("\r>>", "cyan", "on_grey", ["bold"]),
            #       colored(f"Windows will be closed in {i}", "white"))
            sys.stdout.write(f"\rWindows will be closed in {i} ")
            time.sleep(1)
        sys.stdout.write("\r ")
        driver.close()
        driver.switch_to.window(driver.window_handles[0])
    driver.close()
def animate():
    """Console spinner; runs until the module-level `done` flag becomes True."""
    for spinner_char in itertools.cycle(['|', '/', '-', '\\']):
        if done:
            break
        sys.stdout.write('\rRunning ' + spinner_char)
        sys.stdout.flush()
        time.sleep(0.1)
    # Clear the spinner line once finished.
    sys.stdout.write('\r ')
if __name__ == "__main__":
    # Banner: version and requested mode (first CLI argument).
    print(colored(">>", "cyan", "on_grey", ["bold"]),
          colored(f"berkegokmen bilkent script version {version}", "white"))
    print(colored(">>", "cyan", "on_grey", ["bold"]),
          colored(f"Target: {mode}", "white"))
    # Spinner thread runs until `done` is set (inside join() for zoom mode,
    # or right after join() returns otherwise).
    t = threading.Thread(target=animate)
    t.start()
    join()
    done = True
    sys.stdout.write("\r")
    sys.stdout.flush()
    print(colored(">>", "cyan", "on_grey", ["bold"]),
          colored(f"Done. \n", "white"))
|
stressTestServer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from stressTest.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
    """Path to the deployment config file ($KB_DEPLOYMENT_CONFIG), or None."""
    return environ.get(DEPLOY)
def get_service_name():
    """Service name from the environment ($KB_SERVICE_NAME), or None."""
    return environ.get(SERVICE)
def get_config():
    """Parse this service's section of the deploy config into a plain dict.

    Returns None when no deployment config file is configured.
    """
    if not get_config_file():
        return None
    parser = ConfigParser()
    parser.read(get_config_file())
    section = get_service_name() or 'stressTest'
    return {name: value for name, value in parser.items(section)}
# Parse the deployment config once at import time; the implementation module
# is imported afterwards so it can be constructed with that config.
config = get_config()

from stressTest.stressTestImpl import stressTest  # noqa @IgnorePep8
impl_stressTest = stressTest(config)
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that also serializes sets, frozensets and any object
    exposing a toJSONable() method."""

    def default(self, obj):
        # Sets have no native JSON form; emit them as arrays.
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        # Objects may opt in to JSON serialization via toJSONable().
        if hasattr(obj, 'toJSONable'):
            return obj.toJSONable()
        # Delegate to the base class, which raises TypeError.
        return super().default(obj)
class JSONRPCServiceCustom(JSONRPCService):
    """JSONRPCService variant that threads a per-call context (ctx) through
    to every registered method as its first argument."""

    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.

        Arguments:
        jsondata -- remote method call in jsonrpc format
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            return json.dumps(result, cls=JSONObjectEncoder)

        return None

    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Does it have enough arguments?
                # (- 1 everywhere: ctx is injected and not counted as a param)
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')

                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                if request['jsonrpc'] < 11:
                    raise KeywordError

                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            raise
        except Exception as e:
            # log.exception('method %s threw an exception' % request['method'])
            # Exception was raised inside the method.
            # Wrap it in a server error carrying the traceback and args.
            newerr = JSONServerError()
            newerr.trace = traceback.format_exc()
            if len(e.args) == 1:
                newerr.data = repr(e.args[0])
            else:
                newerr.data = repr(e.args)
            raise newerr
        return result

    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.

        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code, no
        # need to do it again
        # try:
        #     rdata = json.loads(jsondata)
        # except ValueError:
        #     raise ParseError

        # set some default values for error handling
        request = self._get_default_vals()
        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)

            # Don't respond to notifications
            if respond is None:
                return None
            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)

            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)

            if responds:
                return responds

            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError

    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        # Optional per-method type validation, when declared at add() time.
        if 'types' in self.method_data[request['method']]:
            self._validate_params_types(request['method'], request['params'])

        result = self._call_method(ctx, request)

        # Do not respond to notifications.
        if request['id'] is None:
            return None

        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']

        return respond
class MethodContext(dict):
    """Per-call context passed to service methods.

    Behaves as a dict of request metadata (client_ip, user_id, token, ...)
    and exposes logging helpers bound to the server logger.
    """

    def __init__(self, logger):
        self['client_ip'] = None
        self['user_id'] = None
        self['authenticated'] = None
        self['token'] = None
        self['module'] = None
        self['method'] = None
        self['call_id'] = None
        self['rpc_context'] = None
        self['provenance'] = None
        # Levels accepted verbatim by log_debug; numeric levels 1-3 are
        # mapped to syslog-style 7-9 instead.
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger

    def log_err(self, message):
        self._log(log.ERR, message)

    def log_info(self, message):
        self._log(log.INFO, message)

    def log_debug(self, message, level=1):
        """Log *message* at a debug level (1-3, or a recognized level token)."""
        if level in self._debug_levels:
            pass
        else:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            level = level + 6
        self._log(level, message)

    def set_log_level(self, level):
        self._logger.set_log_level(level)

    def get_log_level(self):
        return self._logger.get_log_level()

    def clear_log_level(self):
        self._logger.clear_user_log_level()

    def _log(self, level, message):
        # Route everything through the shared logger with call metadata.
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])

    def provenance(self):
        """Provenance for this call: fetched from the callback server when
        SDK_CALLBACK_URL is set, otherwise the locally recorded value."""
        callbackURL = os.environ.get('SDK_CALLBACK_URL')
        if callbackURL:
            # OK, there's a callback server from which we can get provenance
            arg_hash = {'method': 'CallbackServer.get_provenance',
                        'params': [],
                        'version': '1.1',
                        'id': str(_random.random())[2:]
                        }
            body = json.dumps(arg_hash)
            response = _requests.post(callbackURL, data=body,
                                      timeout=60)
            response.encoding = 'utf-8'
            if response.status_code == 500:
                # Error payloads may be JSON (structured) or plain text.
                if ('content-type' in response.headers and
                        response.headers['content-type'] ==
                        'application/json'):
                    err = response.json()
                    if 'error' in err:
                        raise ServerError(**err['error'])
                    else:
                        raise ServerError('Unknown', 0, response.text)
                else:
                    raise ServerError('Unknown', 0, response.text)
            if not response.ok:
                response.raise_for_status()
            resp = response.json()
            if 'result' not in resp:
                raise ServerError('Unknown', 0,
                                  'An unknown server error occurred')
            return resp['result'][0]
        else:
            return self.get('provenance')
class ServerError(Exception):
    '''
    The call returned an error. Fields:
    name - the name of the error.
    code - the error code.
    message - a human readable error message.
    data - the server side stacktrace.
    '''

    def __init__(self, name, code, message, data=None, error=None):
        super(Exception, self).__init__(message)
        self.name = name
        self.code = code
        # Normalize a missing/empty message to the empty string.
        self.message = message if message else ''
        # JSON-RPC 2.0 supplies 'data'; 1.1 supplies 'error'.
        self.data = data or error or ''

    def __str__(self):
        pieces = [self.name, ': ', str(self.code), '. ', self.message,
                  '\n', self.data]
        return ''.join(pieces)
def getIPAddress(environ):
    """Best-effort client IP from a WSGI environ, honouring proxy headers
    unless the config explicitly distrusts them."""
    forwarded_for = environ.get('HTTP_X_FORWARDED_FOR')
    real_ip = environ.get('HTTP_X_REAL_IP')
    trust_proxy_headers = config is None or \
        config.get('dont_trust_x_ip_headers') != 'true'

    if (trust_proxy_headers):
        # X-Forwarded-For may be a chain; the first hop is the client.
        if (forwarded_for):
            return forwarded_for.split(',')[0].strip()
        if (real_ip):
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
    # Wrap the wsgi handler in a class definition so that we can
    # do some initialization and avoid regenerating stuff over
    # and over

    def logcallback(self):
        # Keep the server log pointed at the (possibly rotated) user log file.
        self.serverlog.set_log_file(self.userlog.get_log_file())

    def log(self, level, context, message):
        self.serverlog.log_message(level, message, context['client_ip'],
                                   context['user_id'], context['module'],
                                   context['method'], context['call_id'])

    def __init__(self):
        submod = get_service_name() or 'stressTest'
        # Two loggers: userlog follows user-configurable settings, serverlog
        # mirrors its file but with a fixed level.
        self.userlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, changecallback=self.logcallback,
            config=get_config_file())
        self.serverlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, logfile=self.userlog.get_log_file())
        self.serverlog.set_log_level(6)
        self.rpc_service = JSONRPCServiceCustom()
        self.method_authentication = dict()
        # Register the service methods plus their auth requirements.
        self.rpc_service.add(impl_stressTest.run_stressTest,
                             name='stressTest.run_stressTest',
                             types=[dict])
        self.method_authentication['stressTest.run_stressTest'] = 'required'  # noqa
        self.rpc_service.add(impl_stressTest.run_stressTest2,
                             name='stressTest.run_stressTest2',
                             types=[dict])
        self.method_authentication['stressTest.run_stressTest2'] = 'required'  # noqa
        self.rpc_service.add(impl_stressTest.run_stressTest3,
                             name='stressTest.run_stressTest3',
                             types=[dict])
        self.method_authentication['stressTest.run_stressTest3'] = 'required'  # noqa
        self.rpc_service.add(impl_stressTest.status,
                             name='stressTest.status',
                             types=[dict])
        authurl = config.get(AUTH) if config else None
        self.auth_client = _KBaseAuth(authurl)

    def __call__(self, environ, start_response):
        # Context object, equivalent to the perl impl CallContext
        ctx = MethodContext(self.userlog)
        ctx['client_ip'] = getIPAddress(environ)
        status = '500 Internal Server Error'

        try:
            body_size = int(environ.get('CONTENT_LENGTH', 0))
        except (ValueError):
            body_size = 0
        if environ['REQUEST_METHOD'] == 'OPTIONS':
            # we basically do nothing and just return headers
            status = '200 OK'
            rpc_result = ""
        else:
            request_body = environ['wsgi.input'].read(body_size)
            try:
                req = json.loads(request_body)
            except ValueError as ve:
                err = {'error': {'code': -32700,
                                 'name': "Parse error",
                                 'message': str(ve),
                                 }
                       }
                rpc_result = self.process_error(err, ctx, {'version': '1.1'})
            else:
                # Fill the context from the parsed request before dispatch.
                ctx['module'], ctx['method'] = req['method'].split('.')
                ctx['call_id'] = req['id']
                ctx['rpc_context'] = {
                    'call_stack': [{'time': self.now_in_utc(),
                                    'method': req['method']}
                                   ]
                }
                prov_action = {'service': ctx['module'],
                               'method': ctx['method'],
                               'method_params': req['params']
                               }
                ctx['provenance'] = [prov_action]
                try:
                    token = environ.get('HTTP_AUTHORIZATION')

                    # parse out the method being requested and check if it
                    # has an authentication requirement
                    method_name = req['method']
                    auth_req = self.method_authentication.get(
                        method_name, 'none')
                    if auth_req != 'none':
                        if token is None and auth_req == 'required':
                            err = JSONServerError()
                            err.data = (
                                'Authentication required for ' +
                                'stressTest ' +
                                'but no authentication header was passed')
                            raise err
                        elif token is None and auth_req == 'optional':
                            pass
                        else:
                            try:
                                user = self.auth_client.get_user(token)
                                ctx['user_id'] = user
                                ctx['authenticated'] = 1
                                ctx['token'] = token
                            except Exception as e:
                                # Invalid token is fatal only when auth is
                                # required; 'optional' proceeds unauthenticated.
                                if auth_req == 'required':
                                    err = JSONServerError()
                                    err.data = \
                                        "Token validation failed: %s" % e
                                    raise err
                    if (environ.get('HTTP_X_FORWARDED_FOR')):
                        self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
                                 environ.get('HTTP_X_FORWARDED_FOR'))

                    self.log(log.INFO, ctx, 'start method')
                    rpc_result = self.rpc_service.call(ctx, req)
                    self.log(log.INFO, ctx, 'end method')
                    status = '200 OK'
                except JSONRPCError as jre:
                    err = {'error': {'code': jre.code,
                                     'name': jre.message,
                                     'message': jre.data
                                     }
                           }
                    trace = jre.trace if hasattr(jre, 'trace') else None
                    rpc_result = self.process_error(err, ctx, req, trace)
                except Exception:
                    err = {'error': {'code': 0,
                                     'name': 'Unexpected Server Error',
                                     'message': 'An unexpected server error ' +
                                                'occurred',
                                     }
                           }
                    rpc_result = self.process_error(err, ctx, req,
                                                    traceback.format_exc())

        # print('Request method was %s\n' % environ['REQUEST_METHOD'])
        # print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
        # print('Request body was: %s' % request_body)
        # print('Result from the method call is:\n%s\n' % \
        #     pprint.pformat(rpc_result))

        if rpc_result:
            response_body = rpc_result
        else:
            response_body = ''

        response_headers = [
            ('Access-Control-Allow-Origin', '*'),
            ('Access-Control-Allow-Headers', environ.get(
                'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
            ('content-type', 'application/json'),
            ('content-length', str(len(response_body)))]
        start_response(status, response_headers)
        return [response_body.encode('utf8')]

    def process_error(self, error, context, request, trace=None):
        # Attach id/version info from the request and fold the traceback into
        # whichever field the caller's JSON-RPC version expects.
        if trace:
            self.log(log.ERR, context, trace.split('\n')[0:-1])
        if 'id' in request:
            error['id'] = request['id']
        if 'version' in request:
            error['version'] = request['version']
            e = error['error'].get('error')
            if not e:
                error['error']['error'] = trace
        elif 'jsonrpc' in request:
            error['jsonrpc'] = request['jsonrpc']
            error['error']['data'] = trace
        else:
            error['version'] = '1.0'
            error['error']['error'] = trace
        return json.dumps(error)

    def now_in_utc(self):
        # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
        dtnow = datetime.datetime.now()
        dtutcnow = datetime.datetime.utcnow()
        delta = dtnow - dtutcnow
        hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
                        60)
        return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
# Module-level singleton WSGI application, shared by the uwsgi hookup
# below, the stand-alone dev server (start_server) and the CLI path
# (process_async_cli).
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
    import uwsgi
    # Before we do anything with the application, see if the
    # configs specify patching all std routines to be asynch
    # *ONLY* use this if you are going to wrap the service in
    # a wsgi container that has enabled gevent, such as
    # uwsgi with the --gevent option
    if config is not None and config.get('gevent_monkeypatch_all', False):
        print("Monkeypatching std libraries for async")
        from gevent import monkey
        monkey.patch_all()
    # Mount the application at the URL root.
    uwsgi.applications = {'': application}
except ImportError:
    # Not available outside of wsgi, ignore
    pass
# Handle to the child started by start_server(newprocess=True);
# None whenever no subprocess server is running.
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    """Start the HTTP server.

    With the defaults the server binds to *host* on a system-assigned
    port and serves in the current thread, so the call blocks until
    interrupted.  Passing ``newprocess=True`` instead serves from a
    daemonized child process, which allows ``stop_server()`` to be used
    later and lets the bound port number be returned to the caller.
    """
    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    server = make_server(host, port, application)
    # Re-read the port: asking for port 0 yields a system-assigned one.
    port = server.server_address[1]
    print("Listening on port %s" % port)
    if not newprocess:
        server.serve_forever()
    else:
        _proc = Process(target=server.serve_forever)
        _proc.daemon = True
        _proc.start()
    return port
def stop_server():
    """Stop a server started with ``start_server(newprocess=True)``.

    Raises:
        RuntimeError: if no subprocess server is currently running.
    """
    global _proc
    if _proc is None:
        # Guard: terminate() on None would raise a confusing
        # AttributeError; fail with an explicit message instead.
        raise RuntimeError('server is not running')
    _proc.terminate()
    _proc = None
def process_async_cli(input_file_path, output_file_path, token):
    """Run a single JSON-RPC request from a file and write the response.

    Reads the request from *input_file_path*, fills in any missing
    'version'/'id' fields, optionally authenticates with *token*,
    dispatches the call through the global application, and writes the
    JSON response to *output_file_path*.  Returns 0 on success or 500
    when the response carries an error.
    """
    with open(input_file_path) as data_file:
        req = json.load(data_file)
    # Default protocol fields for requests that omit them.
    req.setdefault('version', '1.1')
    if 'id' not in req:
        req['id'] = str(_random.random())[2:]
    ctx = MethodContext(application.userlog)
    if token:
        user = application.auth_client.get_user(token)
        ctx['user_id'] = user
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    ctx['CLI'] = 1
    ctx['module'], ctx['method'] = req['method'].split('.')
    ctx['provenance'] = [{'service': ctx['module'],
                          'method': ctx['method'],
                          'method_params': req['params']}]
    try:
        resp = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': jre.code,
                          'name': jre.message,
                          'message': jre.data,
                          'error': getattr(jre, 'trace', None)}}
    except Exception:
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': 0,
                          'name': 'Unexpected Server Error',
                          'message': 'An unexpected server error occurred',
                          'error': traceback.format_exc()}}
    exit_code = 500 if 'error' in resp else 0
    with open(output_file_path, "w") as f:
        f.write(json.dumps(resp, cls=JSONObjectEncoder))
    return exit_code
if __name__ == "__main__":
    # CLI mode: "script INPUT OUTPUT [TOKEN|TOKEN_FILE]" runs a single
    # async request and exits with its status code.
    if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
            os.path.isfile(sys.argv[1])):
        token = None
        if len(sys.argv) == 4:
            # Third argument is either a file containing the token or
            # the token itself.
            if os.path.isfile(sys.argv[3]):
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
            else:
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    # Server mode: optional --port/--host flags, then block in the server.
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print("Host set to %s" % host)
        else:
            assert False, "unhandled option"
    start_server(host=host, port=port)
    # print("Listening on port %s" % port)
    # httpd = make_server( host, port, application)
    #
    # httpd.serve_forever()
|
word2vec.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec mini-batched skip-gram model.
Trains the model described in:
(Mikolov, et. al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does traditional minibatching.
The key ops used are:
* placeholder for feeding in tensors for each example.
* embedding_lookup for fetching rows from the embedding matrix.
* sigmoid_cross_entropy_with_logits to calculate the loss.
* GradientDescentOptimizer for optimizing the loss.
* skipgram custom op that does input processing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
word2vec = tf.load_op_library(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'word2vec_ops.so'))
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model and "
"training summaries.")
flags.DEFINE_string("train_data", None, "Training text file. "
"E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
"eval_data", None, "File consisting of analogies of four tokens."
"embedding 2 - embedding 1 + embedding 3 should be close "
"to embedding 4."
"See README.md for how to get 'questions-words.txt'.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 15,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.2, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 100,
"Negative samples per training example.")
flags.DEFINE_integer("batch_size", 16,
"Number of training examples processed per step "
"(size of a minibatch).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
"The number of words to predict to the left and right "
"of the target word.")
flags.DEFINE_integer("min_count", 5,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_boolean(
"interactive", False,
"If true, enters an IPython interactive session to play with the trained "
"model. E.g., try model.analogy(b'france', b'paris', b'russia') and "
"model.nearby([b'proton', b'elephant', b'maxwell'])")
flags.DEFINE_integer("statistics_interval", 5,
"Print statistics every n seconds.")
flags.DEFINE_integer("summary_interval", 5,
"Save training summary to file every n seconds (rounded "
"up to statistics interval).")
flags.DEFINE_integer("checkpoint_interval", 600,
"Checkpoint the model (i.e. save the parameters) every n "
"seconds (rounded up to statistics interval).")
FLAGS = flags.FLAGS
class Options(object):
  """Options used by our word2vec model."""
  def __init__(self):
    # Model options.
    # Embedding dimension.
    self.emb_dim = FLAGS.embedding_size
    # Training options.
    # The training text file.
    self.train_data = FLAGS.train_data
    # Number of negative samples per example.
    self.num_samples = FLAGS.num_neg_samples
    # The initial learning rate.
    self.learning_rate = FLAGS.learning_rate
    # Number of epochs to train. After these many epochs, the learning
    # rate decays linearly to zero and the training stops.
    self.epochs_to_train = FLAGS.epochs_to_train
    # Concurrent training steps.
    self.concurrent_steps = FLAGS.concurrent_steps
    # Number of examples for one training step.
    self.batch_size = FLAGS.batch_size
    # The number of words to predict to the left and right of the target word.
    self.window_size = FLAGS.window_size
    # The minimum number of word occurrences for it to be included in the
    # vocabulary.
    self.min_count = FLAGS.min_count
    # Subsampling threshold for word occurrence.
    self.subsample = FLAGS.subsample
    # How often to print statistics.
    self.statistics_interval = FLAGS.statistics_interval
    # How often to write to the summary file (rounds up to the nearest
    # statistics_interval).
    self.summary_interval = FLAGS.summary_interval
    # How often to write checkpoints (rounds up to the nearest statistics
    # interval).
    self.checkpoint_interval = FLAGS.checkpoint_interval
    # Where to write out summaries.
    self.save_path = FLAGS.save_path
    # NOTE(review): exists-then-makedirs is racy and assumes --save_path
    # was provided; main() validates the flag before constructing Options.
    if not os.path.exists(self.save_path):
      os.makedirs(self.save_path)
    # Eval options.
    # The text file for eval.
    self.eval_data = FLAGS.eval_data
class Word2Vec(object):
  """Word2Vec model (Skipgram)."""
  def __init__(self, options, session):
    """Build the training and eval graphs and save the vocabulary.

    Args:
      options: an Options instance holding all hyperparameters.
      session: the tf.Session graph nodes will be run in.
    """
    self._options = options
    self._session = session
    self._word2id = {}
    self._id2word = []
    # Order matters: build_graph() populates the vocabulary and self._emb,
    # which build_eval_graph() and save_vocab() rely on.
    self.build_graph()
    self.build_eval_graph()
    self.save_vocab()
  def read_analogies(self):
    """Reads through the analogy question file.
    Skips comment lines (starting with b":") and questions containing
    out-of-vocabulary words, reports counts, and stores the remaining
    questions as an [n, 4] int32 array in self._analogy_questions.
    """
    questions = []
    questions_skipped = 0
    with open(self._options.eval_data, "rb") as analogy_f:
      for line in analogy_f:
        if line.startswith(b":"):  # Skip comments.
          continue
        words = line.strip().lower().split(b" ")
        # None id means the word is not in the vocabulary.
        ids = [self._word2id.get(w.strip()) for w in words]
        if None in ids or len(ids) != 4:
          questions_skipped += 1
        else:
          questions.append(np.array(ids))
    print("Eval analogy file: ", self._options.eval_data)
    print("Questions: ", len(questions))
    print("Skipped: ", questions_skipped)
    self._analogy_questions = np.array(questions, dtype=np.int32)
  def forward(self, examples, labels):
    """Build the graph for the forward pass.
    Args:
      examples: int tensor of center-word ids, one per batch element.
      labels: int tensor of context-word ids, one per batch element.
    Returns:
      (true_logits, sampled_logits) for the true pairs and the sampled
      negatives respectively.
    """
    opts = self._options
    # Declare all variables we need.
    # Embedding: [vocab_size, emb_dim]
    init_width = 0.5 / opts.emb_dim
    emb = tf.Variable(
        tf.random_uniform(
            [opts.vocab_size, opts.emb_dim], -init_width, init_width),
        name="emb")
    self._emb = emb
    # Softmax weight: [vocab_size, emb_dim]. Transposed.
    sm_w_t = tf.Variable(
        tf.zeros([opts.vocab_size, opts.emb_dim]),
        name="sm_w_t")
    # Softmax bias: [vocab_size].
    sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name="sm_b")
    # Global step: scalar, i.e., shape [].
    self.global_step = tf.Variable(0, name="global_step")
    # Nodes to compute the nce loss w/ candidate sampling.
    labels_matrix = tf.reshape(
        tf.cast(labels,
                dtype=tf.int64),
        [opts.batch_size, 1])
    # Negative sampling.
    sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
        true_classes=labels_matrix,
        num_true=1,
        num_sampled=opts.num_samples,
        unique=True,
        range_max=opts.vocab_size,
        distortion=0.75,
        unigrams=opts.vocab_counts.tolist()))
    # Embeddings for examples: [batch_size, emb_dim]
    example_emb = tf.nn.embedding_lookup(emb, examples)
    # Weights for labels: [batch_size, emb_dim]
    true_w = tf.nn.embedding_lookup(sm_w_t, labels)
    # Biases for labels: [batch_size, 1]
    true_b = tf.nn.embedding_lookup(sm_b, labels)
    # Weights for sampled ids: [num_sampled, emb_dim]
    sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
    # Biases for sampled ids: [num_sampled, 1]
    sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)
    # True logits: [batch_size, 1]
    true_logits = tf.reduce_sum(tf.multiply(example_emb, true_w), 1) + true_b
    # Sampled logits: [batch_size, num_sampled]
    # We replicate sampled noise labels for all examples in the batch
    # using the matmul.
    sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
    sampled_logits = tf.matmul(example_emb,
                               sampled_w,
                               transpose_b=True) + sampled_b_vec
    return true_logits, sampled_logits
  def nce_loss(self, true_logits, sampled_logits):
    """Build the graph for the NCE loss."""
    # cross-entropy(logits, labels)
    opts = self._options
    true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(true_logits), logits=true_logits)
    sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.zeros_like(sampled_logits), logits=sampled_logits)
    # NCE-loss is the sum of the true and noise (sampled words)
    # contributions, averaged over the batch.
    nce_loss_tensor = (tf.reduce_sum(true_xent) +
                       tf.reduce_sum(sampled_xent)) / opts.batch_size
    return nce_loss_tensor
  def optimize(self, loss):
    """Build the graph to optimize the loss function."""
    # Optimizer nodes.
    # Linear learning rate decay.
    opts = self._options
    words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
    # Decay linearly with the number of words processed, floored at 1e-4
    # of the initial rate.
    lr = opts.learning_rate * tf.maximum(
        0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
    self._lr = lr
    optimizer = tf.train.GradientDescentOptimizer(lr)
    train = optimizer.minimize(loss,
                               global_step=self.global_step,
                               gate_gradients=optimizer.GATE_NONE)
    self._train = train
  def build_eval_graph(self):
    """Build the eval graph."""
    # Eval graph
    # Each analogy task is to predict the 4th word (d) given three
    # words: a, b, c. E.g., a=italy, b=rome, c=france, we should
    # predict d=paris.
    # The eval feeds three vectors of word ids for a, b, c, each of
    # which is of size N, where N is the number of analogies we want to
    # evaluate in one batch.
    analogy_a = tf.placeholder(dtype=tf.int32)  # [N]
    analogy_b = tf.placeholder(dtype=tf.int32)  # [N]
    analogy_c = tf.placeholder(dtype=tf.int32)  # [N]
    # Normalized word embeddings of shape [vocab_size, emb_dim].
    nemb = tf.nn.l2_normalize(self._emb, 1)
    # Each row of a_emb, b_emb, c_emb is a word's embedding vector.
    # They all have the shape [N, emb_dim]
    a_emb = tf.gather(nemb, analogy_a)  # a's embs
    b_emb = tf.gather(nemb, analogy_b)  # b's embs
    c_emb = tf.gather(nemb, analogy_c)  # c's embs
    # We expect that d's embedding vectors on the unit hyper-sphere is
    # near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
    target = c_emb + (b_emb - a_emb)
    # Compute cosine distance between each pair of target and vocab.
    # dist has shape [N, vocab_size].
    dist = tf.matmul(target, nemb, transpose_b=True)
    # For each question (row in dist), find the top 4 words.
    _, pred_idx = tf.nn.top_k(dist, 4)
    # Nodes for computing neighbors for a given word according to
    # their cosine distance.
    nearby_word = tf.placeholder(dtype=tf.int32)  # word id
    nearby_emb = tf.gather(nemb, nearby_word)
    nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
    nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
                                         min(1000, self._options.vocab_size))
    # Nodes in the construct graph which are used by training and
    # evaluation to run/feed/fetch.
    self._analogy_a = analogy_a
    self._analogy_b = analogy_b
    self._analogy_c = analogy_c
    self._analogy_pred_idx = pred_idx
    self._nearby_word = nearby_word
    self._nearby_val = nearby_val
    self._nearby_idx = nearby_idx
  def build_graph(self):
    """Build the graph for the full model."""
    opts = self._options
    # The training data. A text file.
    (words, counts, words_per_epoch, self._epoch, self._words, examples,
     labels) = word2vec.skipgram_word2vec(filename=opts.train_data,
                                          batch_size=opts.batch_size,
                                          window_size=opts.window_size,
                                          min_count=opts.min_count,
                                          subsample=opts.subsample)
    # Run the vocab-producing ops once to materialize the vocabulary.
    (opts.vocab_words, opts.vocab_counts,
     opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
    opts.vocab_size = len(opts.vocab_words)
    print("Data file: ", opts.train_data)
    print("Vocab size: ", opts.vocab_size - 1, " + UNK")
    print("Words per epoch: ", opts.words_per_epoch)
    self._examples = examples
    self._labels = labels
    self._id2word = opts.vocab_words
    for i, w in enumerate(self._id2word):
      self._word2id[w] = i
    true_logits, sampled_logits = self.forward(examples, labels)
    loss = self.nce_loss(true_logits, sampled_logits)
    tf.summary.scalar("NCE loss", loss)
    self._loss = loss
    self.optimize(loss)
    # Properly initialize all variables.
    tf.global_variables_initializer().run()
    self.saver = tf.train.Saver()
  def save_vocab(self):
    """Save the vocabulary to a file so the model can be reloaded."""
    opts = self._options
    with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
      for i in xrange(opts.vocab_size):
        # NOTE(review): on Python 3 this interpolates a bytes object via
        # %s, writing e.g. "b'word'" into the text file — confirm whether
        # the file is only consumed by Python 2 tooling.
        vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode("utf-8")
        f.write("%s %d\n" % (vocab_word,
                             opts.vocab_counts[i]))
  def _train_thread_body(self):
    # Worker loop: keep running training steps until the epoch counter
    # advances past the epoch this thread started in.
    initial_epoch, = self._session.run([self._epoch])
    while True:
      _, epoch = self._session.run([self._train, self._epoch])
      if epoch != initial_epoch:
        break
  def train(self):
    """Train the model for one epoch with concurrent worker threads."""
    opts = self._options
    initial_epoch, initial_words = self._session.run([self._epoch, self._words])
    summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(opts.save_path, self._session.graph)
    workers = []
    for _ in xrange(opts.concurrent_steps):
      t = threading.Thread(target=self._train_thread_body)
      t.start()
      workers.append(t)
    last_words, last_time, last_summary_time = initial_words, time.time(), 0
    last_checkpoint_time = 0
    # Monitoring loop: the workers do the actual training; this thread
    # only prints statistics and writes summaries/checkpoints.
    while True:
      time.sleep(opts.statistics_interval)  # Reports our progress once a while.
      (epoch, step, loss, words, lr) = self._session.run(
          [self._epoch, self.global_step, self._loss, self._words, self._lr])
      now = time.time()
      # The RHS tuple is evaluated before assignment, so 'rate' uses the
      # previous last_words/last_time values.
      last_words, last_time, rate = words, now, (words - last_words) / (
          now - last_time)
      print("Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\r" %
            (epoch, step, lr, loss, rate), end="")
      sys.stdout.flush()
      if now - last_summary_time > opts.summary_interval:
        summary_str = self._session.run(summary_op)
        summary_writer.add_summary(summary_str, step)
        last_summary_time = now
      if now - last_checkpoint_time > opts.checkpoint_interval:
        self.saver.save(self._session,
                        os.path.join(opts.save_path, "model.ckpt"),
                        global_step=step.astype(int))
        last_checkpoint_time = now
      if epoch != initial_epoch:
        break
    for t in workers:
      t.join()
    return epoch
  def _predict(self, analogy):
    """Predict the top 4 answers for analogy questions."""
    idx, = self._session.run([self._analogy_pred_idx], {
        self._analogy_a: analogy[:, 0],
        self._analogy_b: analogy[:, 1],
        self._analogy_c: analogy[:, 2]
    })
    return idx
  def eval(self):
    """Evaluate analogy questions and reports accuracy."""
    # How many questions we get right at precision@1.
    correct = 0
    try:
      total = self._analogy_questions.shape[0]
    except AttributeError as e:
      # read_analogies() must be called before eval().
      raise AttributeError("Need to read analogy questions.")
    start = 0
    # Process the questions in batches of 2500.
    while start < total:
      limit = start + 2500
      sub = self._analogy_questions[start:limit, :]
      idx = self._predict(sub)
      start = limit
      for question in xrange(sub.shape[0]):
        for j in xrange(4):
          if idx[question, j] == sub[question, 3]:
            # Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
            correct += 1
            break
          elif idx[question, j] in sub[question, :3]:
            # We need to skip words already in the question.
            continue
          else:
            # The correct label is not the precision@1
            break
    print()
    print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
                                              correct * 100.0 / total))
  def analogy(self, w0, w1, w2):
    """Predict word w3 as in w0:w1 vs w2:w3."""
    wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
    idx = self._predict(wid)
    for c in [self._id2word[i] for i in idx[0, :]]:
      if c not in [w0, w1, w2]:
        print(c)
        break
    # NOTE(review): "unknown" is printed unconditionally, even after a
    # successful prediction above — this looks like it was meant to be a
    # for/else (or an early return); confirm intended output.
    print("unknown")
  def nearby(self, words, num=20):
    """Prints out nearby words given a list of words."""
    ids = np.array([self._word2id.get(x, 0) for x in words])
    vals, idx = self._session.run(
        [self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
    for i in xrange(len(words)):
      print("\n%s\n=====================================" % (words[i]))
      for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
        print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
  """Drop into an IPython shell seeded with globals() and *local_ns*."""
  # Imported lazily so IPython is only required in --interactive mode.
  import IPython
  namespace = {}
  if local_ns:
    namespace.update(local_ns)
  namespace.update(globals())
  IPython.start_ipython(argv=[], user_ns=namespace)
def main(_):
  """Train a word2vec model."""
  # All three paths are required; bail out early with a usage hint.
  if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
    print("--train_data --eval_data and --save_path must be specified.")
    sys.exit(1)
  opts = Options()
  with tf.Graph().as_default(), tf.Session() as session:
    with tf.device("/cpu:0"):
      model = Word2Vec(opts, session)
      model.read_analogies()  # Read analogy questions
    for _ in xrange(opts.epochs_to_train):
      model.train()  # Process one epoch
      model.eval()  # Eval analogies.
    # Perform a final save.
    model.saver.save(session,
                     os.path.join(opts.save_path, "model.ckpt"),
                     global_step=model.global_step)
    if FLAGS.interactive:
      # E.g.,
      # [0]: model.analogy(b'france', b'paris', b'russia')
      # [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
      _start_shell(locals())
if __name__ == "__main__":
tf.app.run()
|
test_thread.py | """TestCases for multi-threaded access to a DB.
"""
import os
import sys
import time
import errno
from random import random
DASH = '-'
try:
WindowsError
except NameError:
class WindowsError(Exception):
pass
import unittest
from test_all import db, dbutils, test_support, verbose, have_threads, \
get_new_environment_path, get_new_database_path
if have_threads :
from threading import Thread
if sys.version_info[0] < 3 :
from threading import currentThread
else :
from threading import current_thread as currentThread
#----------------------------------------------------------------------
class BaseThreadedTestCase(unittest.TestCase):
    # Shared fixture for all threaded Berkeley DB tests: creates a fresh
    # environment plus one database per test, and removes them afterwards.
    dbtype = db.DB_UNKNOWN  # must be set in derived class
    dbopenflags = 0
    dbsetflags = 0
    envflags = 0
    def setUp(self):
        # Route deadlock-wrapper diagnostics to stdout when verbose.
        if verbose:
            dbutils._deadlock_VerboseFile = sys.stdout
        # Open a new environment, let subclasses tweak it via setEnvOpts(),
        # then open the database inside it.
        self.homeDir = get_new_environment_path()
        self.env = db.DBEnv()
        self.setEnvOpts()
        self.env.open(self.homeDir, self.envflags | db.DB_CREATE)
        self.filename = self.__class__.__name__ + '.db'
        self.d = db.DB(self.env)
        if self.dbsetflags:
            self.d.set_flags(self.dbsetflags)
        self.d.open(self.filename, self.dbtype, self.dbopenflags|db.DB_CREATE)
    def tearDown(self):
        # Close the DB before the environment, then delete the on-disk dir.
        self.d.close()
        self.env.close()
        test_support.rmtree(self.homeDir)
    def setEnvOpts(self):
        # Hook for subclasses to configure the environment before env.open().
        pass
    def makeData(self, key):
        # Deterministic value derived from the key, e.g. 'k-k-k-k-k'.
        return DASH.join([key] * 5)
#----------------------------------------------------------------------
class ConcurrentDataStoreBase(BaseThreadedTestCase):
dbopenflags = db.DB_THREAD
envflags = db.DB_THREAD | db.DB_INIT_CDB | db.DB_INIT_MPOOL
readers = 0 # derived class should set
writers = 0
records = 1000
def test01_1WriterMultiReaders(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test01_1WriterMultiReaders..." % \
self.__class__.__name__
keys=range(self.records)
import random
random.shuffle(keys)
records_per_writer=self.records//self.writers
readers_per_writer=self.readers//self.writers
self.assertEqual(self.records,self.writers*records_per_writer)
self.assertEqual(self.readers,self.writers*readers_per_writer)
self.assertEqual(records_per_writer%readers_per_writer, 0)
readers = []
for x in xrange(self.readers):
rt = Thread(target = self.readerThread,
args = (self.d, x),
name = 'reader %d' % x,
)#verbose = verbose)
if sys.version_info[0] < 3 :
rt.setDaemon(True)
else :
rt.daemon = True
readers.append(rt)
writers=[]
for x in xrange(self.writers):
a=keys[records_per_writer*x:records_per_writer*(x+1)]
a.sort() # Generate conflicts
b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
wt = Thread(target = self.writerThread,
args = (self.d, a, b),
name = 'writer %d' % x,
)#verbose = verbose)
writers.append(wt)
for t in writers:
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
for t in writers:
t.join()
for t in readers:
t.join()
def writerThread(self, d, keys, readers):
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
if verbose:
print "%s: creating records %d - %d" % (name, start, stop)
count=len(keys)//len(readers)
count2=count
for x in keys :
key = '%04d' % x
dbutils.DeadlockWrap(d.put, key, self.makeData(key),
max_retries=12)
if verbose and x % 100 == 0:
print "%s: records %d - %d finished" % (name, start, x)
count2-=1
if not count2 :
readers.pop().start()
count2=count
if verbose:
print "%s: finished creating records" % name
if verbose:
print "%s: thread finished" % name
def readerThread(self, d, readerNum):
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
for i in xrange(5) :
c = d.cursor()
count = 0
rec = c.first()
while rec:
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
rec = c.next()
if verbose:
print "%s: found %d records" % (name, count)
c.close()
if verbose:
print "%s: thread finished" % name
class BTreeConcurrentDataStore(ConcurrentDataStoreBase):
    # Concrete CDB test: B-tree access method.
    dbtype = db.DB_BTREE
    writers = 2
    readers = 10
    records = 1000
class HashConcurrentDataStore(ConcurrentDataStoreBase):
    # Concrete CDB test: hash access method.
    dbtype = db.DB_HASH
    writers = 2
    readers = 10
    records = 1000
#----------------------------------------------------------------------
class SimpleThreadedBase(BaseThreadedTestCase):
dbopenflags = db.DB_THREAD
envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK
readers = 10
writers = 2
records = 1000
def setEnvOpts(self):
self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
def test02_SimpleLocks(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test02_SimpleLocks..." % self.__class__.__name__
keys=range(self.records)
import random
random.shuffle(keys)
records_per_writer=self.records//self.writers
readers_per_writer=self.readers//self.writers
self.assertEqual(self.records,self.writers*records_per_writer)
self.assertEqual(self.readers,self.writers*readers_per_writer)
self.assertEqual(records_per_writer%readers_per_writer, 0)
readers = []
for x in xrange(self.readers):
rt = Thread(target = self.readerThread,
args = (self.d, x),
name = 'reader %d' % x,
)#verbose = verbose)
if sys.version_info[0] < 3 :
rt.setDaemon(True)
else :
rt.daemon = True
readers.append(rt)
writers = []
for x in xrange(self.writers):
a=keys[records_per_writer*x:records_per_writer*(x+1)]
a.sort() # Generate conflicts
b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
wt = Thread(target = self.writerThread,
args = (self.d, a, b),
name = 'writer %d' % x,
)#verbose = verbose)
writers.append(wt)
for t in writers:
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
for t in writers:
t.join()
for t in readers:
t.join()
def writerThread(self, d, keys, readers):
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
if verbose:
print "%s: creating records %d - %d" % (name, start, stop)
count=len(keys)//len(readers)
count2=count
for x in keys :
key = '%04d' % x
dbutils.DeadlockWrap(d.put, key, self.makeData(key),
max_retries=12)
if verbose and x % 100 == 0:
print "%s: records %d - %d finished" % (name, start, x)
count2-=1
if not count2 :
readers.pop().start()
count2=count
if verbose:
print "%s: thread finished" % name
def readerThread(self, d, readerNum):
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
c = d.cursor()
count = 0
rec = dbutils.DeadlockWrap(c.first, max_retries=10)
while rec:
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
rec = dbutils.DeadlockWrap(c.next, max_retries=10)
if verbose:
print "%s: found %d records" % (name, count)
c.close()
if verbose:
print "%s: thread finished" % name
class BTreeSimpleThreaded(SimpleThreadedBase):
    # Locking test over a B-tree database.
    dbtype = db.DB_BTREE
class HashSimpleThreaded(SimpleThreadedBase):
    # Locking test over a hash database.
    dbtype = db.DB_HASH
#----------------------------------------------------------------------
class ThreadedTransactionsBase(BaseThreadedTestCase):
dbopenflags = db.DB_THREAD | db.DB_AUTO_COMMIT
envflags = (db.DB_THREAD |
db.DB_INIT_MPOOL |
db.DB_INIT_LOCK |
db.DB_INIT_LOG |
db.DB_INIT_TXN
)
readers = 0
writers = 0
records = 2000
txnFlag = 0
def setEnvOpts(self):
#self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
pass
def test03_ThreadedTransactions(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test03_ThreadedTransactions..." % \
self.__class__.__name__
keys=range(self.records)
import random
random.shuffle(keys)
records_per_writer=self.records//self.writers
readers_per_writer=self.readers//self.writers
self.assertEqual(self.records,self.writers*records_per_writer)
self.assertEqual(self.readers,self.writers*readers_per_writer)
self.assertEqual(records_per_writer%readers_per_writer, 0)
readers=[]
for x in xrange(self.readers):
rt = Thread(target = self.readerThread,
args = (self.d, x),
name = 'reader %d' % x,
)#verbose = verbose)
if sys.version_info[0] < 3 :
rt.setDaemon(True)
else :
rt.daemon = True
readers.append(rt)
writers = []
for x in xrange(self.writers):
a=keys[records_per_writer*x:records_per_writer*(x+1)]
b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
wt = Thread(target = self.writerThread,
args = (self.d, a, b),
name = 'writer %d' % x,
)#verbose = verbose)
writers.append(wt)
dt = Thread(target = self.deadlockThread)
if sys.version_info[0] < 3 :
dt.setDaemon(True)
else :
dt.daemon = True
dt.start()
for t in writers:
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
for t in writers:
t.join()
for t in readers:
t.join()
self.doLockDetect = False
dt.join()
def writerThread(self, d, keys, readers):
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
count=len(keys)//len(readers)
while len(keys):
try:
txn = self.env.txn_begin(None, self.txnFlag)
keys2=keys[:count]
for x in keys2 :
key = '%04d' % x
d.put(key, self.makeData(key), txn)
if verbose and x % 100 == 0:
print "%s: records %d - %d finished" % (name, start, x)
txn.commit()
keys=keys[count:]
readers.pop().start()
except (db.DBLockDeadlockError, db.DBLockNotGrantedError), val:
if verbose:
if sys.version_info < (2, 6) :
print "%s: Aborting transaction (%s)" % (name, val[1])
else :
print "%s: Aborting transaction (%s)" % (name,
val.args[1])
txn.abort()
if verbose:
print "%s: thread finished" % name
def readerThread(self, d, readerNum):
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
finished = False
while not finished:
try:
txn = self.env.txn_begin(None, self.txnFlag)
c = d.cursor(txn)
count = 0
rec = c.first()
while rec:
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
rec = c.next()
if verbose: print "%s: found %d records" % (name, count)
c.close()
txn.commit()
finished = True
except (db.DBLockDeadlockError, db.DBLockNotGrantedError), val:
if verbose:
if sys.version_info < (2, 6) :
print "%s: Aborting transaction (%s)" % (name, val[1])
else :
print "%s: Aborting transaction (%s)" % (name,
val.args[1])
c.close()
txn.abort()
if verbose:
print "%s: thread finished" % name
def deadlockThread(self):
    # Background deadlock detector: every 50 ms aborts a random lock owner
    # among conflicting lockers, until the test clears self.doLockDetect.
    self.doLockDetect = True
    while self.doLockDetect:
        time.sleep(0.05)
        try:
            aborted = self.env.lock_detect(
                db.DB_LOCK_RANDOM, db.DB_LOCK_CONFLICT)
            if verbose and aborted:
                print "deadlock: Aborted %d deadlocked transaction(s)" \
                      % aborted
        except db.DBError:
            pass
# BTree flavour of the threaded-transactions stress test.
class BTreeThreadedTransactions(ThreadedTransactionsBase):
    dbtype = db.DB_BTREE
    writers = 2
    readers = 10
    records = 1000
# Hash flavour of the threaded-transactions stress test.
class HashThreadedTransactions(ThreadedTransactionsBase):
    dbtype = db.DB_HASH
    writers = 2
    readers = 10
    records = 1000
# BTree variant using DB_TXN_NOWAIT (fail fast instead of blocking on locks).
class BTreeThreadedNoWaitTransactions(ThreadedTransactionsBase):
    dbtype = db.DB_BTREE
    writers = 2
    readers = 10
    records = 1000
    txnFlag = db.DB_TXN_NOWAIT
# Hash variant using DB_TXN_NOWAIT (fail fast instead of blocking on locks).
class HashThreadedNoWaitTransactions(ThreadedTransactionsBase):
    dbtype = db.DB_HASH
    writers = 2
    readers = 10
    records = 1000
    txnFlag = db.DB_TXN_NOWAIT
#----------------------------------------------------------------------
def test_suite():
    # Collect the concurrency test cases; thread-based suites are only
    # included when `have_threads` (detected at import time) is true.
    suite = unittest.TestSuite()
    if have_threads:
        suite.addTest(unittest.makeSuite(BTreeConcurrentDataStore))
        suite.addTest(unittest.makeSuite(HashConcurrentDataStore))
        suite.addTest(unittest.makeSuite(BTreeSimpleThreaded))
        suite.addTest(unittest.makeSuite(HashSimpleThreaded))
        suite.addTest(unittest.makeSuite(BTreeThreadedTransactions))
        suite.addTest(unittest.makeSuite(HashThreadedTransactions))
        suite.addTest(unittest.makeSuite(BTreeThreadedNoWaitTransactions))
        suite.addTest(unittest.makeSuite(HashThreadedNoWaitTransactions))
    else:
        print "Threads not available, skipping thread tests."
    return suite
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
|
process.py | import sys
import time
import shlex
import atexit
import subprocess
from queue import Queue, Empty
from threading import Thread
from typing import List, Tuple, Callable, Optional, Generator, Union
from .log import log
def terminator(process: any, terminate_on_exit: bool=True) -> Callable[[], None]:
    """Build a closure that terminates *process*, tolerating one that
    already exited; optionally register it to run at interpreter exit."""
    def _terminate():
        log(f'Terminating subprocess: {process.pid}')
        try:
            process.terminate()
        except ProcessLookupError:
            pass  # process already gone
    if terminate_on_exit:
        atexit.register(_terminate)
    return _terminate
def split_run_args(command: str) -> List[str]:
    """Tokenize a shell command string using shlex rules."""
    tokens = shlex.split(command)
    return tokens
def escape_run_arg(run_arg: any) -> str:
    """Return *run_arg* rendered as a single shell-safe token."""
    text = str(run_arg)
    return shlex.quote(text)
def escape_run_args(run_args: List[str]) -> List[str]:
    """Shell-quote every argument in *run_args*."""
    # TODO: use shlex.join after upgrading to Python 3.8:
    # return shlex.quote(shlex.join(run_args))
    return list(map(escape_run_arg, run_args))
def escape_command(run_args: List[str]) -> str:
    """Quote a whole command so it can itself be passed as one argument."""
    # TODO: use shlex.join after upgrading to Python 3.8:
    # return shlex.quote(shlex.join(run_args))
    joined = ' '.join(escape_run_args(run_args=run_args))
    return shlex.quote(joined)
def ssh_command(run_args: List[str], user: str, host: str, escape_run_args: bool=True) -> List[str]:
    """Wrap *run_args* in an ssh invocation targeting user@host."""
    if escape_run_args:
        remote = ' '.join(escape_run_arg(a) for a in run_args)
    else:
        remote = ' '.join(run_args)
    return ['ssh', f'{user}@{host}', remote]
def script_command(script: str, shell: str='bash', should_eval: bool=False) -> List[str]:
    """Build run args that pipe *script* into *shell*, optionally via eval."""
    pipeline = f'echo {escape_run_arg(script)} | {shell}'
    return ['eval', pipeline] if should_eval else [pipeline]
def call_process(run_args: List[str], shell=False) -> int:
    """Run a subprocess to completion.

    Returns:
        int: the child's exit code.
    """
    return subprocess.call(args=run_args, shell=shell)
def read_stream(stream: any, chunk_size: int=1024, empty_sleep: float=0.01) -> Tuple[Queue, Queue, Thread]:
    """Read *stream* one byte at a time on a background daemon thread.

    Bytes are accumulated onto an output queue; any chunk the consumer has
    not yet taken is re-merged with new bytes to keep chunks large.  Putting
    a truthy value on the returned stop queue makes the reader finish
    draining the stream and then emit an empty-bytes sentinel before exiting.

    Returns:
        tuple: (output queue, stop queue, reader thread).
    """
    queue = Queue()
    stop_queue = Queue()
    def reader():
        stop = False
        while True:
            try:
                stop = stop_queue.get_nowait()
                time.sleep(empty_sleep)
            except Empty:
                pass
            output = stream.read(1)
            if not output:
                if stop:
                    queue.put(b'')  # sentinel: no more data will follow
                    break
                else:
                    time.sleep(empty_sleep)
                    continue
            try:
                # Re-merge with any not-yet-consumed chunk.
                queue_bytes = queue.get_nowait()
                queue.task_done()
                output = queue_bytes + output
            except Empty:
                pass
            queue.put(output)
            if len(output) >= chunk_size:
                # Back-pressure: wait for the consumer to take a full chunk.
                queue.join()
    thread = Thread(target=reader)
    # Fix: Thread.setDaemon() is deprecated (and removed in newer Pythons);
    # assign the daemon attribute instead, matching usage elsewhere.
    thread.daemon = True
    thread.start()
    return (queue, stop_queue, thread)
def run_process_output(run_args: List[str], shell=False) -> Tuple[int, bytes, bytes]:
    """Run a process to completion, capturing both output streams.

    Returns:
        tuple: (return code, stdout bytes, stderr bytes).
    """
    completed = subprocess.run(
        args=run_args,
        shell=shell,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    return (completed.returncode, completed.stdout, completed.stderr)
def spawn_process(run_args: List[str], terminate_on_exit: bool=True) -> Tuple[subprocess.Popen, Callable[[], None]]:
    """Start a subprocess without waiting for it.

    Returns:
        tuple: (the Popen object, a callable that terminates it).
    """
    child = subprocess.Popen(args=run_args)
    terminate = terminator(process=child, terminate_on_exit=terminate_on_exit)
    return (child, terminate)
def run_process(run_args: List[str], terminate_on_exit: bool=True, chunk_size: int=1024, message_delimiters: List[bytes]=[b'\n'], encoding: Optional[str]=None, echo: bool=False, empty_sleep: float=0.01) -> Tuple[subprocess.Popen, Callable[[], None], Generator[Tuple[Optional[bytes], List[Union[bytes, str]], bool], bytes, int]]:
    """Runs a subprocess, without blocking and supports two-way interaction.

    Returns:
        tuple: the process, its terminate function, and a generator yielding
        (output bytes, parsed messages, is-stderr flag) tuples; sending bytes
        into the generator writes them to the child's stdin, and the
        generator's return value is the child's exit code.
    """
    process = subprocess.Popen(run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    terminate = terminator(process=process, terminate_on_exit=terminate_on_exit)
    def output():
        def handle_input(input_bytes: Optional[Union[bytes, str]]):
            # Bytes sent into the generator are forwarded to the child.
            if input_bytes is not None:
                process.stdin.write(input_bytes)
                process.stdin.flush()
        # Per-stream tail that did not yet end with a delimiter.
        partial_messages = {
            False: b'',
            True: b'',
        }
        readers = {
            False: read_stream(stream=process.stdout, chunk_size=chunk_size, empty_sleep=empty_sleep),
            True: read_stream(stream=process.stderr, chunk_size=chunk_size, empty_sleep=empty_sleep),
        }
        is_stderr = True
        def parse_messages(output_bytes: bytes):
            nonlocal partial_messages
            nonlocal is_stderr
            messages = [partial_messages[is_stderr] + output_bytes]
            for delimiter in message_delimiters:
                # Fix: split the accumulated messages; the original iterated
                # message_delimiters here, discarding the actual output.
                messages = [m for o in messages for m in o.split(delimiter)]
            partial_messages[is_stderr] = messages[-1]
            messages = messages[:-1]
            if encoding:
                # Fix: materialize the decoded list; a lazy map object would
                # be consumed by the first reader (e.g. the echo branch).
                messages = [m.decode(encoding) for m in messages]
            return messages
        input_bytes = yield (b'', [], False)
        handle_input(input_bytes)
        return_code = None
        empty_countdown = 2
        stop_countdown = 2
        while True:
            # Alternate between stdout and stderr each iteration.
            is_stderr = not is_stderr
            try:
                output = readers[is_stderr][0].get_nowait()
                readers[is_stderr][0].task_done()
                if not output:
                    stop_countdown -= 1
            except Empty:
                if return_code is not None and stop_countdown:
                    time.sleep(empty_sleep)
                    continue
                else:
                    output = b''
            if output == b'':
                empty_countdown -= 1
                if empty_countdown:
                    continue
                time.sleep(empty_sleep)
                if return_code is None:
                    return_code = process.poll()
                    empty_countdown = 2
                    if return_code is not None:
                        # Child exited: ask both reader threads to wind down.
                        for _, reader in readers.items():
                            reader[1].put(True)
                    continue
                else:
                    break
            empty_countdown = 2
            messages = parse_messages(output)
            if echo:
                if encoding:
                    if messages:
                        print('\n'.join(messages))
                        sys.stdout.flush()
                else:
                    string_output = '\n'.join(str(b)[2:-1] for b in output.split(b'\n'))
                    print(string_output, end='')
                    sys.stdout.flush()
            # Fix: yield the parsed messages; the original yielded the
            # message_delimiters list by mistake.
            input_bytes = yield (output, messages, is_stderr)
            handle_input(input_bytes)
        if terminate_on_exit:
            atexit.unregister(terminate)
        # Flush any trailing partial message from either stream.
        for is_stderr in (False, True):
            if partial_messages[is_stderr] == b'':
                continue
            last_messages = [partial_messages[is_stderr].decode(encoding)] if encoding else [partial_messages[is_stderr]]
            yield (b'', last_messages, is_stderr)
        return return_code
    return (process, terminate, output())
def run_process_combined(run_args: List[str], on_output: Optional[Callable[[subprocess.Popen, str, bytes], Optional[bytes]]]=None, echo: bool=False) -> Tuple[int, str, bytes]:
    """Runs a subprocess with stderr merged into stdout, supporting two-way
    interaction through the optional on_output callback.

    This function is a simpler implementation of run_process() at the cost
    of combining stdout and stderr.

    Returns:
        tuple: (return code, collected output string, collected output bytes).
    """
    child = subprocess.Popen(run_args, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, stdin=subprocess.PIPE)
    collected_bytes = b''
    collected_output = ''
    while True:
        byte = child.stdout.read(1)
        if byte == b'':
            if child.poll() is None:
                continue  # stream momentarily empty, child still running
            break
        collected_bytes += byte
        text = '\n' if byte == b'\n' else str(byte)[2:-1]
        collected_output += text
        if echo:
            print(text, end='')
            sys.stdout.flush()
        if not on_output:
            continue
        reply = on_output(child, collected_output, collected_bytes)
        if reply is None:
            continue
        child.stdin.write(reply)
        child.stdin.flush()
        if echo:
            print(str(reply)[2:-1], end='')
    return (child.poll(), collected_output, collected_bytes)
pimp3clock.py | #!/usr/bin/python
from Adafruit_CharLCDPlate import Adafruit_CharLCDPlate
from subprocess import *
from time import sleep, strftime
from datetime import datetime
from mpd import *
import threading
import signal
import sys
import os
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import json
# Custom-character slot indices for the LCD (also used as playback-state
# codes) plus the backlight constant passed to lcd.backlight().
PLAY=0
PAUSE=1
STOP=2
VOL=3
LCDon=4
lcd = Adafruit_CharLCDPlate() # create LCD object
client = MPDClient() # create MPD client object
lock = threading.Lock()  # serializes all MPD/LCD access across threads
home=os.path.dirname(os.path.realpath(__file__))
class pimp3clock_HTTPRequesthandler(BaseHTTPRequestHandler):
    # Web UI/API for the MPD clock: serves static files from ./web and a
    # small JSON control API; all MPD access is serialized via `lock`.
    def do_GET(self):
        try:
            if '?' in self.path:
                self.path,q = self.path.split('?', 1)
            if self.path.endswith(".js") or self.path.endswith(".css") or self.path.endswith(".png") or self.path.endswith(".gif") or self.path.endswith(".html"):
                # Static assets served relative to <script dir>/web.
                f = open(home + "/web/" + self.path)
                self.send_response(200)
                if self.path.endswith(".js"):
                    self.send_header('Content-type', 'text/javascript')
                elif self.path.endswith(".css"):
                    self.send_header('Content-type', 'text/css')
                elif self.path.endswith(".png"):
                    self.send_header('Content-type', 'image/png')
                elif self.path.endswith(".gif"):
                    self.send_header('Content-type', 'image/gif')
                elif self.path.endswith(".html"):
                    self.send_header('Content-type', 'text/html')
                self.end_headers()
                self.wfile.write(f.read())
                f.close()
                return
            elif self.path.endswith(".json"):
                # JSON control API.
                self.send_response(200)
                self.send_header('Content-type', 'text/javascript')
                self.end_headers()
                if self.path.endswith("status.json"):
                    lock.acquire()
                    song = client.currentsong()
                    status = client.status()
                    lock.release()
                    self.wfile.write(json.dumps({'song': song, 'status': status}))
                    return
                elif self.path.endswith("select.json"):
                    # Toggle play/pause (or start playback when stopped).
                    lock.acquire()
                    status = client.status()
                    if status['state'] == "stop":
                        client.play()
                    elif status['state'] == "play":
                        client.pause(1)
                    elif status['state'] == "pause":
                        client.pause(0)
                    lock.release()
                    self.wfile.write(json.dumps("OK"))
                    return
                elif self.path.endswith("next.json"):
                    lock.acquire()
                    client.next()
                    lock.release()
                    self.wfile.write(json.dumps("OK"))
                    return
                elif self.path.endswith("previous.json"):
                    lock.acquire()
                    client.previous()
                    lock.release()
                    self.wfile.write(json.dumps("OK"))
                    return
                elif self.path.endswith("volume.json"):
                    key, value = q.split('=',1)
                    # NOTE(review): `value` is still a string here, so this
                    # int comparison never clamps it — confirm whether
                    # int(value) was intended.
                    if (value < 1):
                        value=1
                    lock.acquire()
                    client.setvol(value)
                    lock.release()
                    self.wfile.write(json.dumps("OK"))
                    return
                elif self.path.endswith("update.json"):
                    lock.acquire()
                    mpd_update()
                    lock.release()
                    return
                elif self.path.endswith("background.json"):
                    lock.acquire()
                    key, value = q.split('=',1)
                    # NOTE(review): assigns a local LCDon; the module-level
                    # LCDon constant is not updated.
                    LCDon=int(value)
                    lcd.backlight(LCDon)
                    lock.release()
                    return
                return
            else:
                # Anything else redirects to the main page.
                self.send_response(301)
                self.send_header('Location', 'index.html')
                self.end_headers()
                return
            return
        except IOError:
            self.send_error(404,'File Not Found: {0} (Home: {1})'.format(self.path, home))
    def do_POST(self):
        # POST is not used; just acknowledge and ignore.
        try:
            print "POST"
        except:
            pass
def mpd_update():
    # Load Database into current playlist
    # Rescan the MPD database, rebuild the playlist from every file in it,
    # and enable random/shuffle playback with a 2 s crossfade.
    client.update()
    client.clear()
    database=client.listall("/")
    for (i) in range(len(database)):
        if 'file' in database[i]:
            client.add(database[i]['file'])
    client.random(1)
    client.shuffle(1)
    client.crossfade(2)
def display_lcd(title_a,st_a,vol_a):
    # LCD refresh loop (runs on its own thread).  The three one-element
    # lists are shared mailboxes written by main_loop(): current title,
    # playback state (PLAY/PAUSE/STOP) and volume (0-100).  Alternates
    # every 0.5 s between a clock frame and a scrolling title frame, and
    # turns the backlight off after a delay while paused/stopped.
    LCDoff=lcd.OFF
    LCDState=LCDoff  # NOTE(review): assigned but never used
    LCDOffDelay=30
    LCDOffCountdown=LCDOffDelay
    lcd.backlight(LCDon)
    lcd.clear()
    lcd.begin(16,1)
    # 5x8 custom glyphs for the state icons.
    play=[
        0b10000,
        0b11000,
        0b11100,
        0b11110,
        0b11100,
        0b11000,
        0b10000,
        0b00000
    ]
    lcd.createChar(PLAY,play)
    pause=[
        0b11011,
        0b11011,
        0b11011,
        0b11011,
        0b11011,
        0b11011,
        0b11011,
        0b11011
    ]
    lcd.createChar(PAUSE,pause)
    stop=[
        0b00000,
        0b11111,
        0b10001,
        0b10001,
        0b10001,
        0b10001,
        0b11111,
        0b00000
    ]
    lcd.createChar(STOP,stop)
    t=0       # frame counter (even = clock, odd = title)
    i=0       # scroll offset into the title
    fr=1      # scroll direction: 1 = forward, 0 = backward
    oldtitle=""
    while 1:
        lock.acquire()
        # Rebuild the volume-bar glyph for the current volume level.
        vol=[]
        vol.append([0b00000,0b00000,0b00000,0b00000,0b00000,0b00000,0b00000,0b00000])
        vol.append([0b00000,0b00000,0b00000,0b00000,0b00000,0b00000,0b10000,0b10000])
        vol.append([0b00000,0b00000,0b00000,0b00000,0b00000,0b01000,0b11000,0b11000])
        vol.append([0b00000,0b00000,0b00000,0b00000,0b00100,0b01100,0b11100,0b11100])
        vol.append([0b00000,0b00000,0b00000,0b00010,0b00110,0b01110,0b11110,0b11110])
        vol.append([0b00000,0b00000,0b00001,0b00011,0b00111,0b01111,0b11111,0b11111])
        volbar=int((vol_a[0]+5)/(100/5))
        lcd.createChar(VOL,vol[volbar])
        try:
            if (t % 2) == 0:
                # Clock frame.
                lcd.home()
                lcd.write(VOL,True) # Special Characters
                lcd.message(datetime.now().strftime('%d.%b %H:%M:%S'))
            else:
                # Title frame with ping-pong scrolling.
                title=title_a[0]
                if title != oldtitle:
                    fr=1
                    i=0
                    oldtitle=title
                st=st_a[0]
                lcd.clear()
                lcd.write(VOL,True) # Special Characters
                lcd.message(datetime.now().strftime('%d.%b %H %M %S\n'))
                lcd.write(st,True) # Special Characters
                lcd.message('%s' % (title[i:15+i]) )
                if ((st == PAUSE) or (st == STOP)):
                    LCDOffCountdown=LCDOffCountdown-1
                else:
                    if (LCDOffCountdown==0):
                        lcd.backlight(LCDon)
                    LCDOffCountdown=LCDOffDelay
                if (LCDOffCountdown < 1):
                    lcd.backlight(LCDoff)
                    LCDOffCountdown=0
                if fr==1:
                    i=i+1
                else:
                    i=i-1
                if i>len(title)-15:
                    fr=0
                if i==0:
                    fr=1
        finally:
            lock.release()
        t=t+1
        sleep(0.5)
def webserver():
    # Blocking HTTP serve loop; started on a daemon thread by main_loop().
    server.serve_forever()
def main_loop():
    # Main control loop: starts the display and web-server threads, connects
    # to MPD, then polls the LCD plate buttons every 0.1 s and translates
    # them into MPD commands.  Shared state is passed to the display thread
    # through one-element list mailboxes.
    i=0;
    title_a=[None]
    st_a=[None]
    vol_a=[None]
    title_a[0]=""
    st_a[0]=STOP
    vol_a[0]=0
    display_thread = threading.Thread(target=display_lcd, args=(title_a,st_a,vol_a))
    display_thread.daemon=True # Causing thread to stop when main process ends.
    display_thread.start()
    webserver_thread = threading.Thread(target=webserver, args=())
    webserver_thread.daemon=True # Causing thread to stop when main process ends.
    webserver_thread.start()
    client.connect("localhost", 6600) # connect to localhost:6600
    mpd_update()
    last_button=100;  # sentinel so the first real press is not debounced away
    while 1:
        lock.acquire()
        status = client.status()
        vol_a[0]=int(status['volume'])
        lock.release()
        if (i % 5) == 0:
            # Refresh title and state icon every 5th tick (~0.5 s).
            lock.acquire()
            song = client.currentsong()
            lock.release()
            if song == {}:
                title_a[0]=""
            else:
                title_a[0]=song['artist'] + " - " + song['title']
            if status['state'] == "stop":
                st_a[0]=STOP
            elif status['state'] == "play":
                st_a[0]=PLAY
            elif status['state'] == "pause":
                st_a[0]=PAUSE
        lock.acquire()
        try:
            button = lcd.buttons()
        finally:
            lock.release()
        if ((button & 1) == 1) and (last_button != button): # SELECT
            if status['state'] == "stop":
                lock.acquire()
                client.play()
                lock.release()
            elif status['state'] == "play":
                lock.acquire()
                client.pause(1)
                lock.release()
            elif status['state'] == "pause":
                lock.acquire()
                client.pause(0)
                lock.release()
        elif ((button & 2) == 2) and (last_button != button): # RIGHT
            # NOTE(review): this MPD call is not protected by `lock`,
            # unlike every other client call here — confirm intent.
            client.next()
        elif (button & 4) == 4: # DOWN
            if int(status['volume']) >1:
                lock.acquire()
                client.setvol(int(status['volume']) - 1)
                lock.release()
        elif (button & 8) == 8: # UP
            if int(status['volume']) <100:
                lock.acquire()
                client.setvol(int(status['volume']) + 1)
                lock.release()
        elif ((button & 16) == 16) and (last_button != button): # LEFT
            lock.acquire()
            client.previous()
            lock.release()
        last_button=button
        i=i+1;
        sleep(0.1)
def shutdown():
    # Stop playback, close the MPD connection, and blank the LCD.
    client.stop()
    client.close() # send the close command
    client.disconnect() # disconnect from the server
    lcd.clear();
    lcd.stop();
def sig_handler(signum = None, frame = None):
    # Signal handler: clean up hardware/MPD state and exit.
    shutdown()
    sys.exit(0)
try:
    # Install signal handlers, create the control web server on port 80
    # (requires root), and enter the button-polling loop.
    for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]:
        signal.signal(sig, sig_handler)
    server = HTTPServer(('',80), pimp3clock_HTTPRequesthandler)
    main_loop()
except (KeyboardInterrupt, SystemExit):
    shutdown()
|
geneticOptimization.py | import numpy as np
from numpy import random
from functools import reduce
import Pucks
import Gates
import compare_func
from checkFeasibility import *
import repair
import multiprocessing as mp
import os
import time
import copy
# from multiprocessing import Pro
# Genetic-algorithm hyper-parameters and shared best-so-far state.
loopTime = 10000        # nominal iteration budget (not enforced below)
populationSize = 500    # population size kept after selection
crossPercent = 0.01     # fraction of the population crossed per generation
mutaPercent = 0.02      # fraction of the population mutated per generation
curOptimalSolution = 0  # best allocation found so far (set per worker)
curOptimalScore = 0     # score of the best allocation found so far
numofMutedGenes = 50    # max genes mutated per selected individual
def repairOperator(singleSolution,pucks,gates):
    # Thin wrapper over repair.repair for one candidate allocation.
    return repair.repair(singleSolution,pucks,gates)
def evaluateOperator(population,pucks,gates):
    # Score one candidate allocation, randomly (p=0.5) applying repair first.
    # Returns [solution, score]; infeasible candidates that are not repaired
    # get a token score of 1 so they can still be sampled during selection.
    if check_feasibility(population, pucks, gates):
        if np.random.ranf() <= 0.5:
            population = repair.repair(population,pucks,gates)
        return [population,population.sum() - 0.5*np.sign(np.sum(population, axis=0)).sum()]
    else:
        if np.random.ranf() <= 0.5:
            try:
                population = repair.repair(population,pucks,gates)
                return [population,population.sum() - 0.5*np.sign(np.sum(population, axis=0)).sum()]
            except Exception:
                print('Evaluator Error')
                # np.savetxt('wa.txt',population,fmt='%d',delimiter=',')
                exit(-1)
        else:
            return [population,1]
def selectOperator(pop2ScoreSet):
    """Build the next generation: keep the best individual (elitism) and
    fill the rest by fitness-proportional (roulette-wheel) sampling.

    Each element of pop2ScoreSet is a [solution, score] pair; higher
    scores are better.
    """
    try:
        tmp = [s for s in pop2ScoreSet]
        pop2ScoreSet = []
        # Fix: the original called sorted() and discarded its result, so the
        # "elite" kept below was an arbitrary individual.  Sort in place,
        # best (highest score) first.
        tmp.sort(key=lambda k: k[1], reverse=True)
    except:
        print('Sort Error',[k[1] for k in tmp])
        exit(-1)
    # add the top solution to the new population directly (elitism)
    for i in range(1):
        pop2ScoreSet.append(tmp[min(i,len(tmp)-1)])
    try:
        totalScore = reduce(lambda x,y:x+y[1], tmp, 0)
    except:
        print(totalScore)
    # Sample the remainder with probability proportional to score.
    weighted_probability = np.array([s[1] / totalScore for s in tmp])
    weighted_probability /= weighted_probability.sum()
    for item in random.choice(range(len(tmp)), populationSize-2, p = weighted_probability):
        pop2ScoreSet.append(copy.deepcopy(tmp[item]))
    return pop2ScoreSet
def crossoverOperator(pop2ScoreSet):
    """Single-gene crossover: for crossPercent of the population, pick two
    distinct individuals and swap one randomly chosen gene row between
    them (in place).  Returns the population list."""
    Index = list(range(len(pop2ScoreSet)))
    numofGenes = pop2ScoreSet[0][0].shape[0]
    crossTimes = int(crossPercent*len(pop2ScoreSet))
    for index in range(crossTimes):
        [index1,index2] = random.choice(Index,2,replace=False)
        try:
            # Each individual participates in at most one crossover.
            Index.remove(index1)
            Index.remove(index2)
        except:
            print('Cross Error')
            exit(-1)
        crossoverIndex = random.randint(0,numofGenes)
        # Fix: take copies, not numpy views.  With views (the original
        # `row[:]` reads), writing row index1 below also mutated `a`, so
        # both rows ended up equal to row index2 instead of being swapped.
        a = pop2ScoreSet[index1][0][crossoverIndex].copy()
        b = pop2ScoreSet[index2][0][crossoverIndex].copy()
        pop2ScoreSet[index1][0][crossoverIndex][:] = b
        pop2ScoreSet[index2][0][crossoverIndex][:] = a
    return pop2ScoreSet
def mutationOperator(pop2ScoreSet,pucks,gates):
    # Mutation: clear a few randomly chosen gene rows and, with p=0.5,
    # reassign each cleared puck to one of its available gates.
    Index = list(range(len(pop2ScoreSet)))
    numofGenes = pop2ScoreSet[0][0].shape[0]
    numofGenesDim = pop2ScoreSet[0][0].shape[1]
    mutaTimes = int(mutaPercent*len(pop2ScoreSet))
    for index in range(random.randint(0,mutaTimes)):
        # Randomly pick up to mutaTimes individuals to mutate each round.
        [index1] = random.choice(Index,1)
        # Index.remove(index1)
        for loop in range(random.randint(0,numofMutedGenes)):
            # Mutate up to numofMutedGenes genes per chosen individual.
            try:
                mutaIndex = random.randint(0,numofGenes-1)
                pop2ScoreSet[index1][0][mutaIndex] = np.zeros(numofGenesDim)
                mutaSwitch = np.random.ranf()
                if mutaSwitch < 0.5:
                    changeIndex = random.choice(pucks[mutaIndex].available_gates,1)
                    pop2ScoreSet[index1][0][mutaIndex][changeIndex] = 1
            except Exception as ex:
                print('Muta Error')
                exit(-1)
    return pop2ScoreSet
def geneticOptimization(populationSet,pucks,gates,id):
    # Worker entry point: evolve `populationSet` forever (evaluate, select,
    # crossover, mutate), persisting the best-so-far allocation to
    # opt<id>.txt whenever it improves.  Never returns.
    global curOptimalSolution
    global curOptimalScore
    pop2ScoreSet = [[s,0] for s in populationSet]
    loopIndex = 0
    while(True):
        loopIndex += 1
        for index in range(len(pop2ScoreSet)):
            pop2ScoreSet[index] = evaluateOperator(pop2ScoreSet[index][0],pucks,gates)
        scores = [s[1] for s in pop2ScoreSet]
        localOptimalScore = max(scores)
        print(loopIndex,'round','optimal score',curOptimalScore,len(pop2ScoreSet),[s[1] for s in pop2ScoreSet])
        pop2ScoreSet = selectOperator(pop2ScoreSet)
        if(localOptimalScore > curOptimalScore):
            # New global best: record it and re-inject it into the pool.
            for item in pop2ScoreSet:
                if item[1] >= localOptimalScore:
                    curOptimalScore = item[1]
                    curOptimalSolution = item[0]
                    np.savetxt('opt'+str(id)+'.txt',curOptimalSolution,fmt='%d',delimiter=',')
            pop2ScoreSet.append([curOptimalSolution,curOptimalScore])
        pop2ScoreSet = crossoverOperator(pop2ScoreSet)
        pop2ScoreSet = mutationOperator(pop2ScoreSet,pucks,gates)
if __name__ == "__main__":
populationSet = []
curOptimalScore = 0
gates = Gates.Gates().all_gates
pucks = Pucks.Pucks(gates=gates).all_pucks
a = np.loadtxt("result-greedy.csv", delimiter=',')
# populationSet.append(a)
# a = np.loadtxt("result.csv", delimiter=',')
populationSet.append(a)
# pool = mp.Pool()
# info('main line')
pset = [ ]
for process in range(min(mp.cpu_count()-2,100)):
p = mp.Process(target=geneticOptimization, args=([s for s in populationSet],pucks,gates,process,))
p.start()
pset.append(p)
for p in pset:
p.join()
# geneticOptimization(populationSet,pucks,gates,1)
from initialize import *
# coding:utf-8
from helper import *
import numpy as np
from numpy import random
from functools import reduce
import Pucks
import Gates
import compare_func
from checkFeasibility import *
import repair
import multiprocessing as mp
import os
import time
import copy
import math
from ticketsMatrix import *
from transferTime import *
# Module-level problem data: gates, pucks, tickets, and the passenger
# transfer matrix between flight pairs (used by score()).
g = Gates.Gates().all_gates
p = Pucks.Pucks(gates=g).all_pucks
t = Tickets.Tickets().all_tickets
matrix = get_path_matrix(p, t)
# print(matrix.sum())
def score(allocation, pucks, gates):
    # Score a 303x(gates) 0/1 allocation matrix.  F[i, j] is the paperwork
    # time between flight i's arrival terminal and flight j's departure
    # terminal; the score rewards assigned flights and penalizes transfer
    # time and the number of gates in use.
    F = np.zeros([303, 303])
    for i in range(303):
        if (allocation[i, :].sum() == 0):
            continue  # flight i unassigned
        index1 = pucks[i].arrive_type
        index2 = gates[list(allocation[i, :]).index(1)].terminal
        for j in range(303):
            if (allocation[j, :].sum() == 0):
                continue
            index3 = pucks[j].depart_type
            index4 = gates[list(allocation[j, :]).index(1)].terminal
            F[i, j] = PaperWorkTime[index1 + index2 + '-' + index3 + index4]
    # Restrict the transfer matrix to assigned flights only (for reporting).
    newMatrix = copy.deepcopy(matrix)
    for i in range(303):
        if allocation[i, :].sum() == 0:
            newMatrix[i, :] = np.zeros(303)
            newMatrix[:, i] = np.zeros(303)
    print([allocation.sum(), (F * matrix).sum(), np.sign(np.sum(allocation, axis=0)).sum(),
           (F * matrix).sum() / newMatrix.sum()])
    # Returns (scalar score, [assigned count, transfer cost, gates used,
    # transfer cost per covered transfer]).
    return allocation.sum() - (F * matrix).sum() - np.sign(np.sum(allocation, axis=0)).sum(), \
        [allocation.sum(),
         (F * matrix).sum(),
         np.sign(np.sum(allocation, axis=0)).sum(),
         (F * matrix).sum() / newMatrix.sum()]
def transpose_1(a, pucks, gates):
    # Neighborhood move: unassign every flight parked at gates 10-18, then
    # greedily re-place the unassigned flights (sorted by departure/stay
    # time) onto gates 68..1.  Works on a deep copy; returns the new matrix.
    s = copy.deepcopy(a)
    unassign = []
    assigned = []
    for index in range(s.shape[0]):
        if s[index, :].sum() == 0:
            unassign.append(index)
        else:
            assigned.append(index)
    # gateSet = random.choice(list(range(69)),random.randint(1,3),replace=False)
    gateSet = range(10, 19)
    for gate in gateSet:
        for index in range(303):
            if s[index, gate] == 1:
                unassign.append(index)
                assigned.remove(index)
    for gate in gateSet:
        s[:, gate] = np.zeros(303)
    newPucks = [pucks[k] for k in unassign]
    newPucks.sort(key=compare_func.cmp_to_key(puck_compare_depart_and_stay_time))
    for item in newPucks:
        for index in range(68, 0, -1):
            if (can_add(s, pucks, gates, item.id, index)):
                add_puck(s, pucks, gates, item.id, index)
    return s
def transpose_2(a, pucks, gates):
    """Try (up to 100 draws) to swap the assignment columns of two randomly
    chosen, fully compatible gates in allocation matrix *a* (in place).

    Returns the (possibly unchanged) allocation.
    """
    for loop in range(100):
        index1 = random.randint(0, 69)
        index2 = random.randint(0, 69)
        if (index1 == index2):
            continue
        if (gates[index1].flight_type != gates[index2].flight_type):
            continue
        if (gates[index1].arrive_type_D != gates[index2].arrive_type_D):
            continue
        # Fix: the original compared arrive_type_D twice and never checked
        # arrive_type_I, so gates differing only in international arrival
        # type could be swapped illegally.
        if (gates[index1].arrive_type_I != gates[index2].arrive_type_I):
            continue
        if (gates[index1].depart_type_D != gates[index2].depart_type_D):
            continue
        if (gates[index1].depart_type_I != gates[index2].depart_type_I):
            continue
        tmp = copy.deepcopy(a[:, index1])
        a[:, index1] = copy.deepcopy(a[:, index2])
        a[:, index2] = copy.deepcopy(tmp)
        return a
    return a
return a
def transpose_3(a, pucks, gates):
    # Relaxed variant of transpose_2: swaps two gate columns when their
    # flight_type, arrive_type_D and depart_type_I match.
    # NOTE(review): this function is shadowed by a later stub redefinition
    # of transpose_3 in this module, so it is effectively dead code.
    for loop in range(100):
        index1 = random.randint(0, 69)
        index2 = random.randint(0, 69)
        if (index1 == index2):
            continue
        if (gates[index1].flight_type != gates[index2].flight_type):
            continue
        if (gates[index1].arrive_type_D != gates[index2].arrive_type_D):
            continue
        if (gates[index1].depart_type_I != gates[index2].depart_type_I):
            continue
        tmp = copy.deepcopy(a[:, index1])
        a[:, index1] = copy.deepcopy(a[:, index2])
        a[:, index2] = copy.deepcopy(tmp)
        return a
    return a
def move(allocation, pucks, gates):
    # Local-search pass: for every assigned flight, try moving it to each
    # gate 68..1 (delete, re-add; restore the old gate if the add fails),
    # then persist the result to opt.txt and print the before/after scores.
    print(score(allocation, pucks, gates))
    assign = []
    for line in range(allocation.shape[0]):
        if (allocation[line, :].sum() > 0):
            assign.append(line)
    for item in assign:
        for index in range(68, 0, -1):
            allocation, dindex = delete_puck(allocation, item)
            # add_puck(allocation, pucks, gates, item, dindex)
            allocation, re = add_puck(allocation, pucks, gates, item, index)
            if (not re):
                # Move rejected: put the flight back on its old gate.
                add_puck(allocation, pucks, gates, item, dindex)
            # if(can_add(allocation,pucks,gates,item,index)):
    np.savetxt('opt' + '.txt', allocation, fmt='%d', delimiter=',')
    print(score(allocation, pucks, gates))
def transpose_3(a, pucks, gates):
    # NOTE(review): this stub redefines — and therefore disables — the
    # earlier transpose_3 gate-swap implementation above.
    pass
def safunc(a, pucks, gates):
    # Simulated annealing over gate swaps (transpose_2), maximizing score().
    # Improvements are persisted to opt2.txt; temperature follows a
    # logarithmic cooling schedule 1000/log2(1+t).
    curScore, w = score(a, pucks, gates)
    # NOTE(review): score() is evaluated twice on the same input here.
    optScore, w = score(a, pucks, gates)
    optSec = w[1]
    optThi = w[1]  # NOTE(review): assigned but never used
    maxK = 100
    T = 1000
    Tmin = 10
    k = 100
    t = 0
    while T >= Tmin:
        for k in range(maxK):
            # print('gg',curScore,optScore)
            # newAllocation = transpose_1(a,pucks,gates)
            newAllocation = transpose_2(a, pucks, gates)
            newScore, b = score(newAllocation, pucks, gates)
            if not check_feasibility(newAllocation, pucks, gates):
                exit(-1)
            print(optScore, newAllocation.sum(), np.sign(np.sum(newAllocation, axis=0)).sum())
            if newScore > curScore:
                a = copy.deepcopy(newAllocation)
                curScore = newScore
                if newScore > optScore or (b[1] < optSec):
                    optScore = newScore
                    optSec = b[1]
                    np.savetxt('opt2' + '.txt', a, fmt='%d', delimiter=',')
                    # exit(-1)
            else:
                # NOTE(review): for a maximization problem, newScore <=
                # curScore makes -(newScore - optScore) positive, so p > 1
                # and worse moves are always accepted; conventional SA uses
                # exp((newScore - curScore) / T).  Confirm intent.
                p = math.exp(-(newScore - optScore) / T)
                r = np.random.uniform(low=0, high=1)
                if r < p:
                    a = copy.deepcopy(newAllocation)
                    curScore = newScore
        t += 1
        T = 1000 / math.log2(1 + t)
if __name__ == '__main__':
    # Load the problem data and the current best allocation, then anneal.
    g = Gates.Gates().all_gates
    p = Pucks.Pucks(gates=g).all_pucks
    a = np.loadtxt("opt.csv", delimiter=',')
    # transpose_1(a, p, g)
    # np.savetxt('opt'+'.txt',a,fmt='%d',delimiter=',')
    # move(a,p,g)
    #
    safunc(a, p, g)
    # print(can_add(a, p, g, 0, 0))
    #
    #
run.py | #!/usr/bin/env python3
from cloudfoundry_client.client import CloudFoundryClient
from sys import argv
import os
import time
import subprocess
import requests
import threading
import json
import time
from collections import OrderedDict
from jinja2 import Environment, FileSystemLoader
from flask import request, jsonify, make_response, Flask, redirect, url_for
#
# This is how long we wait after scanning all /info endpoints before
# trying again ...
#
SLEEP_INTERVAL = 10
app = Flask(__name__)
app.config['TEMPLATES_AUTO_RELOAD'] = True
matrix = OrderedDict()  # app name -> space name -> parsed /info JSON
spaces = OrderedDict()  # space name -> number of scanned apps
urls = OrderedDict()    # app name -> space name -> /info URL
env = Environment(loader=FileSystemLoader('templates'))
scanning = False
try:
    # NOTE(review): 'xredentials.json' looks like a typo for
    # 'credentials.json' — confirm the intended file name.
    with open('xredentials.json') as io:
        credentials = json.loads(io.read())
    configured = True
except FileNotFoundError:
    configured = False
except Exception as e:
    print(e)
    exit()
class Refresh(object):
    # Background scanner: discovers every app's /info URL via the Cloud
    # Foundry API, then (in the single instance that wins the `scanning`
    # flag) re-polls all URLs every SLEEP_INTERVAL seconds to keep the
    # global `matrix`/`spaces`/`urls` maps current.
    def __init__(self, auth, interval=30):
        self._interval = interval
        self._auth = auth
        self._scanning = False
        thread = threading.Thread(target=self.run, args=())
        thread.daemon = True
        thread.start()
    def run(self):
        """
        Regenerate our datapoints by querying all the /info endpoints
        """
        global matrix, spaces, scanning
        client = CloudFoundryClient(self._auth.get('gate'), skip_verification=True)
        client.init_with_user_credentials(self._auth.get('user'), self._auth.get('pass'))
        for organization in client.organizations:
            org_name = organization['entity']['name']
            for space in organization.spaces():
                space_name = space['entity']['name']
                if space_name not in spaces:
                    spaces[space_name] = 0
                for app in space.apps():
                    name = app['entity']['name']
                    # Only apps whose name ends in "-<space>" are tracked.
                    if name.split('-')[-1] not in spaces:
                        continue
                    app_name = '-'.join(app['entity']['name'].split('-')[:-1])
                    route = app.summary()['routes']
                    if not len(route):
                        continue
                    domain = route[0]['domain']['name']
                    host = route[0]['host']
                    url = 'http://{}.{}/info'.format(host, domain)
                    response = requests.get(url)
                    try:
                        if app_name not in matrix:
                            matrix[app_name] = {}
                            urls[app_name] = {}
                        urls[app_name][space_name] = url
                        matrix[app_name][space_name] = response.json()
                        spaces[space_name] += 1
                    except Exception as e:
                        pass
        if scanning:
            return  # another Refresh instance already runs the poll loop
        scanning = True
        while True:
            time.sleep(SLEEP_INTERVAL)
            count = 0
            # Snapshot the keys so concurrent discovery can't break iteration.
            immutable_matrix = list(matrix)
            for app in immutable_matrix:
                immutable_spaces = list(spaces)
                for space in immutable_spaces:
                    if space in urls[app]:
                        url = urls[app][space]
                        response = requests.get(url)
                        if 'gateway-test' in url:
                            print(url, response)
                        count += 1
                        try:
                            # NOTE(review): this local `json` shadows the
                            # imported json module within this loop body.
                            json = response.json()
                        except Exception as e:
                            json = {'branch': 'ERROR', 'version': str(response.status_code)}
                        matrix[app][space] = json
            print('Finished pass, scanned "{}" urls'.format(count))
@app.route('/authenticate')
def authenticate():
    # Accept a JSON list of credential dicts and start one Refresh scanner
    # per entry; on any failure render the login-failure page.
    # NOTE(review): credentials arrive in a GET query parameter, so they can
    # land in access logs and browser history — consider POST over TLS.
    global configured
    credentials = request.args.get('credentials')
    try:
        credentials = json.loads(credentials)
        for auth in credentials:
            Refresh(auth)
        configured = True
    except Exception as e:
        template = env.get_template('loginfail.html')
        return make_response(template.render(), 200)
    return redirect(url_for('home'))
@app.route('/status')
def status():
    """
    Provide the updated environment
    """
    # Flattens the matrix into a DataTables-style JSON payload: one row per
    # app, one column per non-empty space, each cell "branch/version".
    global matrix, spaces
    column_titles = ['Micro-Service']
    items = []
    for space in sorted(spaces):
        if spaces[space]:
            column_titles.append(space)
    for app in matrix:
        item = [app]
        if not len(matrix[app]):
            continue
        for space in spaces:
            if not spaces[space]:
                continue
            if not space in matrix[app]:
                item.append('-')
                continue
            j = matrix[app][space]
            branch = j.get('branch', '-')
            if '/' in branch:
                # Strip the "refs/"-style prefix from the branch name.
                branch = ' '.join(branch.split('/')[1:])
            if not len(branch):
                branch='(unknown)'
            version = j.get('version', '-').replace('-SNAPSHOT', '')
            label = '{}/{}'.format(branch, version)
            # Placeholder labels written by the scanner on fetch errors.
            if label == 'ERROR/200':
                label = 'UP - no endpoint'
            if label == 'ERROR/404':
                label = 'DOWN or no endpoint'
            item.append(label)
        if len(item):
            items.append(item)
    result = {
        'draw': 1,
        'recordsTotal': len(items),
        'recordsFiltered': len(items),
        'data': items,
        'column_titles': column_titles
    }
    return make_response(jsonify(result), 200)
@app.route('/')
def home():
    """Render the dashboard when configured, otherwise the login page."""
    page = 'index.html' if configured else 'login.html'
    template = env.get_template(page)
    return make_response(template.render(), 200)
# Start one scanner per configured credential set, then serve the UI.
if configured:
    for auth in credentials:
        Refresh(auth)
app.run(host='0.0.0.0', port=8080, debug=False)
|
test_gym.py | import pytest
import gym
import botbowl.ai
from multiprocessing import Process, Pipe
import numpy as np
import multiprocessing
import os
envs = [
"botbowl-1-v3",
"botbowl-3-v3",
"botbowl-5-v3",
"botbowl-7-v3",
"botbowl-11-v3"
]
@pytest.mark.parametrize("env", envs)
def test_gym(env):
env = gym.make(env)
seed = 0
env.seed(seed)
rnd = np.random.RandomState(seed)
steps = 0
obs = env.reset()
done = False
while not done:
action_types = env.available_action_types()
assert len(action_types) > 0
action_type = rnd.choice(action_types)
available_positions = env.available_positions(action_type)
assert obs is not None
pos = rnd.choice(available_positions) if len(available_positions) > 0 else None
action = {
'action-type': action_type,
'x': pos.x if pos is not None else None,
'y': pos.y if pos is not None else None
}
obs, reward, done, info = env.step(action)
assert reward is not None
steps += 1
assert steps > 10
def worker(remote, parent_remote, env):
    """Run a random-agent loop for one env inside a child process.

    Commands arrive over *remote*: 'step' advances the game one random
    action and sends back (obs, reward, done, info); 'reset' restarts
    the episode; 'close' shuts the env down and ends the loop.
    """
    parent_remote.close()
    rng = np.random.RandomState(env.get_seed())
    step_count = 0
    obs = env.reset()
    while True:
        command = remote.recv()
        if command == 'close':
            env.close()
            break
        if command == 'reset':
            obs = env.reset()
            done = False
            continue
        if command == 'step':
            choices = env.available_action_types()
            action_type = rng.choice(choices)
            positions = env.available_positions(action_type)
            pos = rng.choice(positions) if len(positions) > 0 else None
            action = {
                'action-type': action_type,
                'x': None if pos is None else pos.x,
                'y': None if pos is None else pos.y
            }
            obs, reward, done, info = env.step(action)
            step_count += 1
            if done:
                # Start a fresh episode so the parent always gets a live obs.
                obs = env.reset()
            remote.send((obs, reward, done, info))
@pytest.mark.parametrize("env", envs)
def test_multiple_gyms(env):
    """Drive two copies of the same env in parallel worker processes."""
    n_envs = 2
    gym_envs = [gym.make(env) for _ in range(n_envs)]
    for e in gym_envs:
        e.seed(0)
    remotes, work_remotes = zip(*[Pipe() for _ in range(n_envs)])
    procs = []
    for work_remote, remote, e in zip(work_remotes, remotes, gym_envs):
        p = Process(target=worker, args=(work_remote, remote, e))
        p.daemon = True  # If the main process crashes, we should not cause things to hang
        p.start()
        procs.append(p)
    # The worker processes own the work ends now; close our copies.
    for wr in work_remotes:
        wr.close()
    for _ in range(20):
        for remote in remotes:
            remote.send('step')
        for obs, reward, done, info in (remote.recv() for remote in remotes):
            assert reward is not None
            assert obs is not None
    for remote in remotes:
        remote.send('close')
    for p in procs:
        p.join()
    assert True
|
ion_optics_manager.py | # ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from __future__ import print_function
import os
import six.moves.cPickle as pickle
from traits.api import Range, Instance, Bool, \
Button, Any
from pychron.core.helpers.isotope_utils import sort_isotopes
from pychron.core.ui.thread import Thread
from pychron.graph.graph import Graph
from pychron.managers.manager import Manager
from pychron.paths import paths
from pychron.pychron_constants import NULL_STR
from pychron.spectrometer.base_detector import BaseDetector
from pychron.spectrometer.base_spectrometer import NoIntensityChange
from pychron.spectrometer.ion_optics.coincidence_config import CoincidenceConfig
from pychron.spectrometer.ion_optics.peak_center_config import PeakCenterConfigurer
from pychron.spectrometer.jobs.coincidence import Coincidence
from pychron.spectrometer.jobs.peak_center import PeakCenter, AccelVoltagePeakCenter
class MFTableCTX(object):
    """Context manager that temporarily activates a magnetic-field table.

    On entry the manager is switched to the requested table; on exit the
    table path captured at construction time is restored.
    """
    def __init__(self, manager, mftable):
        self._manager = manager
        self._target = mftable
        # Capture the currently active table path so __exit__ can restore it.
        self._restore_path = manager.spectrometer.magnet.get_field_table_path()
    def __enter__(self):
        self._manager.set_mftable(self._target)
    def __exit__(self, exc_type, exc_val, exc_tb):
        self._manager.set_mftable(self._restore_path)
class IonOpticsManager(Manager):
    """Coordinates ion-optics operations for a spectrometer: positioning
    the magnet on an isotope/detector pair, running peak centers
    (magnet-DAC or accelerating-voltage based) and coincidence scans.
    """
    reference_detector = Instance(BaseDetector)
    reference_isotope = Any
    magnet_dac = Range(0.0, 6.0)
    graph = Instance(Graph)
    peak_center_button = Button('Peak Center')
    stop_button = Button('Stop')
    # True while a peak-center/coincidence task is running.
    alive = Bool(False)
    spectrometer = Any
    peak_center = Instance(PeakCenter)
    coincidence = Instance(Coincidence)
    peak_center_config = Instance(PeakCenterConfigurer)
    # coincidence_config = Instance(CoincidenceConfig)
    canceled = False
    # Raw result of the last peak center (None on failure/cancel).
    peak_center_result = None
    _centering_thread = None
    def close(self):
        """Cancel any in-progress peak center on shutdown."""
        self.cancel_peak_center()
    def cancel_peak_center(self):
        """Stop the running peak center and mark the manager canceled."""
        self.alive = False
        self.canceled = True
        # NOTE(review): assumes a peak_center instance exists; calling this
        # before any peak center has been set up would hit None — confirm.
        self.peak_center.canceled = True
        self.peak_center.stop()
        self.info('peak center canceled')
    def get_mass(self, isotope_key):
        """Return the molecular weight for *isotope_key*.

        Raises KeyError if the isotope is not in the spectrometer's table.
        """
        spec = self.spectrometer
        molweights = spec.molecular_weights
        return molweights[isotope_key]
    def mftable_ctx(self, mftable):
        """Return a context manager that activates *mftable* and restores
        the previously active field table on exit."""
        return MFTableCTX(self, mftable)
    def set_mftable(self, name=None):
        """
        Set the active magnetic-field table.

        If *name* is falsy or matches the default mftable (located at
        setupfiles/spectrometer/mftable.csv), deflection correction is
        enabled; any non-default table disables it.

        :param name: base name of the field table, or None for the default
        :return:
        """
        if name and name != os.path.splitext(os.path.basename(paths.mftable))[0]:
            self.spectrometer.use_deflection_correction = False
        else:
            self.spectrometer.use_deflection_correction = True
        self.spectrometer.magnet.set_mftable(name)
    def get_position(self, *args, **kw):
        """Like _get_position, but never updates the active isotope/detector."""
        kw['update_isotopes'] = False
        return self._get_position(*args, **kw)
    def av_position(self, pos, detector, *args, **kw):
        """Position *pos* on *detector* by setting the accelerating (HV)
        voltage instead of the magnet. Returns the applied voltage."""
        av = self._get_av_position(pos, detector)
        self.spectrometer.source.set_hv(av)
        self.info('positioning {} ({}) on {}'.format(pos, av, detector))
        return av
    def position(self, pos, detector, use_af_demag=True, *args, **kw):
        """Position *pos* (isotope name, mass or DAC) on *detector* by
        setting the magnet DAC. Returns the result of magnet.set_dac."""
        dac = self._get_position(pos, detector, *args, **kw)
        mag = self.spectrometer.magnet
        self.info('positioning {} ({}) on {}'.format(pos, dac, detector))
        return mag.set_dac(dac, use_af_demag=use_af_demag)
    def do_coincidence_scan(self, new_thread=True):
        """Run the configured coincidence scan in a background thread.

        NOTE(review): when new_thread is False nothing runs — there is no
        serial path; confirm callers always pass True.
        """
        if new_thread:
            t = Thread(name='ion_optics.coincidence', target=self._coincidence)
            t.start()
            self._centering_thread = t
    def setup_coincidence(self):
        """Prompt the user for coincidence-scan settings and build a
        Coincidence job. Returns the job, or None if the dialog was canceled.
        """
        # NOTE(review): the coincidence_config trait is commented out above;
        # presumably supplied by a _coincidence_config_default — confirm.
        pcc = self.coincidence_config
        pcc.dac = self.spectrometer.magnet.dac
        info = pcc.edit_traits()
        if not info.result:
            return
        detector = pcc.detector
        isotope = pcc.isotope
        detectors = [d for d in pcc.additional_detectors]
        # integration_time = pcc.integration_time
        # Choose the center DAC: nominal (from field table), current, or manual.
        if pcc.use_nominal_dac:
            center_dac = self.get_position(isotope, detector)
        elif pcc.use_current_dac:
            center_dac = self.spectrometer.magnet.dac
        else:
            center_dac = pcc.dac
        # self.spectrometer.save_integration()
        # self.spectrometer.set_integration(integration_time)
        cs = Coincidence(spectrometer=self.spectrometer,
                         center_dac=center_dac,
                         reference_detector=detector,
                         reference_isotope=isotope,
                         additional_detectors=detectors)
        self.coincidence = cs
        return cs
    def get_center_dac(self, det, iso):
        """Return the deflection-corrected DAC for isotope *iso* on
        detector *det* (given by name)."""
        spec = self.spectrometer
        det = spec.get_detector(det)
        molweights = spec.molecular_weights
        mass = molweights[iso]
        dac = spec.magnet.map_mass_to_dac(mass, det.name)
        # correct for deflection
        return spec.correct_dac(det, dac)
    def do_peak_center(self,
                       save=True,
                       confirm_save=False,
                       warn=False,
                       new_thread=True,
                       message='',
                       on_end=None,
                       timeout=None):
        """Run the peak center prepared by setup_peak_center.

        Runs in a background thread when *new_thread* is True and returns
        the Thread; otherwise runs serially and returns None. The outcome
        is stored in self.peak_center_result.
        """
        self.debug('doing pc')
        self.canceled = False
        self.alive = True
        self.peak_center_result = None
        args = (save, confirm_save, warn, message, on_end, timeout)
        if new_thread:
            t = Thread(name='ion_optics.peak_center', target=self._peak_center,
                       args=args)
            t.start()
            self._centering_thread = t
            return t
        else:
            self._peak_center(*args)
    def setup_peak_center(self, detector=None,
                          isotope=None,
                          integration_time=1.04,
                          directions='Increase',
                          center_dac=None,
                          name='',
                          show_label=False,
                          window=0.015,
                          step_width=0.0005,
                          min_peak_height=1.0,
                          percent=80,
                          deconvolve=None,
                          use_interpolation=False,
                          interpolation_kind='linear',
                          dac_offset=None,
                          calculate_all_peaks=False,
                          config_name=None,
                          use_configuration_dac=True,
                          new=False,
                          update_others=True,
                          plot_panel=None):
        """Build (or reuse) a PeakCenter/AccelVoltagePeakCenter job.

        Explicit keyword arguments are overridden by the active
        PeakCenterConfigurer item when one is loaded. Returns the
        configured peak center, or None if the user canceled the
        configuration dialog.
        """
        if deconvolve is None:
            n_peaks, select_peak = 1, 1
        # NOTE(review): if deconvolve is not None and no config item is
        # loaded, n_peaks/select_peak stay unbound — confirm callers.
        use_dac_offset = False
        if dac_offset is not None:
            use_dac_offset = True
        spec = self.spectrometer
        pcconfig = self.peak_center_config
        # Saved so _peak_center can restore it when the job finishes.
        spec.save_integration()
        self.debug('setup peak center. detector={}, isotope={}'.format(detector, isotope))
        pcc = None
        dataspace = 'dac'
        use_accel_voltage = False
        use_extend = False
        self._setup_config()
        if config_name:
            # Non-interactive: pull the named configuration.
            pcconfig.load()
            pcconfig.active_name = config_name
            pcc = pcconfig.active_item
        elif detector is None or isotope is None:
            self.debug('ask user for peak center configuration')
            pcconfig.load()
            if config_name:
                pcconfig.active_name = config_name
            info = pcconfig.edit_traits()
            if not info.result:
                return
            else:
                pcc = pcconfig.active_item
        if pcc:
            # Configuration item wins over the keyword arguments.
            if not detector:
                detector = pcc.active_detectors
            if not isotope:
                isotope = pcc.isotope
            directions = pcc.directions
            integration_time = pcc.integration_time
            dataspace = pcc.dataspace
            use_accel_voltage = pcc.use_accel_voltage
            use_extend = pcc.use_extend
            window = pcc.window
            min_peak_height = pcc.min_peak_height
            step_width = pcc.step_width
            percent = pcc.percent
            use_interpolation = pcc.use_interpolation
            interpolation_kind = pcc.interpolation_kind
            n_peaks = pcc.n_peaks
            select_peak = pcc.select_n_peak
            use_dac_offset = pcc.use_dac_offset
            dac_offset = pcc.dac_offset
            calculate_all_peaks = pcc.calculate_all_peaks
            update_others = pcc.update_others
            if not pcc.use_mftable_dac and center_dac is None and use_configuration_dac:
                center_dac = pcc.dac
        spec.set_integration_time(integration_time)
        # Sampling period: 90% of the integration time, in milliseconds.
        period = int(integration_time * 1000 * 0.9)
        if not isinstance(detector, (tuple, list)):
            detector = (detector,)
        # First detector is the reference; the rest are additional traces.
        ref = spec.get_detector(detector[0])
        if center_dac is None:
            center_dac = self.get_center_dac(ref, isotope)
        # if mass:
        #     mag = spec.magnet
        #     center_dac = mag.map_mass_to_dac(mass, ref)
        #     low = mag.map_mass_to_dac(mass - window / 2., ref)
        #     high = mag.map_mass_to_dac(mass + window / 2., ref)
        #     window = high - low
        #     step_width = abs(mag.map_mass_to_dac(mass + step_width, ref) - center_dac)
        if len(detector) > 1:
            ad = detector[1:]
        else:
            ad = []
        pc = self.peak_center
        klass = AccelVoltagePeakCenter if use_accel_voltage else PeakCenter
        # Reuse the existing job unless a new one (or a different kind) is needed.
        if not pc or new or (use_accel_voltage and not isinstance(pc, AccelVoltagePeakCenter)):
            pc = klass()
        pc.trait_set(center_dac=center_dac,
                     dataspace=dataspace,
                     use_accel_voltage=use_accel_voltage,
                     use_extend=use_extend,
                     period=period,
                     window=window,
                     percent=percent,
                     min_peak_height=min_peak_height,
                     step_width=step_width,
                     directions=directions,
                     reference_detector=ref,
                     additional_detectors=ad,
                     reference_isotope=isotope,
                     spectrometer=spec,
                     show_label=show_label,
                     use_interpolation=use_interpolation,
                     interpolation_kind=interpolation_kind,
                     n_peaks=n_peaks,
                     select_peak=select_peak,
                     use_dac_offset=use_dac_offset,
                     dac_offset=dac_offset,
                     calculate_all_peaks=calculate_all_peaks,
                     update_others=update_others)
        graph = pc.graph
        graph.name = name
        if plot_panel:
            plot_panel.set_peak_center_graph(graph)
        self.peak_center = pc
        self.reference_detector = ref
        self.reference_isotope = isotope
        return self.peak_center
    def backup_mftable(self):
        """Write a backup copy of the current magnetic-field table."""
        self.spectrometer.magnet.field_table.backup()
    # private
    def _setup_config(self):
        """Push current spectrometer metadata into the peak-center config."""
        config = self.peak_center_config
        config.detectors = self.spectrometer.detector_names
        keys = list(self.spectrometer.molecular_weights.keys())
        config.isotopes = sort_isotopes(keys)
        config.integration_times = self.spectrometer.integration_times
    # def _get_peak_center_config(self, config_name):
    #     if config_name is None:
    #         config_name = 'default'
    #
    #     config = self.peak_center_config.get(config_name)
    #
    #     config.detectors = self.spectrometer.detectors_names
    #     if config.detector_name:
    #         config.detector = next((di for di in config.detectors if di == config.detector_name), None)
    #
    #     if not config.detector:
    #         config.detector = config.detectors[0]
    #
    #     keys = self.spectrometer.molecular_weights.keys()
    #     config.isotopes = sort_isotopes(keys)
    #     config.integration_times = self.spectrometer.integration_times
    #     return config
    # def _timeout_func(self, timeout, evt):
    #     st = time.time()
    #     while not evt.is_set():
    #         if not self.alive:
    #             break
    #
    #         if time.time() - st > timeout:
    #             self.warning('Peak Centering timed out after {}s'.format(timeout))
    #             self.cancel_peak_center()
    #             break
    #
    #         time.sleep(0.01)
    def _peak_center(self, save, confirm_save, warn, message, on_end, timeout):
        """Worker for do_peak_center: run the job, optionally persist the
        result, and restore spectrometer state.

        NOTE(review): *timeout* is currently unused here (see the
        commented-out _timeout_func above) — confirm intent.
        """
        pc = self.peak_center
        spec = self.spectrometer
        ref = self.reference_detector
        isotope = self.reference_isotope
        try:
            center_value = pc.get_peak_center()
        except NoIntensityChange as e:
            self.warning('Peak Centering failed. No Intensity change. {}'.format(e))
            center_value = None
        self.peak_center_result = center_value
        if center_value:
            det = spec.get_detector(ref)
            if pc.use_accel_voltage:
                args = ref, isotope, center_value
            else:
                # Convert back to an uncorrected (field-table) DAC value.
                dac_a = spec.uncorrect_dac(det, center_value)
                self.info('dac uncorrected for HV and deflection {}'.format(dac_a))
                args = ref, isotope, dac_a
                self.adjusted_peak_center_result = dac_a
            self.info('new center pos {} ({}) @ {}'.format(*args))
            if save:
                if confirm_save:
                    msg = 'Update Magnet Field Table with new peak center- {} ({}) @ RefDetUnits= {}'.format(*args)
                    if pc.use_accel_voltage:
                        msg = 'Update Accel Voltage Table with new peak center- {} ({}) @ RefDetUnits= {}'.format(*args)
                    save = self.confirmation_dialog(msg)
                if save:
                    if pc.use_accel_voltage:
                        spec.source.update_field_table(det, isotope, center_value, message)
                    else:
                        spec.magnet.update_field_table(det, isotope, dac_a, message,
                                                       update_others=pc.update_others)
                        spec.magnet.set_dac(dac_a)
        elif not self.canceled:
            msg = 'centering failed'
            if warn:
                self.warning_dialog(msg)
            self.warning(msg)
        # needs to be called on the main thread to properly update
        # the menubar actions. alive=False enables IonOptics>Peak Center
        # d = lambda:self.trait_set(alive=False)
        # still necessary with qt? and tasks
        if on_end:
            on_end()
        self.trait_set(alive=False)
        self.spectrometer.restore_integration()
    def _get_av_position(self, pos, detector, update_isotopes=True):
        """Map *pos* (isotope name or mass) to an accelerating voltage for
        *detector*. Accepts a detector object or name."""
        self.debug('AV POSITION {} {}'.format(pos, detector))
        spec = self.spectrometer
        if not isinstance(detector, str):
            detector = detector.name
        if isinstance(pos, str):
            try:
                pos = float(pos)
            except ValueError:
                # pos is isotope
                if update_isotopes:
                    # if the pos is an isotope then update the detectors
                    spec.update_isotopes(pos, detector)
                pos = self.get_mass(pos)
        # pos is mass i.e 39.962
        av = spec.source.map_mass_to_hv(pos, detector)
        return av
    def _get_position(self, pos, detector, use_dac=False, update_isotopes=True):
        """
        Map *pos* to a corrected magnet DAC.

        pos can be str or float
        "Ar40", "39.962", 39.962
        to set in DAC space set use_dac=True
        Returns None when pos is the NULL placeholder.
        """
        if pos == NULL_STR:
            return
        spec = self.spectrometer
        mag = spec.magnet
        if isinstance(detector, str):
            det = spec.get_detector(detector)
        else:
            det = detector
        self.debug('detector {}'.format(det))
        if use_dac:
            # Caller already supplied a DAC value; pass it through untouched.
            dac = pos
        else:
            self.debug('POSITION {} {}'.format(pos, detector))
            if isinstance(pos, str):
                try:
                    pos = float(pos)
                except ValueError:
                    # pos is isotope
                    if update_isotopes:
                        # if the pos is an isotope then update the detectors
                        spec.update_isotopes(pos, detector)
                    pos = self.get_mass(pos)
                mag.mass_change(pos)
            # pos is mass i.e 39.962
            print('det is',det)
            dac = mag.map_mass_to_dac(pos, det.name)
            dac = spec.correct_dac(det, dac)
        return dac
    def _coincidence(self):
        """Worker for do_coincidence_scan."""
        self.coincidence.get_peak_center()
        self.info('coincidence finished')
        self.spectrometer.restore_integration()
    # ===============================================================================
    # handler
    # ===============================================================================
    def _coincidence_config_default(self):
        """Load the persisted coincidence config, falling back to defaults."""
        config = None
        p = os.path.join(paths.hidden_dir, 'coincidence_config.p')
        if os.path.isfile(p):
            try:
                # NOTE(review): text-mode open of a pickle; under python 3
                # this likely needs 'rb' — confirm.
                with open(p) as rfile:
                    config = pickle.load(rfile)
                config.detectors = dets = self.spectrometer.detectors
                config.detector = next((di for di in dets if di.name == config.detector_name), None)
            except Exception as e:
                print('coincidence config', e)
        if config is None:
            config = CoincidenceConfig()
            config.detectors = self.spectrometer.detectors
            config.detector = config.detectors[0]
        keys = list(self.spectrometer.molecular_weights.keys())
        config.isotopes = sort_isotopes(keys)
        return config
    def _peak_center_config_default(self):
        """Trait default: a fresh PeakCenterConfigurer."""
        config = PeakCenterConfigurer()
        return config
    # def _peak_center_config_default(self):
    #     config = None
    #     p = os.path.join(paths.hidden_dir, 'peak_center_config.p')
    #     if os.path.isfile(p):
    #         try:
    #             with open(p) as rfile:
    #                 config = pickle.load(rfile)
    #             config.detectors = dets = self.spectrometer.detectors
    #             config.detector = next((di for di in dets if di.name == config.detector_name), None)
    #
    #         except Exception, e:
    #             print 'peak center config', e
    #
    #     if config is None:
    #         config = PeakCenterConfig()
    #         config.detectors = self.spectrometer.detectors
    #         config.detector = config.detectors[0]
    #
    #     keys = self.spectrometer.molecular_weights.keys()
    #     config.isotopes = sort_isotopes(keys)
    #
    #     return config
if __name__ == '__main__':
    # Manual smoke test: launch the manager's Traits UI standalone.
    io = IonOpticsManager()
    io.configure_traits()
# ============= EOF =============================================
# def _graph_factory(self):
# g = Graph(
# container_dict=dict(padding=5, bgcolor='gray'))
# g.new_plot()
# return g
#
# def _graph_default(self):
# return self._graph_factory()
# def _detector_default(self):
# return self.detectors[0]
# def peak_center_config_view(self):
# v = View(Item('detector', editor=EnumEditor(name='detectors')),
# Item('isotope'),
# Item('dac'),
# Item('directions'),
# buttons=['OK', 'Cancel'],
# kind='livemodal',
# title='Peak Center'
# )
# return v
# def graph_view(self):
# v = View(Item('graph', show_label=False, style='custom'),
# width=300,
# height=500
# )
# return v
# def peak_center_view(self):
# v = View(Item('graph', show_label=False, style='custom'),
# width=300,
# height=500,
# handler=self.handler_klass
# )
# return v
# def traits_view(self):
# v = View(Item('magnet_dac'),
# Item('peak_center_button',
# enabled_when='not alive',
# show_label=False),
# Item('stop_button', enabled_when='alive',
# show_label=False),
#
# Item('graph', show_label=False, style='custom'),
#
#
# resizable=True)
# return v
# def _correct_dac(self, det, dac):
# # dac is in axial units
#
# # convert to detector
# dac *= det.relative_position
#
# '''
# convert to axial detector
# dac_a= dac_d / relpos
#
# relpos==dac_detA/dac_axial
#
# '''
# #correct for deflection
# dev = det.get_deflection_correction()
#
# dac += dev
#
# # #correct for hv
# dac *= self.spectrometer.get_hv_correction(current=True)
# return dac
|
trayicon.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
* UMSE Antivirus Agent Example
* Author: David Alvarez Perez <dalvarezperez87[at]gmail[dot]com>
* Module: System Tray Icon
* Description: This module is the System Tray Icon.
*
* Copyright (c) 2019-2020. The UMSE Authors. All Rights Reserved.
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
"""
import sys, os, hashlib, threading, webbrowser, subprocess
from PyQt5 import QtCore, QtGui, QtWidgets
import multiprocessing
# Libraries
from aboutform import AboutForm
from scanfiles import ScanFiles
class SystemTrayIcon(QtWidgets.QSystemTrayIcon):
    """Tray icon exposing the agent's actions (toggle sample collection,
    about dialog, project website, exit) through a context menu."""
    def __init__(self, icon, parent=None):
        QtWidgets.QSystemTrayIcon.__init__(self, icon, parent)
        menu = QtWidgets.QMenu(parent)
        # RealTime Protection flag
        self.sampleCollectorStatusEnabled = False
        self.collectSamplesStartText="Collect malware samples"
        self.collectSamplesStopText="Stop to collect malware samples"
        self.icon="resources\\start.ico"
        # Adding actions to the interface.
        self.collectorAction = menu.addAction(QtGui.QIcon(self.icon), self.collectSamplesStartText, self.collectSamples)
        menu.addSeparator()
        self.aboutAction = menu.addAction(QtGui.QIcon("resources\\agent.ico"), "About", self.aboutWindow)
        self.internetAction = menu.addAction(QtGui.QIcon("resources\\agent.ico"), "UMSE Agent on Internet", self.viewWebSite)
        self.exitAction = menu.addAction(QtGui.QIcon("resources\\exit.ico"), "Exit", self.exit)
        self.setContextMenu(menu)
    # Logical action definitions
    def collectSamples(self):
        '''
        Toggles collection of malware samples: flips the status indicator,
        then starts or stops the background scan accordingly.
        '''
        current_status = self.sampleCollectorStatusEnabled
        self.updatesampleCollectorStatus()
        if(current_status):
            self.collectSamplesInBackground(False)
        else:
            self.collectSamplesInBackground(True)
    def aboutWindow(self):
        '''
        Shows the about window (modal)
        '''
        AboutForm().exec_()
    def updatesampleCollectorStatus(self):
        '''
        Flips the sample collector status indicator, updating the menu
        action's text and icon to match the new state.
        '''
        if (self.sampleCollectorStatusEnabled):
            self.icon="resources\\start.ico"
            self.collectorAction.setText(self.collectSamplesStartText)
            self.collectorAction.setIcon(QtGui.QIcon(self.icon))
            self.sampleCollectorStatusEnabled=False
        else:
            self.icon="resources\\stop.ico"
            self.collectorAction.setText(self.collectSamplesStopText)
            self.collectorAction.setIcon(QtGui.QIcon(self.icon))
            self.sampleCollectorStatusEnabled=True
    def viewWebSite(self):
        '''
        Opens the UMSE website in browser
        '''
        webbrowser.open("https://github.com/dalvarezperez/umse")
    def exit(self):
        '''
        Exits from the program
        '''
        sys.exit()
    def collectSamplesInBackground(self, start):
        '''
        Collects malware samples in a background daemon thread.

        NOTE(review): stopping assumes a prior start created self.hilo;
        calling with start=False first would raise AttributeError — confirm.
        '''
        if(start):
            self.hilo=threading.Thread(target=self.scanMyFiles, args=())
            self.hilo.daemon = True
            self.hilo.start()
        else:
            # Cooperative stop: presumably ScanFiles polls do_stop — confirm.
            self.hilo.do_stop = True
    def scanMyFiles(self):
        '''
        Scans the system for malware samples (delegated to ScanFiles)
        '''
        self._scanFiles=ScanFiles(self)
subprocess2.py | # coding=utf8
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Collection of subprocess wrapper functions.
In theory you shouldn't need anything else in subprocess, or this module failed.
"""
import cStringIO
import errno
import logging
import os
import Queue
import subprocess
import sys
import time
import threading
# Constants forwarded from subprocess.
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
# Sends stdout or stderr to os.devnull.
# Sentinel: identity-compared by Popen.__init__ to mean "discard this stream".
VOID = object()
# Error code when a process was killed because it timed out.
TIMED_OUT = -2001
# Globals.
# Set to True if you somehow need to disable this hack.
SUBPROCESS_CLEANUP_HACKED = False
class CalledProcessError(subprocess.CalledProcessError):
    """subprocess.CalledProcessError enriched with cwd, stdout and stderr."""
    def __init__(self, returncode, cmd, cwd, stdout, stderr):
        super(CalledProcessError, self).__init__(returncode, cmd)
        self.stdout = stdout
        self.stderr = stderr
        self.cwd = cwd
    def __str__(self):
        message = 'Command %s returned non-zero exit status %s' % (
            ' '.join(self.cmd), self.returncode)
        if self.cwd:
            message = '%s in %s' % (message, self.cwd)
        # Drop empty/None pieces so missing output leaves no blank lines.
        pieces = [p for p in (message, self.stdout, self.stderr) if p]
        return '\n'.join(pieces)
# Raised by Popen.__init__ when fork() fails with EAGAIN on cygwin.
class CygwinRebaseError(CalledProcessError):
    """Occurs when cygwin's fork() emulation fails due to rebased dll."""
## Utility functions
def kill_pid(pid):
    """Kills a process by its process id.

    No-op (returns None) on platforms without SIGKILL, e.g. Windows.
    """
    try:
        # Unable to import 'module'
        # pylint: disable=E1101,F0401
        import signal
        return os.kill(pid, signal.SIGKILL)
    except (ImportError, AttributeError):
        # ImportError kept for platforms lacking the signal module entirely;
        # AttributeError covers platforms (Windows) where signal imports fine
        # but has no SIGKILL — the original guard missed that case.
        pass
def kill_win(process):
    """Kills a process with its windows handle.

    Has no effect on other platforms (returns None).
    """
    try:
        # Unable to import 'module'
        # pylint: disable=F0401
        import win32process
    except ImportError:
        return None
    # Access to a protected member _handle of a client class
    # pylint: disable=W0212
    return win32process.TerminateProcess(process._handle, -1)
def add_kill():
    """Adds kill() method to subprocess.Popen for python <2.6"""
    if not hasattr(subprocess.Popen, 'kill'):
        # Pick the OS-appropriate termination primitive and graft it on.
        if sys.platform == 'win32':
            killer = kill_win
        else:
            killer = lambda process: kill_pid(process.pid)
        subprocess.Popen.kill = killer
def hack_subprocess():
    """subprocess functions may throw exceptions when used in multiple threads.

    See http://bugs.python.org/issue1731717 for more information.
    """
    global SUBPROCESS_CLEANUP_HACKED
    # active_count() instead of the deprecated camelCase alias activeCount()
    # (removed in python 3.12); active_count() exists since python 2.6.
    if not SUBPROCESS_CLEANUP_HACKED and threading.active_count() != 1:
        # Only hack if there is ever multiple threads.
        # There is no point to leak with only one thread.
        subprocess._cleanup = lambda: None
        SUBPROCESS_CLEANUP_HACKED = True
def get_english_env(env):
    """Forces LANG and/or LANGUAGE to be English.

    Forces encoding to utf-8 for subprocesses.
    Returns None if it is unnecessary.
    """
    if sys.platform == 'win32':
        return None
    current = env or os.environ
    def _english(name):
        # Missing variables default to English, so they are left alone.
        return current.get(name, 'en').startswith('en')
    if _english('LANG') and _english('LANGUAGE'):
        # Nothing to override.
        return None
    fixed = current.copy()
    for name in ('LANG', 'LANGUAGE'):
        if not _english(name):
            fixed[name] = 'en_US.UTF-8'
    return fixed
class NagTimer(object):
    """
    Triggers a callback when a time interval passes without an event being fired.

    For example, the event could be receiving terminal output from a subprocess;
    and the callback could print a warning to stderr that the subprocess appeared
    to be hung.
    """
    def __init__(self, interval, cb):
        self.interval = interval
        self.cb = cb
        self.timer = threading.Timer(self.interval, self.fn)
        self.last_output = self.previous_last_output = 0
    def start(self):
        """Arm the timer; the first check fires interval seconds from now."""
        now = time.time()
        self.last_output = now
        self.previous_last_output = now
        self.timer.start()
    def event(self):
        """Record activity, postponing the next nag."""
        self.last_output = time.time()
    def fn(self):
        now = time.time()
        # No event arrived since the previous check: nag.
        if self.last_output == self.previous_last_output:
            self.cb(now - self.previous_last_output)
        # Use 0.1 fudge factor, just in case
        # (self.last_output - now) is very close to zero.
        sleep_time = (self.last_output - now - 0.1) % self.interval
        self.previous_last_output = self.last_output
        # threading.Timer is single-shot, so re-arm for the next check.
        self.timer = threading.Timer(sleep_time + 0.1, self.fn)
        self.timer.start()
    def cancel(self):
        self.timer.cancel()
class Popen(subprocess.Popen):
    """Wraps subprocess.Popen() with various workarounds.

    - Forces English output since it's easier to parse the stdout if it is always
      in English.
    - Sets shell=True on windows by default. You can override this by forcing
      shell parameter to a value.
    - Adds support for VOID to not buffer when not needed.
    - Adds self.start property.

    Note: Popen() can throw OSError when cwd or args[0] doesn't exist. Translate
    exceptions generated by cygwin when it fails trying to emulate fork().

    NOTE(review): this module is python 2 only ('except OSError, e',
    basestring, Queue/cStringIO imports, itervalues).
    """
    def __init__(self, args, **kwargs):
        # Make sure we hack subprocess if necessary.
        hack_subprocess()
        add_kill()
        env = get_english_env(kwargs.get('env'))
        if env:
            kwargs['env'] = env
        if kwargs.get('shell') is None:
            # *Sigh*: Windows needs shell=True, or else it won't search %PATH% for
            # the executable, but shell=True makes subprocess on Linux fail when it's
            # called with a list because it only tries to execute the first item in
            # the list.
            kwargs['shell'] = bool(sys.platform=='win32')
        # Build a printable command string for logging and error messages.
        if isinstance(args, basestring):
            tmp_str = args
        elif isinstance(args, (list, tuple)):
            tmp_str = ' '.join(args)
        else:
            raise CalledProcessError(None, args, kwargs.get('cwd'), None, None)
        if kwargs.get('cwd', None):
            tmp_str += '; cwd=%s' % kwargs['cwd']
        logging.debug(tmp_str)
        self.stdout_cb = None
        self.stderr_cb = None
        self.stdin_is_void = False
        self.stdout_is_void = False
        self.stderr_is_void = False
        self.cmd_str = tmp_str
        # VOID streams are redirected to os.devnull; callable streams become
        # PIPEs whose data is forwarded to the callback by _tee_threads().
        if kwargs.get('stdin') is VOID:
            kwargs['stdin'] = open(os.devnull, 'r')
            self.stdin_is_void = True
        for stream in ('stdout', 'stderr'):
            if kwargs.get(stream) in (VOID, os.devnull):
                kwargs[stream] = open(os.devnull, 'w')
                setattr(self, stream + '_is_void', True)
            if callable(kwargs.get(stream)):
                setattr(self, stream + '_cb', kwargs[stream])
                kwargs[stream] = PIPE
        # Wall-clock start time; used to report how long a timeout took.
        self.start = time.time()
        self.timeout = None
        self.nag_timer = None
        self.nag_max = None
        self.shell = kwargs.get('shell', None)
        # Silence pylint on MacOSX
        self.returncode = None
        try:
            super(Popen, self).__init__(args, **kwargs)
        except OSError, e:
            if e.errno == errno.EAGAIN and sys.platform == 'cygwin':
                # Convert fork() emulation failure into a CygwinRebaseError().
                raise CygwinRebaseError(
                    e.errno,
                    args,
                    kwargs.get('cwd'),
                    None,
                    'Visit '
                    'http://code.google.com/p/chromium/wiki/CygwinDllRemappingFailure '
                    'to learn how to fix this error; you need to rebase your cygwin '
                    'dlls')
            # Popen() can throw OSError when cwd or args[0] doesn't exist. Let it go
            # through
            raise
    def _tee_threads(self, input):  # pylint: disable=W0622
        """Does I/O for a process's pipes using threads.

        It's the simplest and slowest implementation. Expect very slow behavior.

        If there is a callback and it doesn't keep up with the calls, the timeout
        effectiveness will be delayed accordingly.
        """
        # Queue of either of <threadname> when done or (<threadname>, data). In
        # theory we would like to limit to ~64kb items to not cause large memory
        # usage when the callback blocks. It is not done because it slows down
        # processing on OSX10.6 by a factor of 2x, making it even slower than
        # Windows! Revisit this decision if it becomes a problem, e.g. crash
        # because of memory exhaustion.
        queue = Queue.Queue()
        done = threading.Event()
        nag = None
        def write_stdin():
            # Feed |input| to the child in 1KB chunks, then close its stdin.
            try:
                stdin_io = cStringIO.StringIO(input)
                while True:
                    data = stdin_io.read(1024)
                    if data:
                        self.stdin.write(data)
                    else:
                        self.stdin.close()
                        break
            finally:
                queue.put('stdin')
        def _queue_pipe_read(pipe, name):
            """Queues characters read from a pipe into a queue."""
            try:
                while True:
                    data = pipe.read(1)
                    if not data:
                        break
                    if nag:
                        nag.event()
                    queue.put((name, data))
            finally:
                queue.put(name)
        def timeout_fn():
            try:
                done.wait(self.timeout)
            finally:
                queue.put('timeout')
        def wait_fn():
            try:
                self.wait()
            finally:
                queue.put('wait')
        # Starts up to 5 threads:
        #   Wait for the process to quit
        #   Read stdout
        #   Read stderr
        #   Write stdin
        #   Timeout
        threads = {
            'wait': threading.Thread(target=wait_fn),
        }
        if self.timeout is not None:
            threads['timeout'] = threading.Thread(target=timeout_fn)
        if self.stdout_cb:
            threads['stdout'] = threading.Thread(
                target=_queue_pipe_read, args=(self.stdout, 'stdout'))
        if self.stderr_cb:
            threads['stderr'] = threading.Thread(
                target=_queue_pipe_read, args=(self.stderr, 'stderr'))
        if input:
            threads['stdin'] = threading.Thread(target=write_stdin)
        elif self.stdin:
            # Pipe but no input, make sure it's closed.
            self.stdin.close()
        for t in threads.itervalues():
            t.start()
        if self.nag_timer:
            def _nag_cb(elapsed):
                logging.warn('  No output for %.0f seconds from command:' % elapsed)
                logging.warn('    %s' % self.cmd_str)
                # After nag_max nags with no output, treat it like a timeout.
                if (self.nag_max and
                        int('%.0f' % (elapsed / self.nag_timer)) >= self.nag_max):
                    queue.put('timeout')
                    done.set()  # Must do this so that timeout thread stops waiting.
            nag = NagTimer(self.nag_timer, _nag_cb)
            nag.start()
        timed_out = False
        try:
            # This thread needs to be optimized for speed.
            while threads:
                item = queue.get()
                if item[0] == 'stdout':
                    self.stdout_cb(item[1])
                elif item[0] == 'stderr':
                    self.stderr_cb(item[1])
                else:
                    # A thread terminated.
                    if item in threads:
                        threads[item].join()
                        del threads[item]
                    if item == 'wait':
                        # Terminate the timeout thread if necessary.
                        done.set()
                    elif item == 'timeout' and not timed_out and self.poll() is None:
                        logging.debug('Timed out after %.0fs: killing' % (
                            time.time() - self.start))
                        self.kill()
                        timed_out = True
        finally:
            # Stop the threads.
            done.set()
            if nag:
                nag.cancel()
            if 'wait' in threads:
                # Accelerate things, otherwise it would hang until the child process is
                # done.
                logging.debug('Killing child because of an exception')
                self.kill()
            # Join threads.
            for thread in threads.itervalues():
                thread.join()
            if timed_out:
                self.returncode = TIMED_OUT
    # pylint: disable=W0221,W0622
    def communicate(self, input=None, timeout=None, nag_timer=None,
                    nag_max=None):
        """Adds timeout and callbacks support.

        Returns (stdout, stderr) like subprocess.Popen().communicate().

        - The process will be killed after |timeout| seconds and returncode set to
          TIMED_OUT.
        - If the subprocess runs for |nag_timer| seconds without producing terminal
          output, print a warning to stderr.
        """
        self.timeout = timeout
        self.nag_timer = nag_timer
        self.nag_max = nag_max
        # Fast path: no special features requested, defer to the stock
        # implementation.
        if (not self.timeout and not self.nag_timer and
                not self.stdout_cb and not self.stderr_cb):
            return super(Popen, self).communicate(input)
        if self.timeout and self.shell:
            raise TypeError(
                'Using timeout and shell simultaneously will cause a process leak '
                'since the shell will be killed instead of the child process.')
        stdout = None
        stderr = None
        # Convert to a lambda to workaround python's deadlock.
        # http://docs.python.org/library/subprocess.html#subprocess.Popen.wait
        # When the pipe fills up, it would deadlock this process.
        if self.stdout and not self.stdout_cb and not self.stdout_is_void:
            stdout = []
            self.stdout_cb = stdout.append
        if self.stderr and not self.stderr_cb and not self.stderr_is_void:
            stderr = []
            self.stderr_cb = stderr.append
        self._tee_threads(input)
        if stdout is not None:
            stdout = ''.join(stdout)
        if stderr is not None:
            stderr = ''.join(stderr)
        return (stdout, stderr)
def communicate(args, timeout=None, nag_timer=None, nag_max=None, **kwargs):
  """Wraps subprocess.Popen().communicate() and add timeout support.

  Returns ((stdout, stderr), returncode).
  - The process will be killed after |timeout| seconds and returncode set to
    TIMED_OUT.
  - If the subprocess runs for |nag_timer| seconds without producing terminal
    output, print a warning to stderr; |nag_max| caps the number of nags.
  - Automatically passes stdin content as input so do not specify stdin=PIPE.
  """
  stdin = kwargs.pop('stdin', None)
  if stdin is not None:
    if isinstance(stdin, basestring):
      # When stdin is passed as an argument, use it as the actual input data
      # and set the Popen() parameter accordingly.
      kwargs['stdin'] = PIPE
    else:
      kwargs['stdin'] = stdin
      stdin = None
  proc = Popen(args, **kwargs)
  # Bug fix: |nag_max| used to be accepted but silently dropped here; forward
  # it so Popen.communicate() can actually enforce the nag limit.
  if stdin:
    return proc.communicate(stdin, timeout, nag_timer, nag_max), proc.returncode
  return proc.communicate(None, timeout, nag_timer, nag_max), proc.returncode
def call(args, **kwargs):
  """Emulates subprocess.call().

  Automatically convert stdout=PIPE or stderr=PIPE to VOID.
  In no case they can be returned since no code path raises
  subprocess2.CalledProcessError.
  """
  # PIPE'd output could never reach the caller here, so silence it instead.
  for stream in ('stdout', 'stderr'):
    if kwargs.get(stream) == PIPE:
      kwargs[stream] = VOID
  return communicate(args, **kwargs)[1]
def check_call_out(args, **kwargs):
  """Improved version of subprocess.check_call().

  Returns (stdout, stderr), unlike subprocess.check_call().
  """
  out, returncode = communicate(args, **kwargs)
  if not returncode:
    return out
  # Non-zero exit: surface the captured output in the exception.
  raise CalledProcessError(
      returncode, args, kwargs.get('cwd'), out[0], out[1])
def check_call(args, **kwargs):
  """Emulate subprocess.check_call()."""
  # check_call_out() raises on failure; its output is intentionally discarded.
  check_call_out(args, **kwargs)
  return 0
def capture(args, **kwargs):
  """Captures stdout of a process call and returns it.

  Returns stdout.
  - Discards returncode.
  - Blocks stdin by default if not specified since no output will be visible.
  """
  kwargs.setdefault('stdin', VOID)
  # Like check_output, deny the caller from using stdout arg.
  (out, _err), _returncode = communicate(args, stdout=PIPE, **kwargs)
  return out
def check_output(args, **kwargs):
  """Emulates subprocess.check_output().

  Captures stdout of a process call and returns stdout only.
  - Throws if return code is not 0.
  - Works even prior to python 2.7.
  - Blocks stdin by default if not specified since no output will be visible.
  - As per doc, "The stdout argument is not allowed as it is used internally."
  """
  kwargs.setdefault('stdin', VOID)
  if 'stdout' in kwargs:
    raise ValueError('stdout argument not allowed, it will be overridden.')
  stdout, _stderr = check_call_out(args, stdout=PIPE, **kwargs)
  return stdout
|
psrdada_to_fits.py | #!/usr/bin/env python3
import sys
import os
import argparse
import logging
from multiprocessing import Process
from time import sleep
import numpy as np
from arts_tools.constants import NTAB, NSB, SB_TABLE
# check psrdada import as psrdada-python is not an explicit requirement in setup.py
try:
import psrdada
except ImportError:
raise ImportError("Cannot import psrdada; Is psrdada-python installed?")
#: Number of polarizations
NPOL = 4
#: Number of channels
NCHAN = 1536
#: Number of samples per PSRDADA page
NSAMP = 12500
def get_psrdada_header(fname):
    """
    Read header of a raw PSRDADA file.

    The file is read in growing chunks until the HDR_SIZE keyword is found,
    then the full header of that size is read and parsed into a dict.

    :param str fname: Path to PSRDADA file
    :return: header (dict, str keys and str values)
    """
    # load a typical amount of bytes from the file and look for the header
    # size keyword
    nbyte = 1
    raw_header = ''
    with open(fname, 'r') as f:
        while True:
            raw_header = raw_header + f.read(nbyte)
            # Parse "KEY value" lines and look for HDR_SIZE. Bug fix: the old
            # code built np.array() from ragged rows (empty/one-field lines),
            # which raises ValueError on modern numpy outside the try block.
            hdr_size = None
            for line in raw_header.split('\n'):
                fields = line.strip().split(maxsplit=1)
                if len(fields) == 2 and fields[0] == 'HDR_SIZE':
                    try:
                        hdr_size = int(fields[1])
                    except ValueError:
                        # malformed value; keep scanning / reading more bytes
                        pass
                    else:
                        break
            if hdr_size is not None:
                break
            if nbyte > 1e6:
                logging.error(f"Key HDR_SIZE not found in first MB of PSRDADA file {fname}")
                sys.exit(1)
            nbyte += 4096
        # load the full header with known size
        f.seek(0)
        header = f.read(hdr_size)
    # convert to dict, skipping empty lines and zero padding at the end
    header = dict([line.strip().split(maxsplit=1) for line in header.split('\n') if line][:-1])
    return header
def read_psrdada_to_buffer(files, header, key, dadafits):
    """
    Read PSRDADA files into a memory buffer

    Streams each file page-by-page (RESOLUTION bytes per page) into the
    PSRDADA ring buffer, then marks end-of-data and disconnects.

    :param list files: Files to read into memory buffer (sorted in-place by name)
    :param dict header: PSRDADA header; the 'HDR_SIZE' and 'RESOLUTION' keys are used here
    :param str key: PSRDADA shared memory key (hexadecimal string)
    :param Process dadafits: Handle to dadafits process
    :raises ChildProcessError: if dadafits dies while data remain to be written
    """
    # convert key to hexadecimal
    hexkey = int(key, 16)
    # connect to the memory buffer
    writer = psrdada.Writer()
    writer.connect(hexkey)
    # write header
    writer.setHeader(header)
    # sort input files
    files.sort()
    # loop over files and read them into the buffer
    for i, fname in enumerate(files):
        with open(fname, 'rb') as f:
            # skip header
            f.seek(int(header['HDR_SIZE']))
            # read data page by page
            while True:
                # check if dadafits is still alive to read the data we are about to write
                if not dadafits.is_alive():
                    # not calling writer.disconnect here, as it hangs forever. Unclear why
                    # raise an error here, as it is caught and logged by the main programme
                    raise ChildProcessError("dadafits no longer running; not writing new data")
                # one page = RESOLUTION bytes of raw uint8 samples
                data = np.fromfile(f, count=int(header['RESOLUTION']), dtype='uint8')
                # if at end of file, break loop
                if len(data) == 0:
                    break
                # write page into buffer
                page = np.asarray(writer.getNextPage())
                page[:] = data[:]
                # mark the buffer as filled
                writer.markFilled()
    # done, mark end of data, then disconnect
    writer.getNextPage()
    writer.markEndOfData()
    writer.disconnect()
def main():
    """Command-line entry point: convert raw IQUV PSRDADA data to PSRFITS.

    Parses arguments, creates a PSRDADA shared-memory ring buffer (dada_db),
    starts the dadafits writer, streams the raw files into the buffer, and
    removes the buffer when done.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
                                     description=f"Convert raw IQUV data to PSRFITS.\n"
                                                 f"The output can either be one file for each "
                                                 f"of the {NTAB} tied-array beams (TABs),\nor one "
                                                 f"file for each user-specified synthesized beam (SB) "
                                                 f"between 0 and {NSB}.")
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--tab', action='store_true', help="Create TABs instead of SBs")
    group.add_argument('--sb', type=int, nargs='*', help="Space-separated list of SBs to create")
    parser.add_argument('--output_dir', required=True, help="Output directory")
    parser.add_argument('--key', default='dada', help="4-digit hexadecimal Key to use for PSRDADA shared "
                                                      "memory buffer (Default: %(default)s)")
    parser.add_argument('--templates', help="Path to folder with FITS templates "
                                            "(Default: use templates provided with package)")
    parser.add_argument('--sbtable', help="Path to SB table "
                                          "(Default: use SB table provided with package)")
    parser.add_argument('--verbose', '-v', action='store_true', help="Increase verbosity")
    parser.add_argument('raw_files', nargs='+', help="Path to input PSRDADA file(s) belonging to one observation. "
                                                     "If multiple files are given, they are sorted by filename "
                                                     "before processing")
    # print help if no arguments are given
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()

    # set log verbosity
    logging_level = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(format='%(levelname)s: %(message)s', level=logging_level, stream=sys.stderr)
    logging.debug(f"Arguments: {args}")

    # verify the input files exist
    for fname in args.raw_files:
        if not os.path.isfile(fname):
            logging.error(f"Input file does not exist: {fname}")
            sys.exit(1)

    # set FITS templates dir and SB table dir
    root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
    if args.templates is None:
        args.templates = os.path.join(root_dir, 'static', 'fits_templates')
        logging.debug(f"Setting FITS templates directory to {args.templates}")
    if args.sbtable is None:
        args.sbtable = os.path.join(root_dir, SB_TABLE)
        logging.debug(f"Setting SB table to {args.sbtable}")

    # set SB command
    if args.tab:
        sb_cmd = ""
    else:
        # Bug fix: the old check was `np.any(args.sb) > NSB`, which compares
        # a bool to NSB and never fires; validate each requested index.
        # Also exit cleanly when neither --tab nor --sb was given (previously
        # this crashed in ','.join(map(str, None))).
        if args.sb is None:
            logging.error("Either --tab or --sb must be specified")
            sys.exit(1)
        if any(sb > NSB or sb < 0 for sb in args.sb):
            logging.error(f"Invalid SB index given, allowed values are 0 to {NSB}")
            sys.exit(1)
        sb_cmd = f"-S {args.sbtable} -s " + ','.join(map(str, args.sb))

    # create output directory
    if os.path.isdir(args.output_dir):
        logging.warning(f"Output directory already exists: {args.output_dir}")
    try:
        os.makedirs(args.output_dir, exist_ok=True)
    except Exception as e:
        logging.error(f"Failed to create output directory: {e}")
        sys.exit(1)

    # get PSRDADA header from first file
    header = get_psrdada_header(args.raw_files[0])

    # create memory buffer (single page, as processing is slower than reading data anyway)
    buffer_size = NTAB * NCHAN * NSAMP * NPOL
    cmd = f"dada_db -a {header['HDR_SIZE']} -b {buffer_size} -n 1 -k {args.key} -w 2>/dev/null"
    logging.debug(f"dada_db command: {cmd}")
    memory_buffer = Process(target=os.system, args=(cmd, ), name='dada_db')
    logging.info(f"Creating PSRDADA memory buffer of size {buffer_size / 1e9} GB")
    memory_buffer.start()
    sleep(1)
    # if the memory buffer is not alive, something failed (probably a key that was already in use)
    if not memory_buffer.is_alive():
        logging.error(f"Failed to set up memory buffer, perhaps PSRDADA key is already in use? "
                      f"Run \"dada_db -d -k {args.key}\" to remove it (But be careful not to remove "
                      f"buffers used by observations when running this script on any of arts001 - arts040)")
        sys.exit(1)

    # run FITS writer
    cmd = f"dadafits -k {args.key} -l /dev/null -t {args.templates} -d {args.output_dir} {sb_cmd}"
    logging.debug(f"dadafits command: {cmd}")
    dadafits = Process(target=os.system, args=(cmd, ), name='dadafits')
    logging.info("Starting dadafits")
    dadafits.start()
    sleep(1)

    # read data into buffer
    try:
        read_psrdada_to_buffer(args.raw_files, header, args.key, dadafits)
    except Exception as e:
        logging.error(f"Exception in writing data to memory buffer: {type(e).__name__}: {e}")
        # remove the running Processes
        dadafits.terminate()
        # sometimes terminating the buffer Process does not actually remove the buffer, so
        # remove it explicitly first
        os.system(f"dada_db -d -k {args.key} >/dev/null 2>&1")
        memory_buffer.terminate()
        sys.exit(1)

    # when done reading data into buffer, wait until dadafits exits
    logging.info("Waiting for dadafits to finish")
    dadafits.join()
    logging.info("dadafits finished")

    # remove the memory buffer
    logging.info("Removing PSRDADA memory buffer")
    # sometimes terminating the buffer Process does not actually remove the buffer, so
    # remove it explicitly first
    os.system(f"dada_db -d -k {args.key} >/dev/null 2>&1")
    memory_buffer.terminate()
    logging.info("Done")
|
Baru2.py | # -*- coding: utf-8 -*-
#Vipro_Bot
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
from bs4 import BeautifulSoup
from threading import Thread
from googletrans import Translator
from gtts import gTTS
import time,random,sys,json,codecs,threading,glob,urllib,urllib2,urllib3,re,ast,os,subprocess,requests,tempfile
# Log in the main "self" account (acil) plus two assistant bot accounts.
# SECURITY NOTE(review): auth tokens are hard-coded below; anyone with this
# file can control these LINE accounts. They should be loaded from a config
# file or environment variables instead.
acil = LINETCR.LINE()
acil.login(token="ExUt6WtkCC1DSR8zxYNa.FrRaFsa2S2DfEdOkgzkYUG.5qp2/W898/B5U8MJXmiFXhe6cBga5c/jocEH/ZAGn0o=")
acil.loginResult()
pb1 = LINETCR.LINE()
pb1.login(token="Ex2ATUwiY1KiindBtdj5.VxTn0rMR1ZQZ31qVTyQkzq.OCpCNK+PN8Em8TUBS+4mz8Uk6NhwpKnpOPhH7Fs+6ew=")
pb1.loginResult()
pb2 = LINETCR.LINE()
pb2.login(token="Ex09yBlEHiyffrhsUSj7.oISUxqs9khqTd3kgBWDsDW.j7jkXcF3yyTWl7Pv3/WJLuoJIW5Wzk0+DoGhoMk4an4=")
pb2.loginResult()
print "Avrilia-Login Success\n\n=====[Sukses Login]====="
# Python 2 only: re-expose sys.setdefaultencoding (hidden by site.py) and
# force UTF-8 as the default string encoding for the non-ASCII literals below.
reload(sys)
sys.setdefaultencoding('utf-8')
pisMessage ="""
╔═──────┅═ইई═┅──────
║꧁【☆ᏴᏞᎪᏟK ᎪNᏩᎬᏞᏚ☆】꧂
║ ᎪᏙᎡᏆᏞᏆᎪ ᏞᎬᏙᎪNᎪ
╠═──────┅═ইई═┅──────
╠ ͜͡✪͜͡✪➢Me
╠ ͜͡✪͜͡✪➢Add
╠ ͜͡✪͜͡✪➢Cn "text"
╠ ͜͡✪͜͡✪➢Clockname "text"
╠ ͜͡✪͜͡✪➢TL:"text"
╠ ͜͡✪͜͡✪➢Ban:"mid"
╠ ͜͡✪͜͡✪➢Unban:"mid"
╠ ͜͡✪͜͡✪➢Bl:on
╠ ͜͡✪͜͡✪➢Unbl:on
╠ ͜͡✪͜͡✪➢Mcheck
╠ ͜͡✪͜͡✪➢Mybio:
╠ ͜͡✪͜͡✪➢Mybots
╠ ͜͡✪͜͡✪➢Mymid
╠ ͜͡✪͜͡✪➢Mygroups
╠ ͜͡✪͜͡✪➢Message set:"text"
╠ ͜͡✪͜͡✪➢Message confirm
╠ ͜͡✪͜͡✪➢Msg add-"text"
╠ ͜͡✪͜͡✪➢Com set:"text"
╠ ͜͡✪͜͡✪➢Comment
╠ ͜͡✪͜͡✪➢Comban/del/cek
╠ ͜͡✪͜͡✪➢Help set:"text"
╠ ͜͡✪͜͡✪➢Change
╠ ͜͡✪͜͡✪➢Gn "text"
╠ ͜͡✪͜͡✪➢Clink/Curl
╠ ͜͡✪͜͡✪➢Kick:"mid"
╠ ͜͡✪͜͡✪➢Invite:"mid"
╠ ͜͡✪͜͡✪➢Creator
╠ ͜͡✪͜͡✪➢Contact
╠ ͜͡✪͜͡✪➢Cancel/Bcancel
╠ ͜͡✪͜͡✪➢Gcancel:"jumlah"
╠ ͜͡✪͜͡✪➢Gcancelall
╠ ͜͡✪͜͡✪➢Ginfo
╠ ͜͡✪͜͡✪➢Prank in (Masukin bot)
╠ ͜͡✪͜͡✪➢Prank out (Keluarin bot)
╠ ͜͡✪͜͡✪➢Setlastpoint
╠ ͜͡✪͜͡✪➢Cctv
╠ ͜͡✪͜͡✪➢Glink
╠ ͜͡✪͜͡✪➢Spam on/of "jumlah/text"
╠ ͜͡✪͜͡✪➢Gurl
╠ ͜͡✪͜͡✪➢Sc:"mid"
╠ ͜͡✪͜͡✪➢Blocklist
╠ ͜͡✪͜͡✪➢Banlist
╠ ͜͡✪͜͡✪➢Update
╠ ͜͡✪͜͡✪➢Creator
╠ ͜͡✪͜͡✪➢Sc "@"
╠ ͜͡✪͜͡✪➢Fuck "@"
╠ ͜͡✪͜͡✪➢Sikat "@"
╠ ͜͡✪͜͡✪➢Spam "@"
╠ ͜͡✪͜͡✪➢Ban "@"
╠ ͜͡✪͜͡✪➢Unban "@"
╠ ͜͡✪͜͡✪➢Copy "@"
╠ ͜͡✪͜͡✪➢Nuke
╠ ͜͡✪͜͡✪➢Backup
╠ ͜͡✪͜͡✪➢Tag
╠ ͜͡✪͜͡✪➢Bc "text"
╠ ͜͡✪͜͡✪➢Say "text"
╠ ͜͡✪͜͡✪➢Kick@mbl "kick blacklist"
╠ ͜͡✪͜͡✪➢Ping
╠ ͜͡✪͜͡✪➢Sett
╚═──────┅═ইई═┅──────
ASSISTAN
╔═──────┅═ইई═┅──────
╠ ͜͡✪͜͡✪➢All:
╠ ͜͡✪͜͡✪➢Allbio:
╠ ͜͡✪͜͡✪➢All mid
╠ ͜͡✪͜͡✪➢Respon
╠ ͜͡✪͜͡✪➢B:out
╠ ͜͡✪͜͡✪➢B1-2 mid
╠ ͜͡✪͜͡✪➢B1-2name "text"
╠ ͜͡✪͜͡✪➢B1-2
╠ ͜͡✪͜͡✪➢B1-2 gift
╠ ͜͡✪͜͡✪➢B come
╠ ͜͡✪͜͡✪➢B1-2 in
╠ ͜͡✪͜͡✪➢B1-2 bye
╚═──────┅═ইई═┅──────
SETTINGS
╔═──────┅═ইई═┅──────
╠ ͜͡✪͜͡✪➢Contact:on/off
╠ ͜͡✪͜͡✪➢Add:on/off
╠ ͜͡✪͜͡✪➢Join:on/off
╠ ͜͡✪͜͡✪➢Leave:on/off
╠ ͜͡✪͜͡✪➢Share:on/off
╠ ͜͡✪͜͡✪➢Com:on/off
╠ ͜͡✪͜͡✪➢Clock:on/off
╚═──────┅═ইई═┅──────
PROTECT
╔═──────┅═ইई═┅──────
╠ ͜͡✪͜͡✪➢Pro:on/off
╠ ͜͡✪͜͡✪➢Prolink:on/off
╠ ͜͡✪͜͡✪➢Proinvite:on/off
╠ ͜͡✪͜͡✪➢Procancel:on/off
╠═──────┅═ইई═┅──────
║ ᎪᏙᎡᏆᏞᏆᎪ ᏞᎬᏙᎪNᎪ
║http://line.me/ti/p/~avrilia_ganteng
╚═──────┅═ইई═┅──────
"""
# Canned self-identification text.
helo="====I AM SELF Avrilia"
# All logged-in clients; commands below pick from this list to reply.
KAC=[acil,pb1,pb2]
# LINE user ids (mids) of the main account and the two assistant bots.
mid = acil.getProfile().mid
pb1mid = pb1.getProfile().mid
pb2mid = pb2.getProfile().mid
Bots=[mid,pb1mid,pb2mid]
# mid of the hard-coded bot admin/owner.
admsa = "u7d1ac07d2036b36745783a0a1992b2ba"
# Global mutable state shared by all handlers: feature toggles, canned
# texts and ban lists. Mutated freely from bot() at runtime.
wait = {
    'contact':False,  # when True, echo mid/profile info for received contacts
    'autoJoin':True,  # auto-accept group invitations
    'autoCancel':{"on":True,"members":1},  # reject invites to groups at/below this member count
    'leaveRoom':True,  # auto-leave chat rooms
    'timeline':False,  # echo timeline post URLs
    'autoAdd':True,  # auto-add users who add this account
    'message':"тнαикѕ fσя α∂∂ мє...\nBY:\n──────┅═ইई═┅──────\n꧁【☆ᏴᏞᎪᏟK ᎪNᏩᎬᏞᏚ☆】꧂\nᎪᏙᎡᏆᏞᏆᎪ ᏞᎬᏙᎪNᎪ\nhttp://line.me/ti/p/~avrilia_quester\n──────┅═ইई═┅──────""",
    "lang":"JP",
    "comment1":"BY:\n──────┅═ইई═┅──────\n꧁【☆ᏴᏞᎪᏟK ᎪNᏩᎬᏞᏚ☆】꧂\nᎪᏙᎡᏆᏞᏆᎪ ᏞᎬᏙᎪNᎪ\nhttp://line.me/ti/p/~avrilia_quester\n──────┅═ইई═┅──────",
    "comment":"Thanks For Add Me",
    "commentOn":False,  # auto-comment on timeline posts
    "commentBlack":{},  # mids excluded from auto-comment
    "wblack":False,  # next received contact -> add to comment blacklist
    "dblack":False,  # next received contact -> remove from comment blacklist
    "clock":True,
    "cName":"──┅═͜͡✥A⃟⃟V⃟⃟R⃟⃟ I⃟⃟L⃟⃟❂➤",
    "cNames":"──┅═͜͡✥A⃟⃟V⃟⃟R⃟⃟ I⃟⃟L⃟⃟❂➤",
    "blacklist":{},  # banned mids (keys only; values always True)
    "wblacklist":False,  # next received contact -> ban
    "dblacklist":False,  # next received contact -> unban
    "protect":False,
    "cancelprotect":False,
    "inviteprotect":False,
    "linkprotect":False,
    "copy":True,
    "copy2":"target",
    "target":{}
}
# Read-point ("cctv") tracking state, keyed by chat id.
wait2 = {
    'readPoint':{},
    'readMember':{},
    'setTime':{},
    'ROM':{}
}
# Additional copy/mimic state dicts; toggled by commands in bot().
mimic = {
    "copy":False,
    "copy2":False,
    "status":False,
    "target":{}
}
wait3 = {
    "copy":False,
    "copy2":"target",
    "target":{}
}
res = {
    'num':{},
    'us':{},
    'au':{},
}
setTime = {}
# Alias: setTime and wait2['setTime'] refer to the same dict object.
setTime = wait2['setTime']
# Snapshot each account's profile.
# NOTE(review): `contact` and `backup` are single globals overwritten on each
# of the three passes below, so only the pb2 snapshot survives. Also note
# that `backup` copies attributes from a second getProfile() of the same
# account, which is effectively a no-op.
contact = acil.getProfile()
backup = acil.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = pb1.getProfile()
backup = pb1.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = pb2.getProfile()
backup = pb2.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
# Display names of the three accounts, used in replies.
responsename = acil.getProfile().displayName
responsename2 = pb1.getProfile().displayName
responsename3 = pb2.getProfile().displayName
def upload_tempimage(client):
    '''
    Upload a picture of a kitten. We don't ship one, so get creative!
    '''
    # NOTE(review): relies on globals `album` and `image_path` that are not
    # defined anywhere in this file -- calling this raises NameError unless
    # another module injects them. Confirm before use.
    config = {
        'album': album,
        'name': 'bot auto upload',
        'title': 'bot auto upload',
        'description': 'bot auto upload'
    }
    print("Uploading image... ")
    # anon=False: upload to the authenticated account, not anonymously
    image = client.upload_from_path(image_path, config=config, anon=False)
    print("Done")
    print()
def restart_program():
    """Re-exec the current script in place, replacing this process image."""
    interpreter = sys.executable
    os.execl(interpreter, interpreter, *sys.argv)
def yt(query):
    """Search YouTube for `query` and return a list of short youtu.be links.

    Playlist links and non-watch links are skipped. An empty query falls
    back to the default search term.
    """
    with requests.session() as s:
        results = []
        if query == "":
            query = "S1B tanysyz"
        s.headers['user-agent'] = 'Mozilla/5.0'
        r = s.get('http://www.youtube.com/results',
                  params={'search_query': query})
        soup = BeautifulSoup(r.content, 'html5lib')
        for link in soup.select('.yt-lockup-title > a[title]'):
            href = link['href']
            if '&list=' in href or 'watch?v' not in href:
                continue
            results.append('youtu.be' + href.replace('watch?v=', ''))
        return results
def sendMessage(to, text, contentMetadata={}, contentType=0):
    # NOTE(review): this helper builds a Message and bumps the per-recipient
    # counter in `messageReq`, but never sends or returns the message; it is
    # also shadowed by later defs of the same name, so it is dead code.
    # Beware the mutable default argument (contentMetadata={}).
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def mention(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag"
try:
nadya.sendMessage(msg)
except Exception as error:
print error
def sendMessage(self, messageObject):
    # NOTE(review): method-style helper (takes `self`) pasted at module
    # level; shadows the module-level sendMessage defined above and is in
    # turn shadowed by later redefinitions.
    return self.Talk.client.sendMessage(0,messageObject)
def sendText(self, Tomid, text):
    """Build a plain-text Message to `Tomid` and send it via the Talk client."""
    message = Message()
    message.to = Tomid
    message.text = text
    return self.Talk.client.sendMessage(0, message)
def sendImage(self, to_, path):
    """Register an image message to `to_` and upload the file at `path`.

    :raises Exception: if the upload endpoint does not answer HTTP 201.
    :return: True on success
    """
    M = Message(to=to_, contentType=1)
    M.contentMetadata = None
    M.contentPreview = None
    M_id = self._client.sendMessage(M).id
    # Bug fix: the file used to be opened twice (once for the upload, once
    # just to compute its length) and neither handle was ever closed,
    # leaking file descriptors. Use os.path.getsize() for the size and
    # close the upload handle deterministically.
    params = {
        'name': 'media',
        'oid': M_id,
        'size': os.path.getsize(path),
        'type': 'image',
        'ver': '1.0',
    }
    data = {
        'params': json.dumps(params)
    }
    fh = open(path, 'rb')
    try:
        r = self._client.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files={'file': fh})
    finally:
        fh.close()
    if r.status_code != 201:
        raise Exception('Upload image failure.')
    return True
def sendImageWithURL(self, to_, url):
    """Download an image from `url` to a temp file and send it with sendImage.

    :raises Exception: if the download does not return HTTP 200.
    """
    # Bug fixes: `randint` and `shutil` were referenced but never imported
    # (NameError at runtime), and the file was opened in text mode 'w',
    # which corrupts binary image data. Use random.randint (random is
    # imported at the top of this file) and write raw bytes directly.
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        with open(path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)
    else:
        raise Exception('Download image failure.')
    self.sendImage(to_, path)
def post_content(self, urls, data=None, files=None):
    # Thin wrapper: POST to `urls` with this client's stored session and
    # auth headers; returns the requests Response.
    return self._session.post(urls, headers=self._headers, data=data, files=files)
def sendMessage(to, text, contentMetadata={}, contentType=0):
    # NOTE(review): verbatim duplicate of the sendMessage defined earlier in
    # this file; it shadows the previous definitions and is itself shadowed
    # by the next one. Builds a Message but never sends or returns it.
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def sendMessage(to, text, contentMetadata={}, contentType=0):
    # NOTE(review): third verbatim duplicate; this is the binding that
    # survives at module level. It still never sends or returns the built
    # Message, so the module-level sendMessage is effectively a no-op
    # counter update on `messageReq`.
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def NOTIFIED_READ_MESSAGE(op):
print op
try:
if op.param1 in wait2['readPoint']:
Name = nadya.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name + datetime.now().strftime(' [%d - %H:%M:%S]')
wait2['ROM'][op.param1][op.param2] = "・" + Name + " ツ"
else:
pass
except:
pass
def RECEIVE_MESSAGE(op):
    # Read-point bookkeeping: when a tracked chat receives a new plain-text
    # message, remove the sender from the "ROM" map for that chat.
    msg = op.message
    try:
        if msg.contentType == 0:  # plain text only
            try:
                if msg.to in wait2['readPoint']:
                    if msg.from_ in wait2["ROM"][msg.to]:
                        del wait2["ROM"][msg.to][msg.from_]
                    else:
                        pass
            except:
                # chat not tracked / keys missing: ignore
                pass
        else:
            pass
    except KeyboardInterrupt:
        sys.exit(0)
    except Exception as error:
        print error
        print ("\n\nRECEIVE_MESSAGE\n\n")
    return
def bot(op):
try:
if op.type == 0:
return
if op.type == 13:
if mid in op.param3:
G = acil.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
acil.rejectGroupInvitation(op.param1)
else:
acil.acceptGroupInvitation(op.param1)
else:
acil.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
acil.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace(" ",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
acil.cancelGroupInvitation(op.param1, matched_list)
if op.type == 19:
if mid in op.param3:
wait["blacklist"][op.param2] = True
if op.type == 22:
if wait["leaveRoom"] == True:
acil.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
acil.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == "u7d1ac07d2036b36745783a0a1992b2ba":
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
acil.acceptGroupInvitationByTicket(list_[1],list_[2])
G = acil.getGroup(list_[1])
G.preventJoinByTicket = True
acil.updateGroup(G)
except:
acil.sendText(msg.to,"Suksess")
if msg.toType == 1:
if wait["leaveRoom"] == True:
acil.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
acil.like(url[25:58], url[66:], likeType=1001)
pb1.like(url[25:58], url[66:], likeType=1001)
pb2.like(url[25:58], url[66:], likeType=1001)
acil.comment(url[25:58], url[66:], wait["comment1"])
pb1.comment(url[25:58], url[66:], wait["comment1"])
pb2.comment(url[25:58], url[66:], wait["comment1"])
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
acil.sendText(msg.to,"sudah masuk daftar hitam👈")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
acil.sendText(msg.to,"Itu tidak berkomentar👈")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
acil.sendText(msg.to,"Done")
wait["dblack"] = False
else:
wait["dblack"] = False
acil.sendText(msg.to,"Tidak ada dalam daftar hitam👈")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
acil.sendText(msg.to,"sudah masuk daftar hitam")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
acil.sendText(msg.to,"Done👈")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
acil.sendText(msg.to,"Done👈")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
acil.sendText(msg.to,"Done👈")
elif wait["contact"] == True:
msg.contentType = 0
acil.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = acil.getContact(msg.contentMetadata["mid"])
try:
cu = acil.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
acil.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = acil.getContact(msg.contentMetadata["mid"])
try:
cu = acil.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
acil.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "menempatkan URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
acil.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text.lower() == 'Pis':
if wait["lang"] == "JP":
acil.sendText(msg.to,pisMessage)
else:
acil.sendText(msg.to,helpMessage)
elif ("Gn:" in msg.text):
if msg.toType == 2:
group = acil.getGroup(msg.to)
group.name = msg.text.replace("Gn:","")
pb1.updateGroup(group)
else:
acil.sendText(msg.to,"Hal ini tidak dapat digunakan di luar kelompok👈")
elif ("Gn " in msg.text):
if msg.toType == 2:
group = acil.getGroup(msg.to)
group.name = msg.text.replace("Gn ","")
acil.updateGroup(group)
else:
acil.sendText(msg.to,"Can not be used for groups other than")
elif "Kick:" in msg.text:
midd = msg.text.replace("Kick:","")
acil.kickoutFromGroup(msg.to,[midd])
elif "Invite:" in msg.text:
midd = msg.text.replace("Invite:","")
acil.findAndAddContactsByMid(midd)
acil.inviteIntoGroup(msg.to,[midd])
elif "Me" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
acil.sendText(msg.to,"─────┅═•••͜͡❍ইई❍͜͡•••═┅─────")
acil.sendMessage(msg)
acil.sendText(msg.to,"─────┅═•••͜͡❍ইई❍͜͡•••═┅─────")
elif "Mybots" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
msg.contentType = 13
acil.sendText(msg.to,"─────┅═•••͜͡❍ইई❍͜͡•••═┅─────")
acil.sendMessage(msg)
msg.contentMetadata = {'mid': pb1mid}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': pb2mid}
acil.sendMessage(msg)
acil.sendText(msg.to,"─────┅═•••͜͡❍ইई❍͜͡•••═┅─────")
msg.contentType = 13
elif "B1" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': pb1mid}
pb1.sendMessage(msg)
elif "B2" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': pb2mid}
pb2.sendMessage(msg)
elif "Creator" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': 'u7d1ac07d2036b36745783a0a1992b2ba'}
acil.sendMessage(msg)
elif msg.text in ["Bot1 Gift","B1 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '2'}
msg.text = None
pb1.sendMessage(msg)
elif msg.text in ["Gift","i gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '3'}
msg.text = None
acil.sendMessage(msg)
elif msg.text in ["Bot2 Gift","B2 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '3'}
msg.text = None
pb2.sendMessage(msg)
elif msg.text in ["B Cancel","Cancel dong","Bcancel"]:
if msg.toType == 2:
group = pb1.getGroup(msg.to)
if group.invitee is not None:
gInviMids = [contact.mid for contact in group.invitee]
pb1.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
acil.sendText(msg.to,"No invites👈")
else:
acil.sendText(msg.to,"Invite people inside not👈")
else:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Tidak ada undangan")
else:
acil.sendText(msg.to,"invitan tidak ada")
elif msg.text in ["Cancel","cancel"]:
if msg.toType == 2:
group = acil.getGroup(msg.to)
if group.invitee is not None:
gInviMids = [contact.mid for contact in group.invitee]
acil.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
acil.sendText(msg.to,"No invites👈")
else:
acil.sendText(msg.to,"Invite people inside not👈")
else:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Tidak ada undangan👈")
else:
acil.sendText(msg.to,"invitan tidak ada")
#elif "gurl" == msg.text:
#print acil.getGroup(msg.to)
##acil.sendMessage(msg)
elif msg.text in ["Clink"]:
if msg.toType == 2:
group = acil.getGroup(msg.to)
group.preventJoinByTicket = False
acil.updateGroup(group)
if wait["lang"] == "JP":
acil.sendText(msg.to,"URL open ô€¨ô€„Œ")
else:
acil.sendText(msg.to,"URL open ô€¨ô€„Œ")
else:
if wait["lang"] == "JP":
acil.sendText(msg.to,"It can not be used outside the group ô€œô€„‰👈")
else:
acil.sendText(msg.to,"Can not be used for groups other than ô€œô€„‰")
elif msg.text in ["Curl"]:
if msg.toType == 2:
group = acil.getGroup(msg.to)
group.preventJoinByTicket = True
acil.updateGroup(group)
if wait["lang"] == "JP":
acil.sendText(msg.to,"URL close ô€¨👈")
else:
acil.sendText(msg.to,"URL close ô€¨👈")
else:
if wait["lang"] == "JP":
acil.sendText(msg.to,"It can not be used outside the group 👈")
else:
acil.sendText(msg.to,"Can not be used for groups other than ô€œ")
elif "Ginfo" == msg.text:
if msg.toType == 2:
# if msg.from_ in admin:
ginfo = acil.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
QR = "Close"
else:
QR = "Open"
random.choice(KAC).sendText(msg.to,"[Group Name]\n" + "[•]" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + "[•]" + gCreator + "\n\n[Group Status]\n" + "[•]Status QR =>" + QR + "\n\n[Group Picture]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nMembers:" + str(len(ginfo.members)) + "\nPending:" + sinvitee)
else:
random.choice(KAC).sendText(msg.to,"[Group Name]\n" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\n[Group Status]\nGroup Picture:\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
# else:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Can not be used outside the group")
else:
acil.sendText(msg.to,"Not for use less than group")
elif "Contact" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.to}
acil.sendMessage(msg)
elif "Mymid" == msg.text:
acil.sendText(msg.to,mid)
elif "B1 mid" == msg.text:
pb1.sendText(msg.to,pb1mid)
elif "B2 mid" == msg.text:
pb2.sendText(msg.to,pb2mid)
elif "All mid" == msg.text:
pb1.sendText(msg.to,pb1mid)
pb2.sendText(msg.to,pb2mid)
elif "TL:" in msg.text:
tl_text = msg.text.replace("TL:","")
acil.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+acil.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif "All:" in msg.text:
string = msg.text.replace("All:","")
if len(string.decode('utf-8')) <= 20:
profile = pb1.getProfile()
profile.displayName = string
pb1.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = pb2.getProfile()
profile.displayName = string
pb2.updateProfile(profile)
acil.sendText(msg.to,"semua nama telah di update menjadi\n👉 " + string + "👈")
elif "Allbio:" in msg.text:
string = msg.text.replace("Allbio:","")
if len(string.decode('utf-8')) <= 500:
profile = pb1.getProfile()
profile.statusMessage = string
pb1.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = pb2.getProfile()
profile.statusMessage = string
pb2.updateProfile(profile)
elif "Cn " in msg.text:
string = msg.text.replace("Cn ","")
if len(string.decode('utf-8')) <= 20:
profile = acil.getProfile()
profile.displayName = string
acil.updateProfile(profile)
acil.sendText(msg.to,"Update Names👉 " + string + "👈")
#---------------------------------------------------------
elif "B1name " in msg.text:
string = msg.text.replace("B1name ","")
if len(string.decode('utf-8')) <= 20:
profile = pb1.getProfile()
profile.displayName = string
pb1.updateProfile(profile)
pb1.sendText(msg.to,"Update Names👉" + string + "👈")
#--------------------------------------------------------
elif "B2name " in msg.text:
string = msg.text.replace("B2name ","")
if len(string.decode('utf-8')) <= 20:
profile = pb2.getProfile()
profile.displayName = string
pb2.updateProfile(profile)
pb2.sendText(msg.to,"Update Names👉" + string + "⇇⇇👈")
#--------------------------------------------------------
elif "Mybio:" in msg.text:
string = msg.text.replace("Mybio:","")
if len(string.decode('utf-8')) <= 500:
profile = acil.getProfile()
profile.statusMessage = string
acil.updateProfile(profile)
acil.sendText(msg.to,"Update Bio👉" + string + "⇇⇇👈")
#--------------------------------------------------------
elif "Sc:" in msg.text:
mmid = msg.text.replace("Sc:","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
acil.sendMessage(msg)
elif msg.text.lower() == 'contact:on':
if wait["contact"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Sudah On")
else:
acil.sendText(msg.to,"It is already open")
else:
wait["contact"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"already open 👈")
else:
acil.sendText(msg.to,"It is already open ")
elif msg.text.lower() == 'contact:off':
if wait["contact"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"sudah off ô€œô€„‰👈")
else:
acil.sendText(msg.to,"It is already off ô€œô€„‰👈")
else:
wait["contact"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"off ô€œô€„‰already")
else:
acil.sendText(msg.to,"already Close ô€œô€„‰👈")
elif msg.text in ["Pro:on"]:
if wait["protect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Protection Enable 👈")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ô€¨👈")
else:
wait["protect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Protection Enable")
else:
acil.sendText(msg.to,"It is already On ô€¨")
elif msg.text in ['Prolink:on']:
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Link Protection Enable 👈")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ô€¨👈")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Link Protect Enable��")
else:
acil.sendText(msg.to,"It is already On ô€¨")
elif msg.text in ['Proinvite:on']:
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Invite Protect Enable 👈")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ô€¨👈")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Invite Protect Enable")
else:
acil.sendText(msg.to,"It is already On ô€¨")
elif msg.text in ['Procancel:on']:
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Cancel Protection Enable 👈")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ô€¨👈")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"already ON")
else:
acil.sendText(msg.to,"It is already On ô€¨")
elif msg.text.lower() == 'join:on':
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Ini sudah off 👈")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka ô€¨👈")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"already ON")
else:
acil.sendText(msg.to,"It is already On ô€¨")
elif msg.text.lower() == 'blocklist':
blockedlist = acil.getBlockedContactIds()
acil.sendText(msg.to, "Please wait...")
kontak = acil.getContacts(blockedlist)
num=1
msgs="User Blocked List\n"
for ids in kontak:
msgs+="\n%i. %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n\nTotal %i blocked user(s)" % len(kontak)
acil.sendText(msg.to, msgs)
elif msg.text.lower() == 'join:off':
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Auto Join Already Off")
else:
acil.sendText(msg.to,"Auto Join set off")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close")
else:
acil.sendText(msg.to,"It is already open ô€œ👈")
elif msg.text in ["Pro:off"]:
if wait["protect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Protection Disable ô€œ👈")
else:
acil.sendText(msg.to,"sudah dimatikan ô€œô€„‰👈")
else:
wait["protect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close")
else:
acil.sendText(msg.to,"It is already open ô€œ👈")
elif msg.text in ["Prolink:off"]:
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Link Protection Disable ô€œ👈")
else:
acil.sendText(msg.to,"sudah dimatikan ô€œô€„‰👈")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close")
else:
acil.sendText(msg.to,"It is already open ô€œ👈")
elif msg.text in ["Proinvite:off"]:
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Invite Protection Disable ô€œ👈")
else:
acil.sendText(msg.to,"sudah dimatikan ô€œô€„‰👈")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close")
else:
acil.sendText(msg.to,"It is already open ô€œ👈")
elif msg.text in ["Procancel:off"]:
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Cancel Protection Disable ô€œ👈")
else:
acil.sendText(msg.to,"sudah dimatikan ô€œô€„‰👈")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"already close")
else:
acil.sendText(msg.to,"It is already open ô€œ👈")
elif "Gcancel:" in msg.text:
try:
strnum = msg.text.replace("Group cancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"Itu off undangan ditolak👈\nSilakan kirim dengan menentukan jumlah orang ketika Anda menghidupkan👈")
else:
acil.sendText(msg.to,"Off undangan ditolak👈Sebutkan jumlah terbuka ketika Anda ingin mengirim")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,strnum + "Kelompok berikut yang diundang akan ditolak secara otomatis👈")
else:
acil.sendText(msg.to,strnum + "The team declined to create the following automatic invitation")
except:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Nilai tidak benar👈")
else:
acil.sendText(msg.to,"Weird value🛡")
elif msg.text in ["Leave:on"]:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"on👈")
else:
acil.sendText(msg.to,"Sudah terbuka ")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Done👈")
else:
acil.sendText(msg.to,"Is already open👈")
elif msg.text in ["Leave:off"]:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"on👈")
else:
acil.sendText(msg.to,"Sudah off👈")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"Done👈")
else:
acil.sendText(msg.to,"Is already close👈")
elif msg.text in ["Share:on"]:
if wait["timeline"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Done ")
else:
acil.sendText(msg.to,"Hal ini sudah terbuka👈")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"on👈")
else:
acil.sendText(msg.to,"on👈")
elif msg.text in ["Share:off"]:
if wait["timeline"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Done👈")
else:
acil.sendText(msg.to,"It is already turned off 👈")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"Off👈")
else:
acil.sendText(msg.to,"Off👈")
elif msg.text.lower() == 'set':
md = ""
if wait["contact"] == True: md+=" Contact:on \n"
else: md+=" Contact:off\n"
if wait["autoJoin"] == True: md+=" Auto Join:on \n"
else: md +=" Auto Join:off\n"
if wait["autoCancel"]["on"] == True:md+=" Auto cancel:" + str(wait["autoCancel"]["members"]) + "\n"
else: md+= " Group cancel:off \n"
if wait["leaveRoom"] == True: md+=" Auto leave:on \n"
else: md+=" Auto leave:off \n"
if wait["timeline"] == True: md+=" Share:on \n"
else:md+=" Share:off \n"
if wait["autoAdd"] == True: md+=" Auto add:on \n"
else:md+=" Auto add:off ��\n"
if wait["commentOn"] == True: md+=" Auto komentar:on \n"
else:md+=" Auto komentar:off \n"
if wait["protect"] == True: md+=" Protect:on 🔓\n"
else:md+=" Protect:off 🔒\n"
if wait["linkprotect"] == True: md+="Link Protect:on 🔓\n"
else:md+=" Link Protect:off🔒\n"
if wait["inviteprotect"] == True: md+="Invitation Protect:on🔓\n"
else:md+=" Invitation Protect:off🔒\n"
if wait["cancelprotect"] == True: md+" CancelProtect:on 🔓\n"
else:md+=" Cancel Protect:off 🔒\n"
acil.sendText(msg.to,md)
acil.sendText(msg.to,"──────┅═ইई═┅──────")
msg.contentType = 13
msg.contentMetadata = {'mid': 'ubd7b4dd119abd73ab3df542fb58a8a65'}
acil.sendMessage(msg)
acil.sendText(msg.to,"──────┅═ইई═┅──────")
elif "Gowner" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ginfo.creator.mid}
acil.sendText(msg.to,"[Nama]\n" + str(ginfo.name) + "\n[Group Id]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\nAnggota:" + str(len(ginfo.members)) + "\nInvitation:" + sinvitee + "")
acil.sendMessage(msg)
elif cms(msg.text,["Add"]):
msg.contentType = 13
msg.contentMetadata = {'mid': "u978f7e8d02351b3d1d4a3973000c2080"}
acil.sendText(msg.to,"─────┅═•••͜͡❍ইई❍͜͡•••═┅─────")
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u5818cb4404411c2e2e6e6937d172cca8"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u17a086ccff618e754588a1108335867f"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "ua028b2a4f96dff4b4a52ae25223e5073"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "udfaf52176415b46cb445ae2757ec85f3"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u29ad304bbe5e9025b8431e65832a4cfa"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u565281632a958bb2795f6434f6872e3b"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u30ceda3992172f0861558a2b7a6ef5ab"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u224e7f2fd36e3565b0756319936450c5"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u604ca77dec7ab8d450ae762d5d08cd93"}
acil.sendMessage(msg)
#msg.contentType = 13
#msg.contentMetadata = {'mid': "u2ca90ea24d7ba639272925d715d8a99c"}
#acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u2552e86aab1b1426749dd0439b0f8c7f"}
acil.sendMessage(msg)
#msg.contentType = 13
#msg.contentMetadata = {'mid': "uc67a847198ce188b412a058d86f10367"}
#acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u190afbb99dd1c28cc57642627f2aa1a2"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u12322ff2ca2b48474389f3d91b9ff385"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u2beb70887d61c0e3abf3ac327b7b21d9"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "ub08e59948aaf244041d99091254e743c"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u2c83fe9f836a2f74f7f9316e0c184f9d"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u02c62ba90a4f9ff95950d1a5ee9f2154"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u47b8e60143e0e1c6fdebe67e6a355ad2"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u70489ca3e0d013e866a556665ee9d99b"}
acil.sendMessage(msg)
acil.sendText(msg.to,"─────┅═•••͜͡❍ইई❍͜͡•••═┅─────")
elif "Set album:" in msg.text:
gid = msg.text.replace("Set album:","")
album = acil.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Tidak ada album👈")
else:
acil.sendText(msg.to,"Dalam album tidak👈")
else:
if wait["lang"] == "JP":
mg = "Berikut ini adalah album dari target"
else:
mg = "Berikut ini adalah subjek dari album"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "æžš\n"
else:
mg += str(y["title"]) + ":0 Pieces\n"
acil.sendText(msg.to,mg)
elif "Album" in msg.text:
gid = msg.text.replace("Album","")
album = acil.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Tidak ada album")
else:
acil.sendText(msg.to,"Dalam album tidak")
else:
if wait["lang"] == "JP":
mg = "Berikut ini adalah album dari target"
else:
mg = "Berikut ini adalah subjek dari album"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "\n"
else:
mg += str(y["title"]) + ":0 pieces\n"
elif "Hapus album " in msg.text:
gid = msg.text.replace("Hapus album ","")
albums = acil.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
acil.deleteAlbum(gid,album["gid"])
i += 1
if wait["lang"] == "JP":
acil.sendText(msg.to,str(i) + "Soal album telah dihapus")
else:
acil.sendText(msg.to,str(i) + "Hapus kesulitan album🛡")
elif msg.text.lower() == 'group id':
gid = acil.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (acil.getGroup(i).name,i)
acil.sendText(msg.to,h)
elif msg.text.lower() == '@out':
gid = pb1.getGroupIdsJoined()
gid = pb2.getGroupIdsJoined()
#gid = acil.getGroupIdsJoined()
gid = ki4.getGroupIdsJoined()
gid = ki5.getGroupIdsJoined()
gid = ki6.getGroupIdsJoined()
for i in gid:
pb1.leaveGroup(i)
pb2.leaveGroup(i)
#acil.leaveGroup(i)
ki4.leaveGroup(i)
ki5.leaveGroup(i)
ki6.leaveGroup(i)
if wait["lang"] == "JP":
acil.sendText(msg.to,"꧁【☆ᏴᏞᎪᏟK ᎪNᏩᎬᏞᏚ☆】꧂ Sudah Keluar Di semua grup")
else:
acil.sendText(msg.to,"He declined all invitations")
elif msg.text in ["Gcancelall"]:
gid = acil.getGroupIdsInvited()
for i in gid:
acil.rejectGroupInvitation(i)
if wait["lang"] == "JP":
acil.sendText(msg.to,"Aku menolak semua undangan")
else:
acil.sendText(msg.to,"He declined all invitations")
elif "Album deleted:" in msg.text:
gid = msg.text.replace("Album deleted:","")
albums = acil.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
acil.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
acil.sendText(msg.to,str(i) + "Soal album telah dihapus👈")
else:
acil.sendText(msg.to,str(i) + "Hapus kesulitan album👈")
elif msg.text in ["Add:on","Add auto on"]:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Already On")
else:
acil.sendText(msg.to,"Already On👈")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"Already On👈")
else:
acil.sendText(msg.to,"Already On👈")
elif msg.text in ["Add:off","Add auto off"]:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Hal ini sudah off👈")
else:
acil.sendText(msg.to,"Hal ini sudah dimatikan👈")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"Already Off👈")
else:
acil.sendText(msg.to,"Untuk mengaktifkan-off👈")
elif "Message set:" in msg.text:
wait["message"] = msg.text.replace("Message set:","")
acil.sendText(msg.to,"We changed the message👈")
elif "Help set:" in msg.text:
wait["help"] = msg.text.replace("Help set:","")
acil.sendText(msg.to,"We changed the Help👈")
elif "Msg add-" in msg.text:
wait["message"] = msg.text.replace("Pesan add-","")
if wait["lang"] == "JP":
acil.sendText(msg.to,"Kami mengubah pesan🛡")
else:
acil.sendText(msg.to,"Change information")
elif msg.text in ["Pesan add cek","Message confirm"]:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Additional information is automatically set to the following \n\n" + wait["message"])
else:
acil.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"])
elif msg.text in ["Change","change"]:
if wait["lang"] =="JP":
wait["lang"] = "TW"
acil.sendText(msg.to,"I changed the language to engglis👈")
else:
wait["lang"] = "JP"
acil.sendText(msg.to,"I changed the language to indonesia👈")
elif "Message set" in msg.text:
c = msg.text.replace("Message set","")
if c in [""," ","\n",None]:
acil.sendText(msg.to,"Is a string that can not be changed👈")
else:
wait["comment"] = c
acil.sendText(msg.to,"This has been changed👈\n\n" + c)
elif "Com set:" in msg.text:
c = msg.text.replace("Come Set:","")
if c in [""," ","\n",None]:
acil.sendText(msg.to,"Merupakan string yang tidak bisa diubah👈")
else:
wait["comment"] = c
acil.sendText(msg.to,"Ini telah diubah👈\n\n" + c)
elif msg.text in ["Comment:on","Com:on","Comment on"]:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Aku berada di👈")
else:
acil.sendText(msg.to,"To open👈")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
acil.sendText(msg.to,"オンã«ã—ã¾ã—ãŸ👈")
else:
acil.sendText(msg.to,"è¦äº†å¼€👈")
elif msg.text in ["Com:off"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Hal ini sudah off")
else:
acil.sendText(msg.to,"It is already turned off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
acil.sendText(msg.to,"Off👈")
else:
acil.sendText(msg.to,"To turn off")
elif msg.text in ["Com","Comment"]:
acil.sendText(msg.to,"Auto komentar saat ini telah ditetapkan sebagai berikut:👈\n\n" + str(wait["comment"]))
elif msg.text in ["Glink","Url"]:
if msg.toType == 2:
g = acil.getGroup(msg.to)
if g.preventJoinByTicket == True:
g.preventJoinByTicket = False
acil.updateGroup(g)
gurl = acil.reissueGroupTicket(msg.to)
acil.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Hal ini tidak dapat digunakan di luar kelompok")
else:
acil.sendText(msg.to,"Tidak dapat digunakan untuk kelompok selain")
elif "gurl+" in msg.text:
if msg.toType == 2:
gid = msg.text.replace("gurl+","")
gurl = acil.reissueGroupTicket(gid)
acil.sendText(msg.to,"line://ti/g/" + gurl)
else:
acil.sendText(msg.to,"グループ以外ã§ã¯ä½¿ç”¨ã§ãã¾ã›ã‚“👈")
elif "gurl" in msg.text:
if msg.toType == 1:
tid = msg.text.replace("gurl","")
turl = pb1.getUserTicket(tid)
pb1.sendText(msg.to,"line://ti/p" + turl)
else:
pb1.sendText(msg.to,"error")
elif msg.text in ["Gurl"]:
if msg.toType == 2:
x = acil.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
acil.updateGroup(x)
gurl = acil.reissueGroupTicket(msg.to)
acil.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
acil.sendText(msg.to,"Can't be used outside the group")
else:
acil.sendText(msg.to,"Not for use less than group")
# else:
# acil.sendText(msg.to,"Tidak dapat digunakan untuk kelompok selain")
elif msg.text in ["Comban"]:
wait["wblack"] = True
acil.sendText(msg.to,"Please send contacts from the person you want to add to the blacklistô€œô€…”👈")
elif msg.text in ["Comban del"]:
wait["dblack"] = True
acil.sendText(msg.to,"Please send contacts from the person you want to add from the blacklistô€œô€…”👈")
elif msg.text in ["Comban cek"]:
if wait["commentBlack"] == {}:
acil.sendText(msg.to,"Nothing in the blacklistô€œ🛡")
else:
acil.sendText(msg.to,"The following is a blacklistô€œ👈")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "・" +acil.getContact(mi_d).displayName + "\n"
acil.sendText(msg.to,mc)
elif msg.text.lower() == 'Clock:on':
if wait["clock"] == True:
acil.sendText(msg.to,"Sudah On")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = acil.getProfile()
profile.displayName = wait["cName"] + nowT
acil.updateProfile(profile)
acil.sendText(msg.to,"👉Jam on👈")
elif msg.text.lower() == 'Clock:off':
if wait["clock"] == False:
acil.sendText(msg.to,"Hal ini sudah off🛡")
else:
wait["clock"] = False
acil.sendText(msg.to,"Adalah Off")
elif "Clockname " in msg.text:
n = msg.text.replace("Jam say ","")
if len(n.decode("utf-8")) > 30:
acil.sendText(msg.to,"terlalu lama")
else:
wait["cName"] = n
acil.sendText(msg.to,"Ini telah diubah🛡\n\n" + n)
elif msg.text.lower() == 'update':
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = acil.getProfile()
profile.displayName = wait["cName"] + nowT
acil.updateProfile(profile)
acil.sendText(msg.to,"Diperbarui👈")
else:
acil.sendText(msg.to,"Silahkan Aktifkan Nama")
elif "Fuck: " in msg.text:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
gs.preventJoinByTicket = False
acil.updateGroup(gs)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
acil.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"Fuck You")
pass
else:
for target in targets:
try:
acil.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
acil.leaveGroup(msg.to)
gs = acil.getGroup(msg.to)
gs.preventJoinByTicket = True
acil.updateGroup(gs)
gs.preventJoinByTicket(gs)
acil.updateGroup(gs)
#-----------------------------------------------------------
elif ("Cipok " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
acil.kickoutFromGroup(msg.to,[target])
except:
acil.sendText(msg.to,"Suksess")
elif ("Ciduk " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
pb1.kickoutFromGroup(msg.to,[target])
except:
pb1.sendText(msg.to,"Suksess")
elif ("Sc " in msg.text):
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
key = acil.getContact(key1)
acil.sendText(msg.to,"Mid:" + key1)
elif "Nk " in msg.text:
nk0 = msg.text.replace("Beb ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = acil.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"suksess")
pass
else:
for target in targets:
try:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
acil.sendText(msg.to,"Good Bye")
#-----------------------------------------------------------
elif ("Bye " in msg.text):
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
except:
pass
elif ("Ban " in msg.text):
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
acil.sendText(msg.to,"Succes Banned")
except:
pass
elif msg.text in ["Mygroups"]:
gid = acil.getGroupIdsJoined()
h = ""
for i in gid:
h += "[╠ ͜͡✪͜͡✪➢] %s \n" % (acil.getGroup(i).name + " | Members : " + str(len (acil.getGroup(i).members)))
acil.sendText(msg.to, "☆「Group List」☆\n"+ h +"Total Group : " +str(len(gid)))
#----------------------------------------------------------
elif "Unban @" in msg.text:
if msg.toType == 2:
print "[Unban]ok"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip()
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Not found")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
acil.sendText(msg.to,"Target Unlocked")
except:
acil.sendText(msg.to,"Error")
elif "Ban:" in msg.text:
nk0 = msg.text.replace("Ban:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = acil.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
acil.sendText(msg.to,"Target Locked")
except:
acil.sendText(msg.to,"Error")
elif "Unban:" in msg.text:
nk0 = msg.text.replace("Unban:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = acil.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
acil.sendText(msg.to,"Target Unlocked")
except:
acil.sendText(msg.to,"Error")
#-----------------------------------------------------------
elif msg.text == "Mata":
acil.sendText(msg.to, "Check Yang Suka Ngintip Orang Mandi")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,"%H:%M")
wait2['ROM'][msg.to] = {}
print wait2
elif msg.text == "Lihat":
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "╠ ͜͡✪͜͡✪➢\n"
acil.sendText(msg.to,"======Tercyduck====== %s\n=====Tukang Ngintip======\n%s\nReading point creation date n time:\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
acil.sendText(msg.to,"An already read point has not been set.\n「set」you can send ♪ read point will be created ♪")
#-------------------------------------------------
elif "Spam @" in msg.text:
# if msg.from_ in admin:
_name = msg.text.replace("Spam @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
acil.sendText(msg.to,"Wating in progres...\n──────┅═ইई═┅──────\n꧁【☆ᏴᏞᎪᏟK ᎪNᏩᎬᏞᏚ☆】꧂\nᎪᏙᎡᏆᏞᏆᎪ ᏞᎬᏙᎪNᎪ\nhttp://line.me/ti/p/AqTXMqygnD\n──────┅═ইई═┅──────")
acil.sendText(g.mid,"──────┅═ইई═┅──────\n꧁【☆ᏴᏞᎪᏟK ᎪNᏩᎬᏞᏚ☆】꧂\nᎪᏙᎡᏆᏞᏆᎪ ᏞᎬᏙᎪNᎪ\nhttp://line.me/ti/p/AqTXMqygnD\n──────┅═ইई═┅──────")
pb1.sendText(g.mid,"──────┅═ইई═┅──────\n꧁【☆ᏴᏞᎪᏟK ᎪNᏩᎬᏞᏚ☆】꧂\nᎪᏙᎡᏆᏞᏆᎪ ᏞᎬᏙᎪNᎪ\nhttp://line.me/ti/p/AqTXMqygnD\n──────┅═ইई═┅──────")
pb2.sendText(g.mid,"──────┅═ইई═┅──────\n꧁【☆ᏴᏞᎪᏟK ᎪNᏩᎬᏞᏚ☆】꧂\nᎪᏙᎡᏆᏞᏆᎪ ᏞᎬᏙᎪNᎪ\nhttp://line.me/ti/p/AqTXMqygnD\n──────┅═ইई═┅──────")
acil.sendText(g.mid,"──────┅═ইई═┅──────\n꧁【☆ᏴᏞᎪᏟK ᎪNᏩᎬᏞᏚ☆】꧂\nᎪᏙᎡᏆᏞᏆᎪ ᏞᎬᏙᎪNᎪ\nhttp://line.me/ti/p/AqTXMqygnD\n──────┅═ইई═┅──────")
pb1.sendText(g.mid,"──────┅═ইई═┅──────\n꧁【☆ᏴᏞᎪᏟK ᎪNᏩᎬᏞᏚ☆】꧂\nᎪᏙᎡᏆᏞᏆᎪ ᏞᎬᏙᎪNᎪ\nhttp://line.me/ti/p/AqTXMqygnD\n──────┅═ইई═┅──────")
pb2.sendText(g.mid,"──────┅═ইई═┅──────\n꧁【☆ᏴᏞᎪᏟK ᎪNᏩᎬᏞᏚ☆】꧂\nᎪᏙᎡᏆᏞᏆᎪ ᏞᎬᏙᎪNᎪ\nhttp://line.me/ti/p/AqTXMqygnD\n──────┅═ইई═┅──────")
pb1.sendText(g.mid,"──────┅═ইई═┅──────\n꧁【☆ᏴᏞᎪᏟK ᎪNᏩᎬᏞᏚ☆】꧂\nᎪᏙᎡᏆᏞᏆᎪ ᏞᎬᏙᎪNᎪ\nhttp://line.me/ti/p/AqTXMqygnD\n──────┅═ইई═┅──────")
acil.sendText(msg.to, "Succes")
print " Spammed !"
#--------------------------------------------------------------------------
#-----------------------------------------------------------
elif "Mban:" in msg.text:
midd = msg.text.replace("Mban:","")
wait["blacklist"][midd] = True
acil.sendText(msg.to,"Target Lock")
#-----------------------------------------------------------
elif "#leave" in msg.text:
try:
import sys
sys.exit()
except:
pass
#-----------------------------------------------------------
elif "Spam " in msg.text:
txt = msg.text.split(" ")
jmlh = int(txt[2])
text = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","")
tulisan = jmlh * (text+"\n")
if txt[1] == "on":
if jmlh <= 1000:
for x in range(jmlh):
acil.sendText(msg.to, text)
else:
acil.sendText(msg.to, "Out Of Range!")
elif txt[1] == "off":
if jmlh <= 1000:
acil.sendText(msg.to, tulisan)
else:
acil.sendText(msg.to, "Out Of Range!")
#-----------------------------------------------------------
elif msg.text.lower() == 'respon':
profile = pb1.getProfile()
text = profile.displayName + "Hadir"
pb1.sendText(msg.to, text)
profile = pb2.getProfile()
text = profile.displayName + "Hadir"
pb2.sendText(msg.to, text)
#-----------------------------------------------------------speed
elif msg.text in ["Bl:on"]:
wait["wblacklist"] = True
acil.sendText(msg.to,"Send Contact")
elif msg.text in ["Unbl:on"]:
wait["dblacklist"] = True
acil.sendText(msg.to,"Send Contact")
elif msg.text.lower() == 'mcheck':
if wait["blacklist"] == {}:
acil.sendText(msg.to," Nothing in the blacklist")
else:
acil.sendText(msg.to," following is a blacklist")
mc = ""
for mi_d in wait["blacklist"]:
mc += "�" +acil.getContact(mi_d).displayName + "\n"
acil.sendText(msg.to,mc)
elif msg.text.lower() == 'banlist':
if msg.toType == 2:
group = acil.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = "❂••••••BLACKLIST••••••❂" + "\n"
for mm in matched_list:
cocoa += "😂" +acil.getContact(mm).displayName + "\n"
acil.sendText(msg.to,cocoa + "❂••••••••••••••••❂")
elif msg.text.lower() == 'kick@mbl':
if msg.toType == 2:
group = pb1.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
pb1.sendText(msg.to,"Daftar hitam pengguna tidak memiliki")
return
for jj in matched_list:
try:
acil.kickoutFromGroup(msg.to,[jj])
pb1.kickoutFromGroup(msg.to,[jj])
pb2.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#---------------------------------------------------
elif msg.text in ["backup"]:
try:
acil.updateDisplayPicture(backup.pictureStatus)
acil.updateProfile(backup)
acil.sendText(msg.to, "Telah kembali semula")
except Exception as e:
acil.sendText(msg.to, str(e))
#------------------------------------------------
elif "Copy @" in msg.text:
if msg.toType == 2:
print "[COPY] Ok"
_name = msg.text.replace("Copy @","")
_nametarget = _name.rstrip(' ')
gs = acil.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
sendMessage(msg.to, "Ok Vril")
else:
for target in targets:
try:
acil.cloneContactProfile(target)
acil.sendText(msg.to, "success")
except Exception as e:
print e
#----------------------------------------------
#---------------------- = NUKE = ------------------
elif "KIBAR!!!" in msg.text:
if msg.toType == 2:
print "Kick all member"
_name = msg.text.replace("Go","")
gs = acil.getGroup(msg.to)
gs = pb1.getGroup(msg.to)
gs = pb2.getGroup(msg.to)
gs = acil.getGroup(msg.to)
gs = pb1.getGroup(msg.to)
gs = pb2.getGroup(msg.to)
gs = acil.getGroup(msg.to)
gs = pb1.getGroup(msg.to)
h = acil.getContact(mid)
start = time.time()
pb1.sendText(msg.to, "🅆🄴🄻🄲🄾🄼🄴 🅃🄾\n🄺🄸🄲🄺🄴🅁 🄰🅁🄴🄽🄰\n_______________________________")
elapsed_time = time.time() - start
pb1.sendText(msg.to, "%sseconds" % (elapsed_time))
pb2.sendText(msg.to, "──────┅═ই✪͜͡ ͜͡✪ई═┅───────")
acil.sendText(msg.to, "【ᏴᏞᎪᏟK\_☠☬☠_/ᎪNᏩᎬᏞᏚ】")
pb1.sendImageWithUrl(msg.to,"http://dl.profile.line-cdn.net/0h4f8UopZva0kFLEezRx4UHjlpZSRyAm0BfU12fXV_PCwtTipPOxghe3IkPHshFS5Pakl0LXcuYi0h")
pb2.sendImageWithUrl(msg.to,"http://dl.profile.line-cdn.net/0h4f8Us-i0a0kFLEezZY8UHjlpZSRyAm0BfU12fXV_PCwtTipPOxghe3IkPHshFS5Pakl0LXcuYi0h")
acil.sendText(msg.to,"⚠️⚠️__AWAS!!! __⚠️⚠️\n___TANPA PERMISI GW___\n🔥 BAKAR GRUP LO NYET!!🔥\n___JANGAN TANYA KENAPA___\n😎KARNA KAMI PUNYA PRINSIF 😎\n KALO GAK RATA ZOOM MUKA KANG KIBAR\n\n\n──────┅═ইई═┅──────\n【ᏴᏞᎪᏟK\_☠☬☠_/ᎪNᏩᎬᏞᏚ】\n ᎪᏙᎡᏆᏞᏆᎪ ᏞᎬᏙᎪNᎪ\n line.me/ti/p/AqTXMqygnD\n──────┅═ইई═┅──────\n\n\nKAMI TAU APA!!?? KAMI HANYA NUMPANG KIBAR&PLAY\n\n\nDAH GITU AJA TQ\n\n\n(itu)JADI TANGKIS AJE BOSS (itu)\n\n\nGO!! GO!! GO!! GO!! GO!!\n\n\n________ (go)________ ")
pb1.sendText(msg.to,"❂•••••••••••✧•••••••••••❂ ")
msg.contentType = 13
msg.contentMetadata = {'mid': "u978f7e8d02351b3d1d4a3973000c2080"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u5818cb4404411c2e2e6e6937d172cca8"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "ua028b2a4f96dff4b4a52ae25223e5073"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "udfaf52176415b46cb445ae2757ec85f3"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u29ad304bbe5e9025b8431e65832a4cfa"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u565281632a958bb2795f6434f6872e3b"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u30ceda3992172f0861558a2b7a6ef5ab"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u224e7f2fd36e3565b0756319936450c5"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u604ca77dec7ab8d450ae762d5d08cd93"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u2ca90ea24d7ba639272925d715d8a99c"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u2552e86aab1b1426749dd0439b0f8c7f"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "uc67a847198ce188b412a058d86f10367"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u190afbb99dd1c28cc57642627f2aa1a2"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u12322ff2ca2b48474389f3d91b9ff385"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u2beb70887d61c0e3abf3ac327b7b21d9"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "ub08e59948aaf244041d99091254e743c"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u2c83fe9f836a2f74f7f9316e0c184f9d"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u02c62ba90a4f9ff95950d1a5ee9f2154"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u47b8e60143e0e1c6fdebe67e6a355ad2"}
acil.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u70489ca3e0d013e866a556665ee9d99b"}
acil.sendMessage(msg)
pb1.sendText(msg.to, "❂•••••••••••✧•••••••••••❂ ")
pb2.sendText(msg.to, "★_____TANGKIS NYETT_____★\n\nUDAH GITU AJA YANG PENTING KIBAR\n\n🔥RATA KAMI SENANG GAK RATA BULY AJE KAMI DISINI🔥\n\n\n__JADI TANGKIS AJA GO_GO_GO_!!!!!__\n\n______【ᏴᏞᎪᏟK\_☠☬☠_/ᎪNᏩᎬᏞᏚ】______ ")
acil.sendText(msg.to, "──────┅═ই✪͜͡ ͜͡✪ई═┅─────── ")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
acil.sendText(msg.to,"Success test")
ki6.sendText(msg.to,"Success test")
else:
for target in targets:
if target not in Bots or owner:
if target in owner:
pass
elif target in admin:
pass
elif target in Bots:
pass
else:
try:
klist=[cl,ki,ki2]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
acil.sendText(msg,to,"Udah Gitu Aja")
pb2.sendText(msg,to,"Rata Yehkan!!!")
#-------------------- = NUKE FINISH = -----------------------------
#-------------Fungsi Tagall User Start---------------#
elif msg.text in ["Dor","Tagall","Sepi","Tag"]:
group = acil.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
cb = ""
cb2 = ""
strt = int(0)
akh = int(0)
for md in nama:
akh = akh + int(6)
cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
strt = strt + int(7)
akh = akh + 1
cb2 += "╠➢@nrik \n"
cb = (cb[:int(len(cb)-1)])
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
acil.sendMessage(msg)
except Exception as error:
print error
#-------------------------------------------------------------
elif msg.text.lower() == 'cancel':
if msg.toType == 2:
group = acil.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
acil.cancelGroupInvitation(msg.to,[_mid])
acil.sendText(msg.to,"I pretended to cancel and canceled👈")
elif "Album" in msg.text:
try:
albumtags = msg.text.replace("Album","")
gid = albumtags[:33]
name = albumtags.replace(albumtags[:34],"")
acil.createAlbum(gid,name)
acil.sendText(msg.to,name + "We created an album👈")
except:
acil.sendText(msg.to,"Error")
elif "fakec→" in msg.text:
try:
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
name = "".join([random.choice(source_str) for x in xrange(10)])
amid = msg.text.replace("fakec→","")
acil.sendText(msg.to,str(acil.channel.createAlbumF(msg.to,name,amid)))
except Exception as e:
try:
acil.sendText(msg.to,str(e))
except:
pass
#-----------------------------------------------
elif msg.text in ["Sp","Speed","speed"]:
start = time.time()
acil.sendText(msg.to, "「Progress Speeds...」")
pb1.sendText(msg.to, "「Progress Speeds...」")
pb2.sendText(msg.to, "「Progress Speeds...」")
elapsed_time = time.time() - start
acil.sendText(msg.to, "%sseconds" % (elapsed_time))
pb1.sendText(msg.to, "%sseconds" % (elapsed_time))
pb2.sendText(msg.to, "%sseconds" % (elapsed_time))
#-----------------------------------------------
elif msg.text.lower() == '1':
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
pb1.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
pb2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
random.choice(KAC).updateGroup(G)
#-----------------------------------------------
elif msg.text.lower() == '1':
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
pb1.acceptGroupInvitationByTicket(msg.to,Ticket)
pb2.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
pb1.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
pb1.updateGroup(G)
elif msg.text.lower() == 'In':
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
pb1.acceptGroupInvitationByTicket(msg.to,Ticket)
pb2.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
pb1.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
pb1.updateGroup(G)
#-----------------------------------------------
elif "1" in msg.text:
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
pb1.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
pb1.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
pb1.updateGroup(G)
#-----------------------------------------------
elif "2" in msg.text:
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = False
acil.updateGroup(G)
invsend = 0
Ticket = acil.reissueGroupTicket(msg.to)
pb2.acceptGroupInvitationByTicket(msg.to,Ticket)
G = acil.getGroup(msg.to)
ginfo = acil.getGroup(msg.to)
G.preventJoinByTicket = True
pb2.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
pb2.updateGroup(G)
#-----------------------------------------------
#-----------------------------------------------
elif msg.text.lower() == '@exit':
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
acil.sendText(msg.to,"Bye Bye😘 Jangan Lupa Add Yehkan\n" + str(ginfo.name) + "")
pb1.leaveGroup(msg.to)
pb2.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "B1 bye" in msg.text:
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
pb1.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "B2 bye" in msg.text:
if msg.toType == 2:
ginfo = acil.getGroup(msg.to)
try:
pb2.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif msg.text in ["Welcome","wc","welcome","Wc"]:
ginfo = acil.getGroup(msg.to)
acil.sendText(msg.to,"🅆🄴🄻🄲🄾🄼🄴 🅃🄾\n\n_______________________________ \n\n" + str(ginfo.name))
acil.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
elif "Bc " in msg.text:
bctxt = msg.text.replace("Bc ","")
kisendText(msg.to,(bctxt))
elif "Say " in msg.text:
bctxt = msg.text.replace("Say ","")
pb1.sendText(msg.to,(bctxt))
pb2.sendText(msg.to,(bctxt))
elif msg.text.lower() == 'ping':
pb1.sendText(msg.to,"Pong ")
pb2.sendText(msg.to,"Pung ")
#-----------------------------------------------
#-----------------------------------------------
if op.type == 19:
try:
if op.param3 in mid:
if op.param2 in pb1mid:
G = pb1.getGroup(op.param1)
G.preventJoinByTicket = False
pb1.updateGroup(G)
Ticket = pb1.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
pb1.acceptGroupInvitationByTicket(op.param1,Ticket)
pb2.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
acil.updateGroup(G)
else:
G = pb1.getGroup(op.param1)
pb1.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
pb1.updateGroup(G)
Ticket = pb1.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
pb1.acceptGroupInvitationByTicket(op.param1,Ticket)
pb2.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
acil.updateGroup(G)
pb1.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in pb1mid:
if op.param2 in pb2mid:
G = pb2.getGroup(op.param1)
G.preventJoinByTicket = False
pb2.updateGroup(G)
Ticket = pb2.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
pb1.acceptGroupInvitationByTicket(op.param1,Ticket)
pb2.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
pb2.updateGroup(G)
else:
G = pb2.getGroup(op.param1)
pb2.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
pb2.updateGroup(G)
Ticket = pb2.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
pb1.acceptGroupInvitationByTicket(op.param1,Ticket)
pb2.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
pb1.updateGroup(G)
elif op.param3 in pb2mid:
if op.param2 in mid:
G = acil.getGroup(op.param1)
G.preventJoinByTicket = False
acil.updateGroup(G)
Ticket = acil.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
pb1.acceptGroupInvitationByTicket(op.param1,Ticket)
pb2.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
acil.updateGroup(G)
else:
G = acil.getGroup(op.param1)
acil.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
acil.updateGroup(G)
Ticket = acil.reissueGroupTicket(op.param1)
acil.acceptGroupInvitationByTicket(op.param1,Ticket)
pb1.acceptGroupInvitationByTicket(op.param1,Ticket)
pb2.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
acil.updateGroup(G)
except:
pass
if op.type == 17:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
if wait["protect"] == True:
if wait["blacklist"][op.param2] == True:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
ks.updateGroup(G)
# random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
# pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
# random.choice(KAK).kickoutFromGroup(op.param1,[op.param2])
except:
pass
elif op.param2 not in admin + Bots:
random.choice(KAC).sendText(op.param1,"Welcome. Don't Play Bots. I can kick you!")
else:
pass
if op.type == 19:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["protect"] == True:
wait ["blacklist"][op.param2] = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
acil.sendText(op.param1,"")
else:
acil.sendText(op.param1,"")
if op.type == 13:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
acil.sendText(op.param1,"")
else:
acil.sendText(op.param1,"")
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
acil.cancelGroupInvitation(op.param1,[contact.mid for contact in acil.getGroup(op.param1).invitee])
else:
acil.sendText(op.param1,"")
else:
acil.sendText(op.param1,"")
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["cancelprotect"] == True:
wait ["blacklist"][op.param2] = True
acil.cancelGroupInvitation(op.param1,[contact.mid for contact in acil.getGroup(op.param1).invitee])
else:
acil.sendText(op.param1,"")
else:
acil.sendText(op.param1,"")
if op.type == 11:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["linkprotect"] == True:
wait ["blacklist"][op.param2] = True
G = pb1.getGroup(op.param1)
G.preventJoinByTicket = True
pb1.updateGroup(G)
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
acil.sendText(op.param1,"")
else:
acil.sendText(op.param1,"")
if op.type == 5:
if wait["autoAdd"] == True:
if (wait["message"] in [""," ","\n",None]):
pass
else:
acil.sendText(op.param1,str(wait["message"]))
pb1.sendText(op.param1,str(wait["message"]))
pb2.sendText(op.param1,str(wait["message"]))
#------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------
if op.type == 55:
if op.param1 in wait2['readPoint']:
Name = acil.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name
wait2['ROM'][op.param1][op.param2] = "・" + Name
else:
acil.sendText
if op.type == 59:
print op
except Exception as error:
print error
def a2():
    """Return False when the current wall-clock minute is a multiple of ten,
    True otherwise.

    Used as a polling gate (see the commented-out ``while a2(): pass`` in
    ``nameUpdate``): callers spin while this returns True and proceed at
    :00, :10, :20, :30, :40 or :50.
    """
    # strftime("%M") yields a two-character minute string such as "07" or "40".
    # BUG FIX: the original sliced it with nowT[14:], which is always "" and
    # therefore never matched the list -- the function returned True
    # unconditionally. Compare the full minute string instead.
    minute = datetime.strftime(datetime.now(), "%M")
    return minute not in ["10", "20", "30", "40", "50", "00"]
def nameUpdate():
    """Background worker: while wait["clock"] is enabled, keep re-writing the
    bot's display name as the configured base name plus the current (HH:MM)
    timestamp, roughly every 0.3 seconds.

    Any failure (network hiccup, API error) is swallowed so the loop never
    dies -- this mirrors the best-effort contract of the original code.
    """
    while True:
        try:
            if wait["clock"] == True:  # noqa: E712 -- also matches 1, keep as-is
                stamp = datetime.strftime(datetime.now(), "(%H:%M)")
                me = acil.getProfile()
                me.displayName = wait["cName"] + stamp
                acil.updateProfile(me)
            time.sleep(0.30)
        except:
            pass
# Launch the clock-in-name updater as a daemon thread so it terminates
# together with the main event loop below.
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()

# Main event pump: long-poll up to 5 operations per fetch and dispatch each
# one to bot(), advancing the stored revision high-water mark as we go.
while True:
    try:
        ops = acil.fetchOps(acil.Poll.rev, 5)
    except EOFError:
        raise Exception("It might be wrong revision\n" + str(acil.Poll.rev))
    for operation in ops:
        if operation.type == OpType.END_OF_OPERATION:
            continue
        acil.Poll.rev = max(acil.Poll.rev, operation.revision)
        bot(operation)
|
test_gateway.py | import functools
import time
from threading import Thread
import numpy as np
import pytest
import requests
from jina.flow import Flow
from tests import JinaTestCase
concurrency = 10
class MyTestCase(JinaTestCase):
    """Load tests: hit a local jina Flow gateway with many concurrent clients
    and require that at least one request succeeds and the failure rate stays
    under ~10%."""

    # @pytest.mark.skip('this tests hang up for unknown reason on github')
    def test_rest_gateway_concurrency(self):
        """Fire 50 parallel REST /api/index requests at a Flow started with
        rest_api=True; record per-thread status code and roundtrip time."""

        def _request(status_codes, durations, index):
            # Worker body: POST two tiny base64-encoded PNGs and write the
            # outcome into this thread's slot of the shared result lists.
            resp = requests.post(
                f'http://0.0.0.0:{f.port_expose}/api/index',
                json={
                    'data': [
                        'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AxWcWRUeCEeBO68T3u1qLWarHqMaxDnxhAEaLh0Ssu6ZGfnKcjP4CeDLoJok3o4aOPYAJocsjktZfo4Z7Q/WR1UTgppAAdguAhR+AUm9AnqRH2jgdBZ0R+kKxAFoAME32BL7fwQbcLzhw+dXMmY9BS9K8EarXyWLH8VYK1MACkxlLTY4Eh69XfjpROqjE7P0AeBx6DGmA8/lRRlTCmPkL196pC0aWBkVs2wyjqb/LABVYL8Xgeomjl3VtEMxAeaUrGvnIawVh/oBAAD///GwU6v3yCoVAAAAAElFTkSuQmCC',
                        'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AvdGjTZeOlQq07xSYPgJjlWRwfWEBx2+CgAVrPrP+O5ghhOa+a0cocoWnaMJFAsBuCQCgiJOKDBcIQTiLieOrPD/cp/6iZ/Iu4HqAh5dGzggIQVJI3WqTxwVTDjs5XJOy38AlgHoaKgY+xJEXeFTyR7FOfF7JNWjs3b8evQE6B2dTDvQZx3n3Rz6rgOtVlaZRLvR9geCAxuY3G+0mepEAhrTISES3bwPWYYi48OUrQOc//IaJeij9xZGGmDIG9kc73fNI7eA8VMBAAD//0SxXMMT90UdAAAAAElFTkSuQmCC']})
            durations[index] = resp.elapsed.total_seconds()
            status_codes[index] = resp.status_code

        f = Flow(rest_api=True).add(
            uses='_pass',
            parallel=2)
        with f:
            concurrency = 50  # local override; shadows the module-level value of 10
            threads = []
            status_codes = [None] * concurrency
            durations = [None] * concurrency
            for i in range(concurrency):
                t = Thread(target=_request, args=(status_codes, durations, i))
                t.daemon = True
                t.start()
                threads.append(t)
            for t in threads:
                t.join()
            success = status_codes.count(200)
            failed = len(status_codes) - success
            print(
                f'\nmin roundtrip time: {np.min(durations)}\n',
                f'max roundtrip time: {np.max(durations)}\n'
                f'mean roundtrip time: {np.mean(durations)}\n'
            )
            self.assertTrue(success >= 1)
            # In some slow environments, a certain degree of failed
            # requests will occur. Here we limit the degree of failed
            # requests.
            rate = failed / success
            self.assertTrue(rate < 0.1)

    @pytest.mark.skip('raw grpc gateway is not stable enough under high concurrency')
    def test_grpc_gateway_concurrency(self):
        """Same load test over the raw gRPC gateway, using the module-level
        concurrency (10) and f.index() with a completion callback."""

        def _input_fn():
            # Two tiny base64 PNG documents as the index payload.
            return iter(['data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AxWcWRUeCEeBO68T3u1qLWarHqMaxDnxhAEaLh0Ssu6ZGfnKcjP4CeDLoJok3o4aOPYAJocsjktZfo4Z7Q/WR1UTgppAAdguAhR+AUm9AnqRH2jgdBZ0R+kKxAFoAME32BL7fwQbcLzhw+dXMmY9BS9K8EarXyWLH8VYK1MACkxlLTY4Eh69XfjpROqjE7P0AeBx6DGmA8/lRRlTCmPkL196pC0aWBkVs2wyjqb/LABVYL8Xgeomjl3VtEMxAeaUrGvnIawVh/oBAAD///GwU6v3yCoVAAAAAElFTkSuQmCC',
                         'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AvdGjTZeOlQq07xSYPgJjlWRwfWEBx2+CgAVrPrP+O5ghhOa+a0cocoWnaMJFAsBuCQCgiJOKDBcIQTiLieOrPD/cp/6iZ/Iu4HqAh5dGzggIQVJI3WqTxwVTDjs5XJOy38AlgHoaKgY+xJEXeFTyR7FOfF7JNWjs3b8evQE6B2dTDvQZx3n3Rz6rgOtVlaZRLvR9geCAxuY3G+0mepEAhrTISES3bwPWYYi48OUrQOc//IaJeij9xZGGmDIG9kc73fNI7eA8VMBAAD//0SxXMMT90UdAAAAAElFTkSuQmCC'])

        def _validate(req, start, status_codes, durations, index):
            # Callback invoked when the request completes; records duration
            # and the response status code into this thread's slot.
            end = time.time()
            durations[index] = (end - start)
            status_codes[index] = req.status.code

        def _request(f, status_codes, durations, index):
            start = time.time()
            f.index(
                input_fn=_input_fn,
                output_fn=functools.partial(
                    _validate,
                    start=start,
                    status_codes=status_codes,
                    durations=durations,
                    index=index
                ))

        f = Flow().add(
            uses='_pass',
            parallel=2)
        with f:
            threads = []
            status_codes = [None] * concurrency
            durations = [None] * concurrency
            for i in range(concurrency):
                t = Thread(
                    target=_request, args=(
                        f, status_codes, durations, i))
                threads.append(t)
                t.daemon = True
                t.start()
            for t in threads:
                t.join()
                print(f'terminate {t}')
            # gRPC success status is code 0 (unlike HTTP's 200 above)
            success = status_codes.count(0)
            failed = len(status_codes) - success
            print(
                f'\nmin roundtrip time: {np.min(durations)}\n',
                f'max roundtrip time: {np.max(durations)}\n'
                f'mean roundtrip time: {np.mean(durations)}\n'
            )
            self.assertTrue(success >= 1)
            # In some slow environments, a certain degree of failed
            # requests will occur. Here we limit the degree of failed
            # requests.
            rate = failed / success
            self.assertTrue(rate < 0.1)
|
runner_slurm.py | import os
import time
import signal
import threading
from popper import utils as pu
from popper.cli import log as log
from popper.runner_host import HostRunner
from popper.runner_host import DockerRunner as HostDockerRunner
from popper.runner_host import SingularityRunner as HostSingularityRunner
class SlurmRunner(HostRunner):
    """Base runner that executes popper workflow steps as SLURM batch jobs.

    Subclasses assemble the per-step shell command list; this class wraps it
    in a generated batch script, submits it with ``sbatch --wait``, streams
    the job's output file via a background ``tail -f``, and tracks spawned
    job names so they can be cancelled.
    """

    def __init__(self, **kw):
        super(SlurmRunner, self).__init__(**kw)
        # names of sbatch jobs currently in flight; consumed by stop_running_tasks
        self._spawned_jobs = set()

    def __exit__(self, exc_type, exc, traceback):
        # drop job bookkeeping when the runner context exits
        self._spawned_jobs = set()

    def _tail_output(self, out_file):
        """Block on ``tail -f`` over the job's output file (runs in a thread).

        The tail process's PID is published through self._out_stream_pid so
        _stop_out_stream can kill it. Returns tail's exit code.
        """
        self._out_stream_pid = set()
        _, ecode, _ = HostRunner._exec_cmd(
            ["tail", "-f", out_file], pids=self._out_stream_pid
        )
        return ecode

    def _start_out_stream(self, out_file):
        """Start the output tail in a background thread for live streaming."""
        self._out_stream_thread = threading.Thread(
            target=self._tail_output, args=(out_file,)
        )
        self._out_stream_thread.start()
        # give time so that _exec_cmd puts the pid inside the self._out_stream_pid set
        time.sleep(1)

    def _stop_out_stream(self):
        """Kill the background tail process and join its thread."""
        # exactly one PID is expected; anything else means the stream never
        # started (or started twice) and is treated as fatal
        if len(self._out_stream_pid) != 1:
            log.fail("Cannot find PID for tail process")
        _out_stream_pid = list(self._out_stream_pid)[0]
        try:
            os.kill(_out_stream_pid, signal.SIGKILL)
        except ProcessLookupError:
            log.warning("Tail process was stopped by some other process.")
        self._out_stream_thread.join()

    def _submit_batch_job(self, cmd, step):
        """Write ``cmd`` into a batch script, submit it via sbatch, and wait.

        Honors dry-run (returns 0 without submitting) and appends any
        per-step resource-manager options from the config as sbatch flags.
        Returns sbatch's exit code.
        """
        job_name = pu.sanitized_name(step.id, self._config.wid)
        temp_dir = "/tmp/popper/slurm/"
        os.makedirs(temp_dir, exist_ok=True)
        job_script = os.path.join(temp_dir, f"{job_name}.sh")
        out_file = os.path.join(temp_dir, f"{job_name}.out")
        # create/truncate log
        with open(out_file, "w"):
            pass
        with open(job_script, "w") as f:
            f.write("#!/bin/bash\n")
            f.write("\n".join(cmd))
        sbatch_cmd = f"sbatch --wait --job-name {job_name} --output {out_file}"
        sbatch_cmd = sbatch_cmd.split()
        for k, v in self._config.resman_opts.get(step.id, {}).items():
            sbatch_cmd.append(pu.key_value_to_flag(k, v))
        sbatch_cmd.append(job_script)
        log.info(f'[{step.id}] {" ".join(sbatch_cmd)}')
        if self._config.dry_run:
            return 0
        self._spawned_jobs.add(job_name)
        # start a tail (background) process on the output file
        self._start_out_stream(out_file)
        # submit the job and wait (--wait makes sbatch block until completion)
        _, ecode, output = HostRunner._exec_cmd(sbatch_cmd, logging=False)
        # kill the tail process
        self._stop_out_stream()
        self._spawned_jobs.remove(job_name)
        return ecode

    def stop_running_tasks(self):
        """Cancel (scancel) every job this runner has spawned."""
        for job_name in self._spawned_jobs:
            log.info(f"Cancelling job {job_name}")
            _, ecode, _ = HostRunner._exec_cmd(["scancel", "--name", job_name])
            if ecode != 0:
                log.warning(f"Failed to cancel the job {job_name}.")
class DockerRunner(SlurmRunner, HostDockerRunner):
    """Runs a step's docker lifecycle (rm, build/pull, create, start) as a
    single SLURM batch job."""

    def __init__(self, **kw):
        super(DockerRunner, self).__init__(init_docker_client=False, **kw)

    def __exit__(self, exc_type, exc, traceback):
        pass

    def run(self, step):
        """Execute the given step via slurm in the docker engine."""
        cid = pu.sanitized_name(step.id, self._config.wid)
        build, img, tag, build_ctx_path = self._get_build_info(step)

        # assemble the shell commands the batch job will run, in order:
        # remove any stale container, obtain the image, create, then start
        script = [f"docker rm -f {cid} || true"]
        if build:
            script.append(f"docker build -t {img}:{tag} {build_ctx_path}")
        elif not self._config.skip_pull and not step.skip_pull:
            script.append(f"docker pull {img}:{tag}")
        script.append(self._create_cmd(step, f"{img}:{tag}", cid))
        script.append(f"docker start --attach {cid}")

        self._spawned_containers.add(cid)
        ecode = self._submit_batch_job(script, step)
        self._spawned_containers.remove(cid)
        return ecode

    def _create_cmd(self, step, img, cid):
        """Translate the step's container kwargs into a `docker create` line."""
        kwargs = self._get_container_kwargs(step, img, cid)
        kwargs.pop("detach")  # meaningless for `docker create`; drop it
        flags = ["docker create", f"--name {kwargs.pop('name')}"]
        flags.append(f"--workdir {kwargs.pop('working_dir')}")
        entrypoint = kwargs.pop("entrypoint", None)
        if entrypoint:
            flags.append(f"--entrypoint {' '.join(entrypoint)}")
        # volume and environment flags
        flags.extend(f"-v {vol}" for vol in kwargs.pop("volumes"))
        flags.extend(
            f"-e {name}={value}"
            for name, value in kwargs.pop("environment").items()
        )
        command = kwargs.pop("command")
        image = kwargs.pop("image")
        # whatever remains is treated as a generic flag
        flags.extend(pu.key_value_to_flag(k, v) for k, v in kwargs.items())
        # finally the image and (optionally) the command to run in it
        flags.append(image)
        if command:
            flags.append(" ".join(command))
        return " ".join(flags)
class SingularityRunner(SlurmRunner, HostSingularityRunner):
    """Runs steps through singularity containers submitted as SLURM jobs."""

    def __init__(self, **kw):
        super(SingularityRunner, self).__init__(init_spython_client=False, **kw)
        # container reuse is a docker-only feature; fail fast if requested
        if self._config.reuse:
            log.fail("Reuse not supported for SingularityRunner.")

    def __exit__(self, exc_type, exc_value, exc_traceback):
        pass

    def run(self, step):
        """Build or pull the step's SIF image, then submit its invocation as
        a batch job. Returns the job's exit code."""
        self._setup_singularity_cache()
        cid = pu.sanitized_name(step.id, self._config.wid) + ".sif"
        self._container = os.path.join(self._singularity_cache, cid)
        build, img, build_ctx_path = self._get_build_info(step)
        # always start from a clean image file
        HostRunner._exec_cmd(["rm", "-rf", self._container])
        if not self._config.dry_run:
            if build:
                recipefile = self._get_recipe_file(build_ctx_path, cid)
                HostRunner._exec_cmd(
                    ["singularity", "build", "--fakeroot", self._container, recipefile],
                    cwd=build_ctx_path,
                )
            else:
                HostRunner._exec_cmd(["singularity", "pull", self._container, img])
        cmd = [self._create_cmd(step, cid)]
        self._spawned_containers.add(cid)
        ecode = self._submit_batch_job(cmd, step)
        self._spawned_containers.remove(cid)
        return ecode

    def _create_cmd(self, step, cid):
        """Build the `singularity exec|run` command line for the step.

        NOTE(review): this exports the step's environment into os.environ as
        a side effect -- presumably so the sbatch job inherits it; confirm
        against HostRunner._exec_cmd's environment handling.
        """
        env = self._prepare_environment(step)
        for k, v in env.items():
            os.environ[k] = str(v)
        # `runs` overrides the image entrypoint (exec); otherwise use the
        # image's default entrypoint with `args` (run)
        if step.runs:
            commands = step.runs
            cmd = ["singularity exec"]
        else:
            commands = step.args
            cmd = ["singularity run"]
        options = self._get_container_options()
        cmd.append(" ".join(options))
        cmd.append(self._container)
        cmd.append(" ".join(commands))
        return " ".join(cmd)
|
test_interactions_builtin.py | import logging
import time
import unittest
from random import randint
from threading import Thread
from slack_sdk.errors import SlackClientConfigurationError, SlackClientNotConnectedError
from slack_sdk.socket_mode.request import SocketModeRequest
from slack_sdk.socket_mode.client import BaseSocketModeClient
from slack_sdk import WebClient
from slack_sdk.socket_mode import SocketModeClient
from tests.helpers import is_ci_unstable_test_skip_enabled
from tests.slack_sdk.socket_mode.mock_socket_mode_server import (
start_socket_mode_server,
socket_mode_envelopes,
socket_mode_hello_message,
)
from tests.slack_sdk.socket_mode.mock_web_api_server import (
setup_mock_web_api_server,
cleanup_mock_web_api_server,
)
class TestInteractionsBuiltin(unittest.TestCase):
logger = logging.getLogger(__name__)
def setUp(self):
setup_mock_web_api_server(self)
self.web_client = WebClient(
token="xoxb-api_test",
base_url="http://localhost:8888",
)
def tearDown(self):
cleanup_mock_web_api_server(self)
def test_buffer_size_validation(self):
try:
SocketModeClient(app_token="xapp-A111-222-xyz", receive_buffer_size=1)
self.fail("SlackClientConfigurationError is expected here")
except SlackClientConfigurationError:
pass
def test_interactions(self):
if is_ci_unstable_test_skip_enabled():
return
t = Thread(target=start_socket_mode_server(self, 3011))
t.daemon = True
t.start()
time.sleep(2) # wait for the server
try:
buffer_size_list = [1024, 9000, 35, 49] + list(
[randint(16, 128) for _ in range(10)]
)
for buffer_size in buffer_size_list:
self.reset_sever_state()
received_messages = []
received_socket_mode_requests = []
def message_handler(message):
self.logger.info(f"Raw Message: {message}")
time.sleep(randint(50, 200) / 1000)
received_messages.append(message)
def socket_mode_request_handler(
client: BaseSocketModeClient, request: SocketModeRequest
):
self.logger.info(f"Socket Mode Request: {request}")
time.sleep(randint(50, 200) / 1000)
received_socket_mode_requests.append(request)
self.logger.info(f"Started testing with buffer size: {buffer_size}")
client = SocketModeClient(
app_token="xapp-A111-222-xyz",
web_client=self.web_client,
on_message_listeners=[message_handler],
receive_buffer_size=buffer_size,
auto_reconnect_enabled=False,
trace_enabled=True,
)
try:
client.socket_mode_request_listeners.append(
socket_mode_request_handler
)
client.wss_uri = "ws://0.0.0.0:3011/link"
client.connect()
self.assertTrue(client.is_connected())
time.sleep(2) # wait for the message receiver
repeat = 2
for _ in range(repeat):
client.send_message("foo")
client.send_message("bar")
client.send_message("baz")
self.assertTrue(client.is_connected())
expected = (
socket_mode_envelopes
+ [socket_mode_hello_message]
+ ["foo", "bar", "baz"] * repeat
)
expected.sort()
count = 0
while count < 5 and len(received_messages) < len(expected):
time.sleep(0.1)
self.logger.debug(
f"Received messages: {len(received_messages)}"
)
count += 0.1
received_messages.sort()
self.assertEqual(len(received_messages), len(expected))
self.assertEqual(received_messages, expected)
self.assertEqual(
len(socket_mode_envelopes), len(received_socket_mode_requests)
)
finally:
pass
# client.close()
self.logger.info(f"Passed with buffer size: {buffer_size}")
finally:
client.close()
self.server.stop()
self.server.close()
self.logger.info(f"Passed with buffer size: {buffer_size_list}")
def test_send_message_while_disconnection(self):
    """Verify send_message() raises SlackClientNotConnectedError while the
    client is disconnected, and succeeds again after reconnecting.

    Fix: ``client`` is now initialized to None before the try block; the
    original referenced it unconditionally in ``finally``, which raised a
    NameError (masking the real failure) whenever ``reset_sever_state()``
    or the SocketModeClient constructor threw.
    """
    if is_ci_unstable_test_skip_enabled():
        return
    # NOTE(review): start_socket_mode_server(...) appears to be a factory
    # returning the server-loop callable passed as target — confirm;
    # otherwise the thread target would be None.
    t = Thread(target=start_socket_mode_server(self, 3011))
    t.daemon = True
    t.start()
    time.sleep(2)  # wait for the server
    client = None  # guard: finally must not NameError if setup fails
    try:
        self.reset_sever_state()
        client = SocketModeClient(
            app_token="xapp-A111-222-xyz",
            web_client=self.web_client,
            auto_reconnect_enabled=False,
            trace_enabled=True,
        )
        client.wss_uri = "ws://0.0.0.0:3011/link"
        client.connect()
        time.sleep(1)  # wait for the connection
        client.send_message("foo")
        client.disconnect()
        time.sleep(1)  # wait for the disconnection to settle
        try:
            client.send_message("foo")
            self.fail("SlackClientNotConnectedError is expected here")
        except SlackClientNotConnectedError:
            pass  # expected: sending while disconnected must raise
        client.connect()
        time.sleep(1)  # wait for the connection
        client.send_message("foo")
    finally:
        if client is not None:
            client.close()
        self.server.stop()
        self.server.close()
|
test_general.py | """
Collection of tests for unified general functions
"""
# global
import os
import math
import time
import einops
import pytest
import threading
import numpy as np
from numbers import Number
from collections.abc import Sequence
import torch.multiprocessing as multiprocessing
# local
import ivy
import ivy.functional.backends.numpy
import ivy.functional.backends.jax
import ivy.functional.backends.tensorflow
import ivy.functional.backends.torch
import ivy.functional.backends.mxnet
import ivy_tests.test_ivy.helpers as helpers
# Helpers #
# --------#
def _get_shape_of_list(lst, shape=()):
if not lst:
return []
if not isinstance(lst, Sequence):
return shape
if isinstance(lst[0], Sequence):
l = len(lst[0])
if not all(len(item) == l for item in lst):
msg = 'not all lists have the same length'
raise ValueError(msg)
shape += (len(lst),)
shape = _get_shape_of_list(lst[0], shape)
return shape
# Tests #
# ------#
# set_framework
@pytest.mark.parametrize(
    "fw_str", ['numpy', 'jax', 'torch', 'mxnet'])
def test_set_framework(fw_str, dev, call):
    # Smoke test: setting then unsetting a backend framework must not raise.
    # NOTE(review): 'tensorflow' is imported at the top of the file but
    # absent from this parametrize list — confirm whether intentional.
    ivy.set_framework(fw_str)
    ivy.unset_framework()
# use_framework
def test_use_within_use_framework(dev, call):
    """Entering and leaving each backend's `use` context must not raise."""
    backend_modules = (
        ivy.functional.backends.numpy,
        ivy.functional.backends.jax,
        ivy.functional.backends.tensorflow,
        ivy.functional.backends.torch,
        ivy.functional.backends.mxnet,
    )
    for backend in backend_modules:
        with backend.use:
            pass
@pytest.mark.parametrize(
    "allow_duplicates", [True, False])
def test_match_kwargs(allow_duplicates):
    """ivy.match_kwargs must split a kwargs dict across callables by their
    signatures, optionally allowing the same key to match several."""
    def func_a(a, b, c=2):
        pass
    func_b = lambda a, d, e=5: None
    class ClassA:
        def __init__(self, c, f, g=3):
            pass
    kwargs = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6}
    kwfa, kwfb, kwca = ivy.match_kwargs(
        kwargs, func_a, func_b, ClassA, allow_duplicates=allow_duplicates)
    if allow_duplicates:
        # 'a' and 'c' may be handed to every matching callable
        assert kwfa == {'a': 0, 'b': 1, 'c': 2}
        assert kwfb == {'a': 0, 'd': 3, 'e': 4}
        assert kwca == {'c': 2, 'f': 5, 'g': 6}
    else:
        # each key is consumed by the first callable that accepts it
        assert kwfa == {'a': 0, 'b': 1, 'c': 2}
        assert kwfb == {'d': 3, 'e': 4}
        assert kwca == {'f': 5, 'g': 6}
# def test_get_referrers_recursive(dev, call):
#
# class SomeClass:
# def __init__(self):
# self.x = [1, 2]
# self.y = [self.x]
#
# some_obj = SomeClass()
# refs = ivy.get_referrers_recursive(some_obj.x)
# ref_keys = refs.keys()
# assert len(ref_keys) == 3
# assert 'repr' in ref_keys
# assert refs['repr'] == '[1,2]'
# y_id = str(id(some_obj.y))
# y_refs = refs[y_id]
# assert y_refs['repr'] == '[[1,2]]'
# some_obj_dict_id = str(id(some_obj.__dict__))
# assert y_refs[some_obj_dict_id] == 'tracked'
# dict_refs = refs[some_obj_dict_id]
# assert dict_refs['repr'] == "{'x':[1,2],'y':[[1,2]]}"
# some_obj_id = str(id(some_obj))
# some_obj_refs = dict_refs[some_obj_id]
# assert some_obj_refs['repr'] == str(some_obj).replace(' ', '')
# assert len(some_obj_refs) == 1
# array
@pytest.mark.parametrize(
    "object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
    "dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
    "from_numpy", [True, False])
def test_array(object_in, dtype, from_numpy, dev, call):
    """ivy.array must build a backend array from list or ndarray input."""
    if call in [helpers.mx_call] and dtype == 'int16':
        # mxnet does not support int16
        pytest.skip()
    if from_numpy:
        # exercise the ndarray ingestion path as well as the list path
        object_in = np.array(object_in)
    created = ivy.array(object_in, dtype, dev)
    # type test
    assert ivy.is_array(created)
    # cardinality test
    assert created.shape == np.array(object_in).shape
    # value test (astype(None) upcasts to float64 for the dtype=None case)
    expected = np.array(object_in).astype(dtype)
    assert np.allclose(call(ivy.array, object_in, dtype, dev), expected)
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support string devices
        return
# copy array
@pytest.mark.parametrize(
    "x", [[0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
    "dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
def test_copy_array(x, dtype, dev, call):
    """ivy.copy_array must return an equal but distinct array object."""
    if call in [helpers.mx_call] and dtype == 'int16':
        # mxnet does not support int16
        pytest.skip()
    source = ivy.array(x, dtype, dev)
    copied = ivy.copy_array(source)
    # type test
    assert ivy.is_array(copied)
    # cardinality test
    assert copied.shape == source.shape
    # value test: same contents, different identity
    assert np.allclose(ivy.to_numpy(copied), ivy.to_numpy(source))
    assert id(source) != id(copied)
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support string devices
        return
# array_equal
@pytest.mark.parametrize(
    "x0_n_x1_n_res", [([0.], [0.], True), ([0.], [1.], False),
                      ([[0.], [1.]], [[0.], [1.]], True),
                      ([[0.], [1.]], [[1.], [2.]], False)])
@pytest.mark.parametrize(
    "dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
def test_array_equal(x0_n_x1_n_res, dtype, dev, call):
    """ivy.array_equal must report exact whole-array equality."""
    if call in [helpers.mx_call] and dtype in ['int16', 'bool']:
        # mxnet does not support int16, and does not support bool for broadcast_equal method used
        pytest.skip()
    raw_x0, raw_x1, expected = x0_n_x1_n_res
    arr0 = ivy.array(raw_x0, dtype, dev)
    arr1 = ivy.array(raw_x1, dtype, dev)
    result = ivy.array_equal(arr0, arr1)
    # type test
    assert ivy.is_array(arr0)
    assert ivy.is_array(arr1)
    assert isinstance(result, bool) or ivy.is_array(result)
    # value test
    assert result == expected
# arrays_equal
@pytest.mark.parametrize(
    "xs_n_res", [([[[0.], [1.]], [[0.], [1.]], [[1.], [2.]]], False)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
def test_arrays_equal(xs_n_res, dtype, dev, call):
    """ivy.arrays_equal over a list of arrays is False if any pair differs."""
    raw_arrays, expected = xs_n_res
    arrays = [ivy.array(raw, dtype, dev) for raw in raw_arrays]
    result = ivy.arrays_equal(arrays)
    # type test
    for arr in arrays:
        assert ivy.is_array(arr)
    assert isinstance(result, bool) or ivy.is_array(result)
    # value test
    assert result == expected
# equal
@pytest.mark.parametrize(
    "x0_n_x1_n_x2_em_n_res", [([0.], [0.], [0.], False, True),
                              ([0.], [1.], [0.], False, False),
                              ([0.], [1.], [0.], True, [[True, False, True],
                                                        [False, True, False],
                                                        [True, False, True]]),
                              ({'a': 0}, {'a': 0}, {'a': 1}, True, [[True, True, False],
                                                                   [True, True, False],
                                                                   [False, False, True]])])
@pytest.mark.parametrize(
    "to_array", [True, False])
def test_equal(x0_n_x1_n_x2_em_n_res, to_array, dev, call):
    """Test ivy.all_equal on three inputs (lists or dicts).

    With equality_matrix=False the result is a single bool; with True it is
    a 3x3 pairwise-equality matrix.
    """
    x0, x1, x2, equality_matrix, true_res = x0_n_x1_n_x2_em_n_res
    # smoke test: optionally lift list inputs to backend arrays
    # (dict inputs are always compared as plain Python objects)
    if isinstance(x0, list) and to_array:
        x0 = ivy.array(x0, dev=dev)
        x1 = ivy.array(x1, dev=dev)
        x2 = ivy.array(x2, dev=dev)
    res = ivy.all_equal(x0, x1, x2, equality_matrix=equality_matrix)
    # value test
    if equality_matrix:
        assert np.array_equal(ivy.to_numpy(res), np.array(true_res))
    else:
        assert res == true_res
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support variable number of input arguments
        return
# to_numpy
@pytest.mark.parametrize(
    "object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
    "dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array])
def test_to_numpy(object_in, dtype, tensor_fn, dev, call):
    """ivy.to_numpy must convert a backend array into an equal np.ndarray."""
    if call in [helpers.mx_call] and dtype == 'int16':
        # mxnet does not support int16
        pytest.skip()
    if call in [helpers.tf_graph_call]:
        # to_numpy() requires eager execution
        pytest.skip()
    converted = ivy.to_numpy(tensor_fn(object_in, dtype, dev))
    # type test
    assert isinstance(converted, np.ndarray)
    # cardinality test
    assert converted.shape == np.array(object_in).shape
    # value test (astype(None) upcasts to float64 for the dtype=None case)
    assert np.allclose(ivy.to_numpy(tensor_fn(object_in, dtype, dev)),
                       np.array(object_in).astype(dtype))
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support numpy conversion
        return
# to_scalar
@pytest.mark.parametrize(
    "object_in", [[0.], [[[1]]], [True], [[1.]]])
@pytest.mark.parametrize(
    "dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array])
def test_to_scalar(object_in, dtype, tensor_fn, dev, call):
    """ivy.to_scalar must unwrap a single-element array to a Python scalar."""
    if call in [helpers.mx_call] and dtype == 'int16':
        # mxnet does not support int16
        pytest.skip()
    if call in [helpers.tf_graph_call]:
        # to_scalar() requires eager execution
        pytest.skip()
    scalar = ivy.to_scalar(tensor_fn(object_in, dtype, dev))
    expected = ivy.to_numpy(ivy.array(object_in, dtype=dtype)).item()
    # type test: Python type must match numpy's .item() conversion
    assert isinstance(scalar, type(expected))
    # value test
    assert ivy.to_scalar(tensor_fn(object_in, dtype, dev)) == expected
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support scalar conversion
        return
# to_list
@pytest.mark.parametrize(
    "object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
    "dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array])
def test_to_list(object_in, dtype, tensor_fn, dev, call):
    """ivy.to_list must convert a backend array into a nested Python list."""
    if call in [helpers.mx_call] and dtype == 'int16':
        # mxnet does not support int16
        pytest.skip()
    if call in [helpers.tf_graph_call]:
        # to_list() requires eager execution
        pytest.skip()
    as_list = ivy.to_list(tensor_fn(object_in, dtype, dev))
    # type test
    assert isinstance(as_list, list)
    # cardinality test: same inferred nesting shape as the input
    assert _get_shape_of_list(as_list) == _get_shape_of_list(object_in)
    # value test
    assert np.allclose(np.asarray(ivy.to_list(tensor_fn(object_in, dtype, dev))),
                       np.array(object_in).astype(dtype))
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support list conversion
        return
# shape
@pytest.mark.parametrize(
    "object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "as_tensor", [None, True, False])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_shape(object_in, dtype, as_tensor, tensor_fn, dev, call):
    """ivy.shape returns the array's shape as a tensor or a tuple."""
    # smoke test
    if len(object_in) == 0 and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    ret = ivy.shape(tensor_fn(object_in, dtype, dev), as_tensor)
    # type test
    if as_tensor:
        assert ivy.is_array(ret)
    else:
        # tuple result; rebound to an array so the checks below are uniform
        assert isinstance(ret, tuple)
        ret = ivy.array(ret)
    # cardinality test: one entry per input dimension
    assert ret.shape[0] == len(np.asarray(object_in).shape)
    # value test
    assert np.array_equal(ivy.to_numpy(ret), np.asarray(np.asarray(object_in).shape, np.int32))
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support Union
        return
# get_num_dims
@pytest.mark.parametrize(
    "object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "as_tensor", [None, True, False])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_get_num_dims(object_in, dtype, as_tensor, tensor_fn, dev, call):
    """ivy.get_num_dims returns the rank as a 0-d tensor or a Python int."""
    # smoke test
    if len(object_in) == 0 and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    ret = ivy.get_num_dims(tensor_fn(object_in, dtype, dev), as_tensor)
    # type test
    if as_tensor:
        assert ivy.is_array(ret)
    else:
        # int result; rebound to an array so the checks below are uniform
        assert isinstance(ret, int)
        ret = ivy.array(ret)
    # cardinality test: rank is a scalar
    assert list(ret.shape) == []
    # value test
    assert np.array_equal(ivy.to_numpy(ret), np.asarray(len(np.asarray(object_in).shape), np.int32))
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support Union
        return
# minimum
@pytest.mark.parametrize(
    "xy", [([0.7], [0.5]), ([0.7], 0.5), (0.5, [0.7]), ([[0.8, 1.2], [1.5, 0.2]], [0., 1.])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_minimum(xy, dtype, tensor_fn, dev, call):
    """Elementwise minimum must match the numpy reference backend."""
    lhs_raw, rhs_raw = xy
    if (isinstance(lhs_raw, Number) or isinstance(rhs_raw, Number)) \
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    lhs = tensor_fn(lhs_raw, dtype, dev)
    rhs = tensor_fn(rhs_raw, dtype, dev)
    result = ivy.minimum(lhs, rhs)
    # type test
    assert ivy.is_array(result)
    # cardinality test: the broadcast result takes the higher-rank shape
    expected_shape = lhs.shape if len(lhs.shape) > len(rhs.shape) else rhs.shape
    assert result.shape == expected_shape
    # value test
    assert np.array_equal(
        call(ivy.minimum, lhs, rhs),
        np.asarray(ivy.functional.backends.numpy.minimum(
            ivy.to_numpy(lhs), ivy.to_numpy(rhs))))
# maximum
@pytest.mark.parametrize(
    "xy", [([0.7], [0.5]), ([0.7], 0.5), (0.5, [0.7]), ([[0.8, 1.2], [1.5, 0.2]], [0., 1.])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_maximum(xy, dtype, tensor_fn, dev, call):
    """Elementwise maximum must match the numpy reference backend."""
    lhs_raw, rhs_raw = xy
    if (isinstance(lhs_raw, Number) or isinstance(rhs_raw, Number)) \
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    lhs = tensor_fn(lhs_raw, dtype, dev)
    rhs = tensor_fn(rhs_raw, dtype, dev)
    result = ivy.maximum(lhs, rhs)
    # type test
    assert ivy.is_array(result)
    # cardinality test: the broadcast result takes the higher-rank shape
    expected_shape = lhs.shape if len(lhs.shape) > len(rhs.shape) else rhs.shape
    assert result.shape == expected_shape
    # value test
    assert np.array_equal(
        call(ivy.maximum, lhs, rhs),
        np.asarray(ivy.functional.backends.numpy.maximum(
            ivy.to_numpy(lhs), ivy.to_numpy(rhs))))
# clip
@pytest.mark.parametrize(
    "x_min_n_max", [(-0.5, 0., 1.5), ([1.7], [0.5], [1.1]), ([[0.8, 2.2], [1.5, 0.2]], 0.2, 1.4),
                    ([[0.8, 2.2], [1.5, 0.2]], [[1., 1.], [1., 1.]], [[1.1, 2.], [1.1, 2.]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_clip(x_min_n_max, dtype, tensor_fn, dev, call):
    """ivy.clip must match the numpy reference backend for scalar and
    per-element min/max bounds."""
    # smoke test
    if (isinstance(x_min_n_max[0], Number) or isinstance(x_min_n_max[1], Number) or isinstance(x_min_n_max[2], Number))\
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x_min_n_max[0], dtype, dev)
    min_val = tensor_fn(x_min_n_max[1], dtype, dev)
    max_val = tensor_fn(x_min_n_max[2], dtype, dev)
    if ((min_val.shape != [] and min_val.shape != [1]) or (max_val.shape != [] and max_val.shape != [1]))\
            and call in [helpers.mx_call]:
        # mxnet only supports numbers or 0 or 1 dimensional arrays for min and max while performing clip
        pytest.skip()
    ret = ivy.clip(x, min_val, max_val)
    # type test
    assert ivy.is_array(ret)
    # cardinality test: result broadcasts to the highest-rank operand
    max_shape = max([x.shape, min_val.shape, max_val.shape], key=lambda x_: len(x_))
    assert ret.shape == max_shape
    # value test
    assert np.array_equal(call(ivy.clip, x, min_val, max_val),
                          np.asarray(ivy.functional.backends.numpy.clip(ivy.to_numpy(x), ivy.to_numpy(min_val), ivy.to_numpy(max_val))))
# clip_vector_norm
# @pytest.mark.parametrize(
# "x_max_norm_n_p_val_clipped",
# [(-0.5, 0.4, 2., -0.4), ([1.7], 1.5, 3., [1.5]),
# ([[0.8, 2.2], [1.5, 0.2]], 4., 1., [[0.6808511, 1.8723406], [1.2765958, 0.17021278]]),
# ([[0.8, 2.2], [1.5, 0.2]], 2.5, 2., [[0.71749604, 1.9731141], [1.345305, 0.17937401]])])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_clip_vector_norm(x_max_norm_n_p_val_clipped, dtype, tensor_fn, dev, call):
# # smoke test
# if call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x_max_norm_n_p_val_clipped[0], dtype, dev)
# max_norm = x_max_norm_n_p_val_clipped[1]
# p_val = x_max_norm_n_p_val_clipped[2]
# clipped = x_max_norm_n_p_val_clipped[3]
# ret = ivy.clip_vector_norm(x, max_norm, p_val)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == (x.shape if len(x.shape) else (1,))
# # value test
# assert np.allclose(call(ivy.clip_vector_norm, x, max_norm, p_val), np.array(clipped))
# # compilation test
# if call is helpers.torch_call:
# # pytorch jit cannot compile global variables, in this case MIN_DENOMINATOR
# return
# round
@pytest.mark.parametrize(
    "x_n_x_rounded", [(-0.51, -1), ([1.7], [2.]), ([[0.8, 2.2], [1.51, 0.2]], [[1., 2.], [2., 0.]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_round(x_n_x_rounded, dtype, tensor_fn, dev, call):
    """ivy.round must round to the nearest integer, elementwise."""
    raw_in, expected = x_n_x_rounded
    if (isinstance(raw_in, Number) or isinstance(expected, Number)) \
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(raw_in, dtype, dev)
    rounded = ivy.round(x)
    # type test
    assert ivy.is_array(rounded)
    # cardinality test
    assert rounded.shape == x.shape
    # value test
    assert np.array_equal(call(ivy.round, x), np.array(expected))
# floormod
@pytest.mark.parametrize(
    "x_n_divisor_n_x_floormod", [(2.5, 2., 0.5), ([10.7], [5.], [0.7]),
                                 ([[0.8, 2.2], [1.7, 0.2]], [[0.3, 0.5], [0.4, 0.11]], [[0.2, 0.2], [0.1, 0.09]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_floormod(x_n_divisor_n_x_floormod, dtype, tensor_fn, dev, call):
    """ivy.floormod must compute the floored modulo, elementwise."""
    # smoke test
    if (isinstance(x_n_divisor_n_x_floormod[0], Number) or isinstance(x_n_divisor_n_x_floormod[1], Number) or
            isinstance(x_n_divisor_n_x_floormod[2], Number))\
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x_n_divisor_n_x_floormod[0], dtype, dev)
    # NOTE(review): divisor is built with ivy.array rather than tensor_fn,
    # so it is never a variable even when tensor_fn is var_fn — confirm
    # whether this asymmetry is intentional.
    divisor = ivy.array(x_n_divisor_n_x_floormod[1], dtype, dev)
    ret = ivy.floormod(x, divisor)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == x.shape
    # value test
    assert np.allclose(call(ivy.floormod, x, divisor), np.array(x_n_divisor_n_x_floormod[2]))
# floor
@pytest.mark.parametrize(
    "x_n_x_floored", [(2.5, 2.), ([10.7], [10.]), ([[3.8, 2.2], [1.7, 0.2]], [[3., 2.], [1., 0.]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_floor(x_n_x_floored, dtype, tensor_fn, dev, call):
    """ivy.floor must round down to the nearest integer, elementwise."""
    raw_in, expected = x_n_x_floored
    if (isinstance(raw_in, Number) or isinstance(expected, Number)) \
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(raw_in, dtype, dev)
    floored = ivy.floor(x)
    # type test
    assert ivy.is_array(floored)
    # cardinality test
    assert floored.shape == x.shape
    # value test
    assert np.allclose(call(ivy.floor, x), np.array(expected))
# ceil
@pytest.mark.parametrize(
    "x_n_x_ceiled", [(2.5, 3.), ([10.7], [11.]), ([[3.8, 2.2], [1.7, 0.2]], [[4., 3.], [2., 1.]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_ceil(x_n_x_ceiled, dtype, tensor_fn, dev, call):
    """ivy.ceil must round up to the nearest integer, elementwise."""
    raw_in, expected = x_n_x_ceiled
    if (isinstance(raw_in, Number) or isinstance(expected, Number)) \
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(raw_in, dtype, dev)
    ceiled = ivy.ceil(x)
    # type test
    assert ivy.is_array(ceiled)
    # cardinality test
    assert ceiled.shape == x.shape
    # value test
    assert np.allclose(call(ivy.ceil, x), np.array(expected))
# abs
@pytest.mark.parametrize(
    "x_n_x_absed", [(-2.5, 2.5), ([-10.7], [10.7]), ([[-3.8, 2.2], [1.7, -0.2]], [[3.8, 2.2], [1.7, 0.2]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_abs(x_n_x_absed, dtype, tensor_fn, dev, call):
    """ivy.abs must take the absolute value, elementwise."""
    raw_in, expected = x_n_x_absed
    if (isinstance(raw_in, Number) or isinstance(expected, Number)) \
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(raw_in, dtype, dev)
    absed = ivy.abs(x)
    # type test
    assert ivy.is_array(absed)
    # cardinality test
    assert absed.shape == x.shape
    # value test
    assert np.allclose(call(ivy.abs, x), np.array(expected))
# argmax
# @pytest.mark.parametrize(
# "x_n_axis_x_argmax", [([-0.3, 0.1], None, [1]), ([[1.3, 2.6], [2.3, 2.5]], 0, [1, 0]),
# ([[1.3, 2.6], [2.3, 2.5]], 1, [1, 1])])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_argmax(x_n_axis_x_argmax, dtype, tensor_fn, dev, call):
# # smoke test
# x = ivy.array(x_n_axis_x_argmax[0], dtype, dev)
# axis = x_n_axis_x_argmax[1]
# ret = ivy.argmax(x, axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert tuple(ret.shape) == (len(x.shape),)
# # value test
# assert np.allclose(call(ivy.argmax, x, axis), np.array(x_n_axis_x_argmax[2]))
# argmin
@pytest.mark.parametrize(
    "x_n_axis_x_argmin", [([-0.3, 0.1], None, [0]), ([[1.3, 2.6], [2.3, 2.5]], 0, [0, 1]),
                          ([[1.3, 2.6], [2.3, 2.5]], 1, [0, 0])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_argmin(x_n_axis_x_argmin, dtype, tensor_fn, dev, call):
    """ivy.argmin must return minimum indices along the given axis."""
    raw_x, axis, expected = x_n_axis_x_argmin
    x = tensor_fn(raw_x, dtype, dev)
    indices = ivy.argmin(x, axis)
    # type test
    assert ivy.is_array(indices)
    # cardinality test
    assert tuple(indices.shape) == (len(x.shape),)
    # value test
    assert np.allclose(call(ivy.argmin, x, axis), np.array(expected))
# argsort
# @pytest.mark.parametrize(
# "x_n_axis_x_argsort", [([1, 10, 26.9, 2.8, 166.32, 62.3], -1, [0, 3, 1, 2, 5, 4])])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_argsort(x_n_axis_x_argsort, dtype, tensor_fn, dev, call):
# # smoke test
# x = tensor_fn(x_n_axis_x_argsort[0], dtype, dev)
# axis = x_n_axis_x_argsort[1]
# ret = ivy.argsort(x, axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert tuple(ret.shape) == (6,)
# # value test
# assert np.allclose(call(ivy.argsort, x, axis), np.array(x_n_axis_x_argsort[2]))
# arange
@pytest.mark.parametrize(
    "stop_n_start_n_step", [[10, None, None], [10, 2, None], [10, 2, 2]])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_arange(stop_n_start_n_step, dtype, tensor_fn, dev, call):
    """ivy.arange must match numpy for stop / stop+start / stop+start+step."""
    # smoke test
    stop, start, step = stop_n_start_n_step
    if (isinstance(stop, Number) or isinstance(start, Number) or isinstance(step, Number))\
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    if tensor_fn == helpers.var_fn and call is helpers.torch_call:
        # pytorch does not support arange using variables as input
        pytest.skip()
    # assemble positional args, wrapping only the provided (non-None) values
    # NOTE: truthiness test means a 0 value would also be skipped here
    args = list()
    if stop:
        stop = tensor_fn(stop, dtype, dev)
        args.append(stop)
    if start:
        start = tensor_fn(start, dtype, dev)
        args.append(start)
    if step:
        step = tensor_fn(step, dtype, dev)
        args.append(step)
    ret = ivy.arange(*args, dtype=dtype, dev=dev)
    # type test
    assert ivy.is_array(ret)
    # cardinality test: length = (stop - start) / step, with defaults 0 and 1
    assert ret.shape == (int((ivy.to_list(stop) -
                              (ivy.to_list(start) if start else 0))/(ivy.to_list(step) if step else 1)),)
    # value test
    assert np.array_equal(call(ivy.arange, *args, dtype=dtype, dev=dev),
                          np.asarray(ivy.functional.backends.numpy.arange(*[ivy.to_numpy(arg) for arg in args], dtype=dtype)))
# linspace
@pytest.mark.parametrize(
    "start_n_stop_n_num_n_axis", [[1, 10, 100, None], [[[0., 1., 2.]], [[1., 2., 3.]], 150, -1],
                                  [[[[-0.1471, 0.4477, 0.2214]]], [[[-0.3048, 0.3308, 0.2721]]], 6, -2]])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_linspace(start_n_stop_n_num_n_axis, dtype, tensor_fn, dev, call):
    """ivy.linspace must match numpy, including the axis-insertion cases."""
    # smoke test
    start, stop, num, axis = start_n_stop_n_num_n_axis
    if (isinstance(start, Number) or isinstance(stop, Number))\
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    start = tensor_fn(start, dtype, dev)
    stop = tensor_fn(stop, dtype, dev)
    ret = ivy.linspace(start, stop, num, axis, dev=dev)
    # type test
    assert ivy.is_array(ret)
    # cardinality test: num is inserted after `axis` (or appended when axis
    # is None or -1)
    target_shape = list(start.shape)
    target_shape.insert(axis + 1 if (axis and axis != -1) else len(target_shape), num)
    assert ret.shape == tuple(target_shape)
    # value test
    assert np.allclose(call(ivy.linspace, start, stop, num, axis, dev=dev),
                       np.asarray(ivy.functional.backends.numpy.linspace(ivy.to_numpy(start), ivy.to_numpy(stop), num, axis)))
# logspace
@pytest.mark.parametrize(
    "start_n_stop_n_num_n_base_n_axis", [[1, 10, 100, 10., None], [[[0., 1., 2.]], [[1., 2., 3.]], 150, 2., -1],
                                         [[[[-0.1471, 0.4477, 0.2214]]], [[[-0.3048, 0.3308, 0.2721]]], 6, 5., -2]])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_logspace(start_n_stop_n_num_n_base_n_axis, dtype, tensor_fn, dev, call):
    """ivy.logspace must match numpy for varying base and axis."""
    # smoke test
    start, stop, num, base, axis = start_n_stop_n_num_n_base_n_axis
    if (isinstance(start, Number) or isinstance(stop, Number))\
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    start = tensor_fn(start, dtype, dev)
    stop = tensor_fn(stop, dtype, dev)
    ret = ivy.logspace(start, stop, num, base, axis, dev=dev)
    # type test
    assert ivy.is_array(ret)
    # cardinality test: num is inserted after `axis` (or appended when axis
    # is None or -1)
    target_shape = list(start.shape)
    target_shape.insert(axis + 1 if (axis and axis != -1) else len(target_shape), num)
    assert ret.shape == tuple(target_shape)
    # value test
    assert np.allclose(call(ivy.logspace, start, stop, num, base, axis, dev=dev),
                       ivy.functional.backends.numpy.logspace(ivy.to_numpy(start), ivy.to_numpy(stop), num, base, axis))
# concatenate
@pytest.mark.parametrize(
    "x1_n_x2_n_axis", [(1, 10, 0), ([[0., 1., 2.]], [[1., 2., 3.]], 0), ([[0., 1., 2.]], [[1., 2., 3.]], 1),
                       ([[[-0.1471, 0.4477, 0.2214]]], [[[-0.3048, 0.3308, 0.2721]]], -1)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_concatenate(x1_n_x2_n_axis, dtype, tensor_fn, dev, call):
    """ivy.concatenate of two equal-shape arrays must match numpy."""
    # smoke test
    x1, x2, axis = x1_n_x2_n_axis
    if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x1 = tensor_fn(x1, dtype, dev)
    x2 = tensor_fn(x2, dtype, dev)
    ret = ivy.concatenate((x1, x2), axis)
    # type test
    assert ivy.is_array(ret)
    # cardinality test: the concatenation axis doubles, others are unchanged;
    # two scalars concatenate to shape (2,)
    axis_val = (axis % len(x1.shape) if (axis is not None and len(x1.shape) != 0) else len(x1.shape) - 1)
    if x1.shape == ():
        expected_shape = (2,)
    else:
        expected_shape = tuple([item * 2 if i == axis_val else item for i, item in enumerate(x1.shape)])
    assert ret.shape == expected_shape
    # value test
    assert np.allclose(call(ivy.concatenate, [x1, x2], axis),
                       np.asarray(ivy.functional.backends.numpy.concatenate([ivy.to_numpy(x1), ivy.to_numpy(x2)], axis)))
# flip
# @pytest.mark.parametrize(
# "x_n_axis_n_bs", [(1, 0, None), ([[0., 1., 2.]], None, (1, 3)), ([[0., 1., 2.]], 1, (1, 3)),
# ([[[-0.1471, 0.4477, 0.2214]]], None, None)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_flip(x_n_axis_n_bs, dtype, tensor_fn, dev, call):
# # smoke test
# x, axis, bs = x_n_axis_n_bs
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret = ivy.flip(x, axis, bs)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == x.shape
# # value test
# assert np.allclose(call(ivy.flip, x, axis, bs), np.asarray(ivy.functional.backends.numpy.flip(ivy.to_numpy(x), axis, bs)))
# stack
# @pytest.mark.parametrize(
# "xs_n_axis", [((1, 0), -1), (([[0., 1., 2.]], [[3., 4., 5.]]), 0), (([[0., 1., 2.]], [[3., 4., 5.]]), 1)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_stack(xs_n_axis, dtype, tensor_fn, dev, call):
# # smoke test
# (x1, x2), axis = xs_n_axis
# if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x1 = tensor_fn(x1, dtype, dev)
# x2 = tensor_fn(x2, dtype, dev)
# ret = ivy.stack((x1, x2), axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# axis_val = (axis % len(x1.shape) if (axis is not None and len(x1.shape) != 0) else len(x1.shape) - 1)
# if x1.shape == ():
# expected_shape = (2,)
# else:
# expected_shape = list(x1.shape)
# expected_shape.insert(axis_val, 2)
# assert ret.shape == tuple(expected_shape)
# # value test
# assert np.allclose(call(ivy.stack, (x1, x2), axis),
# np.asarray(ivy.functional.backends.numpy.stack((ivy.to_numpy(x1), ivy.to_numpy(x2)), axis)))
# unstack
@pytest.mark.parametrize(
    "x_n_axis", [(1, -1), ([[0., 1., 2.]], 0), ([[0., 1., 2.]], 1)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_unstack(x_n_axis, dtype, tensor_fn, dev, call):
    """ivy.unstack must split an array into a list along the given axis."""
    # smoke test
    x, axis = x_n_axis
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    ret = ivy.unstack(x, axis)
    # type test
    assert isinstance(ret, list)
    # cardinality test: each element drops the unstacked axis
    axis_val = (axis % len(x.shape) if (axis is not None and len(x.shape) != 0) else len(x.shape) - 1)
    if x.shape == ():
        expected_shape = ()
    else:
        expected_shape = list(x.shape)
        expected_shape.pop(axis_val)
    assert ret[0].shape == tuple(expected_shape)
    # value test
    assert np.allclose(call(ivy.unstack, x, axis), np.asarray(ivy.functional.backends.numpy.unstack(ivy.to_numpy(x), axis)))
# split
@pytest.mark.parametrize(
    "x_n_noss_n_axis_n_wr", [(1, 1, -1, False),
                             ([[0., 1., 2., 3.]], 2, 1, False),
                             ([[0., 1., 2.], [3., 4., 5.]], 2, 0, False),
                             ([[0., 1., 2.], [3., 4., 5.]], 2, 1, True),
                             ([[0., 1., 2.], [3., 4., 5.]], [2, 1], 1, False)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_split(x_n_noss_n_axis_n_wr, dtype, tensor_fn, dev, call):
    """ivy.split must match numpy for equal splits, size-list splits, and
    uneven splits with a remainder."""
    # smoke test
    x, num_or_size_splits, axis, with_remainder = x_n_noss_n_axis_n_wr
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    ret = ivy.split(x, num_or_size_splits, axis, with_remainder)
    # type test
    assert isinstance(ret, list)
    # cardinality test: first chunk's split-axis length is ceil(n/k) for an
    # int split count, or the first entry of an explicit size list
    axis_val = (axis % len(x.shape) if (axis is not None and len(x.shape) != 0) else len(x.shape) - 1)
    if x.shape == ():
        expected_shape = ()
    elif isinstance(num_or_size_splits, int):
        expected_shape = tuple([math.ceil(item/num_or_size_splits) if i == axis_val else item
                                for i, item in enumerate(x.shape)])
    else:
        expected_shape = tuple([num_or_size_splits[0] if i == axis_val else item for i, item in enumerate(x.shape)])
    assert ret[0].shape == expected_shape
    # value test: compare every chunk against the numpy reference backend
    pred_split = call(ivy.split, x, num_or_size_splits, axis, with_remainder)
    true_split = ivy.functional.backends.numpy.split(ivy.to_numpy(x), num_or_size_splits, axis, with_remainder)
    for pred, true in zip(pred_split, true_split):
        assert np.allclose(pred, true)
    # compilation test
    if call is helpers.torch_call:
        # pytorch scripting does not support Union or Numbers for type hinting
        return
# repeat
@pytest.mark.parametrize(
    "x_n_reps_n_axis", [(1, [1], 0), (1, 2, -1), (1, [2], None), ([[0., 1., 2., 3.]], (2, 1, 0, 3), -1)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_repeat(x_n_reps_n_axis, dtype, tensor_fn, dev, call):
    """ivy.repeat must match numpy for int and per-element repeat counts."""
    # smoke test
    x, reps_raw, axis = x_n_reps_n_axis
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    if not isinstance(reps_raw, int) and call is helpers.mx_call:
        # mxnet repeat only supports integer repeats
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    x_shape = list(x.shape)
    if call not in [helpers.jnp_call, helpers.torch_call]:
        # jax and pytorch repeat do not support repeats specified as lists
        ret_from_list = ivy.repeat(x, reps_raw, axis)
    reps = ivy.array(reps_raw, 'int32', dev)
    if call is helpers.mx_call:
        # mxnet only supports repeats defined as as int
        ret = ivy.repeat(x, reps_raw, axis)
    else:
        ret = ivy.repeat(x, reps, axis)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    # NOTE(review): sum(reps_raw) below only works when reps_raw is a
    # sequence; every parametrized int-reps case has a scalar x and so takes
    # the branch above — confirm before adding non-scalar int-reps cases.
    if x.shape == ():
        expected_shape = [reps_raw] if isinstance(reps_raw, int) else list(reps_raw)
    else:
        axis_wrapped = axis % len(x_shape)
        expected_shape = x_shape[0:axis_wrapped] + [sum(reps_raw)] + x_shape[axis_wrapped+1:]
    assert list(ret.shape) == expected_shape
    # value test
    if call is helpers.mx_call:
        # mxnet only supports repeats defined as as int
        assert np.allclose(call(ivy.repeat, x, reps_raw, axis),
                           np.asarray(ivy.functional.backends.numpy.repeat(ivy.to_numpy(x), ivy.to_numpy(reps), axis)))
    else:
        assert np.allclose(call(ivy.repeat, x, reps, axis),
                           np.asarray(ivy.functional.backends.numpy.repeat(ivy.to_numpy(x), ivy.to_numpy(reps), axis)))
# tile
# @pytest.mark.parametrize(
# "x_n_reps", [(1, [1]), (1, 2), (1, [2]), ([[0., 1., 2., 3.]], (2, 1))])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_tile(x_n_reps, dtype, tensor_fn, dev, call):
# # smoke test
# x, reps_raw = x_n_reps
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret_from_list = ivy.tile(x, reps_raw)
# reps = ivy.array(reps_raw, 'int32', dev)
# ret = ivy.tile(x, reps)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# if x.shape == ():
# expected_shape = tuple(reps_raw) if isinstance(reps_raw, list) else (reps_raw,)
# else:
# expected_shape = tuple([int(item * rep) for item, rep in zip(x.shape, reps_raw)])
# assert ret.shape == expected_shape
# # value test
# assert np.allclose(call(ivy.tile, x, reps),
# np.asarray(ivy.functional.backends.numpy.tile(ivy.to_numpy(x), ivy.to_numpy(reps))))
# zero_pad
@pytest.mark.parametrize(
    "x_n_pw", [(1, [[1, 1]]), (1, [[0, 0]]), ([[0., 1., 2., 3.]], [[0, 1], [1, 2]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_zero_pad(x_n_pw, dtype, tensor_fn, dev, call):
    """Test ivy.zero_pad for types, output shape and values."""
    x, pad_widths = x_n_pw
    # mxnet does not support 0-dimensional variables
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    # smoke check: padding specified as a plain nested list must also work
    ret_from_list = ivy.zero_pad(x, pad_widths)
    pw = ivy.array(pad_widths, 'int32', dev)
    ret = ivy.zero_pad(x, pw)
    # type test
    assert ivy.is_array(ret)
    # cardinality test: each axis grows by the sum of its two pad widths
    base_shape = [1] if x.shape == () else x.shape
    target_shape = tuple(int(dim + before + after)
                         for dim, (before, after) in zip(base_shape, pad_widths))
    assert ret.shape == target_shape
    # value test
    assert np.allclose(call(ivy.zero_pad, x, pw), ivy.functional.backends.numpy.zero_pad(ivy.to_numpy(x), ivy.to_numpy(pw)))
# fourier_encode
# @pytest.mark.parametrize(
# "x_n_mf_n_nb_n_gt", [([2.], 4., 4, [[2.0000000e+00, 1.7484555e-07, 9.9805772e-01,-5.2196848e-01,
# 3.4969111e-07, 1.0000000e+00, -6.2295943e-02, -8.5296476e-01, 1.0000000e+00]]),
# ([[1., 2.], [3., 4.], [5., 6.]], [2., 4.], 4,
# [[[1.0000000e+00, -8.7422777e-08, -8.7422777e-08, -8.7422777e-08,
# -8.7422777e-08, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
# -1.0000000e+00],
# [2.0000000e+00, 1.7484555e-07, 9.9805772e-01, -5.2196848e-01,
# -6.0398321e-07, 1.0000000e+00, -6.2295943e-02, -8.5296476e-01,
# 1.0000000e+00]],
# [[3.0000000e+00, -2.3849761e-08, -2.3849761e-08, -2.3849761e-08,
# -2.3849761e-08, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
# -1.0000000e+00],
# [4.0000000e+00, 3.4969111e-07, -1.2434989e-01, 8.9044148e-01,
# -1.2079664e-06, 1.0000000e+00, -9.9223840e-01, 4.5509776e-01,
# 1.0000000e+00]],
# [[5.0000000e+00, -6.7553248e-07, -6.7553248e-07, -6.7553248e-07,
# -6.7553248e-07, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
# -1.0000000e+00],
# [6.0000000e+00, 4.7699523e-08, -9.8256493e-01, -9.9706185e-01,
# -3.7192983e-06, 1.0000000e+00, 1.8591987e-01, 7.6601014e-02,
# 1.0000000e+00]]])])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_fourier_encode(x_n_mf_n_nb_n_gt, dtype, tensor_fn, dev, call):
# # smoke test
# x, max_freq, num_bands, ground_truth = x_n_mf_n_nb_n_gt
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# if isinstance(max_freq, list):
# max_freq = tensor_fn(max_freq, dtype, dev)
# ret = ivy.fourier_encode(x, max_freq, num_bands)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# x_shape = [1] if x.shape == () else list(x.shape)
# expected_shape = x_shape + [1 + 2*num_bands]
# assert list(ret.shape) == expected_shape
# # value test
# assert np.allclose(call(ivy.fourier_encode, x, max_freq, num_bands), np.array(ground_truth), atol=1e-5)
# constant_pad
@pytest.mark.parametrize(
    "x_n_pw_n_val", [(1, [[1, 1]], 1.5), (1, [[0, 0]], -2.7), ([[0., 1., 2., 3.]], [[0, 1], [1, 2]], 11.)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_constant_pad(x_n_pw_n_val, dtype, tensor_fn, dev, call):
    """Test ivy.constant_pad for types, output shape and values."""
    x, pad_widths, fill_value = x_n_pw_n_val
    # mxnet does not support 0-dimensional variables
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    # smoke check: pad widths given as a nested list must also be accepted
    ret_from_list = ivy.constant_pad(x, pad_widths, fill_value)
    pw = ivy.array(pad_widths, 'int32', dev)
    ret = ivy.constant_pad(x, pw, fill_value)
    # type test
    assert ivy.is_array(ret)
    # cardinality test: each axis grows by the sum of its two pad widths
    base_shape = [1] if x.shape == () else x.shape
    target_shape = tuple(int(dim + before + after)
                         for dim, (before, after) in zip(base_shape, pad_widths))
    assert ret.shape == target_shape
    # value test
    assert np.allclose(call(ivy.constant_pad, x, pw, fill_value),
                       np.asarray(ivy.functional.backends.numpy.constant_pad(ivy.to_numpy(x), ivy.to_numpy(pw), fill_value)))
# swapaxes
@pytest.mark.parametrize(
    "x_n_ax0_n_ax1", [([[1.]], 0, 1), ([[0., 1., 2., 3.]], 1, 0), ([[[0., 1., 2.], [3., 4., 5.]]], -2, -1)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_swapaxes(x_n_ax0_n_ax1, dtype, tensor_fn, dev, call):
    """Test ivy.swapaxes for types, output shape and values."""
    x, ax0, ax1 = x_n_ax0_n_ax1
    # mxnet does not support 0-dimensional variables
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    ret = ivy.swapaxes(x, ax0, ax1)
    # type test
    assert ivy.is_array(ret)
    # cardinality test: the two axes simply trade places in the shape
    swapped = list(x.shape)
    swapped[ax0], swapped[ax1] = swapped[ax1], swapped[ax0]
    assert ret.shape == tuple(swapped)
    # value test
    assert np.allclose(call(ivy.swapaxes, x, ax0, ax1),
                       np.asarray(ivy.functional.backends.numpy.swapaxes(ivy.to_numpy(x), ax0, ax1)))
# transpose
@pytest.mark.parametrize(
    "x_n_axes", [([[1.]], [1, 0]), ([[0., 1., 2., 3.]], [1, 0]), ([[[0., 1., 2.], [3., 4., 5.]]], [0, 2, 1])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_transpose(x_n_axes, dtype, tensor_fn, dev, call):
    """Test ivy.transpose for types, output shape and values.

    Fix: removed the unused local ``x_shape``.
    """
    # smoke test
    x, axes = x_n_axes
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    ret = ivy.transpose(x, axes)
    # type test
    assert ivy.is_array(ret)
    # cardinality test: output shape is the input shape permuted by axes
    assert ret.shape == tuple([x.shape[idx] for idx in axes])
    # value test
    assert np.allclose(call(ivy.transpose, x, axes), np.asarray(ivy.functional.backends.numpy.transpose(ivy.to_numpy(x), axes)))
# expand_dims
@pytest.mark.parametrize(
    "x_n_axis", [(1., 0), (1., -1), ([1.], 0), ([[0., 1., 2., 3.]], -2), ([[[0., 1., 2.], [3., 4., 5.]]], -3)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_expand_dims(x_n_axis, dtype, tensor_fn, dev, call):
    """Test ivy.expand_dims for types, output shape and values."""
    x, axis = x_n_axis
    # mxnet does not support 0-dimensional variables
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    ret = ivy.expand_dims(x, axis)
    # type test
    assert ivy.is_array(ret)
    # cardinality test: a new unit dimension appears at the given axis
    target = list(x.shape)
    target.insert(axis, 1)
    assert ret.shape == tuple(target)
    # value test
    assert np.allclose(call(ivy.expand_dims, x, axis), np.asarray(ivy.functional.backends.numpy.expand_dims(ivy.to_numpy(x), axis)))
# where
@pytest.mark.parametrize(
    "cond_n_x1_n_x2", [(True, 2., 3.), (0., 2., 3.), ([True], [2.], [3.]), ([[0.]], [[2., 3.]], [[4., 5.]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_where(cond_n_x1_n_x2, dtype, tensor_fn, dev, call):
    """Test ivy.where for types, output shape and values."""
    cond, x1, x2 = cond_n_x1_n_x2
    # mxnet does not support 0-dimensional variables
    if (isinstance(cond, Number) or isinstance(x1, Number) or isinstance(x2, Number))\
            and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        pytest.skip()
    cond = tensor_fn(cond, dtype, dev)
    x1 = tensor_fn(x1, dtype, dev)
    x2 = tensor_fn(x2, dtype, dev)
    selected = ivy.where(cond, x1, x2)
    # type test
    assert ivy.is_array(selected)
    # cardinality test: selection is elementwise, so the shape follows x1
    assert selected.shape == x1.shape
    # value test
    assert np.allclose(call(ivy.where, cond, x1, x2),
                       np.asarray(ivy.functional.backends.numpy.where(ivy.to_numpy(cond), ivy.to_numpy(x1), ivy.to_numpy(x2))))
# indices_where
@pytest.mark.parametrize(
    "x", [[True], [[0., 1.], [2., 3.]]])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_indices_where(x, dtype, tensor_fn, dev, call):
    """Test ivy.indices_where for types, output shape and values."""
    # mxnet does not support 0-dimensional variables
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    indices = ivy.indices_where(x)
    # type test
    assert ivy.is_array(indices)
    # cardinality test: one row per truthy element, one column per input dim
    assert len(indices.shape) == 2
    assert indices.shape[-1] == len(x.shape)
    # value test
    assert np.allclose(call(ivy.indices_where, x), np.asarray(ivy.functional.backends.numpy.indices_where(ivy.to_numpy(x))))
# isnan
@pytest.mark.parametrize(
    "x_n_res", [([True], [False]),
                ([[0., float('nan')], [float('nan'), 3.]],
                 [[False, True], [True, False]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_isnan(x_n_res, dtype, tensor_fn, dev, call):
    """Test ivy.isnan against the expected boolean mask."""
    x, expected = x_n_res
    # mxnet does not support 0-dimensional variables
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    mask = ivy.isnan(x)
    # type test
    assert ivy.is_array(mask)
    # cardinality test: elementwise op preserves the input shape
    assert mask.shape == x.shape
    # value test
    assert np.allclose(call(ivy.isnan, x), expected)
# isinf
@pytest.mark.parametrize(
    "x_n_res", [([True], [False]),
                ([[0., float('inf')], [float('nan'), -float('inf')]],
                 [[False, True], [False, True]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_isinf(x_n_res, dtype, tensor_fn, dev, call):
    """Test ivy.isinf against the expected boolean mask."""
    x, expected = x_n_res
    # mxnet does not support 0-dimensional variables
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    mask = ivy.isinf(x)
    # type test
    assert ivy.is_array(mask)
    # cardinality test: elementwise op preserves the input shape
    assert mask.shape == x.shape
    # value test
    assert np.allclose(call(ivy.isinf, x), expected)
# isfinite
@pytest.mark.parametrize(
    "x_n_res", [([True], [True]),
                ([[0., float('inf')], [float('nan'), 3.]],
                 [[True, False], [False, True]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_isfinite(x_n_res, dtype, tensor_fn, dev, call):
    """Test ivy.isfinite against the expected boolean mask."""
    x, expected = x_n_res
    # mxnet does not support 0-dimensional variables
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    mask = ivy.isfinite(x)
    # type test
    assert ivy.is_array(mask)
    # cardinality test: elementwise op preserves the input shape
    assert mask.shape == x.shape
    # value test
    assert np.allclose(call(ivy.isfinite, x), expected)
# reshape
@pytest.mark.parametrize(
    "x_n_shp", [(1., (1, 1)), (1., 1), (1., []), ([[1.]], []), ([[0., 1.], [2., 3.]], (1, 4, 1))])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_reshape(x_n_shp, dtype, tensor_fn, dev, call):
    """Test ivy.reshape for types, output shape and values."""
    x, new_shape = x_n_shp
    # mxnet does not support 0-dimensional variables
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    ret = ivy.reshape(x, new_shape)
    # type test
    assert ivy.is_array(ret)
    # cardinality test: an int shape means a single dimension of that size
    target = (new_shape,) if isinstance(new_shape, int) else tuple(new_shape)
    assert ret.shape == target
    # value test
    assert np.allclose(call(ivy.reshape, x, new_shape), np.asarray(ivy.functional.backends.numpy.reshape(ivy.to_numpy(x), new_shape)))
# broadcast_to
@pytest.mark.parametrize(
    "x_n_shp", [([1.], (2, 1)), ([[0., 1.], [2., 3.]], (10, 2, 2))])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_broadcast_to(x_n_shp, dtype, tensor_fn, dev, call):
    """Test ivy.broadcast_to for types, output rank and values."""
    x, target_shape = x_n_shp
    # mxnet does not support 0-dimensional variables
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    ret = ivy.broadcast_to(x, target_shape)
    # type test
    assert ivy.is_array(ret)
    # cardinality test: result rank matches the requested shape's rank
    assert len(ret.shape) == len(target_shape)
    # value test
    assert np.allclose(call(ivy.broadcast_to, x, target_shape),
                       np.asarray(ivy.functional.backends.numpy.broadcast_to(ivy.to_numpy(x), target_shape)))
# squeeze
# @pytest.mark.parametrize(
# "x_n_axis", [(1., 0), (1., -1), ([[1.]], None), ([[[0.], [1.]], [[2.], [3.]]], -1)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_squeeze(x_n_axis, dtype, tensor_fn, dev, call):
# # smoke test
# x, axis = x_n_axis
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret = ivy.squeeze(x, axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# if axis is None:
# expected_shape = [item for item in x.shape if item != 1]
# elif x.shape == ():
# expected_shape = []
# else:
# expected_shape = list(x.shape)
# expected_shape.pop(axis)
# assert ret.shape == tuple(expected_shape)
# # value test
# assert np.allclose(call(ivy.squeeze, x, axis), np.asarray(ivy.functional.backends.numpy.squeeze(ivy.to_numpy(x), axis)))
# zeros
# @pytest.mark.parametrize(
# "shape", [(), (1, 2, 3), tuple([1]*10)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_zeros(shape, dtype, tensor_fn, dev, call):
# # smoke test
# ret = ivy.zeros(shape, dtype, dev)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == tuple(shape)
# # value test
# assert np.allclose(call(ivy.zeros, shape, dtype, dev), np.asarray(ivy.functional.backends.numpy.zeros(shape, dtype)))
# zeros_like
@pytest.mark.parametrize(
    "x", [1, [1], [[1], [2], [3]]])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_zeros_like(x, dtype, tensor_fn, dev, call):
    """Test ivy.zeros_like for types, output shape and values."""
    # mxnet does not support 0-dimensional variables
    if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    zeros = ivy.zeros_like(x, dtype, dev)
    # type test
    assert ivy.is_array(zeros)
    # cardinality test: output mirrors the input's shape
    assert zeros.shape == x.shape
    # value test
    assert np.allclose(call(ivy.zeros_like, x, dtype, dev),
                       np.asarray(ivy.functional.backends.numpy.zeros_like(ivy.to_numpy(x), dtype)))
# ones
# @pytest.mark.parametrize(
# "shape", [(), (1, 2, 3), tuple([1]*10)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_ones(shape, dtype, tensor_fn, dev, call):
# # smoke test
# ret = ivy.ones(shape, dtype, dev)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == tuple(shape)
# # value test
# assert np.allclose(call(ivy.ones, shape, dtype, dev), np.asarray(ivy.functional.backends.numpy.ones(shape, dtype)))
# ones_like
# @pytest.mark.parametrize(
# "x", [1, [1], [[1], [2], [3]]])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_ones_like(x, dtype, tensor_fn, dev, call):
# # smoke test
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret = ivy.ones_like(x, dtype, dev)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == x.shape
# # value test
# assert np.allclose(call(ivy.ones_like, x, dtype, dev),
# np.asarray(ivy.functional.backends.numpy.ones_like(ivy.to_numpy(x), dtype)))
# full
# @pytest.mark.parametrize(
# "shape", [(), (1, 2, 3), tuple([1]*10)])
# @pytest.mark.parametrize(
# "fill_val", [2., -7.])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_full(shape, fill_val, dtype, tensor_fn, dev, call):
# # smoke test
# ret = ivy.full(shape, fill_val, dtype, dev)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == tuple(shape)
# # value test
# assert np.allclose(call(ivy.full, shape, fill_val, dtype, dev),
# np.asarray(ivy.functional.backends.numpy.full(shape, fill_val, dtype)))
# one_hot
@pytest.mark.parametrize(
    "ind_n_depth", [([0], 1), ([0, 1, 2], 3), ([[1, 3], [0, 0], [8, 4], [7, 9]], 10)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_one_hot(ind_n_depth, dtype, tensor_fn, dev, call):
    """Test ivy.one_hot for types, output shape and values."""
    indices, depth = ind_n_depth
    # mxnet does not support 0-dimensional variables
    if isinstance(indices, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        pytest.skip()
    indices = ivy.array(indices, 'int32', dev)
    encoded = ivy.one_hot(indices, depth, dev)
    # type test
    assert ivy.is_array(encoded)
    # cardinality test: one-hot appends a trailing axis of length depth
    assert encoded.shape == indices.shape + (depth,)
    # value test
    assert np.allclose(call(ivy.one_hot, indices, depth, dev),
                       np.asarray(ivy.functional.backends.numpy.one_hot(ivy.to_numpy(indices), depth)))
# cross
@pytest.mark.parametrize(
    "x1_n_x2", [([0., 1., 2.], [3., 4., 5.]), ([[0., 1., 2.], [2., 1., 0.]], [[3., 4., 5.], [5., 4., 3.]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_cross(x1_n_x2, dtype, tensor_fn, dev, call):
    """Test ivy.cross for types, output shape and values.

    Fix: build the inputs with the parametrized ``tensor_fn`` — the original
    always used ``ivy.array``, which made the ``tensor_fn`` parametrization a
    no-op (and the var_fn skip guard dead) and left the variable path untested.
    """
    # smoke test
    x1, x2 = x1_n_x2
    if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x1 = tensor_fn(x1, dtype, dev)
    x2 = tensor_fn(x2, dtype, dev)
    ret = ivy.cross(x1, x2)
    # type test
    assert ivy.is_array(ret)
    # cardinality test: the cross product preserves the input shape
    assert ret.shape == x1.shape
    # value test
    assert np.allclose(call(ivy.cross, x1, x2), np.asarray(ivy.functional.backends.numpy.cross(ivy.to_numpy(x1), ivy.to_numpy(x2))))
# matmul
@pytest.mark.parametrize(
    "x1_n_x2", [([[0., 1., 2.]], [[3.], [4.], [5.]]), ([[0., 1., 2.], [2., 1., 0.]], [[3., 4.], [5., 5.], [4., 3.]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_matmul(x1_n_x2, dtype, tensor_fn, dev, call):
    """Test ivy.matmul for types, output shape and values.

    Fix: build the inputs with the parametrized ``tensor_fn`` — the original
    always used ``ivy.array``, which made the ``tensor_fn`` parametrization a
    no-op (and the var_fn skip guard dead) and left the variable path untested.
    """
    # smoke test
    x1, x2 = x1_n_x2
    if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    x1 = tensor_fn(x1, dtype, dev)
    x2 = tensor_fn(x2, dtype, dev)
    ret = ivy.matmul(x1, x2)
    # type test
    assert ivy.is_array(ret)
    # cardinality test: (..., m, k) @ (k, n) -> (..., m, n)
    assert ret.shape == x1.shape[:-1] + (x2.shape[-1],)
    # value test
    assert np.allclose(call(ivy.matmul, x1, x2), np.asarray(ivy.functional.backends.numpy.matmul(ivy.to_numpy(x1), ivy.to_numpy(x2))))
# cumsum
@pytest.mark.parametrize(
    "x_n_axis", [([[0., 1., 2.]], -1), ([[0., 1., 2.], [2., 1., 0.]], 0), ([[0., 1., 2.], [2., 1., 0.]], 1)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_cumsum(x_n_axis, dtype, tensor_fn, dev, call):
    """Test ivy.cumsum for types, output shape and values.

    Fix: build the input with the parametrized ``tensor_fn`` — the original
    always used ``ivy.array``, which made the ``tensor_fn`` parametrization a
    no-op and left the variable path untested.
    """
    # smoke test
    x, axis = x_n_axis
    x = tensor_fn(x, dtype, dev)
    ret = ivy.cumsum(x, axis)
    # type test
    assert ivy.is_array(ret)
    # cardinality test: cumulative sum preserves the input shape
    assert ret.shape == x.shape
    # value test
    assert np.allclose(call(ivy.cumsum, x, axis), np.asarray(ivy.functional.backends.numpy.cumsum(ivy.to_numpy(x), axis)))
# cumprod
@pytest.mark.parametrize(
    "x_n_axis", [([[0., 1., 2.]], -1), ([[0., 1., 2.], [2., 1., 0.]], 0), ([[0., 1., 2.], [2., 1., 0.]], 1)])
@pytest.mark.parametrize(
    "exclusive", [True, False])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_cumprod(x_n_axis, exclusive, dtype, tensor_fn, dev, call):
    """Test ivy.cumprod (inclusive and exclusive) for types, shape and values.

    Fix: build the input with the parametrized ``tensor_fn`` — the original
    always used ``ivy.array``, which made the ``tensor_fn`` parametrization a
    no-op and left the variable path untested.
    """
    # smoke test
    x, axis = x_n_axis
    x = tensor_fn(x, dtype, dev)
    ret = ivy.cumprod(x, axis, exclusive)
    # type test
    assert ivy.is_array(ret)
    # cardinality test: cumulative product preserves the input shape
    assert ret.shape == x.shape
    # value test
    assert np.allclose(call(ivy.cumprod, x, axis, exclusive),
                       np.asarray(ivy.functional.backends.numpy.cumprod(ivy.to_numpy(x), axis, exclusive)))
# identity
@pytest.mark.parametrize(
    "dim_n_bs", [(3, None), (1, (2, 3)), (5, (1, 2, 3))])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_identity(dim_n_bs, dtype, tensor_fn, dev, call):
    """Test ivy.identity for types, output shape and values."""
    # smoke test
    dim, batch_shape = dim_n_bs
    ret = ivy.identity(dim, dtype, batch_shape, dev)
    # type test
    assert ivy.is_array(ret)
    # cardinality test: optional batch dims precede the (dim, dim) matrix
    assert ret.shape == (tuple(batch_shape) if batch_shape else ()) + (dim, dim)
    # value test
    assert np.allclose(call(ivy.identity, dim, dtype, batch_shape, dev),
                       np.asarray(ivy.functional.backends.numpy.identity(dim, dtype, batch_shape)))
# meshgrid
@pytest.mark.parametrize(
    "xs", [([1, 2, 3], [4, 5, 6]), ([1, 2, 3], [4, 5, 6, 7], [8, 9])])
@pytest.mark.parametrize(
    "indexing", ['xy', 'ij'])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_meshgrid(xs, indexing, dtype, tensor_fn, dev, call):
    """Test ivy.meshgrid for types, output shapes and values."""
    # smoke test
    arrays = [ivy.array(x, 'int32', dev) for x in xs]
    grids = ivy.meshgrid(*arrays, indexing=indexing)
    # type test
    for grid in grids:
        assert ivy.is_array(grid)
    # cardinality test: 'xy' indexing swaps the first two grid dimensions
    target_shape = tuple(len(x) for x in xs)
    if indexing == 'xy':
        target_shape = (target_shape[1], target_shape[0]) + target_shape[2:]
    for grid in grids:
        assert grid.shape == target_shape
    # value test
    assert np.allclose(
        call(ivy.meshgrid, *arrays, indexing=indexing),
        [np.asarray(i) for i in ivy.functional.backends.numpy.meshgrid(*[ivy.to_numpy(x) for x in arrays], indexing=indexing)])
# scatter_flat
@pytest.mark.parametrize(
    "inds_n_upd_n_size_n_tnsr_n_wdup", [([0, 4, 1, 2], [1, 2, 3, 4], 8, None, False),
                                        ([0, 4, 1, 2, 0], [1, 2, 3, 4, 5], 8, None, True),
                                        ([0, 4, 1, 2, 0], [1, 2, 3, 4, 5], None, [11, 10, 9, 8, 7, 6], True)])
@pytest.mark.parametrize(
    "red", ['sum', 'min', 'max', 'replace'])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_scatter_flat(inds_n_upd_n_size_n_tnsr_n_wdup, red, dtype, tensor_fn, dev, call):
    """Test ivy.scatter_flat for types, output shape and values.

    Covers scattering into a freshly-created tensor (``size`` given, target
    ``tensor`` is None) and into a pre-existing ``tensor``, for each
    reduction mode in ``red``, with and without duplicate indices.
    """
    # smoke test
    if red in ('sum', 'min', 'max') and call is helpers.mx_call:
        # mxnet does not support sum, min or max reduction for scattering
        pytest.skip()
    inds, upd, size, tensor, with_duplicates = inds_n_upd_n_size_n_tnsr_n_wdup
    if ivy.exists(tensor) and call is helpers.mx_call:
        # mxnet does not support scattering into pre-existing tensors
        pytest.skip()
    inds = ivy.array(inds, 'int32', dev)
    upd = tensor_fn(upd, dtype, dev)
    if tensor:
        # pytorch variables do not support in-place updates
        tensor = ivy.array(tensor, dtype, dev) if ivy.current_framework_str() == 'torch'\
            else tensor_fn(tensor, dtype, dev)
    ret = ivy.scatter_flat(inds, upd, size, tensor, red, dev)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    if size:
        assert ret.shape == (size,)
    else:
        # when no size is given the result matches the target tensor's shape
        assert ret.shape == tensor.shape
    # value test
    if red == 'replace' and with_duplicates:
        # replace with duplicates give non-deterministic outputs
        return
    assert np.allclose(call(ivy.scatter_flat, inds, upd, size, tensor, red, dev),
                       np.asarray(ivy.functional.backends.numpy.scatter_flat(
                           ivy.to_numpy(inds), ivy.to_numpy(upd), size,
                           ivy.to_numpy(tensor) if ivy.exists(tensor) else tensor, red)))
# scatter_nd
@pytest.mark.parametrize(
    "inds_n_upd_n_shape_tnsr_n_wdup",
    [([[4], [3], [1], [7]], [9, 10, 11, 12], [8], None, False), ([[0, 1, 2]], [1], [3, 3, 3], None, False),
     ([[0], [2]], [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
                   [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]], [4, 4, 4], None, False),
     ([[0, 1, 2]], [1], None, [[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                               [[4, 5, 6], [7, 8, 9], [1, 2, 3]],
                               [[7, 8, 9], [1, 2, 3], [4, 5, 6]]], False)])
@pytest.mark.parametrize(
    "red", ['sum', 'min', 'max', 'replace'])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_scatter_nd(inds_n_upd_n_shape_tnsr_n_wdup, red, dtype, tensor_fn, dev, call):
    """Test ivy.scatter_nd for types, output shape and values.

    Covers scattering into a freshly-created tensor (``shape`` given, target
    ``tensor`` is None) and into a pre-existing ``tensor``, for each
    reduction mode in ``red``.
    """
    # smoke test
    if red in ('sum', 'min', 'max') and call is helpers.mx_call:
        # mxnet does not support sum, min or max reduction for scattering
        pytest.skip()
    inds, upd, shape, tensor, with_duplicates = inds_n_upd_n_shape_tnsr_n_wdup
    if ivy.exists(tensor) and call is helpers.mx_call:
        # mxnet does not support scattering into pre-existing tensors
        pytest.skip()
    inds = ivy.array(inds, 'int32', dev)
    upd = tensor_fn(upd, dtype, dev)
    if tensor:
        # pytorch variables do not support in-place updates
        tensor = ivy.array(tensor, dtype, dev) if ivy.current_framework_str() == 'torch'\
            else tensor_fn(tensor, dtype, dev)
    ret = ivy.scatter_nd(inds, upd, shape, tensor, red, dev)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    if shape:
        assert tuple(ret.shape) == tuple(shape)
    else:
        # when no shape is given the result matches the target tensor's shape
        assert tuple(ret.shape) == tuple(tensor.shape)
    # value test
    if red == 'replace' and with_duplicates:
        # replace with duplicates give non-deterministic outputs
        return
    ret = call(ivy.scatter_nd, inds, upd, shape, tensor, red, dev)
    true = np.asarray(ivy.functional.backends.numpy.scatter_nd(
        ivy.to_numpy(inds), ivy.to_numpy(upd), shape,
        ivy.to_numpy(tensor) if ivy.exists(tensor) else tensor, red))
    assert np.allclose(ret, true)
# gather
@pytest.mark.parametrize(
    "prms_n_inds_n_axis", [([9, 8, 7, 6, 5, 4, 3, 2, 1, 0], [0, 4, 7], 0),
                           ([[1, 2], [3, 4]], [[0, 0], [1, 0]], 1)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_gather(prms_n_inds_n_axis, dtype, tensor_fn, dev, call):
    """Test ivy.gather for types, output shape and values."""
    # smoke test
    params, indices, axis = prms_n_inds_n_axis
    params = tensor_fn(params, dtype, dev)
    indices = ivy.array(indices, 'int32', dev)
    gathered = ivy.gather(params, indices, axis, dev)
    # type test
    assert ivy.is_array(gathered)
    # cardinality test: the output mirrors the indices' shape
    assert gathered.shape == indices.shape
    # value test
    assert np.allclose(call(ivy.gather, params, indices, axis, dev),
                       np.asarray(ivy.functional.backends.numpy.gather(ivy.to_numpy(params), ivy.to_numpy(indices), axis)))
# gather_nd
@pytest.mark.parametrize(
    "prms_n_inds", [([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[0, 1], [1, 0]]),
                    ([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[[0, 1]], [[1, 0]]]),
                    ([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[[0, 1, 0]], [[1, 0, 1]]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_gather_nd(prms_n_inds, dtype, tensor_fn, dev, call):
    """Test ivy.gather_nd for types, output shape and values."""
    # smoke test
    params, indices = prms_n_inds
    params = tensor_fn(params, dtype, dev)
    indices = ivy.array(indices, 'int32', dev)
    gathered = ivy.gather_nd(params, indices, dev)
    # type test
    assert ivy.is_array(gathered)
    # cardinality test: indices batch dims, then the un-indexed params dims
    assert gathered.shape == indices.shape[:-1] + params.shape[indices.shape[-1]:]
    # value test
    assert np.allclose(call(ivy.gather_nd, params, indices, dev),
                       np.asarray(ivy.functional.backends.numpy.gather_nd(ivy.to_numpy(params), ivy.to_numpy(indices))))
# linear_resample
@pytest.mark.parametrize(
    "x_n_samples_n_axis_n_y_true", [([[10., 9., 8.]], 9, -1, [[10., 9.75, 9.5, 9.25, 9., 8.75, 8.5, 8.25, 8.]]),
                                    ([[[10., 9.], [8., 7.]]], 5, -2,
                                     [[[10., 9.], [9.5, 8.5], [9., 8.], [8.5, 7.5], [8., 7.]]]),
                                    ([[[10., 9.], [8., 7.]]], 5, -1,
                                     [[[10., 9.75, 9.5, 9.25, 9.], [8., 7.75, 7.5, 7.25, 7.]]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_linear_resample(x_n_samples_n_axis_n_y_true, dtype, tensor_fn, dev, call):
    """Test ivy.linear_resample against precomputed expected outputs.

    Fix: removed the unused local ``num_vals``.
    """
    # smoke test
    x, samples, axis, y_true = x_n_samples_n_axis_n_y_true
    x = tensor_fn(x, dtype, dev)
    ret = ivy.linear_resample(x, samples, axis)
    # type test
    assert ivy.is_array(ret)
    # cardinality test: the resampled axis has `samples` entries, the rest unchanged
    x_shape = list(x.shape)
    axis = axis % len(x_shape)
    x_pre_shape = x_shape[0:axis]
    x_post_shape = x_shape[axis+1:]
    assert list(ret.shape) == x_pre_shape + [samples] + x_post_shape
    # value test
    y_true = np.array(y_true)
    y = call(ivy.linear_resample, x, samples, axis)
    assert np.allclose(y, y_true)
# exists
@pytest.mark.parametrize(
    "x", [[1.], None, [[10., 9., 8.]]])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_exists(x, dtype, tensor_fn, dev, call):
    """Test that ivy.exists returns True exactly when its input is not None."""
    # smoke test
    x = None if x is None else tensor_fn(x, dtype, dev)
    ret = ivy.exists(x)
    # type test
    assert isinstance(ret, bool)
    # value test: must agree with a plain None check
    assert ret == (x is not None)
# default
@pytest.mark.parametrize(
    "x_n_dv", [([1.], [2.]), (None, [2.]), ([[10., 9., 8.]], [2.])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_default(x_n_dv, dtype, tensor_fn, dev, call):
    """Test that ivy.default returns x when present, else the default value."""
    x, default_val = x_n_dv
    # smoke test
    x = None if x is None else tensor_fn(x, dtype, dev)
    default_val = tensor_fn(default_val, dtype, dev)
    ret = ivy.default(x, default_val)
    # type test
    assert ivy.is_array(ret)
    # value test: the default only kicks in for a None input
    expected = ivy.to_numpy(default_val if x is None else x)
    assert np.allclose(call(ivy.default, x, default_val), expected)
# dtype bits
@pytest.mark.parametrize(
    "x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
    "dtype", ivy.all_dtype_strs)
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array])
def test_dtype_bits(x, dtype, tensor_fn, dev, call):
    """Test that ivy.dtype_bits reports a recognised bit width."""
    # smoke test: skip dtypes the current backend cannot represent
    if ivy.invalid_dtype(dtype):
        pytest.skip()
    # mxnet does not support 0-dimensional variables
    if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        pytest.skip()
    x = tensor_fn(x, dtype, dev)
    bits = ivy.dtype_bits(ivy.dtype(x))
    # type test
    assert isinstance(bits, int)
    assert bits in [1, 8, 16, 32, 64]
# dtype_to_str
@pytest.mark.parametrize(
    "x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
    "dtype", ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array])
def test_dtype_to_str(x, dtype, tensor_fn, dev, call):
    """ivy.dtype_to_str must agree with ivy.dtype(..., as_str=True)."""
    # skip combinations the backend cannot represent
    if call is helpers.mx_call and dtype == 'int16':
        # mxnet does not support int16
        pytest.skip()
    if call is helpers.jnp_call and dtype in ['int64', 'float64']:
        # jax does not support int64 or float64 arrays
        pytest.skip()
    if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    arr = tensor_fn(x, dtype, dev)
    via_flag = ivy.dtype(arr, as_str=True)
    via_conversion = ivy.dtype_to_str(ivy.dtype(arr))
    # type test
    assert isinstance(via_flag, str)
    assert isinstance(via_conversion, str)
    # value test: both routes to the string form must agree
    assert via_conversion == via_flag
# dtype_from_str
@pytest.mark.parametrize(
    "x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
    "dtype", ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array])
def test_dtype_from_str(x, dtype, tensor_fn, dev, call):
    """Round-tripping a dtype through its string form must yield the same object."""
    # skip combinations the backend cannot represent
    if call is helpers.mx_call and dtype == 'int16':
        # mxnet does not support int16
        pytest.skip()
    if call is helpers.jnp_call and dtype in ['int64', 'float64']:
        # jax does not support int64 or float64 arrays
        pytest.skip()
    if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    arr = tensor_fn(x, dtype, dev)
    round_tripped = ivy.dtype_from_str(ivy.dtype(arr, as_str=True))
    # value test: identity, not just equality, after the round trip
    assert round_tripped is ivy.dtype(arr)
def test_cache_fn(dev, call):
    """ivy.cache_fn must memoise results, sharing one global cache per function."""
    def func():
        return ivy.random_uniform()

    # a single cached wrapper returns the identical object on every call
    cached_fn = ivy.cache_fn(func)
    first = cached_fn()
    again = cached_fn()
    fresh = func()
    assert ivy.to_numpy(first).item() == ivy.to_numpy(again).item()
    assert ivy.to_numpy(first).item() != ivy.to_numpy(fresh).item()
    assert first is again
    assert first is not fresh

    # new wrappers from repeated ivy.cache_fn calls share the same global dict
    first = ivy.cache_fn(func)()
    again = ivy.cache_fn(func)()
    fresh = func()
    assert ivy.to_numpy(first).item() == ivy.to_numpy(again).item()
    assert ivy.to_numpy(first).item() != ivy.to_numpy(fresh).item()
    assert first is again
    assert first is not fresh
def test_cache_fn_with_args(dev, call):
    """Cached functions must key their memoisation on the call arguments."""
    def func(_):
        return ivy.random_uniform()

    # one wrapper: repeated arg hits the cache, a different arg recomputes
    cached_fn = ivy.cache_fn(func)
    first = cached_fn(0)
    repeat = cached_fn(0)
    other = cached_fn(1)
    assert ivy.to_numpy(first).item() == ivy.to_numpy(repeat).item()
    assert ivy.to_numpy(first).item() != ivy.to_numpy(other).item()
    assert first is repeat
    assert first is not other

    # separate wrappers share one global cache keyed on function and args
    first = ivy.cache_fn(func)(0)
    repeat = ivy.cache_fn(func)(0)
    other = ivy.cache_fn(func)(1)
    assert ivy.to_numpy(first).item() == ivy.to_numpy(repeat).item()
    assert ivy.to_numpy(first).item() != ivy.to_numpy(other).item()
    assert first is repeat
    assert first is not other
# def test_framework_setting_with_threading(dev, call):
#
# if call is helpers.np_call:
# # Numpy is the conflicting framework being tested against
# pytest.skip()
#
# def thread_fn():
# ivy.set_framework('numpy')
# x_ = np.array([0., 1., 2.])
# for _ in range(2000):
# try:
# ivy.reduce_mean(x_)
# except TypeError:
# return False
# ivy.unset_framework()
# return True
#
# # get original framework string and array
# fws = ivy.current_framework_str()
# x = ivy.array([0., 1., 2.])
#
# # start numpy loop thread
# thread = threading.Thread(target=thread_fn)
# thread.start()
#
# # start local original framework loop
# ivy.set_framework(fws)
# for _ in range(2000):
# ivy.reduce_mean(x)
# ivy.unset_framework()
#
# assert not thread.join()
def test_framework_setting_with_multiprocessing(dev, call):
    """Framework selection must be process-local: a worker process running the
    numpy backend must not disturb this process's backend (and vice versa)."""

    if call is helpers.np_call:
        # Numpy is the conflicting framework being tested against
        pytest.skip()

    def worker_fn(out_queue):
        # runs in a separate process: switch to numpy and hammer ivy.mean
        ivy.set_framework('numpy')
        x_ = np.array([0., 1., 2.])
        for _ in range(1000):
            try:
                ivy.mean(x_)
            except TypeError:
                # a TypeError here means the parent's backend leaked into
                # this process
                out_queue.put(False)
                return
        ivy.unset_framework()
        out_queue.put(True)

    # get original framework string and array
    fws = ivy.current_framework_str()
    x = ivy.array([0., 1., 2.])

    # start the numpy loop in a separate process
    output_queue = multiprocessing.Queue()
    worker = multiprocessing.Process(target=worker_fn, args=(output_queue,))
    worker.start()

    # start local original framework loop concurrently
    ivy.set_framework(fws)
    for _ in range(1000):
        ivy.mean(x)
    ivy.unset_framework()

    # worker has put its verdict before exiting, so get_nowait is safe here
    worker.join()
    assert output_queue.get_nowait()
# def test_explicit_ivy_framework_handles(dev, call):
#
# if call is helpers.np_call:
# # Numpy is the conflicting framework being tested against
# pytest.skip()
#
# # store original framework string and unset
# fw_str = ivy.current_framework_str()
# ivy.unset_framework()
#
# # set with explicit handle caught
# ivy_exp = ivy.get_framework(fw_str)
# assert ivy_exp.current_framework_str() == fw_str
#
# # assert backend implemented function is accessible
# assert 'array' in ivy_exp.__dict__
# assert callable(ivy_exp.array)
#
# # assert joint implemented function is also accessible
# assert 'cache_fn' in ivy_exp.__dict__
# assert callable(ivy_exp.cache_fn)
#
# # set global ivy to numpy
# ivy.set_framework('numpy')
#
# # assert the explicit handle is still unchanged
# assert ivy.current_framework_str() == 'numpy'
# assert ivy_exp.current_framework_str() == fw_str
#
# # unset global ivy from numpy
# ivy.unset_framework()
# def test_class_ivy_handles(dev, call):
#
# if call is helpers.np_call:
# # Numpy is the conflicting framework being tested against
# pytest.skip()
#
# class ArrayGen:
#
# def __init__(self, ivyh):
# self._ivy = ivyh
#
# def get_array(self):
# return self._ivy.array([0., 1., 2.])
#
# # create instance
# ag = ArrayGen(ivy.get_framework())
#
# # create array from array generator
# x = ag.get_array()
#
# # verify this is not a numpy array
# assert not isinstance(x, np.ndarray)
#
# # change global framework to numpy
# ivy.set_framework('numpy')
#
# # create another array from array generator
# x = ag.get_array()
#
# # verify this is not still a numpy array
# assert not isinstance(x, np.ndarray)
# einops_rearrange
@pytest.mark.parametrize(
    "x_n_pattern_n_newx", [([[0., 1., 2., 3.]], 'b n -> n b', [[0.], [1.], [2.], [3.]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_rearrange(x_n_pattern_n_newx, dtype, tensor_fn, dev, call):
    """ivy.einops_rearrange must match einops.rearrange on the native array."""
    # smoke test (expected literal from the parametrization is unused; einops
    # itself provides the reference result)
    raw_x, pattern, _ = x_n_pattern_n_newx
    x = tensor_fn(raw_x, dtype, dev)
    ret = ivy.einops_rearrange(x, pattern)
    reference = einops.rearrange(ivy.to_native(x), pattern)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert list(ret.shape) == list(reference.shape)
    # value test
    assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(reference))
# einops_reduce
@pytest.mark.parametrize(
    "x_n_pattern_n_red_n_newx", [([[0., 1., 2., 3.]], 'b n -> b', 'mean', [1.5])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_reduce(x_n_pattern_n_red_n_newx, dtype, tensor_fn, dev, call):
    """ivy.einops_reduce must match einops.reduce on the native array."""
    # smoke test (expected literal unused; einops provides the reference)
    raw_x, pattern, reduction, _ = x_n_pattern_n_red_n_newx
    x = tensor_fn(raw_x, dtype, dev)
    ret = ivy.einops_reduce(x, pattern, reduction)
    reference = einops.reduce(ivy.to_native(x), pattern, reduction)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert list(ret.shape) == list(reference.shape)
    # value test
    assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(reference))
# einops_repeat
@pytest.mark.parametrize(
    "x_n_pattern_n_al_n_newx", [([[0., 1., 2., 3.]], 'b n -> b n c', {'c': 2},
                                 [[[0., 0.], [1., 1.], [2., 2.], [3., 3.]]])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_repeat(x_n_pattern_n_al_n_newx, dtype, tensor_fn, dev, call):
    """ivy.einops_repeat must match einops.repeat on the native array."""
    # smoke test (expected literal unused; einops provides the reference)
    raw_x, pattern, axes_lengths, _ = x_n_pattern_n_al_n_newx
    x = tensor_fn(raw_x, dtype, dev)
    ret = ivy.einops_repeat(x, pattern, **axes_lengths)
    reference = einops.repeat(ivy.to_native(x), pattern, **axes_lengths)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert list(ret.shape) == list(reference.shape)
    # value test
    assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(reference))
# profiler
# def test_profiler(dev, call):
#
# # ToDo: find way to prevent this test from hanging when run alongside other tests in parallel
#
# # log dir
# this_dir = os.path.dirname(os.path.realpath(__file__))
# log_dir = os.path.join(this_dir, '../log')
#
# # with statement
# with ivy.Profiler(log_dir):
# a = ivy.ones([10])
# b = ivy.zeros([10])
# a + b
# if call is helpers.mx_call:
# time.sleep(1) # required by MXNet for some reason
#
# # start and stop methods
# profiler = ivy.Profiler(log_dir)
# profiler.start()
# a = ivy.ones([10])
# b = ivy.zeros([10])
# a + b
# profiler.stop()
# if call is helpers.mx_call:
# time.sleep(1) # required by MXNet for some reason
# container types
def test_container_types(dev, call):
    """Every registered container type must expose a dict-like interface."""
    cont_types = ivy.container_types()
    assert isinstance(cont_types, list)
    for cont_type in cont_types:
        # each container type must quack like a mapping
        for attr_name in ('keys', 'values', 'items'):
            assert hasattr(cont_type, attr_name)
def test_inplace_arrays_supported(dev, call):
    """In-place array support must match the known capability of each backend."""
    fw = ivy.current_framework_str()
    if fw in ['numpy', 'mxnet', 'torch']:
        assert ivy.inplace_arrays_supported()
    elif fw in ['jax', 'tensorflow']:
        assert not ivy.inplace_arrays_supported()
    else:
        raise Exception('Unrecognized framework')
def test_inplace_variables_supported(dev, call):
    """In-place variable support must match the known capability of each backend."""
    fw = ivy.current_framework_str()
    if fw in ['numpy', 'mxnet', 'torch', 'tensorflow']:
        assert ivy.inplace_variables_supported()
    elif fw in ['jax']:
        assert not ivy.inplace_variables_supported()
    else:
        raise Exception('Unrecognized framework')
# @pytest.mark.parametrize(
# "x_n_new", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_inplace_update(x_n_new, tensor_fn, dev, call):
# x_orig, new_val = x_n_new
# if call is helpers.mx_call and isinstance(x_orig, Number):
# # MxNet supports neither 0-dim variables nor 0-dim inplace updates
# pytest.skip()
# x_orig = tensor_fn(x_orig, 'float32', dev)
# new_val = tensor_fn(new_val, 'float32', dev)
# if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
# (tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
# x = ivy.inplace_update(x_orig, new_val)
# assert id(x) == id(x_orig)
# assert np.allclose(ivy.to_numpy(x), ivy.to_numpy(new_val))
# return
# pytest.skip()
# @pytest.mark.parametrize(
# "x_n_dec", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_inplace_decrement(x_n_dec, tensor_fn, dev, call):
# x_orig, dec = x_n_dec
# if call is helpers.mx_call and isinstance(x_orig, Number):
# # MxNet supports neither 0-dim variables nor 0-dim inplace updates
# pytest.skip()
# x_orig = tensor_fn(x_orig, 'float32', dev)
# dec = tensor_fn(dec, 'float32', dev)
# new_val = x_orig - dec
# if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
# (tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
# x = ivy.inplace_decrement(x_orig, dec)
# assert id(x) == id(x_orig)
# assert np.allclose(ivy.to_numpy(new_val), ivy.to_numpy(x))
# return
# pytest.skip()
# @pytest.mark.parametrize(
# "x_n_inc", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_inplace_increment(x_n_inc, tensor_fn, dev, call):
# x_orig, inc = x_n_inc
# if call is helpers.mx_call and isinstance(x_orig, Number):
# # MxNet supports neither 0-dim variables nor 0-dim inplace updates
# pytest.skip()
# x_orig = tensor_fn(x_orig, 'float32', dev)
# inc = tensor_fn(inc, 'float32', dev)
# new_val = x_orig + inc
# if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
# (tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
# x = ivy.inplace_increment(x_orig, inc)
# assert id(x) == id(x_orig)
# assert np.allclose(ivy.to_numpy(new_val), ivy.to_numpy(x))
# return
# pytest.skip()
|
thread_test.py | import threading
from queue import Queue
import time
lock = threading.Lock()
# Print item pulled from queue
def do_work(num):
    """Pretend to process one queue item, printing it together with the
    handling thread's number and identity (serialised by the shared lock)."""
    # time.sleep(0.1)  # uncomment to simulate real work
    lock.acquire()
    try:
        print(num)
        # thread number only (creation order, starting at 1), not the full repr
        thread_no = str(threading.current_thread()).split(",")[0].split("-")[1]
        print("Current thread:", thread_no)
        print("Thread ident:", threading.get_ident())
    finally:
        lock.release()
# Pull items from queue to process
def worker():
    """Consume items from the shared queue forever, processing each in turn."""
    while True:
        task = q.get()
        do_work(task)
        q.task_done()
if __name__ == "__main__":
    # number of consumer threads and number of items to enqueue
    thread_num = 4
    queue_num = 10
    thread_list = []
    # Create threads
    for i in range(thread_num):
        t = threading.Thread(target = worker)
        # daemon threads die with the main thread, so the infinite worker
        # loops do not block interpreter shutdown
        t.daemon = True
        thread_list.append(t)
    print("Created", thread_num,"threads")
    # Create queue
    q = Queue()
    # Fill queue
    for i in range(queue_num):
        q.put(i)
    print("Queue filled with 0 -", queue_num-1)
    print("Starting threads")
    # Start threads
    for i in thread_list:
        i.start()
    # Block until every queued item has been marked done via q.task_done()
    q.join()
|
main_demo.py | import argparse
import sys
import os
from utils import *
import math
import time
import cv2
import numpy as np
from age_gender_ssrnet.SSRNET_model import SSR_net_general, SSR_net
import threading
# ======robot package ====== #
import imutils
import speech_recognition as sr
import pyaudio
import wave # 讀音檔 使用mic可以不用
import requests # robot library
# ============== face & agneder ================ #
# Command-line options for the face-detection model and camera/input source.
parser = argparse.ArgumentParser()
parser.add_argument('--model-cfg', type=str, default='./models/face-yolov3-tiny.cfg',
                    help='path to config file')
parser.add_argument('--model-weights', type=str,
                    default='./models/face-yolov3-tiny_41000.weights',
                    help='path to weights of model')
parser.add_argument('--image', type=str, default='',
                    help='path to image file')
parser.add_argument('--video', type=str, default='',
                    help='path to video file')
parser.add_argument('--src', type=int, default=3,
                    help='source of the camera')
parser.add_argument('--output-dir', type=str, default='outputs/',
                    help='path to the output directory')
args = parser.parse_args()
# print the arguments
print('----- info -----')
print('[i] The config file: ', args.model_cfg)
print('[i] The weights of model file: ', args.model_weights)
print('###########################################################\n')
# Give the configuration and weight files for the model and load the network
# using them.  Inference runs on CPU via OpenCV's DNN module.
net = cv2.dnn.readNetFromDarknet(args.model_cfg, args.model_weights)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
######################## Agender model parameter ##################################
# Setup global parameters
face_size = 64          # SSR-Net input resolution (64x64 crops)
face_padding_ratio = 0.10
# Default parameters for SSR-Net
stage_num = [3, 3, 3]
lambda_local = 1
lambda_d = 1
# Initialize gender net
gender_net = SSR_net_general(face_size, stage_num, lambda_local, lambda_d)()
gender_net.load_weights('age_gender_ssrnet/ssrnet_gender_3_3_3_64_1.0_1.0.h5')
# Initialize age net
age_net = SSR_net(face_size, stage_num, lambda_local, lambda_d)()
age_net.load_weights('age_gender_ssrnet/ssrnet_age_3_3_3_64_1.0_1.0.h5')
def predictAgeGender(faces):
    """Predict '<gender>,<age>' labels for a batch of cropped BGR face images."""
    # Stack the crops into an N x 64 x 64 x 3 float blob, normalised 0-255
    blob = np.empty((len(faces), face_size, face_size, 3))
    for idx, face_bgr in enumerate(faces):
        blob[idx, :, :, :] = cv2.resize(face_bgr, (64, 64))
        blob[idx, :, :, :] = cv2.normalize(blob[idx, :, :, :], None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
    # Predict gender and age with the two SSR-Net models
    genders = gender_net.predict(blob)
    ages = age_net.predict(blob)
    # Construct labels; gender score >= 0.5 is taken as male
    return ['{},{}'.format('Male' if (gender >= 0.5) else 'Female', int(age))
            for (gender, age) in zip(genders, ages)]
def collectFaces(frame, face_boxes):
    """Crop each detected face region out of the full-resolution frame.

    Boxes come from the resized detection frame (width x height) and are
    rescaled to the original frame (width_orig x height_orig) before cropping.
    """
    faces = []
    for box in face_boxes:
        # Convert box coordinates from the resized frame back to the original
        x1 = int(round(box[0] * width_orig / width))
        y1 = int(round(box[1] * height_orig / height))
        x2 = int(round(box[2] * width_orig / width))
        y2 = int(round(box[3] * height_orig / height))
        # Clamp to the frame bounds and slice (rows first, then columns)
        crop = frame[
            max(0, y1):min(y2 + 1, height_orig - 1),
            max(0, x1):min(x2 + 1, width_orig - 1),
            :
        ]
        faces.append(crop)
    return faces
########################################################################
def agneder_yolo():
    """Camera/recognition worker thread.

    Continuously grabs frames from the camera, runs YOLOv3-tiny face
    detection followed by SSR-Net age/gender prediction, draws the results,
    and greets detected people through the robot speaker API.

    Communicates with the navigation thread through module-level globals:
    reads ``status`` and ``voi_freq``; writes ``labels``, ``face``, ``age``,
    ``g``, ``width_orig`` and ``height_orig``.
    """
    print("start agender recognition")
    global width, height, labels, status, face, voi_freq, width_orig, height_orig, age, g
    wind_name = 'face detection using YOLOv3'
    # fixed: the window was previously created twice with identical arguments
    cv2.namedWindow(wind_name, cv2.WINDOW_NORMAL)
    cap = cv2.VideoCapture(args.src)
    cap.set(3, 640)  # frame width
    cap.set(4, 480)  # frame height
    while True:
        age = ""
        g = ""
        has_frame, frame = cap.read()
        if not has_frame:
            # fixed: the read result was previously unchecked, so a camera
            # failure crashed the thread on frame.shape below
            print('[i] ==> No frame from capture source, stopping')
            break
        start_time = time.time()
        # original frame size, used by collectFaces to rescale the boxes
        height_orig, width_orig = frame.shape[:2]
        start_time_yolo = time.time()
        # Create a 4D blob from a frame.
        blob = cv2.dnn.blobFromImage(frame, 1 / 255, (IMG_WIDTH, IMG_HEIGHT),
                                     [0, 0, 0], 1, crop=False)
        # Sets the input to the network
        net.setInput(blob)
        # Runs the forward pass to get output of the output layers
        outs = net.forward(get_outputs_names(net))
        # Remove the bounding boxes with low confidence
        face = post_process(frame, outs, CONF_THRESHOLD, NMS_THRESHOLD)
        end_time_yolo = time.time()
        print("yolo FPS: ", 1/(end_time_yolo-start_time_yolo))
        if len(face) > 0:
            # convert to agender input type
            faces = collectFaces(frame, face)
            # Get age and gender
            labels = predictAgeGender(faces)
            if len(labels) > 0:
                labels_list = labels[0].split(",")
                gender = labels_list[0]
                age = int(labels_list[-1])
                # Map (age, gender) to a Chinese form of address
                if age > 0 and age < 16:
                    g = "小朋友"
                elif age >= 16 and age <= 60:
                    if gender == "Male":
                        g = "先生"
                    else:
                        g = "小姐"
                else:
                    if gender == "Male":
                        g = "阿伯"
                    else:
                        g = "女士"
            url = 'http://169.254.246.191:8882/GeosatRobot/api/Device'
            voice_cmd = {"paraString": '{"time":"2018-08-08T12:00:00Z","requestId":"AAA","action":"start",'
                                       '"deviceId":"SPEAKER","content":"' + str(age)+"歲的"+ g + '你好"}'}
            # Greet only while navigating (status == 1), rate-limited to
            # every 10th navigation poll tick
            if status == 1 and voi_freq % 10 == 0:
                labels = []
                response_voi = requests.post(url, data=voice_cmd)
                print("第一個你好的status: ", status)
            for (x1, y1, x2, y2) in face:
                cv2.rectangle(frame, (x1, y1), (x2, y2), color=(0, 255, 0), lineType=8)
            # Draw labels
            for (label, box) in zip(labels, face):
                cv2.putText(frame, label, org=(box[0], box[1] - 10), fontFace=cv2.FONT_HERSHEY_PLAIN,
                            fontScale=1, color=(0, 64, 255), thickness=1, lineType=cv2.LINE_AA)
        elif status == 0:
            # idle: poll the robot API with caching disabled
            url = 'http://169.254.246.191:8882/GeosatRobot/api/Device'
            response_voi = requests.get(url, headers={'Cache-Control': 'no-cache','Pragma': 'no-cache'})
        end_time = time.time()
        print("FPS: {:.2f}".format(1/(end_time-start_time)))
        cv2.imshow(wind_name, frame)
        key = cv2.waitKey(1)
        if key == 27 or key == ord('q'):
            print('[i] ==> Interrupted by user!')
            break
    cap.release()
    cv2.destroyAllWindows()
    print('==> All done!')
    print('***********************************************************')
########################################################################
# ======================= navigation =========================== #
def Voice_To_Text(i):
    """Listen on the microphone, recognise a Mandarin command and map it to
    a speaker payload plus the next waypoint index.

    Args:
        i: current waypoint index (0-3; 4 means "return to dock").

    Returns:
        (recognised_text, speaker_payload, next_index).
        NOTE(review): speaker_payload stays the integer 0 when the text
        matches no known command — callers should not post it blindly.
    """
    voice_cmd = {"paraString": '{"time":"2018-08-08T12:00:00Z","requestId":"AAA","action":"start",'
                               '"deviceId":"SPEAKER","content":"請下達指令"}'}
    r = sr.Recognizer()  # speech-recognition engine
    with sr.Microphone() as source:  # route the microphone input to `source`
        print("請開始說話:")
        # prompt the user through the robot speaker before listening
        response_voi = requests.post('http://169.254.246.191:8882/GeosatRobot/api/Device', data=voice_cmd)
        time.sleep(3)
        r.adjust_for_ambient_noise(source)  # calibrate for background noise
        audio = r.listen(source)
    try:
        Text = r.recognize_google(audio, language="zh-TW")  # Google speech-to-text API
    except sr.UnknownValueError:
        Text = "無法翻譯"
    except sr.RequestError as e:
        Text = ("無法翻譯{0}".format(e))
    # speech recognition dictionary: canned speaker responses per command
    voice_para1 = {"paraString": '{"time":"2018-08-08T12:00:00Z","requestId":"AAA","action":"start",'
                                 '"deviceId":"SPEAKER","content":"好的,阿寶將開始送餐"}'}
    voice_para2 = {"paraString": '{"time":"2018-08-08T12:00:00Z","requestId":"AAA","action":"start",'
                                 '"deviceId":"SPEAKER","content":"不客氣,這是阿寶應該做的"}'}
    voice_para3 = {"paraString": '{"time":"2018-08-08T12:00:00Z","requestId":"AAA","action":"start",'
                                 '"deviceId":"SPEAKER","content":"不好意思,請客人您再說一次"}'}
    voice_para4 = {"paraString": '{"time":"2018-08-08T12:00:00Z","requestId":"AAA","action":"start",'
                                 '"deviceId":"SPEAKER","content":"阿寶正在返回充電站"}'}
    Mic_paras = [voice_para1, voice_para2, voice_para3, voice_para4]
    voice_mod = 0
    lim = 0
    # Map the recognised text to a response and update the waypoint index
    if Text == '繼續送餐':
        print('阿寶準備前往下個送餐地點。')
        voice_mod = Mic_paras[0]
        i += 1
    elif Text == '謝謝':
        print('不客氣,這是阿寶應該做的。')
        voice_mod = Mic_paras[1]
        i += 1
    elif Text == "無法翻譯":
        print('不好意思,這位客人請再說一次。')
        voice_mod = Mic_paras[2]
    elif Text == '停止動作':
        print("阿寶返回充電站")
        voice_mod = Mic_paras[3]
        i = 4
        lim = 1  # trigger: dock explicitly requested, do not wrap the index
    # fixed: was `if i == 4 & lim == 0:` — `&` binds tighter than `==`, so the
    # chained comparison evaluated as `i == (4 & lim) == 0` and the index was
    # never reset after the last waypoint
    if i == 4 and lim == 0:
        i = 0
    return Text, voice_mod, i
def navigation(url, i):
    """Drive the robot to waypoint `i` (0-3 map to points A-D; 4 docks),
    polling the navigation status until completion.

    Returns:
        switch: 0 once the robot has docked long enough (caller should stop
        its command loop), 1 otherwise.
    """
    print("start navigation")
    global status, voi_freq, age, g, labels
    switch = 1
    status = 1  # tells the recognition thread we are navigating
    print("initial status: ", status)
    voi_freq = 0
    # Robot API payloads: one per waypoint, plus dock and status queries
    nav_para1 = {
        "paraString": '{"time":"2018-08-08T12:00:00Z","requestId":"AAA","action":"start","deviceId":"NAVIGATION", "content":"A"}'}
    nav_para2 = {
        "paraString": '{"time":"2018-08-08T12:00:00Z","requestId":"AAA","action":"start","deviceId":"NAVIGATION", "content":"B"}'}
    nav_para3 = {
        "paraString": '{"time":"2018-08-08T12:00:00Z","requestId":"AAA","action":"start","deviceId":"NAVIGATION", "content":"C"}'}
    nav_para4 = {
        "paraString": '{"time":"2018-08-08T12:00:00Z","requestId":"AAA","action":"start","deviceId":"NAVIGATION", "content":"D"}'}
    # NOTE(review): the BACKTODOCK payload has a trailing comma inside the
    # JSON string — confirm the robot API tolerates it
    nav_para5 = {
        "paraString": '{"time":"2018-08-08T12:00:00Z","requestId":"AAA","action":"start","deviceId":"BACKTODOCK",}'}
    nav_para6 = {
        "paraString": '{"time":"2018-08-08T12:00:00Z","requestId":"AAA","action":"start","deviceId":"NAVIGATIONSTATUS"}'}
    Nav_paras = [nav_para1, nav_para2, nav_para3, nav_para4, nav_para5, nav_para6]
    voice_cmd = {"paraString": '{"time":"2018-08-08T12:00:00Z","requestId":"AAA","action":"start",'
                               '"deviceId":"SPEAKER","content":"阿寶正在充電"}'}
    # reset the shared greeting state written by the recognition thread
    age = " "
    g = " "
    while True:
        # (re-)issue the waypoint command and poll its completion status
        response_wp = requests.post(url, data=Nav_paras[i])
        time.sleep(2)
        # print("無進入語音判斷式")
        # if pre_age != " " and labels != []:
        #     if g != pre_gender or age != pre_age:
        #         print("進入語音")
        #         pre_age = age
        #         pre_gender = g
        #         labels = []
        #         voice_cmd = {"paraString": '{"time":"2018-08-08T12:00:00Z","requestId":"AAA","action":"start",'
        #                                    '"deviceId":"SPEAKER","content":"' + str(age) + "歲的" + g + '你好"}'}
        #         response_voi = requests.post(url, data=voice_cmd)
        voi_freq += 1
        response_rp = requests.post(url, data=Nav_paras[5])
        # NOTE(review): parses the status by a fixed character offset into the
        # response body — fragile; confirm against the actual API response
        cur_state = response_rp.text[169:178]
        if age != " ":
            # a face was recognised while navigating: greet the person
            voice_cmd = {"paraString": '{"time":"2018-08-08T12:00:00Z","requestId":"AAA","action":"start",'
                                       '"deviceId":"SPEAKER","content":"' + str(age) + "歲的" + g + '你好"}'}
            response_voi = requests.post(url, data=voice_cmd)
        if cur_state == 'COMPLETED':
            print("已抵達"+str(i)+"點")
            break
    if i == 4:
        # docking requested: keep issuing BACKTODOCK until the robot reports
        # COMPLETED for more than 30 consecutive polls, then shut down
        timer = 0
        while True:
            response_wp = requests.post(url, data=Nav_paras[4])
            time.sleep(1)
            response_rp = requests.post(url, data=Nav_paras[5])
            print(response_rp.text)
            dock_state = response_rp.text[169:178]
            if dock_state == 'COMPLETED':
                timer += 1
                print('number of count: ', timer)
            if timer > 30:
                switch = 0
                break
        # announce that charging has started
        response_voi = requests.post(url, data=voice_cmd)
    status = 0  # navigation finished; recognition thread goes idle
    print("end status: ", status)
    return switch
def rotation(url, rv):
    """Rotate the robot body one step toward the tracked face.

    A positive horizontal residual `rv` rotates one way, a non-positive one
    the other, via the ROBOTBODYROTATE device command.
    """
    ro_right = {"paraString": '{"time":"2018-08-08T12:00:00Z","requestId":"AAA","action":"start","deviceId":"ROBOTBODYROTATE","content":"-3/0.4"}'}
    ro_left = {"paraString":'{"time":"2018-08-08T12:00:00Z","requestId":"AAA","action":"start","deviceId":"ROBOTBODYROTATE","content":"3/0.4"}'}
    payload = ro_right if rv > 0 else ro_left
    response_ro = requests.post(url, data=payload)
def rt_move(url):
    """Nudge the robot forward with a small ROBOTBODYMOVE command."""
    payload = {"paraString": '{"time":"2018-08-08T12:00:00Z","requestId":"AAA","action":"start","deviceId":"ROBOTBODYMOVE","content":"0.1/0.1"}'}
    response_wv = requests.post(url, data=payload)
def rt(url):
    """Visually track the first detected face, rotating/advancing the robot
    until the face is centred and close, then greet the person once.

    Reads the globals written by agneder_yolo(): ``face`` (boxes),
    ``labels``, ``width_orig`` / ``height_orig``.
    """
    # fixed: the global statement previously declared the misspelled name
    # 'lables'; the function only reads these globals, so behaviour was
    # unaffected, but the declaration never matched the name actually used
    global labels, face, width_orig, height_orig
    while True:
        if len(face) > 0:
            # residual of the face centre w.r.t. the frame centre
            rx = (face[0][0] + face[0][2]) // 2 - width_orig / 2
            ry = (face[0][1] + face[0][3]) // 2 - height_orig / 2
            re_center = (rx, ry)
            # rotate toward the face while it is far off-centre horizontally
            if rx > 120:
                rotation(url, rx)
            elif rx < -120:
                rotation(url, rx)
            if rx < 120 and rx > -120:
                if ry > 50:
                    # roughly centred but the face sits low: step forward
                    rt_move(url)
                else:
                    g = "很高興見到您"
                    if len(labels) > 0:
                        labels_list = labels[0].split(",")
                        gender = labels_list[0]
                        age = int(labels_list[-1])
                        # Map (age, gender) to a Chinese form of address
                        if age > 0 and age < 16:
                            g = "小朋友"
                        elif age >= 16 and age <= 60:
                            if gender == "Male":
                                g = "先生"
                            else:
                                g = "小姐"
                        else:
                            if gender == "Male":
                                g = "阿伯"
                            else:
                                g = "女士"
                    # NOTE(review): when labels is empty, `age` falls back to
                    # the module-level global set by navigation() — confirm
                    # rt() is never called before navigation()
                    voice_cmd = {"paraString": '{"time":"2018-08-08T12:00:00Z","requestId":"AAA","action":"start",'
                                               '"deviceId":"SPEAKER","content":"'+str(age)+"歲的"+g+'你好"}'}
                    response_voi = requests.post(url, data=voice_cmd)
                    print("第2個你好的status: ", status)
                    break
        else:
            # no face in view: keep turning slowly to search for one
            rotation(url, 1)
def main():
    """Navigation/dialogue thread: drive the waypoint route, track faces,
    and loop on voice commands until docking stops the loop (switch == 0)."""
    # give the camera/recognition thread time to start producing detections
    time.sleep(5)
    print("start main")
    url = 'http://169.254.246.191:8882/GeosatRobot/api/Device'
    i = 0
    switch = navigation(url, i)
    rt(url)
    # switch = 1 # loop initial value
    # response_voi = requests.post(url, data=voice_cmd)
    while switch != 0:
        text, voice_cmd, i = Voice_To_Text(i)
        print("Text: ", text)
        if text == '無法翻譯':
            # recognition failed: retry without speaking or moving
            print("無法辨識")
            # response_voi = requests.post(url, data=voice_cmd)
            # time.sleep(2)
        elif "你好" in text:
            # NOTE(review): voice_cmd may still be the integer 0 here when the
            # text matched no command inside Voice_To_Text — confirm intended
            response_voi = requests.post(url, data=voice_cmd)
            time.sleep(1)
        else:
            # acknowledge the command, then navigate and resume face tracking
            response_voi = requests.post(url, data=voice_cmd)
            switch = navigation(url, i)
            rt(url)
def multi_threading():
    """Run the recognition loop and the navigation loop side by side and
    wait for both to finish."""
    recogniser = threading.Thread(target=agneder_yolo)
    navigator = threading.Thread(target=main)
    for worker in (recogniser, navigator):
        worker.start()
    for worker in (recogniser, navigator):
        worker.join()
    print("threading end")
if __name__ == "__main__":
    # main()
    # Shared state between the recognition thread and the navigation thread
    face_boxes = []    # detection boxes (resized frame coordinates)
    labels = []        # latest '<gender>,<age>' labels from the recogniser
    status = 0         # 1 while navigating, 0 while idle/charging
    voi_freq = 0       # navigation poll counter, rate-limits greetings
    width = 640        # detection frame size
    height = 480
    width_orig = 640   # raw camera frame size (updated per frame)
    height_orig = 480
    center_x = 0
    center_y = 0
    # app.run(threaded=True)
    multi_threading()
    # agneder_yolo()
    print("whole program is finish!")
|
transport.py | #
# Copyright (c) 2018 Sébastien RAMAGE
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
#
import threading
import logging
import time
import serial
import serial.tools.list_ports
import queue
import socket
import select
from pydispatch import dispatcher
import sys
from .const import ZIGATE_FAILED_TO_CONNECT
import struct
from binascii import unhexlify
LOGGER = logging.getLogger('zigate')
class ZIGATE_NOT_FOUND(Exception):
    """Raised when no ZiGate device can be located on any serial port."""
class ZIGATE_CANNOT_CONNECT(Exception):
    """Raised when a ZiGate device exists but the connection attempt fails."""
class BaseTransport(object):
    """Shared plumbing for ZiGate transports: frame re-assembly plus inbound
    and outbound message queues."""

    def __init__(self):
        self._buffer = b''             # bytes received but not yet framed
        self.queue = queue.Queue()     # outbound messages awaiting transmission
        self.received = queue.Queue()  # complete inbound frames

    def read_data(self, data):
        '''
        Read ZiGate output and split messages
        '''
        LOGGER.debug('Raw packet received, {}'.format(data))
        self._buffer += data
        # Frames are delimited by 0x01 ... 0x03: for each terminator found,
        # take the closest preceding start byte as the frame start.
        while True:
            endpos = self._buffer.find(b'\x03')
            if endpos == -1:
                break
            startpos = self._buffer.rfind(b'\x01', 0, endpos)
            if startpos == -1 or startpos >= endpos:
                LOGGER.error('Malformed packet received, ignore it')
            else:
                self.received.put(self._buffer[startpos:endpos + 1])
            # drop everything up to and including this terminator
            self._buffer = self._buffer[endpos + 1:]

    def send(self, data):
        # queue data for the concrete transport's writer loop
        self.queue.put(data)

    def is_connected(self):
        # concrete transports report their link state; the base has none
        pass
class FakeTransport(BaseTransport):
    '''
    Fake transport for test: records everything sent, acknowledges each
    command with a synthetic 0x8000 status frame and optionally replays a
    pre-registered canned response.
    '''
    def __init__(self):
        BaseTransport.__init__(self)
        self.sent = []            # raw frames handed to send(), in order
        self.auto_responder = {}  # request cmd id -> encoded canned response
        # canned replies for version / network-state style requests
        self.add_auto_response(0x0010, 0x8010, unhexlify(b'000f3ff0'))
        self.add_auto_response(0x0009, 0x8009, unhexlify(b'00000123456789abcdef12340123456789abcdef0b'))
        # by default add a fake xiaomi temp sensor on address abcd
        self.add_auto_response(0x0015, 0x8015, unhexlify(b'01abcd0123456789abcdef00aa'))

    def start_fake_response(self):
        """Start a daemon thread that emits a random temperature report
        (cluster 0x0402, address abcd) every 5 seconds."""
        def periodic_response():
            import random
            while True:
                time.sleep(5)
                # temperature in hundredths of a degree
                temp = int(round(random.random() * 40.0, 2) * 100)
                msg = struct.pack('!BHBHHBBHI', 1, int('abcd', 16), 1, 0x0402, 0, 0, 0x22, 4, temp)
                enc_msg = self.create_fake_response(0x8102, msg, random.randint(0, 255))
                self.received.put(enc_msg)
        t = threading.Thread(target=periodic_response)
        t.setDaemon(True)
        t.start()

    def is_connected(self):
        # the fake link is always up
        return True

    def send(self, data):
        """Record the outbound frame and enqueue the synthetic replies."""
        self.sent.append(data)
        # retrieve cmd: strip the 0x01/0x03 delimiters and unescape
        data = self.zigate_decode(data[1:-1])
        cmd = struct.unpack('!H', data[0:2])[0]
        # reply 0x8000 ok for cmd
        rssi = 255
        value = struct.pack('!BBHB', 0, 1, cmd, rssi)
        length = len(value)
        checksum = self.checksum(struct.pack('!H', 0x8000),
                                 struct.pack('!B', length),
                                 value)
        raw_message = struct.pack('!HHB{}s'.format(len(value)), 0x8000, length, checksum, value)
        enc_msg = self.zigate_encode(raw_message)
        enc_msg.insert(0, 0x01)
        enc_msg.append(0x03)
        enc_msg = bytes(enc_msg)
        self.received.put(enc_msg)
        # replay the canned response registered for this command, if any
        if cmd in self.auto_responder:
            self.received.put(self.auto_responder[cmd])

    def add_auto_response(self, cmd, resp, value, rssi=255):
        """Register a canned response frame to emit whenever `cmd` is sent."""
        enc_msg = self.create_fake_response(resp, value, rssi)
        self.auto_responder[cmd] = enc_msg

    def create_fake_response(self, resp, value, rssi=255):
        """Build a fully framed (0x01 ... 0x03), escaped response message."""
        value += struct.pack('!B', rssi)
        length = len(value)
        checksum = self.checksum(struct.pack('!H', resp),
                                 struct.pack('!B', length),
                                 value)
        raw_message = struct.pack('!HHB{}s'.format(len(value)), resp, length, checksum, value)
        enc_msg = self.zigate_encode(raw_message)
        enc_msg.insert(0, 0x01)
        enc_msg.append(0x03)
        enc_msg = bytes(enc_msg)
        return enc_msg

    def checksum(self, *args):
        """XOR of every byte (and bare int) across all arguments."""
        chcksum = 0
        for arg in args:
            if isinstance(arg, int):
                chcksum ^= arg
                continue
            for x in arg:
                chcksum ^= x
        return chcksum

    def zigate_encode(self, data):
        """Escape bytes below 0x10 as the pair (0x02, byte ^ 0x10)."""
        encoded = bytearray()
        for b in data:
            if b < 0x10:
                encoded.extend([0x02, 0x10 ^ b])
            else:
                encoded.append(b)
        return encoded

    def zigate_decode(self, data):
        """Reverse zigate_encode: 0x02 flags that the next byte is XOR-escaped."""
        flip = False
        decoded = bytearray()
        for b in data:
            if flip:
                flip = False
                decoded.append(b ^ 0x10)
            elif b == 0x02:
                flip = True
            else:
                decoded.append(b)
        return decoded

    def get_last_cmd(self):
        """Return the decoded payload of the most recent frame given to
        send(), skipping the 5-byte header; None if nothing was sent."""
        if not self.sent:
            return
        cmd = self.sent[-1]
        data = self.zigate_decode(cmd[1:-1])[5:]
        return data
class ThreadSerialConnection(BaseTransport):
    """Serial-port transport: owns a background thread that pumps bytes in
    from the ZiGate and flushes the outbound queue, reconnecting on error."""

    def __init__(self, device, port=None):
        BaseTransport.__init__(self)
        self._port = port
        self.device = device
        self._running = True  # cleared by close() to stop the listen thread
        # first connection attempt must succeed (no retry loop)
        self.reconnect(False)
        self.thread = threading.Thread(target=self.listen,
                                       name='ZiGate-Listen')
        self.thread.setDaemon(True)
        self.thread.start()

    def initSerial(self):
        # resolve 'auto' to a discovered port, then open at 115200 baud
        self._port = self._find_port(self._port)
        return serial.Serial(self._port, 115200)

    def reconnect(self, retry=True):
        """(Re-)open the serial link, with exponential backoff when `retry`
        is True; raise ZIGATE_CANNOT_CONNECT otherwise."""
        delay = 1
        while True:
            try:
                self.serial = self.initSerial()
                break
            except ZIGATE_NOT_FOUND:
                LOGGER.error('ZiGate has not been found, please check configuration.')
                sys.exit(2)
            except Exception:
                if not retry:
                    LOGGER.error('Cannot connect to ZiGate using port {}'.format(self._port))
                    raise ZIGATE_CANNOT_CONNECT('Cannot connect to ZiGate using port {}'.format(self._port))
                    # NOTE(review): unreachable — the raise above always exits
                    # this branch before sys.exit is evaluated
                    sys.exit(2)
                msg = 'Failed to connect, retry in {} sec...'.format(delay)
                dispatcher.send(ZIGATE_FAILED_TO_CONNECT, message=msg)
                LOGGER.error(msg)
                time.sleep(delay)
                # double the backoff up to roughly a minute
                if delay < 60:
                    delay *= 2

    def listen(self):
        """Background loop: read available bytes, frame them via read_data,
        and write any queued outbound messages."""
        while self._running:
            try:
                data = self.serial.read(self.serial.in_waiting)
            except Exception:
                data = None
                LOGGER.error('OOPS connection lost, reconnect...')
                self.reconnect()
            if data:
                self.read_data(data)
            # flush the outbound queue before sleeping
            while not self.queue.empty():
                data = self.queue.get()
                self.serial.write(data)
            time.sleep(0.05)

    def _find_port(self, port):
        '''
        automatically discover zigate port if needed
        '''
        port = port or 'auto'
        if port == 'auto':
            LOGGER.info('Searching ZiGate port')
            # ZiGate uses a PL2303 USB-serial bridge (vid:pid 067b:2303)
            devices = list(serial.tools.list_ports.grep('067b:2303'))
            if devices:
                port = devices[0].device
                if len(devices) == 1:
                    LOGGER.info('ZiGate found at {}'.format(port))
                else:
                    LOGGER.warning('Found the following devices')
                    for device in devices:
                        LOGGER.warning('* {0} - {0.manufacturer}'.format(device))
                    LOGGER.warning('Choose the first device... {}'.format(port))
            else:
                LOGGER.error('ZiGate not found')
                raise ZIGATE_NOT_FOUND('ZiGate not found')
        return port

    def is_connected(self):
        return self.serial.isOpen()

    def close(self):
        """Stop the listen thread, wait for it to exit, then close the port."""
        self._running = False
        while self.thread.is_alive():
            time.sleep(0.1)
        self.serial.close()
class ThreadSocketConnection(ThreadSerialConnection):
    """TCP variant of the transport; `self.serial` is actually a socket here."""
    def __init__(self, device, host, port=None):
        self._host = host
        ThreadSerialConnection.__init__(self, device, port)
    def initSerial(self):
        # Called by the base class; returns a connected TCP socket instead of
        # a serial.Serial object.
        if self._port in (None, 'auto'):
            # Ports commonly exposed by ZiGate WiFi firmwares.
            ports = [23, 9999]
        else:
            ports = [self._port]
        host = self._find_host(self._host)
        for port in ports:
            try:
                s = socket.create_connection((host, port), 10)
                LOGGER.debug('ZiGate found on {} port {}'.format(host, port))
                return s
            except Exception:
                LOGGER.debug('ZiGate not found on {} port {}'.format(host, port))
                continue
        LOGGER.error('Cannot connect to ZiGate using {} port {}'.format(self._host, self._port))
        raise ZIGATE_CANNOT_CONNECT('Cannot connect to ZiGate using {} port {}'.format(self._host, self._port))
    def _find_host(self, host):
        # Resolve 'auto' through mDNS discovery; may return None when nothing
        # is found (the raise below is intentionally disabled).
        host = host or 'auto'
        if host == 'auto':
            LOGGER.info('Searching ZiGate Wifi host')
            host = discover_host()
            if not host:
                LOGGER.error('ZiGate not found')
                # raise ZIGATE_NOT_FOUND('ZiGate not found')
        return host
    def listen(self):
        # Overrides the serial pump loop with a select()-based socket loop.
        while self._running:
            socket_list = [self.serial]
            read_sockets, write_sockets, error_sockets = select.select(socket_list, socket_list, [])
            if read_sockets:
                data = self.serial.recv(1024)
                if data:
                    self.read_data(data)
                else:
                    # recv() returning b'' means the peer closed the connection.
                    LOGGER.warning('OOPS connection lost, reconnect...')
                    self.reconnect()
            if write_sockets:
                while not self.queue.empty():
                    data = self.queue.get()
                    self.serial.sendall(data)
            time.sleep(0.05)
    def is_connected(self): # TODO: check if socket is alive
        return True
def discover_host():
    """Discover a ZiGate WiFi module via mDNS; return its IP or None (~5 s timeout)."""
    from zeroconf import ServiceBrowser, Zeroconf

    def _noop_handler(zeroconf, service_type, name, state_change):
        # ServiceBrowser requires a handler even though we poll `services` directly.
        pass

    zc = Zeroconf()
    browser = ServiceBrowser(zc, "_zigate._tcp.local.",
                             handlers=[_noop_handler])
    host = None
    for _ in range(51):
        time.sleep(0.1)
        if browser.services:
            service = next(iter(browser.services.values()))
            info = zc.get_service_info(service.name, service.alias)
            host = socket.inet_ntoa(info.address)
            break
    zc.close()
    return host
|
hidapi_backend.py | # pyOCD debugger
# Copyright (c) 2006-2020 Arm Limited
# Copyright (c) 2021 Chris Reed
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import platform
import six
import threading
from .interface import Interface
from .common import (
filter_device_by_usage_page,
generate_device_unique_id,
)
from ..dap_access_api import DAPAccessIntf
from ....utility.compatibility import to_str_safe
from ....utility.timeout import Timeout
LOG = logging.getLogger(__name__)
TRACE = LOG.getChild("trace")
TRACE.setLevel(logging.CRITICAL)
try:
import hid
except ImportError:
IS_AVAILABLE = False
else:
IS_AVAILABLE = True
# OS flags.
_IS_DARWIN = (platform.system() == 'Darwin')
_IS_WINDOWS = (platform.system() == 'Windows')
class HidApiUSB(Interface):
    """@brief CMSIS-DAP USB interface class using hidapi backend."""
    isAvailable = IS_AVAILABLE
    # hidapi on macOS queues at most ~this many input reports; see set_packet_count().
    HIDAPI_MAX_PACKET_COUNT = 30
    def __init__(self):
        super().__init__()
        # Vendor page and usage_id = 2
        self.device = None
        self.device_info = None
        self.thread = None
        # Released once per write(); tells the RX thread a response is expected.
        self.read_sem = threading.Semaphore(0)
        self.closed_event = threading.Event()
        # FIFO of packets read by the RX thread; a trailing None marks thread exit.
        self.received_data = collections.deque()
    def set_packet_count(self, count):
        # hidapi for macos has an arbitrary limit on the number of packets it will queue for reading.
        # Even though we have a read thread, it doesn't hurt to limit the packet count since the limit
        # is fairly high.
        if _IS_DARWIN:
            count = min(count, self.HIDAPI_MAX_PACKET_COUNT)
        self.packet_count = count
    def open(self):
        # Open the HID device and, except on Windows, start the RX thread.
        try:
            self.device.open_path(self.device_info['path'])
        except IOError as exc:
            raise DAPAccessIntf.DeviceError("Unable to open device: " + str(exc)) from exc
        # Windows does not use the receive thread because it causes packet corruption for some reason.
        if not _IS_WINDOWS:
            # Make certain the closed event is clear.
            self.closed_event.clear()
            # Start RX thread
            self.thread = threading.Thread(target=self.rx_task)
            self.thread.daemon = True
            self.thread.start()
    def rx_task(self):
        # RX thread body: block on read_sem, then read one packet per release.
        # The closed_event is checked both before and after acquiring, so that
        # close() can wake this thread and have it exit promptly.
        try:
            while not self.closed_event.is_set():
                self.read_sem.acquire()
                if not self.closed_event.is_set():
                    read_data = self.device.read(self.packet_size)
                    if TRACE.isEnabledFor(logging.DEBUG):
                        # Strip off trailing zero bytes to reduce clutter.
                        TRACE.debug("  USB IN < (%d) %s", len(read_data), ' '.join([f'{i:02x}' for i in bytes(read_data).rstrip(b'\x00')]))
                    self.received_data.append(read_data)
        finally:
            # Set last element of rcv_data to None on exit
            self.received_data.append(None)
    @staticmethod
    def get_all_connected_interfaces():
        """@brief Returns all the connected devices with CMSIS-DAP in the name.
        returns an array of HidApiUSB (Interface) objects
        """
        devices = hid.enumerate()
        boards = []
        for deviceInfo in devices:
            product_name = to_str_safe(deviceInfo['product_string'])
            if ("CMSIS-DAP" not in product_name):
                # Check the device path as a backup. Even though we can't get the interface name from
                # hidapi, it may appear in the path. At least, it does on macOS.
                device_path = to_str_safe(deviceInfo['path'])
                if "CMSIS-DAP" not in device_path:
                    # Skip non cmsis-dap devices
                    continue
            vid = deviceInfo['vendor_id']
            pid = deviceInfo['product_id']
            # Perform device-specific filtering.
            if filter_device_by_usage_page(vid, pid, deviceInfo['usage_page']):
                continue
            try:
                dev = hid.device(vendor_id=vid, product_id=pid, path=deviceInfo['path'])
            except IOError as exc:
                LOG.debug("Failed to open USB device: %s", exc)
                continue
            # Create the USB interface object for this device.
            new_board = HidApiUSB()
            new_board.vid = vid
            new_board.pid = pid
            # Fall back to hex vid/pid strings when descriptor strings are missing.
            new_board.vendor_name = deviceInfo['manufacturer_string'] or f"{vid:#06x}"
            new_board.product_name = deviceInfo['product_string'] or f"{pid:#06x}"
            new_board.serial_number = deviceInfo['serial_number'] \
                    or generate_device_unique_id(vid, pid, six.ensure_str(deviceInfo['path']))
            new_board.device_info = deviceInfo
            new_board.device = dev
            boards.append(new_board)
        return boards
    def write(self, data):
        """@brief Write data on the OUT endpoint associated to the HID interface"""
        if TRACE.isEnabledFor(logging.DEBUG):
            TRACE.debug("  USB OUT> (%d) %s", len(data), ' '.join([f'{i:02x}' for i in data]))
        # Pad the report to the full packet size as required by HID.
        data.extend([0] * (self.packet_size - len(data)))
        if not _IS_WINDOWS:
            # Let the RX thread read the response to this command.
            self.read_sem.release()
        # Leading 0 is the HID report ID.
        self.device.write([0] + data)
    def read(self, timeout=Interface.DEFAULT_READ_TIMEOUT):
        """@brief Read data on the IN endpoint associated to the HID interface"""
        # Windows doesn't use the read thread, so read directly.
        if _IS_WINDOWS:
            read_data = self.device.read(self.packet_size)
            if TRACE.isEnabledFor(logging.DEBUG):
                # Strip off trailing zero bytes to reduce clutter.
                TRACE.debug("  USB IN < (%d) %s", len(read_data), ' '.join([f'{i:02x}' for i in bytes(read_data).rstrip(b'\x00')]))
            return read_data
        # Other OSes use the read thread, so we check for and pull data from the queue.
        # Spin for a while if there's not data available yet. 100 µs sleep between checks.
        with Timeout(timeout, sleeptime=0.0001) as t_o:
            while t_o.check():
                if len(self.received_data) != 0:
                    break
            else:
                raise DAPAccessIntf.DeviceError(f"Timeout reading from device {self.serial_number}")
        if self.received_data[0] is None:
            raise DAPAccessIntf.DeviceError(f"Device {self.serial_number} read thread exited")
        # Trace when the higher layer actually gets a packet previously read.
        if TRACE.isEnabledFor(logging.DEBUG):
            # Strip off trailing zero bytes to reduce clutter.
            TRACE.debug("  USB RD < (%d) %s", len(self.received_data[0]),
                    ' '.join([f'{i:02x}' for i in bytes(self.received_data[0]).rstrip(b'\x00')]))
        return self.received_data.popleft()
    def close(self):
        """@brief Close the interface"""
        assert not self.closed_event.is_set()
        LOG.debug("closing interface")
        if not _IS_WINDOWS:
            # Signal the RX thread and wake it from read_sem.acquire() so it exits.
            self.closed_event.set()
            self.read_sem.release()
            self.thread.join()
            self.thread = None
        self.device.close()
|
chat_room_client.py | """
聊天室
1. 有人进入聊天室需要输入姓名,姓名不能重复
2. 有人进入聊天室时,其他人会收到通知: xxx进入了聊天室
3. 一个人发消息,其他人会收到: XXX:xxxxx
4. 有人退出聊天室,则其他人也会收到同志:XXX退出了聊天室
5. 扩展功能:服务器可以向所有用户发送公告:管理员消息:xxxxxx
技术分析
功能拆分和封装结构的决定
网络通讯协议设定
分功能逻辑讨论
架构 --> 框架 --> 逻辑结构模型
"""
from socket import *
from multiprocessing import *
import sys
class Client:
    """UDP chat-room client: log in with a unique nickname, then send/receive."""
    # Address of the chat-room server.
    ADDR = ("127.0.0.1", 8888)
    def __init__(self):
        self.__udp_socket = socket(AF_INET, SOCK_DGRAM)
        # Nickname chosen during login; prefixed to every outgoing message.
        self.name = ""
    def __login(self):
        # Keep prompting until the server accepts the nickname as unique
        # (the server replies "失败" / "failed" on a duplicate name).
        while True:
            self.name = input("请输入昵称(不能重复)")
            self.__udp_socket.sendto(self.name.encode(), Client.ADDR)
            data, addr = self.__udp_socket.recvfrom(1024)
            if data.decode() == "失败":
                continue
            else:
                break
        self.__udp_socket.sendto(f"{self.name}进入聊天室".encode(), Client.ADDR)
    # Loops forever; an empty line (just Enter) exits.
    def __send_message(self):
        while True:
            content = input(">>")
            msg = self.name + ": " + content
            if not content:
                self.__client_quit()
                break # exit on empty input (Enter)
            self.__udp_socket.sendto(msg.encode(), Client.ADDR)
    def __client_quit(self):
        # Announce departure, close the socket and terminate the process.
        self.__udp_socket.sendto(f"{self.name}退出聊天室".encode(), Client.ADDR)
        self.__udp_socket.close()
        sys.exit()
    # Receiving runs in a child process,
    # looping forever; as a daemon it dies when the parent process exits.
    def __receive_message(self):
        while True:
            data, addr = self.__udp_socket.recvfrom(1024)
            print(data.decode())
    def main(self):
        """Log in, start the receiver process, then run the send loop."""
        self.__login()
        p = Process(target=self.__receive_message, daemon=True)
        p.start()
        self.__send_message()
if __name__ == "__main__":
    # The guard is required here: this module starts a multiprocessing.Process,
    # and on spawn-based platforms (e.g. Windows) the child re-imports this
    # module — without the guard that would recursively start new clients.
    client = Client()
    client.main()
|
routes.py | import hashlib
import os
import time
from statistics import mean
from threading import Thread
import cv2
import imutils
import numpy as np
from flask import Flask, render_template, make_response
from flask_restful import Api, Resource, reqparse
from sklearn.externals import joblib
from werkzeug.datastructures import FileStorage
from tools import file_utils, feature_extractor, face_aligner
# constants
from tools.depth import monodepth_simple
from tools.face_detector import face_detector
from tools.map_extractor.utils.ImageAligner import ImageAligner
from tools.map_extractor.utils.ImageCropper import ImageCropper
from tools.saliency_extractor import saliency
from tools.vole import predict_illuminant
UPLOAD_FOLDER_KEY = 'UPLOAD_FOLDER'
ARGUMENT_FILE_PARAMETER = 'file'
# Per-client artifact directories under static/client/.
MYDIR = os.path.join(os.path.dirname(__file__), 'static', 'client')
PATH_VIDEOS = os.path.join(MYDIR, 'videos')
PATH_FRAMES = os.path.join(MYDIR, 'frames')
PATH_FEATURES = os.path.join(MYDIR, 'features')
PATH_PREDICTIONS = os.path.join(MYDIR, 'predictions')
DEPTH_FRAMES = os.path.join(MYDIR, 'depth')
SALIENCY_FRAMES = os.path.join(MYDIR, 'saliency')
ILLUMINATION_FRAMES = os.path.join(MYDIR, 'illumination')
# set up Flask application
app = Flask(__name__)
app.config[UPLOAD_FOLDER_KEY] = 'static/uploads'
api = Api(app)
# Absolute paths of the pre-fitted per-cue classifiers (host-specific).
DEPTH_MODEL_PATH = '/home/tiagojc/spoopy/data/fitted_model_depth.sav'
ILLUMINATION_MODEL_PATH = '/home/tiagojc/spoopy/data/fitted_model_illumination.sav'
SALIENCY_MODEL_PATH = '/home/tiagojc/spoopy/data/fitted_model_saliency.sav'
# DEPTH_MODEL_PATH = '/Users/rodrigobresan/Documents/dev/github/anti_spoofing/spoopy/static/evaluate/cross_dataset_combinations/ra/cbsr/depth/fitted_model.sav';
# ILLUMINATION_MODEL_PATH = '/Users/rodrigobresan/Documents/dev/github/anti_spoofing/spoopy/static/evaluate/cross_dataset_combinations/ra/cbsr/depth/fitted_model.sav';
# SALIENCY_MODEL_PATH = '/Users/rodrigobresan/Documents/dev/github/anti_spoofing/spoopy/static/evaluate/cross_dataset_combinations/ra/cbsr/depth/fitted_model.sav';
#
# One classifier per visual cue, loaded once at startup.
model_depth = joblib.load(DEPTH_MODEL_PATH)
model_illumination = joblib.load(ILLUMINATION_MODEL_PATH)
model_saliency = joblib.load(SALIENCY_MODEL_PATH)
#
# Face detection/alignment helpers shared by all request handlers.
fc = face_detector.FaceCropper()
detector, fa = face_aligner.align_faces.make_face_aligner()
# Root endpoint
class Root(Resource):
    """Serves the HTML results page at the application root.

    The no-op __init__ was removed; the default constructor is sufficient.
    """

    def get(self):
        """Render the results template as an HTML response."""
        headers = {'Content-Type': 'text/html'}
        return make_response(render_template('results.html'), 200, headers)
# Upload video endpoint
def save_raw_artifact():
    """Store the uploaded request file under PATH_VIDEOS and return its id.

    The stored name is "<md5(original_name)>_<ext>.<ext>"; the extension is
    embedded in the id so get_video_name_from_id() can later rebuild the name.
    """
    parser = reqparse.RequestParser()
    parser.add_argument(ARGUMENT_FILE_PARAMETER, type=FileStorage, location='files')
    args = parser.parse_args()
    file = args[ARGUMENT_FILE_PARAMETER]
    file_name = file.filename
    # rsplit keeps multi-dot names ("my.video.mp4") working; the previous
    # split(".")[1] returned the wrong segment for such names.
    file_extension = file_name.rsplit(".", 1)[1]
    file_hash = compute_md5(file_name)
    new_file_name = file_hash + "_" + file_extension
    full_file_name = new_file_name + "." + file_extension
    output_path = os.path.join(PATH_VIDEOS, full_file_name)
    file_utils.file_helper.guarantee_path_preconditions(PATH_VIDEOS)
    file.save(output_path)
    return new_file_name
class BaseResponse(object):
    # Minimal response payload object carrying only an artifact identifier.
    def __init__(self, id):
        # id: hash-based identifier as produced by save_raw_artifact().
        self.id = id
class UploadVideo(Resource):
    """POST endpoint: store an uploaded video and split it into frames."""

    def post(self):
        video_id = save_raw_artifact()
        return {'id': video_id,
                'frames': split_video_with_id(video_id)}, 201
class UploadImage(Resource):
    """POST endpoint: store an uploaded image as its own single raw 'frame'."""

    def post(self):
        image_id = save_raw_artifact()
        raw_dir = os.path.join(PATH_FRAMES, image_id, 'raw')
        stored_video = os.path.join(PATH_VIDEOS, get_video_name_from_id(image_id))
        file_utils.file_helper.copy_file(stored_video, raw_dir)
        print('output_frames: ', raw_dir)
        frame_urls = list_frames_full_path(raw_dir)
        print('frames: ', frame_urls)
        return {'id': image_id,
                'frames': frame_urls}, 201
class UploadAndIllumination(Resource):
    """POST endpoint: upload an image and return its illumination maps."""

    def post(self):
        try:
            file_id = save_raw_artifact()
            output_frames = os.path.join(PATH_FRAMES, file_id, 'raw')
            file_utils.file_helper.copy_file(os.path.join(PATH_VIDEOS, get_video_name_from_id(file_id)), output_frames)
            frames_unaligned = generate_illumination_maps(file_id)
            return make_short_results_response(frames_unaligned), 200
        except Exception as e:
            # An exception instance is not JSON serializable; return its message.
            # (The old unreachable bare-except returning a set literal was removed.)
            return {'e': str(e)}, 500
class UploadAndSaliency(Resource):
    """POST endpoint: upload an image and return its saliency maps."""

    def post(self):
        try:
            file_id = save_raw_artifact()
            output_frames = os.path.join(PATH_FRAMES, file_id, 'raw')
            file_utils.file_helper.copy_file(os.path.join(PATH_VIDEOS, get_video_name_from_id(file_id)), output_frames)
            frames_unaligned = generate_saliency_maps(file_id)
            return make_short_results_response(frames_unaligned), 200
        except Exception as e:
            # An exception instance is not JSON serializable; return its message.
            # (The old unreachable bare-except returning a set literal was removed.)
            return {'e': str(e)}, 500
class UploadAndDepth(Resource):
    """POST endpoint: upload an image and return its depth maps."""

    def post(self):
        try:
            file_id = save_raw_artifact()
            output_frames = os.path.join(PATH_FRAMES, file_id, 'raw')
            file_utils.file_helper.copy_file(os.path.join(PATH_VIDEOS, get_video_name_from_id(file_id)), output_frames)
            frames_unaligned = generate_depth_maps(file_id)
            return make_short_results_response(frames_unaligned), 200
        except Exception as e:
            # An exception instance is not JSON serializable; return its message.
            # (The old unreachable bare-except returning a set literal was removed.)
            return {'e': str(e)}, 500
class DepthInference(Resource):
    """POST endpoint: compute depth maps for an already-uploaded item."""

    def post(self):
        item_id = get_video_id()
        depth_frames = generate_depth_maps(item_id)
        return make_short_results_response(depth_frames), 200
class IlluminationInference(Resource):
    """POST endpoint: compute illumination maps for an already-uploaded item."""

    def post(self):
        item_id = get_video_id()
        illumination_frames = generate_illumination_maps(item_id)
        return make_short_results_response(illumination_frames), 200
class SaliencyInference(Resource):
    """POST endpoint: compute saliency maps for an already-uploaded item."""

    def post(self):
        item_id = get_video_id()
        saliency_frames = generate_saliency_maps(item_id)
        return make_short_results_response(saliency_frames), 200
def make_short_results_response(frames):
    """Wrap a frame list in the minimal response payload."""
    response = dict(frames=frames)
    return response
def make_result_response(frames_unaligned, frames_aligned, prediction):
    """Full response payload: raw frames, aligned frames and the prediction."""
    return dict(frames_unaligned=frames_unaligned,
                frames_aligned=frames_aligned,
                prediction=prediction)
def make_frames_response(frames_list):
    """Wrap a frame list under the 'frames' key."""
    return dict(frames=frames_list)
def list_frames_full_path(dir):
    """Case-insensitively sorted public URLs of all files directly under *dir*."""
    frame_urls = list(get_files_full_path(dir))
    return sort(frame_urls)
# Local filesystem prefix that is rewritten into the public URL prefix when
# frame paths are returned to API clients (see get_files_full_path).
PATH_LOCAL = "/home/tiagojc/spoopy/spoopy/spoopy/"
PATH_REMOTE = "http://cruzeiro.cti.gov.br:5000/"
def get_files_full_path(path):
    """Yield a public URL for every regular file directly under *path*."""
    # Hoist the invariant local->remote prefix rewrite out of the loop.
    url_base = path.replace(PATH_LOCAL, PATH_REMOTE)
    for entry in os.listdir(path):
        if os.path.isfile(os.path.join(path, entry)):
            yield os.path.join(url_base, entry)
def list_frames(dir):
    """Case-insensitively sorted names of the files directly under *dir*."""
    names = list(get_files(dir))
    return sort(names)
def get_files(path):
    """Yield the names of regular files directly under *path* (subdirs skipped)."""
    for entry in os.listdir(path):
        if os.path.isfile(os.path.join(path, entry)):
            yield entry
def sort(items):
    """Return *items* sorted case-insensitively.

    The parameter was renamed from `list` to stop shadowing the builtin; all
    in-file callers pass it positionally, so the rename is safe.
    """
    return sorted(items, key=str.lower)
# Split video endpoint with id
def split_video_with_id(item_id):
    """Split the stored video for *item_id* into frames; return their URLs.

    Skips the expensive split when the frames directory already exists.
    The duplicated return statement of the original was folded into one.
    """
    video_name = get_video_name_from_id(item_id)
    path_videos = os.path.join(PATH_VIDEOS, video_name)
    output_frames = os.path.join(PATH_FRAMES, item_id, 'raw')
    if os.path.exists(output_frames):
        print("Split already done")
    else:
        file_utils.file_helper.split_video_into_frames(path_videos, output_frames)
    return list_frames_full_path(output_frames)
# Apply depth inference
def generate_depth_maps(item_id):
    """Run monocular depth inference over the raw frames of *item_id*."""
    src_dir = os.path.join(PATH_FRAMES, item_id, 'raw')
    dst_dir = os.path.join(PATH_FRAMES, item_id, 'depth')
    if os.path.exists(dst_dir):
        # Inference still re-runs; the early return is intentionally disabled.
        print("Depth already done")
    file_utils.file_helper.guarantee_path_preconditions(dst_dir)
    monodepth_simple.apply_depth_inference_on_folder(src_dir, dst_dir)
    return list_frames_full_path(dst_dir)
# Apply illumination inference
def generate_illumination_maps(item_id):
    """Run illuminant-map estimation over the raw frames of *item_id*."""
    src_dir = os.path.join(PATH_FRAMES, item_id, 'raw')
    dst_dir = os.path.join(PATH_FRAMES, item_id, 'illumination')
    if os.path.exists(dst_dir):
        # Inference still re-runs; the early return is intentionally disabled.
        print("Illumination already done")
    file_utils.file_helper.guarantee_path_preconditions(dst_dir)
    predict_illuminant.predict_illuminant(src_dir, dst_dir)
    return list_frames_full_path(dst_dir)
# Apply saliency inference
def generate_saliency_maps(item_id):
    """Run RBD saliency extraction over the raw frames of *item_id*."""
    src_dir = os.path.join(PATH_FRAMES, item_id, 'raw')
    dst_dir = os.path.join(PATH_FRAMES, item_id, 'saliency')
    if os.path.exists(dst_dir):
        # Inference still re-runs; the early return is intentionally disabled.
        print("Saliency already done")
    file_utils.file_helper.guarantee_path_preconditions(dst_dir)
    saliency.extract_rbd_saliency_folder(src_dir, dst_dir)
    return list_frames_full_path(dst_dir)
class AlignImages(Resource):
    # NOTE(review): run_align_images() requires (item_id, property,
    # extension_frames), but is called here with only item_id — this raises
    # TypeError at runtime. Confirm the intended map type and extension.
    def post(self):
        item_id = get_video_id()
        run_align_images(item_id)
        return item_id, 201
def run_align_images(item_id, property, extension_frames):
    """Align every raw frame of *item_id* in parallel (one thread per frame).

    property: map subfolder to align ('depth', 'illumination' or 'saliency');
    extension_frames: frame file extension handed to ImageAligner.
    Returns the sorted URLs of the '<property>_aligned' output frames.
    """
    raw_frames = os.path.join(PATH_FRAMES, item_id, 'raw')
    threads = []
    for single_frame in list_frames(raw_frames):
        thread = Thread(target=align_single_frame,
                        args=(item_id, single_frame, detector, fa, property, extension_frames))
        threads.append(thread)
        thread.start()
        print('done: ', single_frame)
    # Wait for every alignment thread before listing the results.
    for thread in threads:
        thread.join()
    return list_frames_full_path(os.path.join(PATH_FRAMES, item_id, property + '_aligned'))
def align_single_frame(item_name, current_frame_name, detector, fa, property, extension_frames):
    """Rotate and crop one *property* frame using face geometry from the raw frame.

    The face angle is measured on the raw frame, the corresponding map frame is
    rotated by that angle, then cropped to the detected face coordinates.
    Skips frames whose aligned output already exists.
    """
    final_aligned_dir = os.path.join(PATH_FRAMES, item_name, property + '_aligned')
    final_aligned_path = os.path.join(final_aligned_dir, current_frame_name)
    if os.path.exists(final_aligned_path):
        # Already aligned; nothing to do.
        return
    if not os.path.exists(final_aligned_dir):
        os.makedirs(final_aligned_dir, exist_ok=True)
    original_frame = os.path.join(PATH_FRAMES, item_name, 'raw', current_frame_name)
    original_angle = face_aligner.align_faces.get_face_angle(original_frame, detector, fa)
    original_rotated = imutils.rotate(cv2.imread(original_frame), original_angle)
    # Face coordinates are taken from the rotated raw frame, then reused to
    # crop the rotated map frame below.
    coordinates = fc.get_faces_coordinates(original_rotated)
    print('coordinates none: ', coordinates is None)
    cropper = ImageCropper(fc, coordinates)
    original_path_frame = os.path.join(PATH_FRAMES, item_name, property, current_frame_name)
    aligner = ImageAligner(original_path_frame, original_angle, extension_frames)
    aligned_img = aligner.align()
    print('final: ', final_aligned_path)
    cropper.crop(aligned_img, final_aligned_path)
class FeatureExtractor(Resource):
    """POST endpoint: extract CNN features for each map type in parallel."""

    def post(self):
        item_id = get_video_id()
        workers = []
        for map_type in ('depth', 'illumination', 'saliency'):
            worker = Thread(target=extract_features, args=(map_type, item_id))
            workers.append(worker)
            worker.start()
        for worker in workers:
            worker.join()
        return item_id, 201
def perform_prediction(item_id, type, model):
    """Predict with *model* on the saved features and persist the raw outputs.

    Returns the mean of the positive-class probabilities — a single float,
    not the per-frame array (callers must not index the return value).
    """
    features = get_feature(type, item_id)
    output_pred_file = os.path.join(PATH_PREDICTIONS, remove_extension(item_id), type)
    file_utils.file_helper.guarantee_path_preconditions(output_pred_file)
    predictions = model.predict(features)
    predictions_proba = model.predict_proba(features)
    print('predictions: ', predictions)
    print('predictions_proba: ', predictions_proba)
    np.save(os.path.join(output_pred_file, 'pred.npy'), predictions)
    np.save(os.path.join(output_pred_file, 'pred_proba.npy'), predictions_proba)
    # Column 1 holds the positive-class probability per frame.
    return mean(predictions_proba[:, 1].tolist())
def run_predictions(item_id):
    """Return the mean spoof probability per cue (depth, illumination, saliency).

    Bug fix: perform_prediction() already returns the mean of the positive-
    class probabilities as a float, so the previous `preds[:, 1].tolist()`
    indexing on those floats raised TypeError at runtime.
    """
    return [perform_prediction(item_id, 'depth_aligned', model_depth),
            perform_prediction(item_id, 'illumination_aligned', model_illumination),
            perform_prediction(item_id, 'saliency_aligned', model_saliency)]
class Predictor(Resource):
    """POST endpoint: run all cue predictions for the requested item."""

    def post(self):
        video_name = get_video_name()
        return run_predictions(video_name), 201
class Crash(Resource):
    # Debug endpoint: GET deliberately raises, to exercise error handling/logging.
    def get(self):
        raise Exception("Crashing server..")
class Process(Resource):
    """POST endpoint: full pipeline (upload -> split -> maps -> align -> features -> predict).

    NOTE(review): split_video_with_id() returns a *list of frame URLs* (see its
    body), yet that list is bound to `item_id` and passed to generate_*_maps(),
    which os.path.join() it as a path component; also run_align_images()
    requires (item_id, property, extension_frames) but is called with a single
    argument. Both look like runtime failures — confirm before relying on this
    handler.
    """
    def post(self):
        time_begin = time.time()
        # upload video
        file_name = save_raw_artifact()
        time_upload = time.time()
        print('upload done ')
        # split video into frames
        item_id = split_video_with_id(file_name)
        time_split = time.time()
        print('split done')
        # generate depth maps
        generate_depth_maps(item_id)
        time_depth = time.time()
        print('depth done')
        # generate illumination maps
        generate_illumination_maps(item_id)
        time_illumination = time.time()
        print('illumination done')
        # generate saliency maps
        generate_saliency_maps(item_id)
        time_saliency = time.time()
        print('saliency done')
        # align images
        run_align_images(file_name)
        time_align = time.time()
        print('align done')
        # extract features
        extract_all_features(file_name)
        time_features = time.time()
        print('features done')
        # run predictions
        preds = run_predictions(file_name)
        time_predictions = time.time()
        print('predictions done')
        # Per-stage wall-clock timing summary.
        print("Overall results")
        print("Upload time: %.2f" % (time_upload - time_begin))
        print("Split time: %.2f" % (time_split - time_upload))
        print("Depth time: %.2f" % (time_depth - time_split))
        print("Illumination time: %.2f" % (time_illumination - time_depth))
        print("Saliency time: %.2f" % (time_saliency - time_illumination))
        print("Align time: %.2f" % (time_align - time_saliency))
        print("Features time: %.2f" % (time_features - time_align))
        print("Prediction time: %.2f" % (time_predictions - time_features))
        return preds, 201
def perform_all_steps(file_name):
    """Run the pipeline end-to-end for an uploaded file (no timing output).

    NOTE(review): same issues as Process.post — split_video_with_id() returns
    the frame list (not an id) and run_align_images() is called without its
    required property/extension arguments; confirm before use.
    """
    item_id = split_video_with_id(file_name)
    print('split done')
    generate_depth_maps(item_id)
    print('depth done')
    # run_saliency(item_id)
    run_align_images(file_name)
    print('align done')
    extract_all_features(file_name)
    print('features done')
    preds = run_predictions(file_name)
    print('preds done')
def get_feature(type, item_id):
    """Load the saved ResNet features for *item_id*/*type*, flattened to 2-D."""
    feature_file = os.path.join(PATH_FEATURES, remove_extension(item_id), type, 'features_resnet.npy')
    loaded = np.load(feature_file)
    return np.reshape(loaded, (loaded.shape[0], -1))
def extract_all_features(item_id):
    """Extract features for every aligned map type of *item_id*."""
    for map_type in ('depth_aligned', 'illumination_aligned', 'saliency_aligned'):
        extract_features(map_type, item_id)
def extract_features(type, item_id):
    """Run ResNet feature extraction for *item_id*/*type*, unless already cached."""
    frames_dir = os.path.join(PATH_FRAMES, item_id, type)
    features_dir = os.path.join(PATH_FEATURES, item_id, type)
    if os.path.exists(features_dir):
        return
    file_utils.file_helper.guarantee_path_preconditions(features_dir)
    feature_extractor.extract_features_resnet.extract_features(frames_dir, features_dir)
def video_exists(id):
    """True when frames for *id* have already been produced."""
    return os.path.exists(os.path.join(MYDIR, 'frames', id))
def remove_extension(name):
    """Strip the final extension from *name* ("a.b.mp4" -> "a.b").

    Uses rsplit so names containing dots keep everything before the last one;
    the old split(".")[0] truncated at the first dot.
    """
    return name.rsplit(".", 1)[0]
def get_video_name_from_id(video_id):
    """Rebuild the stored file name ("<hash>_<ext>.<ext>") from an artifact id."""
    base = video_id.rsplit(".")[0]
    parts = base.split("_")
    file_hash, extension = parts[0], parts[1]
    return "{}_{}.{}".format(file_hash, extension, extension)
def get_video_id():
    """Read the 'id' argument from the current request."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=str, help='Video.py id')
    parsed = parser.parse_args()
    return parsed['id']
def get_video_name():
    """Read the 'id' request argument and return the stored video file name.

    Delegates to get_video_name_from_id() instead of duplicating its
    hash/extension parsing logic.
    """
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=str, help='Video.py id')
    args = parser.parse_args()
    return get_video_name_from_id(args['id'])
def compute_md5(my_string):
    """Hex MD5 digest of *my_string* (used as a stable upload id, not for security)."""
    return hashlib.md5(my_string.encode('utf-8')).hexdigest()
# REST routing table: map each resource class onto its endpoint.
api.add_resource(Root, '/')
api.add_resource(UploadVideo, '/upload_video')
api.add_resource(UploadAndDepth, '/upload_depth')
api.add_resource(UploadAndIllumination, '/upload_illumination')
api.add_resource(UploadAndSaliency, '/upload_saliency')
api.add_resource(UploadImage, '/upload_image')
api.add_resource(DepthInference, '/depth')
api.add_resource(IlluminationInference, '/illumination')
api.add_resource(SaliencyInference, '/saliency')
api.add_resource(AlignImages, '/align')
api.add_resource(FeatureExtractor, '/feature_extractor')
api.add_resource(Predictor, '/predictor')
api.add_resource(Crash, '/crash')
if __name__ == "__main__":
    # Default to port 5000 unless overridden via the PORT environment variable.
    port = int(os.environ.get("PORT", 5000))
    import logging
    logging.basicConfig(filename='error.log', level=logging.DEBUG)
    app.run(host='0.0.0.0', port=port, threaded=True)
|
lambda_executors.py | import os
import re
import json
import time
import logging
import threading
import subprocess
import six
from multiprocessing import Process, Queue
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote # for Python 2.7
from localstack import config
from localstack.utils.aws import aws_stack
from localstack.utils.common import (
CaptureOutput, FuncThread, TMP_FILES, short_uid, save_file,
to_str, run, cp_r, json_safe, get_free_tcp_port)
from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'
EVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER
# Supported Lambda runtime identifiers (mirror the AWS runtime names).
LAMBDA_RUNTIME_PYTHON27 = 'python2.7'
LAMBDA_RUNTIME_PYTHON36 = 'python3.6'
LAMBDA_RUNTIME_NODEJS = 'nodejs'
LAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'
LAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'
LAMBDA_RUNTIME_NODEJS10X = 'nodejs10.x'
LAMBDA_RUNTIME_JAVA8 = 'java8'
LAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'
LAMBDA_RUNTIME_DOTNETCORE21 = 'dotnetcore2.1'
LAMBDA_RUNTIME_GOLANG = 'go1.x'
LAMBDA_RUNTIME_RUBY = 'ruby'
LAMBDA_RUNTIME_RUBY25 = 'ruby2.5'
LAMBDA_RUNTIME_CUSTOM_RUNTIME = 'provided'
LAMBDA_EVENT_FILE = 'event_file.json'
LAMBDA_SERVER_UNIQUE_PORTS = 500
LAMBDA_SERVER_PORT_OFFSET = 5000
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME_MS = 600 * 1000
class LambdaExecutor(object):
""" Base class for Lambda executors. Subclasses must overwrite the _execute method """
def __init__(self):
# keeps track of each function arn and the last time it was invoked
self.function_invoke_times = {}
def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
def do_execute(*args):
# set the invocation time in milliseconds
invocation_time = int(time.time() * 1000)
# start the execution
try:
result, log_output = self._execute(func_arn, func_details, event, context, version)
finally:
self.function_invoke_times[func_arn] = invocation_time
# forward log output to cloudwatch logs
self._store_logs(func_details, log_output, invocation_time)
# return final result
return result, log_output
# Inform users about asynchronous mode of the lambda execution.
if asynchronous:
LOG.debug('Lambda executed in Event (asynchronous) mode, no response from this '
'function will be returned to caller')
FuncThread(do_execute).start()
return None, 'Lambda executed asynchronously.'
return do_execute()
def _execute(self, func_arn, func_details, event, context=None, version=None):
""" This method must be overwritten by subclasses. """
raise Exception('Not implemented.')
def startup(self):
pass
def cleanup(self, arn=None):
pass
def _store_logs(self, func_details, log_output, invocation_time):
if not aws_stack.is_service_enabled('logs'):
return
logs_client = aws_stack.connect_to_service('logs')
log_group_name = '/aws/lambda/%s' % func_details.name()
time_str = time.strftime('%Y/%m/%d', time.gmtime(invocation_time))
log_stream_name = '%s/[$LATEST]%s' % (time_str, short_uid())
# make sure that the log group exists
log_groups = logs_client.describe_log_groups()['logGroups']
log_groups = [lg['logGroupName'] for lg in log_groups]
if log_group_name not in log_groups:
try:
logs_client.create_log_group(logGroupName=log_group_name)
except Exception as e:
if 'ResourceAlreadyExistsException' in str(e):
# this can happen in certain cases, possibly due to a race condition
pass
else:
raise e
# create a new log stream for this lambda invocation
logs_client.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name)
# store new log events under the log stream
invocation_time = invocation_time
finish_time = int(time.time() * 1000)
log_lines = log_output.split('\n')
time_diff_per_line = float(finish_time - invocation_time) / float(len(log_lines))
log_events = []
for i, line in enumerate(log_lines):
if not line:
continue
# simple heuristic: assume log lines were emitted in regular intervals
log_time = invocation_time + float(i) * time_diff_per_line
event = {'timestamp': int(log_time), 'message': line}
log_events.append(event)
if not log_events:
return
logs_client.put_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
logEvents=log_events
)
def run_lambda_executor(self, cmd, event=None, env_vars=None):
    """Run the given Lambda executor command and return its result and log output.

    :param cmd: shell command that executes the Lambda
    :param event: optional stdin payload (serialized event) piped to the process
    :param env_vars: optional dict of environment variables for the process
    :return: tuple (result, log_output)
    :raises Exception: if the process exits with a non-zero status code
    """
    # avoid the mutable-default-argument pitfall of "env_vars={}"
    env_vars = env_vars or {}
    process = run(cmd, asynchronous=True, stderr=subprocess.PIPE, outfile=subprocess.PIPE,
                  env_vars=env_vars, stdin=True)
    result, log_output = process.communicate(input=event)
    try:
        result = to_str(result).strip()
    except Exception:
        # result may be binary / non-decodable; keep it as-is in that case
        pass
    log_output = to_str(log_output).strip()
    return_code = process.returncode
    # Note: The user's code may have been logging to stderr, in which case the logs
    # will be part of the "result" variable here. Hence, make sure that we extract
    # only the *last* line of "result" and consider anything above that as log output.
    if isinstance(result, six.string_types) and '\n' in result:
        additional_logs, _, result = result.rpartition('\n')
        log_output += '\n%s' % additional_logs
    if return_code != 0:
        raise Exception('Lambda process returned error status code: %s. Output:\n%s' %
                        (return_code, log_output))
    return result, log_output
class ContainerInfo:
    """Basic information about a Docker container used for Lambda execution."""

    def __init__(self, name, entry_point):
        # name of the container and the default entry point of its image
        self.name = name
        self.entry_point = entry_point
class LambdaExecutorContainers(LambdaExecutor):
    """ Abstract executor class for executing Lambda functions in Docker containers """

    def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
        # Subclasses must return the full shell command that runs the Lambda in Docker.
        raise Exception('Not implemented')

    def _docker_cmd(self):
        """ Return the string to be used for running Docker commands. """
        return config.DOCKER_CMD

    def prepare_event(self, environment, event_body):
        """ Return the event as a stdin string. """
        # amend the environment variables for execution
        environment['AWS_LAMBDA_EVENT_BODY'] = event_body
        # returning None means: nothing is passed via stdin by default
        return None

    def _execute(self, func_arn, func_details, event, context=None, version=None):
        """Serialize the event, prepare the environment and run the Lambda in Docker."""
        lambda_cwd = func_details.cwd
        runtime = func_details.runtime
        handler = func_details.handler
        environment = func_details.envvars.copy()
        # configure USE_SSL in environment
        if config.USE_SSL:
            environment['USE_SSL'] = '1'
        # prepare event body
        if not event:
            LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
            event = {}
        event_body = json.dumps(json_safe(event))
        stdin = self.prepare_event(environment, event_body)
        # point the container at the host so the function can reach localstack services
        docker_host = config.DOCKER_HOST_FROM_CONTAINER
        environment['HOSTNAME'] = docker_host
        environment['LOCALSTACK_HOSTNAME'] = docker_host
        # expose invocation context details as standard AWS environment variables
        if context:
            environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
            environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
            environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn
        # custom command to execute in the container
        command = ''
        # if running a Java Lambda, set up classpath arguments
        if runtime == LAMBDA_RUNTIME_JAVA8:
            java_opts = Util.get_java_opts()
            # the Java executor reads the event from a file, not from stdin
            stdin = None
            # copy executor jar into temp directory
            target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))
            if not os.path.exists(target_file):
                cp_r(LAMBDA_EXECUTOR_JAR, target_file)
            # TODO cleanup once we have custom Java Docker image
            taskdir = '/var/task'
            save_file(os.path.join(lambda_cwd, LAMBDA_EVENT_FILE), event_body)
            command = ("bash -c 'cd %s; java %s -cp \".:`ls *.jar | tr \"\\n\" \":\"`\" \"%s\" \"%s\" \"%s\"'" %
                       (taskdir, java_opts, LAMBDA_EXECUTOR_CLASS, handler, LAMBDA_EVENT_FILE))
        # determine the command to be executed (implemented by subclasses)
        cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)
        # lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!
        LOG.debug('Running lambda cmd: %s' % cmd)
        result, log_output = self.run_lambda_executor(cmd, stdin, environment)
        log_formatted = log_output.strip().replace('\n', '\n> ')
        LOG.debug('Lambda %s result / log output:\n%s\n>%s' % (func_arn, result.strip(), log_formatted))
        return result, log_output
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
    """ Executor class for executing Lambda functions in re-usable Docker containers """

    def __init__(self):
        super(LambdaExecutorReuseContainers, self).__init__()
        # locking thread for creation/destruction of docker containers.
        self.docker_container_lock = threading.RLock()
        # On each invocation we try to construct a port unlikely to conflict
        # with a previously invoked lambda function. This is a problem with at
        # least the lambci/lambda:go1.x container, which execs a go program that
        # attempts to bind to the same default port.
        self.next_port = 0
        self.max_port = LAMBDA_SERVER_UNIQUE_PORTS
        self.port_offset = LAMBDA_SERVER_PORT_OFFSET

    def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
        """Build the shell command that runs the Lambda inside a (re-used) container."""
        # check whether the Lambda has been invoked before
        has_been_invoked_before = func_arn in self.function_invoke_times
        # Choose a port for this invocation
        with self.docker_container_lock:
            env_vars['_LAMBDA_SERVER_PORT'] = str(self.next_port + self.port_offset)
            self.next_port = (self.next_port + 1) % self.max_port
        # create/verify the docker container is running.
        LOG.debug('Priming docker container with runtime "%s" and arn "%s".', runtime, func_arn)
        container_info = self.prime_docker_container(runtime, func_arn, env_vars.items(), lambda_cwd)
        # Note: currently "docker exec" does not support --env-file, i.e., environment variables can only be
        # passed directly on the command line, using "-e" below. TODO: Update this code once --env-file is
        # available for docker exec, to better support very large Lambda events (very long environment values)
        exec_env_vars = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
        if not command:
            # no custom command given: run the image entry point with the handler
            command = '%s %s' % (container_info.entry_point, handler)
        # determine files to be copied into the container
        copy_command = ''
        docker_cmd = self._docker_cmd()
        event_file = os.path.join(lambda_cwd, LAMBDA_EVENT_FILE)
        if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:
            # if this is the first invocation: copy the entire folder into the container
            copy_command = '%s cp "%s/." "%s:/var/task";' % (docker_cmd, lambda_cwd, container_info.name)
        elif os.path.exists(event_file):
            # otherwise, copy only the event file if it exists
            copy_command = '%s cp "%s" "%s:/var/task";' % (docker_cmd, event_file, container_info.name)
        cmd = (
            '%s'
            ' %s exec'
            ' %s'  # env variables
            ' %s'  # container name
            ' %s'  # run cmd
        ) % (copy_command, docker_cmd, exec_env_vars, container_info.name, command)
        LOG.debug('Command for docker-reuse Lambda executor: %s' % cmd)
        return cmd

    def startup(self):
        """Reset executor state and begin the periodic idle-container cleanup."""
        self.cleanup()
        # start a process to remove idle containers
        self.start_idle_container_destroyer_interval()

    def cleanup(self, arn=None):
        """Destroy the container for the given ARN, or all containers if no ARN is given."""
        if arn:
            self.function_invoke_times.pop(arn, None)
            return self.destroy_docker_container(arn)
        self.function_invoke_times = {}
        return self.destroy_existing_docker_containers()

    def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):
        """
        Prepares a persistent docker container for a specific function.
        :param runtime: Lambda runtime environment. python2.7, nodejs6.10, etc.
        :param func_arn: The ARN of the lambda function.
        :param env_vars: The environment variables for the lambda.
        :param lambda_cwd: The local directory containing the code for the lambda function.
        :return: ContainerInfo class containing the container name and default entry point.
        """
        with self.docker_container_lock:
            # Get the container name and id.
            container_name = self.get_container_name(func_arn)
            docker_cmd = self._docker_cmd()
            status = self.get_docker_container_status(func_arn)
            LOG.debug('Priming docker container (status "%s"): %s' % (status, container_name))
            docker_image = Util.docker_image_for_runtime(runtime)
            # Container is not running or doesn't exist.
            if status < 1:
                # Make sure the container does not exist in any form/state.
                self.destroy_docker_container(func_arn)
                env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])
                network = config.LAMBDA_DOCKER_NETWORK
                network_str = '--network="%s"' % network if network else ''
                # with local Docker the code folder can simply be bind-mounted
                mount_volume = not config.LAMBDA_REMOTE_DOCKER
                lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
                mount_volume_str = '-v "%s":/var/task' % lambda_cwd_on_host if mount_volume else ''
                # Create and start the container
                LOG.debug('Creating container: %s' % container_name)
                cmd = (
                    '%s create'
                    ' --rm'
                    ' --name "%s"'
                    ' --entrypoint /bin/bash'  # Load bash when it starts.
                    ' %s'
                    ' --interactive'  # Keeps the container running bash.
                    ' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
                    ' -e HOSTNAME="$HOSTNAME"'
                    ' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
                    ' %s'  # env_vars
                    ' %s'  # network
                    ' %s'
                ) % (docker_cmd, container_name, mount_volume_str, env_vars_str, network_str, docker_image)
                LOG.debug(cmd)
                run(cmd)
                if not mount_volume:
                    # remote Docker: code must be copied into the container instead
                    LOG.debug('Copying files to container "%s" from "%s".' % (container_name, lambda_cwd))
                    cmd = (
                        '%s cp'
                        ' "%s/." "%s:/var/task"'
                    ) % (docker_cmd, lambda_cwd, container_name)
                    LOG.debug(cmd)
                    run(cmd)
                LOG.debug('Starting container: %s' % container_name)
                cmd = '%s start %s' % (docker_cmd, container_name)
                LOG.debug(cmd)
                run(cmd)
                # give the container some time to start up
                time.sleep(1)
            # Get the entry point for the image.
            LOG.debug('Getting the entrypoint for image: %s' % (docker_image))
            cmd = (
                '%s image inspect'
                ' --format="{{ .ContainerConfig.Entrypoint }}"'
                ' %s'
            ) % (docker_cmd, docker_image)
            LOG.debug(cmd)
            run_result = run(cmd)
            # inspect prints e.g. "[/entrypoint.sh]" - strip brackets and whitespace
            entry_point = run_result.strip('[]\n\r ')
            container_network = self.get_docker_container_network(func_arn)
            LOG.debug('Using entrypoint "%s" for container "%s" on network "%s".'
                      % (entry_point, container_name, container_network))
            return ContainerInfo(container_name, entry_point)

    def destroy_docker_container(self, func_arn):
        """
        Stops and/or removes a docker container for a specific lambda function ARN.
        :param func_arn: The ARN of the lambda function.
        :return: None
        """
        with self.docker_container_lock:
            status = self.get_docker_container_status(func_arn)
            docker_cmd = self._docker_cmd()
            # Get the container name and id.
            container_name = self.get_container_name(func_arn)
            if status == 1:
                # container is running: stop it first
                LOG.debug('Stopping container: %s' % container_name)
                cmd = (
                    '%s stop -t0 %s'
                ) % (docker_cmd, container_name)
                LOG.debug(cmd)
                run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
                status = self.get_docker_container_status(func_arn)
            if status == -1:
                # container exists but is stopped: remove it
                LOG.debug('Removing container: %s' % container_name)
                cmd = (
                    '%s rm %s'
                ) % (docker_cmd, container_name)
                LOG.debug(cmd)
                run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)

    def get_all_container_names(self):
        """
        Returns a list of container names for lambda containers.
        :return: A String[] localstack docker container names for each function.
        """
        with self.docker_container_lock:
            LOG.debug('Getting all lambda containers names.')
            cmd = '%s ps -a --filter="name=localstack_lambda_*" --format "{{.Names}}"' % self._docker_cmd()
            LOG.debug(cmd)
            cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()
            if len(cmd_result) > 0:
                container_names = cmd_result.split('\n')
            else:
                container_names = []
            return container_names

    def destroy_existing_docker_containers(self):
        """
        Stops and/or removes all lambda docker containers for localstack.
        :return: None
        """
        with self.docker_container_lock:
            container_names = self.get_all_container_names()
            LOG.debug('Removing %d containers.' % len(container_names))
            for container_name in container_names:
                cmd = '%s rm -f %s' % (self._docker_cmd(), container_name)
                LOG.debug(cmd)
                run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)

    def get_docker_container_status(self, func_arn):
        """
        Determine the status of a docker container.
        :param func_arn: The ARN of the lambda function.
        :return: 1 If the container is running,
        -1 if the container exists but is not running
        0 if the container does not exist.
        """
        with self.docker_container_lock:
            # Get the container name and id.
            container_name = self.get_container_name(func_arn)
            # Check if the container is already running
            # Note: filtering by *exact* name using regex filter '^...$' seems unstable on some
            # systems. Therefore, we use a combination of filter and grep to get the results.
            cmd = ("docker ps -a --filter name='%s' "
                   '--format "{{ .Status }} - {{ .Names }}" '
                   '| grep -w "%s" | cat') % (container_name, container_name)
            LOG.debug('Getting status for container "%s": %s' % (container_name, cmd))
            cmd_result = run(cmd)
            # If the container doesn't exist. Create and start it.
            container_status = cmd_result.strip()
            if len(container_status) == 0:
                return 0
            if container_status.lower().startswith('up '):
                return 1
            return -1

    def get_docker_container_network(self, func_arn):
        """
        Determine the network of a docker container.
        :param func_arn: The ARN of the lambda function.
        :return: name of the container network
        """
        with self.docker_container_lock:
            status = self.get_docker_container_status(func_arn)
            # container does not exist
            if status == 0:
                return ''
            # Get the container name.
            container_name = self.get_container_name(func_arn)
            docker_cmd = self._docker_cmd()
            # Get the container network
            LOG.debug('Getting container network: %s' % container_name)
            cmd = (
                '%s inspect %s'
                ' --format "{{ .HostConfig.NetworkMode }}"'
            ) % (docker_cmd, container_name)
            LOG.debug(cmd)
            cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
            container_network = cmd_result.strip()
            return container_network

    def idle_container_destroyer(self):
        """
        Iterates though all the lambda containers and destroys any container that has
        been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.
        :return: None
        """
        LOG.info('Checking if there are idle containers.')
        current_time = int(time.time() * 1000)
        # iterate over a copy, since destroy_docker_container may mutate the dict
        for func_arn, last_run_time in dict(self.function_invoke_times).items():
            duration = current_time - last_run_time
            # not enough idle time has passed
            if duration < MAX_CONTAINER_IDLE_TIME_MS:
                continue
            # container has been idle, destroy it.
            self.destroy_docker_container(func_arn)

    def start_idle_container_destroyer_interval(self):
        """
        Starts a repeating timer that triggers start_idle_container_destroyer_interval every 60 seconds.
        Thus checking for idle containers and destroying them.
        :return: None
        """
        self.idle_container_destroyer()
        # re-schedule itself; each Timer runs once and spawns the next one
        threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()

    def get_container_name(self, func_arn):
        """
        Given a function ARN, returns a valid docker container name.
        :param func_arn: The ARN of the lambda function.
        :return: A docker compatible name for the arn.
        """
        return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
    """ Executor class that spins up a fresh Docker container for each Lambda invocation """

    def prepare_event(self, environment, event_body):
        """Pass the event to the container via stdin (lambci convention)."""
        # Tell Lambci to use STDIN for the event
        environment['DOCKER_LAMBDA_USE_STDIN'] = '1'
        return event_body.encode()

    def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
        """Build the one-shot "docker run"/"docker create" command for this invocation."""
        entrypoint = ''
        if command:
            # a custom command overrides the image entry point
            entrypoint = ' --entrypoint ""'
        else:
            command = '"%s"' % handler
        env_vars_string = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
        # expose the Java debug port on the host, if one has been allocated
        debug_docker_java_port = '-p {p}:{p}'.format(p=Util.debug_java_port) if Util.debug_java_port else ''
        network = config.LAMBDA_DOCKER_NETWORK
        network_str = '--network="%s"' % network if network else ''
        docker_cmd = self._docker_cmd()
        docker_image = Util.docker_image_for_runtime(runtime)
        if config.LAMBDA_REMOTE_DOCKER:
            # remote Docker daemon: create the container, copy the code in, then start it
            cmd = (
                'CONTAINER_ID="$(%s create -i'
                ' %s'
                ' %s'
                ' %s'
                ' %s'  # network
                ' --rm'
                ' %s %s'
                ')";'
                '%s cp "%s/." "$CONTAINER_ID:/var/task"; '
                '%s start -ai "$CONTAINER_ID";'
            ) % (docker_cmd, entrypoint, debug_docker_java_port, env_vars_string, network_str,
                 docker_image, command,
                 docker_cmd, lambda_cwd,
                 docker_cmd)
        else:
            # local Docker daemon: bind-mount the code folder and run directly
            lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
            cmd = (
                '%s run -i'
                ' %s -v "%s":/var/task'
                ' %s'
                ' %s'  # network
                ' --rm'
                ' %s %s'
            ) % (docker_cmd, entrypoint, lambda_cwd_on_host, env_vars_string,
                 network_str, docker_image, command)
        return cmd
class LambdaExecutorLocal(LambdaExecutor):
    """ Executor class that runs Lambda handlers directly in the local Python process """

    def _execute(self, func_arn, func_details, event, context=None, version=None):
        """Invoke the Lambda handler function, capturing its stdout/stderr as log output."""
        lambda_cwd = func_details.cwd
        environment = func_details.envvars.copy()
        # execute the Lambda function in a forked sub-process, sync result via queue
        queue = Queue()
        lambda_function = func_details.function(version)

        def do_execute():
            # now we're executing in the child process, safe to change CWD and ENV
            if lambda_cwd:
                os.chdir(lambda_cwd)
            if environment:
                os.environ.update(environment)
            result = lambda_function(event, context)
            queue.put(result)

        process = Process(target=do_execute)
        with CaptureOutput() as c:
            # NOTE(review): Process.run() executes the target in the *current*
            # process (unlike .start()) - presumably intentional so that
            # CaptureOutput can capture the handler's output; confirm before changing.
            process.run()
        result = queue.get()
        # TODO: Interweaving stdout/stderr currently not supported
        log_output = ''
        for stream in (c.stdout(), c.stderr()):
            if stream:
                log_output += ('\n' if log_output else '') + stream
        return result, log_output

    def execute_java_lambda(self, event, context, handler, main_file):
        """Run a Java Lambda locally via the bundled executor JAR.

        :param event: event payload (written to a temp file read by the Java executor)
        :param context: invocation context (currently unused here)
        :param handler: handler spec in "ClassName::method" form
        :param main_file: path to the JAR/class file containing the handler
        :return: tuple (result, log_output)
        """
        event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
        save_file(event_file, json.dumps(event))
        # register the temp file for later cleanup
        TMP_FILES.append(event_file)
        class_name = handler.split('::')[0]
        classpath = '%s:%s' % (LAMBDA_EXECUTOR_JAR, main_file)
        cmd = 'java -cp %s %s %s %s' % (classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
        result, log_output = self.run_lambda_executor(cmd)
        LOG.debug('Lambda result / log output:\n%s\n> %s' % (
            result.strip(), log_output.strip().replace('\n', '\n> ')))
        return result, log_output
class Util:
    """Helper utilities for building Docker/Java commands for Lambda execution."""

    # lazily allocated TCP port for remote Java debugging (False until first needed)
    debug_java_port = False

    @classmethod
    def get_java_opts(cls):
        """Return the configured JVM options, substituting the debug port placeholder."""
        opts = config.LAMBDA_JAVA_OPTS or ''
        if '_debug_port_' not in opts:
            return opts
        # allocate the debug port once and reuse it for subsequent invocations
        if not cls.debug_java_port:
            cls.debug_java_port = get_free_tcp_port()
        return opts.replace('_debug_port_', '%s' % cls.debug_java_port)

    @classmethod
    def get_host_path_for_path_in_docker(cls, path):
        """Map a path inside the localstack temp folder to the corresponding host path."""
        pattern = r'^%s/(.*)$' % config.TMP_FOLDER
        replacement = r'%s/\1' % config.HOST_TMP_FOLDER
        return re.sub(pattern, replacement, path)

    @classmethod
    def docker_image_for_runtime(cls, runtime):
        """Return the (quoted) Docker image reference for the given Lambda runtime."""
        registry = config.LAMBDA_CONTAINER_REGISTRY
        tag = runtime
        # TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas
        # See https://github.com/lambci/docker-lambda/pull/218
        if registry == 'lambci/lambda':
            tag = '20191117-%s' % tag
        return '"%s:%s"' % (registry, tag)
# --------------
# GLOBAL STATE
# --------------
# singleton executor instances, one per supported execution mode
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
# executor used when no explicit mode is configured
DEFAULT_EXECUTOR = EXECUTOR_LOCAL
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
    'local': EXECUTOR_LOCAL,
    'docker': EXECUTOR_CONTAINERS_SEPARATE,
    'docker-reuse': EXECUTOR_CONTAINERS_REUSE
}
|
logger.py | # importing the module
import json
import os
import argparse
import time
import threading
import requests
import utils as ut
from datetime import datetime,timezone
# Optional configuration loaded from the JSON file pointed to by the "secrets"
# environment variable. All three flags default to False (feature disabled):
#   openweather - OpenWeather API settings (apiurl/units/zip_code/contry_code/apikey)
#   secrets     - the full parsed secrets dict
#   warning     - email warning settings (threshold_up/threshold_down, recipient info)
openweather = False
secrets = False
warning = False
if "secrets" in os.environ:
    if os.path.exists(os.environ["secrets"]):
        with open(os.environ["secrets"]) as s:
            secrets = json.load(s)
        if "openweather" in secrets:
            openweather = secrets["openweather"]
        if "warnings" in secrets:
            warning = secrets["warnings"]
class SessionLogger():
    """Periodically records temperature/weather readings for a brew session.

    A background thread appends a reading to the session JSON file every
    30 minutes and sends a warning email when the wort temperature leaves
    the configured thresholds.
    """

    def __init__(self):
        self.timer_flag = False
        self.timer = threading.Thread(target=self.fun_timer, name="Logging")  # ,daemon="True")
        self.current_s = ut.LOG_FILE
        # default fermentation thresholds (fahrenheit); overridden by the
        # recipe stored in the session file, if present
        self.ferm_high = 90
        self.ferm_low = 50
        # event used to make the 30-minute wait interruptible by stop()
        self._stop_event = threading.Event()
        if openweather:
            self.weather_url = openweather["apiurl"] + \
                "units=" + openweather["units"] + \
                "&zip=" + str(openweather["zip_code"]) + "," + openweather["contry_code"] + \
                "&APPID=" + openweather["apikey"]
        else:
            # fallback weather source that needs no API key
            self.weather_url = "http://wttr.in/?format=j1"

    def fun_timer(self):
        """Logging loop: append a new reading to the session file every 30 minutes."""
        while self.timer_flag:
            data = self.get_current()
            # pick up fermentation thresholds from the recipe, if present
            if "recipe" in data:
                if "fermentation" in data["recipe"]:
                    if "temperatures" in data["recipe"]["fermentation"]:
                        print(data["recipe"]["fermentation"])
                        self.ferm_high = data["recipe"]["fermentation"]["temperatures"]["high"]
                        self.ferm_low = data["recipe"]["fermentation"]["temperatures"]["low"]
            new_read = self.get_readings()
            # list.append mutates in place (previous code assigned its None
            # return value to an unused variable)
            data["session"]["readings"].append(new_read)
            self.update_file(json.dumps(data))
            print("logged: {}".format(new_read))
            # wait 30 minutes, but wake immediately if stop() is called
            # (a plain time.sleep(1800) made stop() block for up to 30 minutes)
            self._stop_event.wait(1800)

    def start(self):
        """Start the background logging thread."""
        self.timer_flag = True
        self._stop_event.clear()
        self.timer.start()

    def stop(self):
        """Stop the background logging thread and wait for it to finish."""
        self.timer_flag = False
        self._stop_event.set()
        self.timer.join()

    def update_file(self, data):
        """Overwrite the session file with the given JSON string."""
        with open(self.current_s, 'w') as f:
            f.write(data)

    def get_current(self):
        """Read and return the current session file as a dict."""
        with open(self.current_s, 'r') as f:
            data = json.load(f)
        return data

    def get_readings(self):
        """Collect one set of sensor/weather readings and check the wort thresholds.

        :return: dict with date, env/wort/cpu temperatures, humidity and weather
        """
        now = ut.get_now()
        wort = ut.ds18b20_to_display()
        env = ut.dht22_to_display()
        cpu = ut.get_cpu_temp()
        weather = self.get_weather_info()
        if openweather:
            current_conditions = weather
        else:
            # wttr.in nests the current conditions in a list
            current_conditions = weather['current_condition'][0]
        new_read = {"date": now,
                    "env": {"celsius": env["celsius"], "fahrenheit": env["fahrenheit"]},
                    "wort": {"celsius": wort["celsius"], "fahrenheit": wort["fahrenheit"]},
                    "cpu": {"celsius": round(cpu, 2), "fahrenheit": round(ut.c_to_f(cpu), 2)},
                    "humidity": env["humidity"],
                    "current_conditions": current_conditions,
                    }
        self.check_thresholds(wort["fahrenheit"])
        return new_read

    def get_weather_info(self):
        """Fetch current weather info; fall back to N/A placeholders on any error."""
        weather_info = {}
        try:
            weather_info = requests.get(self.weather_url, timeout=2).json()
        except Exception as e:
            print(e)
            weather_info["current_condition"] = ['N/A']
            weather_info["nearest_area"] = ['N/A']
        return weather_info

    def check_thresholds(self, reading):
        """Send a warning email if the wort reading is outside the configured thresholds.

        :param reading: wort temperature in fahrenheit
        """
        if warning:
            if reading > warning["threshold_up"] or reading < warning["threshold_down"] \
                    or reading < self.ferm_low or reading > self.ferm_high:
                warning["subject"] = "Wort reading reaching threshold"
                # typo fixes: "tempeture" -> "temperature", "temperaturess" -> "temperatures"
                warning["message"] = ("Warning, wort temperature reaching threshold temperatures"
                                      "\n\nCheck on it:\n\tWort temp: {}".format(reading))
                ut.send_email(warning)
if __name__ == "__main__":
session = SessionLogger()
parser = argparse.ArgumentParser()
parser.add_argument("action")
args = parser.parse_args()
if args.action == "start":
print("Starting logging session")
session.start()
elif args.action == "stop":
print("Stoping session")
session.stop()
|
main.py | from test_process import test_process
from agent import Agent
from torch.utils.tensorboard import SummaryWriter
import torch.multiprocessing as mp
import gym
import time
#-----------------------------PARAMETERS-----------------------------
# Hyperparameters for REINFORCE training on CartPole.
HYPERPARAMETERS = {
    'learning_rate': 0.003,    # optimizer step size
    'gamma': 0.99,             # discount factor for returns
    'random_seed': 12,         # seed for reproducibility
    'baseline': True,          # presumably subtracts a baseline from returns - confirm in Agent
    'test_counter': 8,         # assumed: episodes between test evaluations - confirm in test_process
    'env_name': 'CartPole-v1',  # Gym environment id
    'writer_test': True,       # enable TensorBoard logging in the test process
    'writer_train': False,     # enable TensorBoard logging in training (see writer_train below)
    'writer_log_dir': 'content/runs/REINFORCE-3232-3-baseline-seed=-1',  # TensorBoard output dir prefix
    'max_train_games': 5000,   # training episode budget
    'max_test_games': 10,      # episodes per test evaluation
    'print_test_results': True  # print test results to stdout
}
#--------------------------------------------------------------------
if __name__ == '__main__':
    # Create TensorBoard writer that will create graphs
    writer_train = SummaryWriter(log_dir=HYPERPARAMETERS['writer_log_dir'] + str(time.time())) if HYPERPARAMETERS['writer_train'] else None
    # Create environment
    env = gym.make(HYPERPARAMETERS['env_name'])
    # Initialize the policy parameter θ at random.
    agent = Agent(env=env, hyperparameters=HYPERPARAMETERS, writer=writer_train)
    # End flag will be controlled by test which will indicate whether train and main process should terminate
    end_flag = mp.Value('i', 0)
    # Episode flag will be shared value of number of episodes done by algorithm
    episode = mp.Value('i', 0)
    # Wait queue forces test process to test NN parameters only when main says so
    wait_queue = mp.Queue()
    wait_queue.put(0)
    # To be sure test process started we create queue which will block main until test is ready
    wait_first_test = mp.Queue()
    # Create and run test process
    p = mp.Process(target=test_process, args=(HYPERPARAMETERS, agent.get_policy_nn(), end_flag, wait_queue, episode, wait_first_test))
    p.start()
    # Remembers last episode we ran the test process, so we don't run it more than once in the same episode
    last_ep = 0
    obs = env.reset()
    ep_num = 0
    # Main will be blocked until test is ready
    wait_first_test.get()
    while ep_num < HYPERPARAMETERS['max_train_games']:
        # If test process has signalized that we reached the necessary goal (end_flag is shared variable)
        if end_flag.value == 1:
            break
        env.render()
        # Give current state to NN and get action from it
        action = agent.select_action(obs)
        # Take that action and retrieve next state, reward and whether it is a terminal state
        new_obs, reward, done, _ = env.step(action)
        if done:
            # penalize episode termination
            reward = -20
        # Until we reach end of episode, store transitions
        agent.add_to_buffer(obs, action, new_obs, reward)
        obs = new_obs
        if done:
            # For each step in episode we need to estimate return Gt and update policy parameters
            agent.improve_params()
            avg_reward = agent.reset_values(ep_num)
            obs = env.reset()
            ep_num += 1
            episode.value += 1
            # Let test process test new parameters
            wait_queue.put(0)
    # Wait for test process to end before terminating main
    p.join()
    if writer_train is not None:
        writer_train.close()
    env.close()

# !tensorboard --logdir "D:\Users\Leon Jovanovic\Documents\Computer Science\Reinforcement Learning\deep-reinforcement-learning-pg-cartpole\reinforce\content\runs" --host=127.0.0.1
# !tensorboard --inspect --logdir "D:\Users\Leon Jovanovic\Documents\Computer Science\Reinforcement Learning\deep-reinforcement-learning-pg-cartpole\reinforce\content\runs"
|
test_chatcommunicate.py | import chatcommunicate
import chatcommands
from globalvars import GlobalVars
import collections
import io
import os
import os.path
import pytest
import threading
import time
from fake import Fake
from unittest.mock import Mock, patch
def test_parse_room_config():
    """Parsing test_rooms.yml should populate command/watcher rooms, privileges and roles."""
    chatcommunicate.parse_room_config("test/test_rooms.yml")
    # rooms that accept commands
    assert ("stackexchange.com", 11540) in chatcommunicate._command_rooms
    assert ("stackexchange.com", 30332) in chatcommunicate._command_rooms
    assert ("stackoverflow.com", 111347) in chatcommunicate._command_rooms
    assert ("stackexchange.com", 3) not in chatcommunicate._command_rooms
    assert ("stackexchange.com", 54445) not in chatcommunicate._command_rooms
    assert ("meta.stackexchange.com", 89) not in chatcommunicate._command_rooms
    # rooms with a deletion watcher
    assert ("stackexchange.com", 11540) in chatcommunicate._watcher_rooms
    assert ("stackexchange.com", 3) in chatcommunicate._watcher_rooms
    assert ("meta.stackexchange.com", 89) in chatcommunicate._watcher_rooms
    assert ("stackexchange.com", 30332) not in chatcommunicate._watcher_rooms
    assert ("stackexchange.com", 54445) not in chatcommunicate._watcher_rooms
    assert ("stackoverflow.com", 111347) not in chatcommunicate._watcher_rooms
    # per-room privileged user ids
    assert chatcommunicate._privileges[("stackexchange.com", 11540)] == {1, 16070}
    assert chatcommunicate._privileges[("stackexchange.com", 30332)] == set()
    assert chatcommunicate._privileges[("stackexchange.com", 3)] == set()
    assert chatcommunicate._privileges[("stackexchange.com", 54445)] == set()
    assert chatcommunicate._privileges[("meta.stackexchange.com", 89)] == {42}
    assert chatcommunicate._privileges[("stackoverflow.com", 111347)] == {1337, 256, 4766556}
    # role -> set-of-rooms mapping
    assert len(chatcommunicate._room_roles) == 5
    assert chatcommunicate._room_roles["debug"] == {("stackexchange.com", 11540)}
    assert chatcommunicate._room_roles["all"] == {("stackexchange.com", 11540),
                                                  ("stackexchange.com", 54445),
                                                  ("stackoverflow.com", 111347)}
    assert chatcommunicate._room_roles["metatavern"] == {("meta.stackexchange.com", 89)}
    assert chatcommunicate._room_roles["delay"] == {("meta.stackexchange.com", 89)}
    assert chatcommunicate._room_roles["no-all-caps title"] == {("meta.stackexchange.com", 89)}
@patch("chatcommunicate.threading.Thread")
@patch("chatcommunicate.Client")
@patch("chatcommunicate.parse_room_config")
def test_init(room_config, client_constructor, thread):
client = Mock()
client_constructor.return_value = client
client.login.side_effect = Exception()
threw_exception = False
try:
chatcommunicate.init("shoutouts", "to simpleflips")
except Exception as e:
assert str(e) == "Failed to log into stackexchange.com"
threw_exception = True
assert threw_exception
client.login.side_effect = None
client.login.reset_mock()
client_constructor.reset_mock()
room_config.side_effect = lambda _: room_config.get_original()("test/test_rooms.yml")
GlobalVars.standby_mode = True
chatcommunicate.init("shoutouts", "to simpleflips")
assert len(chatcommunicate._rooms) == 0
assert client.login.call_count == 3
assert client_constructor.call_count == 3
client_constructor.assert_any_call("stackexchange.com")
client_constructor.assert_any_call("stackoverflow.com")
client_constructor.assert_any_call("meta.stackexchange.com")
assert thread.call_count == 2
thread.assert_any_call(name="pickle ---rick--- runner", target=chatcommunicate.pickle_last_messages, daemon=True)
thread.assert_any_call(name="message sender", target=chatcommunicate.send_messages, daemon=True)
client.login.reset_mock()
client_constructor.reset_mock()
thread.reset_mock()
GlobalVars.standby_mode = False
counter = 0
def throw_every_other(*_):
nonlocal counter
counter += 1
if counter & 1:
raise Exception()
client.login.side_effect = throw_every_other
chatcommunicate.init("shoutouts", "to simpleflips")
assert client.login.call_count == 6
assert counter == 6
assert client_constructor.call_count == 3
client_constructor.assert_any_call("stackexchange.com")
client_constructor.assert_any_call("stackoverflow.com")
client_constructor.assert_any_call("meta.stackexchange.com")
assert thread.call_count == 2
thread.assert_any_call(name="pickle ---rick--- runner", target=chatcommunicate.pickle_last_messages, daemon=True)
thread.assert_any_call(name="message sender", target=chatcommunicate.send_messages, daemon=True)
assert len(chatcommunicate._rooms) == 3
assert chatcommunicate._rooms[("stackexchange.com", 11540)].deletion_watcher is True
assert chatcommunicate._rooms[("stackexchange.com", 30332)].deletion_watcher is False
assert chatcommunicate._rooms[("stackoverflow.com", 111347)].deletion_watcher is False
@pytest.mark.skipif(os.path.isfile("messageData.p"), reason="shouldn't overwrite file")
@patch("chatcommunicate.pickle.dump")
def test_pickle_rick(dump):
try:
threading.Thread(target=chatcommunicate.pickle_last_messages, daemon=True).start()
chatcommunicate._pickle_run.set()
# Yield to the pickling thread until it acquires the lock again
while len(chatcommunicate._pickle_run._cond._waiters) == 0:
time.sleep(0)
assert dump.call_count == 1
call, _ = dump.call_args_list[0]
assert isinstance(call[0], chatcommunicate.LastMessages)
assert isinstance(call[1], io.IOBase) and call[1].name == "messageData.p"
finally:
os.remove("messageData.p")
@patch("chatcommunicate._pickle_run")
def test_message_sender(pickle_rick):
chatcommunicate._last_messages = chatcommunicate.LastMessages({}, collections.OrderedDict())
threading.Thread(target=chatcommunicate.send_messages, daemon=True).start()
room = chatcommunicate.RoomData(Mock(), -1, False)
room.room.id = 11540
room.room._client.host = "stackexchange.com"
room.room._client._do_action_despite_throttling.return_value = Fake({"json": lambda: {"id": 1}})
chatcommunicate._msg_queue.put((room, "test", None))
while not chatcommunicate._msg_queue.empty():
time.sleep(0)
room.room._client._do_action_despite_throttling.assert_called_once_with(("send", 11540, "test"))
room.room.reset_mock()
assert chatcommunicate._last_messages.messages[("stackexchange.com", 11540)] == collections.deque((1,))
room.room.id = 30332
room.room._client._do_action_despite_throttling.return_value = Fake({"json": lambda: {"id": 2}})
chatcommunicate._msg_queue.put((room, "test", "did you hear about what happened to pluto"))
while not chatcommunicate._msg_queue.empty():
time.sleep(0)
room.room._client._do_action_despite_throttling.assert_called_once_with(("send", 30332, "test"))
assert chatcommunicate._last_messages.messages[("stackexchange.com", 11540)] == collections.deque((1,))
assert chatcommunicate._last_messages.reports == collections.OrderedDict({("stackexchange.com", 2): "did you hear about what happened to pluto"})
@patch("chatcommunicate._msg_queue.put")
@patch("chatcommunicate.get_last_messages")
def test_on_msg(get_last_messages, post_msg):
    """Exercise chatcommunicate.on_msg command dispatch end to end.

    Covers: ignored event types, own messages (user_id 1337), unknown commands,
    prefix commands with 0..2 args, the '-' quiet-action suffix, too-few/too-many
    argument errors, reply commands on parented messages, and multi-reply
    ('sd ...') batch dispatch against get_last_messages.
    """
    client = Fake({
        "_br": {
            "user_id": 1337
        },
        "host": "stackexchange.com"
    })
    room_data = chatcommunicate.RoomData(Mock(), -1, False)
    chatcommunicate._rooms[("stackexchange.com", 11540)] = room_data
    chatcommunicate.on_msg(Fake({}, spec=chatcommunicate.events.MessageStarred), None)  # don't reply to events we don't care about

    # Plain chat from another user: no command, no reply expected
    msg1 = Fake({
        "message": {
            "room": {
                "id": 11540,
            },
            "owner": {
                "id": 1,
            },
            "parent": None,
            "content": "shoutouts to simpleflips"
        }
    }, spec=chatcommunicate.events.MessagePosted)
    chatcommunicate.on_msg(msg1, client)

    # Message from ourselves (owner id == client user_id) must be ignored
    msg2 = Fake({
        "message": {
            "room": {
                "id": 11540
            },
            "owner": {
                "id": 1337
            },
            "id": 999,
            "parent": None,
            "content": "!!/not_actually_a_command"
        }
    }, spec=chatcommunicate.events.MessagePosted)
    chatcommunicate.on_msg(msg2, client)

    msg3 = Fake({
        "message": {
            "room": {
                "id": 11540,
            },
            "owner": {
                "id": 1
            },
            "id": 999,
            "parent": None,
            "content": "!!/a_command"
        }
    }, spec=chatcommunicate.events.MessagePosted)
    # Command returns "hi" normally and "" when invoked with the quiet '-' suffix
    mock_command = Mock(side_effect=lambda *_, **kwargs: "hi" if not kwargs["quiet_action"] else "")

    # Zero-argument prefix command: reply ':999 hi' is queued
    chatcommunicate._commands["prefix"]["a_command"] = (mock_command, (0, 0))
    chatcommunicate.on_msg(msg3, client)
    assert post_msg.call_count == 1
    assert post_msg.call_args_list[0][0][0][1] == ":999 hi"
    mock_command.assert_called_once_with(original_msg=msg3.message, alias_used="a_command", quiet_action=False)
    post_msg.reset_mock()
    mock_command.reset_mock()

    # Quiet suffix '-' suppresses the reply but still runs the command
    msg3.message.content = "!!/a_command-"
    chatcommunicate.on_msg(msg3, client)
    post_msg.assert_not_called()
    mock_command.assert_called_once_with(original_msg=msg3.message, alias_used="a_command", quiet_action=True)
    post_msg.reset_mock()
    mock_command.reset_mock()

    # (0, 1) arity: missing optional argument is passed as None
    chatcommunicate._commands["prefix"]["a_command"] = (mock_command, (0, 1))
    chatcommunicate.on_msg(msg3, client)
    post_msg.assert_not_called()
    mock_command.assert_called_once_with(None, original_msg=msg3.message, alias_used="a_command", quiet_action=True)
    post_msg.reset_mock()
    mock_command.reset_mock()

    # Max-arity 1: everything after the command name arrives as one string
    msg3.message.content = "!!/a_command 1 2 3"
    chatcommunicate.on_msg(msg3, client)
    assert post_msg.call_count == 1
    assert post_msg.call_args_list[0][0][0][1] == ":999 hi"
    mock_command.assert_called_once_with("1 2 3", original_msg=msg3.message, alias_used="a_command", quiet_action=False)
    post_msg.reset_mock()
    mock_command.reset_mock()

    # (1, 2) arity: argument-count validation paths
    chatcommunicate._commands["prefix"]["a_command"] = (mock_command, (1, 2))
    msg3.message.content = "!!/a_command"
    chatcommunicate.on_msg(msg3, client)
    assert post_msg.call_count == 1
    assert post_msg.call_args_list[0][0][0][1] == ":999 Too few arguments."
    mock_command.assert_not_called()
    post_msg.reset_mock()
    mock_command.reset_mock()

    msg3.message.content = "!!/a_command 1 2 oatmeal"
    chatcommunicate.on_msg(msg3, client)
    assert post_msg.call_count == 1
    assert post_msg.call_args_list[0][0][0][1] == ":999 Too many arguments."
    mock_command.assert_not_called()
    post_msg.reset_mock()
    mock_command.reset_mock()

    # Quiet suffix with the full two arguments split individually
    msg3.message.content = "!!/a_command- 1 2"
    chatcommunicate.on_msg(msg3, client)
    post_msg.assert_not_called()
    mock_command.assert_called_once_with("1", "2", original_msg=msg3.message, alias_used="a_command", quiet_action=True)
    post_msg.reset_mock()
    mock_command.reset_mock()

    # One of two args given: the missing optional arg is padded with None
    msg3.message.content = "!!/a_command 3"
    chatcommunicate.on_msg(msg3, client)
    assert post_msg.call_count == 1
    assert post_msg.call_args_list[0][0][0][1] == ":999 hi"
    mock_command.assert_called_once_with("3", None, original_msg=msg3.message, alias_used="a_command", quiet_action=False)
    post_msg.reset_mock()
    mock_command.reset_mock()

    # Reply to someone else's message: not addressed to us, ignored
    msg4 = Fake({
        "message": {
            "room": {
                "id": 11540,
            },
            "owner": {
                "id": 1
            },
            "parent": {
                "owner": {
                    "id": 2
                }
            },
            "id": 1000,
            "content": "asdf"
        }
    }, spec=chatcommunicate.events.MessageEdited)
    chatcommunicate.on_msg(msg4, client)

    # Reply to one of our messages: triggers the "reply" command table
    msg5 = Fake({
        "message": {
            "room": {
                "id": 11540,
            },
            "owner": {
                "id": 1
            },
            "parent": {
                "owner": {
                    "id": 1337
                }
            },
            "id": 1000,
            "content": "@SmokeDetector why "
        }
    }, spec=chatcommunicate.events.MessageEdited)
    # Reply commands must declare arity (1, 1) -- the parent message is the
    # argument; (0, 0) is invalid and is expected to trip an assertion.
    chatcommunicate._commands["reply"]["why"] = (mock_command, (0, 0))
    threw_exception = False
    try:
        chatcommunicate.on_msg(msg5, client)
    except AssertionError:
        threw_exception = True
    assert threw_exception
    mock_command.assert_not_called()
    post_msg.assert_not_called()

    chatcommunicate._commands["reply"]["why"] = (mock_command, (1, 1))
    chatcommunicate.on_msg(msg5, client)
    assert post_msg.call_count == 1
    assert post_msg.call_args_list[0][0][0][1] == ":1000 hi"
    mock_command.assert_called_once_with(msg5.message.parent, original_msg=msg5.message, alias_used="why", quiet_action=False)
    post_msg.reset_mock()
    mock_command.reset_mock()

    # Trailing punctuation ending in '-' still resolves the command, quietly
    msg5.message.content = "@SmokeDetector why@!@#-"
    chatcommunicate.on_msg(msg5, client)
    post_msg.assert_not_called()
    mock_command.assert_called_once_with(msg5.message.parent, original_msg=msg5.message, alias_used="why", quiet_action=True)

    # 'sd' multi-command: one token per recent message from get_last_messages
    msg6 = Fake({
        "message": {
            "room": {
                "id": 11540,
            },
            "owner": {
                "id": 1
            },
            "id": 1000,
            "parent": None,
            "content": "sd why - 2why 2why- 2- why- "
        }
    }, spec=chatcommunicate.events.MessageEdited)
    get_last_messages.side_effect = lambda _, num: (Fake({"id": i}) for i in range(num))
    chatcommunicate.on_msg(msg6, client)
    assert post_msg.call_count == 1
    assert post_msg.call_args_list[0][0][0][1] == ":1000 [:0] hi\n[:1] <skipped>\n[:2] hi\n[:3] hi\n[:4] <processed without return value>\n[:5] <processed without return value>\n[:6] <skipped>\n[:7] <skipped>\n[:8] <processed without return value>"
def test_message_type():
    """chatcommands.message passes Message-typed objects through unchanged
    and rejects anything else with an AssertionError."""
    valid = Fake({}, spec=chatcommunicate.Message)
    assert chatcommands.message(valid) == valid

    invalid = Fake({})
    raised = False
    try:
        chatcommands.message(invalid)
    except AssertionError:
        raised = True
    assert raised
|
multithread_server.py | from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
from os import urandom
from base64 import b64encode
from encryption import *
from time import gmtime, strftime
import rsa,os,shutil
# --- Server configuration constants ---
KEY_LENGTH = 1024            # RSA key length expected from clients (bits)
BUFFER_SIZE = 8192           # socket recv buffer size (bytes)
PORT = 5003                  # TCP port the server listens on
SYSTEM_MESSAGE_PREFIX = "SYSTEM"
INVALID_NAME_MESSAGE = "The user name {0} is invalid, please try a new one"
JOINED_CHAT_MESSAGE = "User {0} has joined the chat."
LEFT_CHAT_MESSAGE = "User {0} has left the chat. >>"
FILENAME = 'test.py'         # the shared file distributed to clients
VERSION = 1                  # next version number for pushed file copies
# Portable path construction (the original hard-coded Windows '\\' separators).
# The trailing '' keeps a trailing os.sep, which the FOLDER + FILENAME
# concatenations elsewhere in this module rely on.
FOLDER = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'server_files', '')
class Client:
    """Plain record of everything the server needs to talk to one
    connected client (no behaviour, just state)."""

    def __init__(self, name, socket, address, aeskey):
        # Display name chosen by the user
        self.name = name
        # Connected TCP socket for this client
        self.socket = socket
        # (host, port) peer address
        self.address = address
        # Per-client AES session key negotiated during welcome()
        self.aeskey = aeskey
def accept():
    '''
    Server accept loop: blocks on the module-level SERVER_SOCKET and hands
    every new TCP connection to its own welcome() thread. Runs forever.
    '''
    while True:
        sock, addr = SERVER_SOCKET.accept()
        # One thread per client; welcome() performs the key exchange,
        # registers the client and then runs the session loop.
        Thread(target=welcome, args=(sock, addr,)).start()
def welcome(socket, address):
    '''
    Three-phase handshake for a new connection, then hand off to session().

    Phases (connect_cnt counts down 3 -> 0):
      3: receive the client's RSA public key, reply with the AES session
         key encrypted under it
      2: receive the AES-encrypted user name and register the client
      1: push the current shared file to the client

    Messages that do not fit the expected phase are ignored; after 20 such
    messages the connection is dropped.

    :param socket: connected TCP socket for this client
    :param address: (host, port) peer address
    '''
    disconnect_cnt = 0
    connect_cnt = 3
    # Random 16-character AES key (b64 of 12 random bytes -> 16 ASCII chars;
    # note the differences between b64 decoding)
    aeskey = b64encode(urandom(12)).decode('utf-8')
    global FILENAME
    global FOLDER
    while connect_cnt != 0:
        if (connect_cnt != 1):  # the file-push phase needs no incoming msg
            data = socket.recv(BUFFER_SIZE).decode()
        if connect_cnt == 3 and data.startswith('-----BEGIN RSA PUBLIC KEY-----'):
            # Obtain public key from the user, send back the encrypted AES key
            pubkey = rsa.PublicKey.load_pkcs1(data.encode())
            data = encrypt_rsa(aeskey, pubkey)
            socket.send(data)
            connect_cnt -= 1
        elif connect_cnt == 2:
            name = decrypt_aes(data, aeskey)
            print(name + ' connected')
            # TODO: name validation (INVALID_NAME_MESSAGE is currently unused)
            clients[socket] = Client(name, socket, address, aeskey)
            broadcast(JOINED_CHAT_MESSAGE.format(name), SYSTEM_MESSAGE_PREFIX)
            connect_cnt -= 1
        elif connect_cnt == 1:  # transfer the shared file
            # `with` guarantees the file handle is closed even on send errors
            with open(FOLDER + FILENAME, 'rb') as shared_file:
                filemsg = '{0}*{1}'.format(FILENAME, shared_file.read().decode('utf-8'))
            socket.send(encrypt_aes(filemsg, aeskey))
            connect_cnt -= 1
        else:
            # Message does not match the current handshake phase
            disconnect_cnt += 1
            if disconnect_cnt > 20:
                # BUGFIX: socket.send() requires bytes in Python 3, and the
                # client may never have been registered, so pop() instead of
                # `del` to avoid a KeyError.
                socket.send("Invalid connection.".encode('utf-8'))
                clients.pop(socket, None)
                socket.close()
                return
    session(name, socket, aeskey)
# not implemented
def session(name, socket, aeskey):
    '''
    Main per-client receive loop, run after the welcome() handshake.

    Decrypts each incoming message with the client's AES key and dispatches:
      - 'PUSH*<content>': store <content> as a new versioned copy of FILENAME
        on disk and rebroadcast the raw PUSH message to every client
      - 'QUIT': acknowledge, close the socket, deregister the client and
        announce the departure
      - anything else: relay as a chat message under the client's name
    '''
    while True:
        msg = decrypt_aes(socket.recv(BUFFER_SIZE).decode('utf-8'), aeskey)
        if msg.startswith('PUSH*'):
            print('push recieved')
            global VERSION
            global FOLDER
            # FILENAME.rsplit('.',1)[*] splits the extension and filename,
            # producing e.g. test_v1.py, test_v2.py, ...
            f = open(FOLDER + FILENAME.rsplit('.',1)[0] + '_v' + str(VERSION) + '.' + FILENAME.rsplit('.',1)[1], 'w+')
            f.write(msg.split('*',1)[1])
            f.close()
            VERSION += 1
            # Forward the raw PUSH message verbatim so clients update their copy
            broadcast(msg,mode = 'CUSTOM')
        elif msg == 'QUIT':
            send_aes_encrypted(socket, "Quitting", aeskey)
            socket.close()
            del clients[socket]
            broadcast(LEFT_CHAT_MESSAGE.format(name), SYSTEM_MESSAGE_PREFIX)
            break
        else:
            broadcast(msg, name)
def broadcast(msg, prefix = "", mode = 'DEFAULT'):
    '''
    Send a message to every connected client.

    DEFAULT mode prepends the sender prefix and a UTC timestamp;
    CUSTOM mode sends the payload verbatim. Any other mode sends nothing.
    '''
    if mode == 'DEFAULT':
        payload = prefix + ' ' + strftime("%Y-%m-%d %H:%M:%S", gmtime()) + '\n' + msg
    elif mode == 'CUSTOM':
        payload = msg
    else:
        return
    for sock, client in clients.items():
        send_aes_encrypted(sock, payload, client.aeskey)
        print('broadcasting: {0}'.format(msg))
def send_aes_encrypted(socket, msg, aeskey):
    '''
    Encrypt *msg* with the client's AES session key and send it on *socket*.
    Thin wrapper around encrypt_aes (from the project's encryption module).
    '''
    socket.send(encrypt_aes(msg, aeskey))
def emptyFolder(dir, filename = None):
    '''
    Delete every regular file in *dir* except *filename*.

    :param dir: directory to clean out (name kept for call compatibility,
        although it shadows the builtin)
    :param filename: name of the single file to keep; defaults to the
        module-level FILENAME, now resolved at call time rather than at
        definition time
    Subdirectories are left untouched; per-file errors are printed, not raised.
    '''
    if filename is None:
        filename = FILENAME
    for the_file in os.listdir(dir):
        if the_file == filename:
            continue  # keep the canonical shared file
        file_path = os.path.join(dir, the_file)
        try:
            if os.path.isfile(file_path):
                os.unlink(file_path)
            # Deliberately not recursing into subdirectories
            # (shutil.rmtree was considered and left disabled upstream).
        except Exception as e:
            print(e)
# Maps each connected socket to its Client record (name, address, AES key)
clients = {}

HOST = '127.0.0.1'
ADDR = (HOST, PORT)
# NOTE(review): the listening socket is created and bound at import time,
# not under the __main__ guard -- importing this module grabs the port.
SERVER_SOCKET = socket(AF_INET, SOCK_STREAM)
SERVER_SOCKET.bind(ADDR)

if __name__ == "__main__":
    SERVER_SOCKET.listen(5)
    # Drop stale versioned copies from previous runs, keeping FILENAME itself
    emptyFolder(FOLDER)
    print("Waiting for connection...")
    try:
        ACCEPT_THREAD = Thread(target=accept)
        ACCEPT_THREAD.start()
    except KeyboardInterrupt:
        # NOTE(review): this except only covers thread creation/startup;
        # a Ctrl-C while blocked in join() below is not caught here.
        print('keyboardInterrupted!')
    ACCEPT_THREAD.join()
    SERVER_SOCKET.close()
|
instance.py | """CloudMan worker instance class"""
import datetime as dt
import json
import logging
import logging.config
import threading
import time
from boto.exception import BotoServerError
from boto.exception import EC2ResponseError
from cm.services import ServiceRole
from cm.services import ServiceType
from cm.util import Time, instance_lifecycle, instance_states, misc, spot_states
from cm.util.decorators import TestFlag
# Module-level logger shared by the Instance class below
log = logging.getLogger('cloudman')
# Time well in the past to seed reboot and last comm times with.
TIME_IN_PAST = dt.datetime(2012, 1, 1, 0, 0, 0)
class Instance(object):
    """A single CloudMan worker node (on-demand or Spot), tracked by the master.

    Wraps the boto instance object and keeps CloudMan-side bookkeeping:
    machine state, worker status, NFS mount flags, load, reboot/terminate
    counters and Spot request state.  (Python 2 code: `except X, e` syntax.)
    """

    def __init__(self, app, inst=None, m_state=None, last_m_state_change=None,
                 reboot_required=False, spot_request_id=None):
        self.app = app
        self.config = app.config
        self.spot_request_id = spot_request_id
        # Lifecycle is decided purely by whether a spot request id was supplied
        self.lifecycle = instance_lifecycle.SPOT if self.spot_request_id else instance_lifecycle.ONDEMAND
        self.inst = inst  # boto object of the instance
        self.spot_state = None
        self.private_ip = None
        self.public_ip = None
        self.local_hostname = None
        if inst:
            try:
                self.id = str(inst.id)
            except EC2ResponseError, e:
                # NOTE(review): on this error path self.id is never assigned;
                # later attribute access would raise AttributeError -- confirm.
                log.error("Error retrieving instance id: %s" % e)
        else:
            self.id = None
        # Machine state as obtained from the cloud middleware (see
        # instance_states Bunch)
        self.m_state = m_state
        self.last_m_state_change = Time.now()
        # A time stamp when the most recent update of the instance state
        # (m_state) took place
        self.last_state_update = Time.now()
        self.is_alive = False
        self.num_cpus = 1
        self.total_memory = 1  # in bytes
        self.time_rebooted = TIME_IN_PAST  # Initialize to a date in the past
        self.reboot_count = 0
        self.terminate_attempt_count = 0
        self.last_comm = TIME_IN_PAST  # Initialize to a date in the past
        # NFS mount status flags as reported by the worker (0/1-ish values)
        self.nfs_data = 0
        self.nfs_tools = 0
        self.nfs_indices = 0
        self.nfs_sge = 0
        self.nfs_tfs = 0  # Transient file system, NFS-mounted from the master
        self.get_cert = 0
        self.sge_started = 0
        self.slurmd_running = 0
        # NodeName by which this instance is tracked in Slurm
        self.alias = 'w{0}'.format(self.app.number_generator.next())
        self.worker_status = 'Pending'  # Pending, Wake, Startup, Ready, Stopping, Error
        self.load = 0
        self.type = 'Unknown'
        self.reboot_required = reboot_required
        self.update_spot()

    def __repr__(self):
        # Reuse the human-readable description used throughout the logs
        return self.get_desc()

    def maintain(self):
        """ Based on the state and status of this instance, try to do the right thing
            to keep the instance functional. Note that this may lead to terminating
            the instance.
        """
        def reboot_terminate_logic():
            """ Make a decision whether to terminate or reboot an instance.
                CALL THIS METHOD CAREFULLY because it defaults to terminating the
                instance!
            """
            # Escalation: reboot (up to quota) -> terminate (up to quota) -> give up
            if self.reboot_count < self.config.instance_reboot_attempts:
                self.reboot()
            elif self.terminate_attempt_count >= self.config.instance_terminate_attempts:
                log.info("Tried terminating instance {0} {1} times but was unsuccessful. Giving up."
                         .format(self.inst.id, self.config.instance_terminate_attempts))
                self._remove_instance()
            else:
                log.info("Instance {0} not responding after {1} reboots. Terminating instance."
                         .format(self.id, self.reboot_count))
                self.terminate()

        # Update state then do resolution
        state = self.get_m_state()
        if state == instance_states.PENDING or state == instance_states.SHUTTING_DOWN:
            # Only act once the instance has been stuck past both timeouts
            if (Time.now() - self.last_m_state_change).seconds > self.config.instance_state_change_wait and \
               (Time.now() - self.time_rebooted).seconds > self.config.instance_reboot_timeout:
                log.debug("'Maintaining' instance {0} stuck in '{1}' state.".format(
                    self.get_desc(), state))
                reboot_terminate_logic()
        elif state == instance_states.ERROR:
            log.debug("'Maintaining' instance {0} in '{1}' state.".format(self.get_desc(), instance_states.ERROR))
            reboot_terminate_logic()
        elif state == instance_states.TERMINATED:
            log.debug("'Maintaining' instance {0} in '{1}' state.".format(self.get_desc(), instance_states.TERMINATED))
            self._remove_instance()
        elif state == instance_states.RUNNING:
            log.debug("'Maintaining' instance {0} in '{1}' state (last comm before {2} | "
                      "last m_state change before {3} | time_rebooted before {4}"
                      .format(self.get_desc(), instance_states.RUNNING,
                              dt.timedelta(seconds=(Time.now() - self.last_comm).seconds),
                              dt.timedelta(seconds=(Time.now() - self.last_m_state_change).seconds),
                              dt.timedelta(seconds=(Time.now() - self.time_rebooted).seconds)))
            # A running-but-silent instance is only acted on after all timeouts
            if (Time.now() - self.last_comm).seconds > self.config.instance_comm_timeout and \
               (Time.now() - self.last_m_state_change).seconds > self.config.instance_state_change_wait and \
               (Time.now() - self.time_rebooted).seconds > self.config.instance_reboot_timeout:
                reboot_terminate_logic()

    @TestFlag(None)
    def get_cloud_instance_object(self, deep=False):
        """ Get the instance object for this instance from the library used to
            communicate with the cloud middleware. In the case of boto, this
            is the boto EC2 Instance object.

            :type deep: bool
            :param deep: If True, force the check with the cloud middleware; else
                         use local field by default

            :rtype: boto.ec2.instance.Instance (should really be a more generic repr
                    but we'll wait for OCCI or something)
            :return: cloud instance object for this instance
        """
        if deep is True:  # reset the current local instance field
            self.inst = None
        if self.inst is None and self.id is not None:
            try:
                rs = self.app.cloud_interface.get_all_instances(self.id)
                if len(rs) == 0:
                    log.warning("Instance {0} not found on the cloud?".format(
                        self.id))
                for r in rs:
                    # Update local fields from the freshly fetched reservation
                    self.inst = r.instances[0]
                    self.id = r.instances[0].id
                    self.m_state = r.instances[0].state
            except EC2ResponseError, e:
                log.error("Trouble getting the cloud instance ({0}) object: {1}".format(self.id, e))
            except Exception, e:
                log.error("Error getting the cloud instance ({0}) object: {1}".format(self.id, e))
        elif not self.is_spot():
            log.debug(
                "Cannot get cloud instance object without an instance ID?")
        return self.inst

    def is_spot(self):
        """ Test is this Instance is a Spot instance.

            :rtype: bool
            :return: True if the current Instance is Spot instance, False otherwise.
        """
        return self.lifecycle == instance_lifecycle.SPOT

    def spot_was_filled(self):
        """ For Spot-based instances, test if the spot request has been
            filled (ie, an instance was started)

            :rtype: bool
            :return: True if this is a Spot instance and the Spot request
                     is in state spot_states.ACTIVE. False otherwise.
        """
        # Refresh the spot state first; no-op for on-demand instances
        self.update_spot()
        if self.is_spot() and self.spot_state == spot_states.ACTIVE:
            return True
        return False

    def get_status_dict(self):
        """Return this worker's status as a dict for the console/UI."""
        toret = {'id': self.id,
                 'alias': self.alias,
                 'ld': self.load,
                 'time_in_state': misc.format_seconds(Time.now() - self.last_m_state_change),
                 'nfs_data': self.nfs_data,
                 'nfs_tools': self.nfs_tools,
                 'nfs_indices': self.nfs_indices,
                 'nfs_sge': self.nfs_sge,
                 'nfs_tfs': self.nfs_tfs,
                 'get_cert': self.get_cert,
                 'slurmd_running': self.slurmd_running,
                 'worker_status': self.worker_status,
                 'instance_state': self.m_state,
                 'instance_type': self.type,
                 'public_ip': self.public_ip}
        if self.load:
            # load is a space-separated 1/5/15-minute triple; normalize per CPU
            lds = self.load.split(' ')
            if len(lds) == 3:
                toret['ld'] = "%s %s %s" % (float(lds[0]) / self.num_cpus, float(
                    lds[1]) / self.num_cpus, float(lds[2]) / self.num_cpus)
        return toret
def get_status_array(self):
if self.m_state.lower() == "running": # For extra states.
if self.is_alive is not True:
ld = "Starting"
elif self.load:
lds = self.load.split(' ')
if len(lds) == 3:
try:
load1 = float(lds[0]) / self.num_cpus
load2 = float(lds[1]) / self.num_cpus
load3 = float(lds[2]) / self.num_cpus
ld = "%s %s %s" % (load1, load2, load3)
except Exception, e:
log.debug("Problems normalizing load: %s" % e)
ld = self.load
else:
ld = self.load
elif self.worker_status == "Ready":
ld = "Running"
return [self.id, ld, misc.format_seconds(
Time.now() - self.last_m_state_change),
self.nfs_data, self.nfs_tools, self.nfs_indices, self.nfs_sge, self.get_cert,
self.sge_started, self.worker_status]
else:
return [self.id, self.m_state,
misc.format_seconds(Time.now() - self.last_m_state_change),
self.nfs_data, self.nfs_tools, self.nfs_indices,
self.nfs_sge, self.get_cert, self.sge_started,
self.worker_status]
    @TestFlag("TestInstanceID")
    def get_id(self):
        """Lazily resolve and cache the cloud-assigned instance id."""
        if self.inst is not None and self.id is None:
            try:
                self.inst.update()
                self.id = self.inst.id
            except EC2ResponseError, e:
                log.error("Error retrieving instance id: %s" % e)
            except Exception, e:
                log.error("Exception retreiving instance object: %s" % e)
        return self.id

    def get_desc(self):
        """ Get basic but descriptive info about this instance. Useful for logging.
        """
        if self.is_spot() and not self.spot_was_filled():
            # No instance yet; identify by the spot request id instead
            return "'{sid}'".format(sid=self.spot_request_id)
        # TODO : DO NOT redefine id, etc.
        return "'{id}; {ip}; {sn}'".format(id=self.get_id(), ip=self.get_public_ip(),
                                           sn=self.alias)

    def reboot(self, count_reboot=True):
        """
        Reboot this instance. If ``count_reboot`` is set, increment the number
        of reboots for this instance (a threshold in this count leads to eventual
        instance termination, see ``self.config.instance_reboot_attempts``).
        """
        if self.inst is not None:
            # Show reboot count only if this reboot counts toward the reboot quota
            s = " (reboot #{0})".format(self.reboot_count + 1)
            log.info("Rebooting instance {0}{1}.".format(self.get_desc(),
                                                         s if count_reboot else ''))
            try:
                self.inst.reboot()
                self.time_rebooted = Time.now()
            except EC2ResponseError, e:
                log.error("Trouble rebooting instance {0}: {1}".format(self.get_desc(), e))
        else:
            log.debug("Attampted to reboot instance {0} but no instance object? "
                      "(doing nothing)".format(self.get_desc()))
        if count_reboot:
            # Increment irrespective of success to allow for eventual termination
            self.reboot_count += 1
            log.debug("Incremented instance reboot count to {0} (out of {1})"
                      .format(self.reboot_count, self.config.instance_reboot_attempts))

    def terminate(self):
        """Terminate this instance asynchronously; returns the worker thread."""
        self.worker_status = "Stopping"
        t_thread = threading.Thread(target=self.__terminate)
        t_thread.start()
        return t_thread

    def __terminate(self):
        """Blocking termination body run on the thread started by terminate()."""
        inst_terminated = self.app.cloud_interface.terminate_instance(
            instance_id=self.id,
            spot_request_id=self.spot_request_id if self.is_spot() else None)
        self.terminate_attempt_count += 1
        if inst_terminated is False:
            log.error("Terminating instance %s did not go smoothly; instance state: '%s'"
                      % (self.get_desc(), self.get_m_state()))
        else:
            # Remove the reference to the instance object because with OpenStack &
            # boto the instance.update() method returns the instance as being
            # in 'running' state even though the instance does not even exist
            # any more.
            self.inst = None
            self._remove_instance()

    def _remove_instance(self, force=False):
        """ A convenience method to remove the current instance from the list
            of worker instances tracked by the master object.

            :type force: bool
            :param force: Indicate if the instance should be forcefully (ie, irrespective
                          of other logic) removed from the list of instances maintained
                          by the master object.
        """
        # NOTE(review): `force` is accepted but never used in this body -- confirm.
        try:
            if self in self.app.manager.worker_instances:
                self.app.manager.worker_instances.remove(self)
                log.info(
                    "Instance '%s' removed from the internal instance list." % self.id)
                # If this was the last worker removed, add master back as execution host.
                if len(self.app.manager.worker_instances) == 0 and not self.app.manager.master_exec_host:
                    self.app.manager.toggle_master_as_exec_host()
        except ValueError, e:
            log.warning("Instance '%s' no longer in instance list, the global monitor probably "
                        "picked it up and deleted it already: %s" % (self.id, e))

    @TestFlag("running")
    def get_m_state(self):
        """ Update the machine state of the current instance by querying the
            cloud middleware for the instance object itself (via the instance
            id) and updating self.m_state field to match the state returned by
            the cloud middleware.
            Also, update local last_state_update timestamp.

            :rtype: String
            :return: the current state of the instance as obtained from the
                     cloud middleware
        """
        self.last_state_update = Time.now()
        # deep=True forces a fresh fetch from the cloud middleware
        self.get_cloud_instance_object(deep=True)
        if self.inst:
            try:
                state = self.inst.state
                log.debug("Requested instance {0} update: old state: {1}; new state: {2}"
                          .format(self.get_desc(), self.m_state, state))
                if state != self.m_state:
                    self.m_state = state
                    self.last_m_state_change = Time.now()
            except EC2ResponseError, e:
                log.debug("Error updating instance {0} state: {1}".format(
                    self.get_id(), e))
                self.m_state = instance_states.ERROR
        else:
            # No instance object: a filled/on-demand instance that vanished
            # is considered terminated; an unfilled spot request is not.
            if not self.is_spot() or self.spot_was_filled():
                log.debug("Instance object {0} not found during m_state update; "
                          "setting instance state to {1}".format(self.get_id(), instance_states.TERMINATED))
                self.m_state = instance_states.TERMINATED
        return self.m_state

    @TestFlag(None)
    def send_alive_request(self):
        """Ask the worker to report in (it answers with an ALIVE message)."""
        self.app.manager.console_monitor.conn.send('ALIVE_REQUEST', self.id)

    def send_sync_etc_host(self, msg):
        """
        Send a message instructing the worker to sync its /etc/hosts file.
        """
        # Because the hosts file is synced over the transientFS, give the FS
        # some time to become available before sending the msg
        if int(self.nfs_tfs):
            self.app.manager.console_monitor.conn.send('SYNC_ETC_HOSTS | ' +
                                                       msg, self.id)
        else:
            log.debug("Transient FS on instance {0} not available (code {1}); not "
                      "syncing /etc/hosts".format(self.get_desc(), self.nfs_tfs))

    def update_spot(self, force=False):
        """ Get an update on the state of a Spot request. If the request has entered
            spot_states.ACTIVE or spot_states.CANCELLED states, update the Instance
            object itself otherwise just update state. The method will continue to poll
            for an update until the spot request has been filled (ie, enters state
            spot_states.ACTIVE). After that, simply return the spot state (see
            force parameter).

            :type force: bool
            :param force: If True, poll for an update on the spot request,
                          irrespective of the stored spot request state.
        """
        if self.is_spot() and (force or self.spot_state != spot_states.ACTIVE):
            old_state = self.spot_state
            try:
                ec2_conn = self.app.cloud_interface.get_ec2_connection()
                reqs = ec2_conn.get_all_spot_instance_requests(
                    request_ids=[self.spot_request_id])
                for req in reqs:
                    self.spot_state = req.state
                    # Also update the worker_status because otherwise there's no
                    # single source to distinguish between simply an instance
                    # in Pending state and a Spot request
                    self.worker_status = self.spot_state
                    # If the state has changed, do a deeper update
                    if self.spot_state != old_state:
                        if self.spot_state == spot_states.CANCELLED:
                            # The request was canceled so remove this Instance
                            # object
                            log.info("Spot request {0} was canceled; removing Instance object {1}"
                                     .format(self.spot_request_id, self.id))
                            self._remove_instance()
                        elif self.spot_state == spot_states.ACTIVE:
                            # We should have an instance now
                            self.id = req.instance_id
                            log.info("Spot request {0} filled with instance {1}"
                                     .format(self.spot_request_id, self.id))
                            # Potentially give it a few seconds so everything gets registered
                            for i in range(3):
                                instance = self.get_cloud_instance_object()
                                if instance:
                                    self.app.cloud_interface.add_tag(
                                        instance, 'clusterName', self.app.config['cluster_name'])
                                    self.app.cloud_interface.add_tag(
                                        instance, 'role', 'worker')
                                    self.app.cloud_interface.add_tag(
                                        instance, 'Name',
                                        "Worker: {0}".format(self.app.config['cluster_name']))
                                    break
                                time.sleep(5)
            except EC2ResponseError, e:
                log.error("Trouble retrieving spot request {0}: {1}".format(
                    self.spot_request_id, e))
        return self.spot_state

    @TestFlag("127.0.0.1")
    def get_private_ip(self):
        """Lazily resolve and cache this worker's private IP address."""
        # log.debug("Getting instance '%s' private IP: '%s'" % ( self.id, self.private_ip ) )
        if self.private_ip is None:
            inst = self.get_cloud_instance_object()
            if inst is not None:
                try:
                    inst.update()
                    self.private_ip = inst.private_ip_address
                except EC2ResponseError:
                    log.debug("private_ip_address for instance {0} not (yet?) available."
                              .format(self.get_id()))
            else:
                log.debug("private_ip_address for instance {0} with no instance object not available."
                          .format(self.get_id()))
        return self.private_ip

    @TestFlag('127.0.0.1')
    def get_public_ip(self):
        """
        Get the public IP address of this worker instance.
        """
        if not self.public_ip:
            inst = self.get_cloud_instance_object(deep=True)
            # log.debug('Getting public IP for instance {0}'.format(inst.id))
            if inst:
                try:
                    inst.update()
                    self.public_ip = inst.ip_address
                    if self.public_ip:
                        log.debug("Got public IP for instance {0}: {1}".format(
                            self.get_id(), self.public_ip))
                    else:
                        log.debug("Still no public IP for instance {0}".format(
                            self.get_id()))
                except EC2ResponseError:
                    log.debug("ip_address for instance {0} not (yet?) available.".format(
                        self.get_id()))
                except BotoServerError as bse:
                    log.debug("Meta-data service unavailable (msg: {0})"
                              .format(bse.message))
            else:
                log.debug("ip_address for instance {0} with no instance object not available."
                          .format(self.get_id()))
        return self.public_ip

    def get_local_hostname(self):
        """Hostname as reported by the worker itself in its ALIVE message."""
        return self.local_hostname

    def send_mount_points(self):
        """Send the worker the list of file systems it should mount (as JSON)."""
        mount_points = []
        for fs in self.app.manager.get_services(svc_type=ServiceType.FILE_SYSTEM):
            if fs.nfs_fs:
                fs_type = "nfs"
                server = fs.nfs_fs.device
                options = fs.nfs_fs.mount_options
            elif fs.gluster_fs:
                fs_type = "glusterfs"
                server = fs.gluster_fs.device
                options = fs.gluster_fs.mount_options
            else:
                # Default: NFS export served by the master itself
                fs_type = "nfs"
                server = self.app.cloud_interface.get_private_ip()
                options = None
            mount_points.append(
                {'fs_type': fs_type,
                 'server': server,
                 'mount_options': options,
                 'shared_mount_path': fs.get_details()['mount_point'],
                 'fs_name': fs.get_details()['name']})
        jmp = json.dumps({'mount_points': mount_points})
        self.app.manager.console_monitor.conn.send('MOUNT | %s' % jmp, self.id)
        # log.debug("Sent mount points %s to worker %s" % (mount_points, self.id))

    def send_master_pubkey(self):
        """Send the master's root public SSH key to this worker."""
        # log.info("\tMT: Sending MASTER_PUBKEY message: %s" % self.app.manager.get_root_public_key() )
        self.app.manager.console_monitor.conn.send(
            'MASTER_PUBKEY | %s' % self.app.manager.get_root_public_key(), self.id)
        log.debug("Sent master public key to worker instance '%s'." % self.id)
        log.debug("\tMT: Message MASTER_PUBKEY %s sent to '%s'" % (self.app.manager.get_root_public_key(), self.id))

    def send_start_slurmd(self):
        """Tell the worker to start slurmd under its Slurm NodeName (alias)."""
        log.debug("\tMT: Sending START_SLURMD message to instance {0}, named {1}"
                  .format(self.get_desc(), self.alias))
        self.app.manager.console_monitor.conn.send('START_SLURMD | {0}'.format(
            self.alias), self.id)

    def send_start_sge(self):
        """Tell the worker to start the SGE execution daemon."""
        log.debug("\tMT: Sending START_SGE message to instance '%s'" % self.id)
        self.app.manager.console_monitor.conn.send('START_SGE', self.id)

    def send_add_s3fs(self, bucket_name, svc_roles):
        """Tell the worker to mount the given S3 bucket via s3fs."""
        msg = 'ADDS3FS | {0} | {1}'.format(bucket_name, ServiceRole.to_string(svc_roles))
        self._send_msg(msg)

    def _send_msg(self, msg):
        """
        An internal convenience method to log and send a message to the current instance.
        """
        log.debug("\tMT: Sending message '{msg}' to instance {inst}".format(msg=msg, inst=self.id))
        self.app.manager.console_monitor.conn.send(msg, self.id)

    def handle_message(self, msg):
        """Dispatch a ' | '-separated status message received from this worker.

        Recognized message types: ALIVE, GET_MOUNTPOINTS, MOUNT_DONE,
        WORKER_H_CERT, NODE_READY, NODE_STATUS, NODE_SHUTTING_DOWN.
        Any message counts as proof of life (updates is_alive/last_comm).
        """
        # log.debug( "Handling message: %s from %s" % ( msg, self.id ) )
        self.is_alive = True
        self.last_comm = Time.now()
        # Transition from states to a particular response.
        if self.app.manager.console_monitor.conn:
            msg_type = msg.split(' | ')[0]
            if msg_type == "ALIVE":
                # Worker's initial self-description: IPs, zone, type, AMI, ...
                self.worker_status = "Starting"
                log.info("Instance %s reported alive" % self.get_desc())
                msp = msg.split(' | ')
                self.private_ip = msp[1]
                self.public_ip = msp[2]
                self.zone = msp[3]
                self.type = msp[4]
                self.ami = msp[5]
                try:
                    self.local_hostname = msp[6]
                    self.num_cpus = int(msp[7])
                    self.total_memory = int(msp[8])
                    self.hostname = msp[9]
                except:
                    # Older versions of CloudMan did not pass this value so if the master
                    # and the worker are running 2 diff versions (can happen after an
                    # automatic update), don't crash here.
                    self.local_hostname = self.public_ip
                log.debug("INSTANCE_ALIVE private_ip: %s public_ip: %s zone: %s "
                          "type: %s AMI: %s local_hostname: %s, CPUs: %s, hostname: %s"
                          % (self.private_ip, self.public_ip, self.zone,
                             self.type, self.ami, self.local_hostname,
                             self.num_cpus, self.hostname))
                # Add instance IP/name to /etc/hosts
                misc.add_to_etc_hosts(self.private_ip, [self.alias, self.local_hostname,
                                                        self.hostname])
                # Instance is alive and responding.
                self.send_mount_points()
            elif msg_type == "GET_MOUNTPOINTS":
                self.send_mount_points()
            elif msg_type == "MOUNT_DONE":
                log.debug("Got MOUNT_DONE message")
                # Update the list of mount points that have mounted
                if len(msg.split(' | ')) > 1:
                    msg_body = msg.split(' | ')[1]
                    try:
                        body = json.loads(msg_body)
                        mounted_fs = body.get('mounted_fs', {})
                        # Currently, only interested in the transient FS
                        self.nfs_tfs = mounted_fs.get('transient_nfs', 0)
                        log.debug("Got transient_nfs state on {0}: {1}".format(
                            self.alias, self.nfs_tfs))
                    except ValueError, vexc:
                        log.warning('ValueError trying to decode msg: {0}'
                                    .format(vexc))
                self.app.manager.sync_etc_hosts()
                self.send_master_pubkey()
                # Add hostname to /etc/hosts (for SGE config)
                if self.app.cloud_type in ('openstack', 'eucalyptus'):
                    hn2 = ''
                    if '.' in self.local_hostname:
                        hn2 = (self.local_hostname).split('.')[0]
                    worker_host_line = '{ip} {hn1} {hn2}\n'.format(ip=self.private_ip,
                                                                   hn1=self.local_hostname,
                                                                   hn2=hn2)
                    log.debug("worker_host_line: {0}".format(worker_host_line))
                    with open('/etc/hosts', 'r+') as f:
                        hosts = f.readlines()
                        if worker_host_line not in hosts:
                            log.debug("Adding worker {0} to /etc/hosts".format(
                                self.local_hostname))
                            f.write(worker_host_line)
                if self.app.cloud_type == 'opennebula':
                    f = open("/etc/hosts", 'a')
                    f.write("%s\tworker-%s\n" % (self.private_ip, self.id))
                    f.close()
                # log.debug("Update /etc/hosts through master")
                # self.app.manager.update_etc_host()
            elif msg_type == "WORKER_H_CERT":
                log.debug("Got WORKER_H_CERT message")
                self.is_alive = True  # This is for the case that an existing worker is added to a new master.
                self.app.manager.save_host_cert(msg.split(" | ")[1])
                log.debug("Worker '%s' host certificate received and appended "
                          "to /root/.ssh/known_hosts" % self.id)
                for job_manager_svc in self.app.manager.service_registry.active(
                        service_role=ServiceRole.JOB_MANAGER):
                    job_manager_svc.add_node(self)
                    # Instruct the worker to start appropriate job manager daemon
                    if ServiceRole.SLURMCTLD in job_manager_svc.svc_roles:
                        self.send_start_slurmd()
                    else:
                        self.send_start_sge()
                else:
                    # NOTE(review): this is a for/else with no `break` in the
                    # loop, so the warning fires even when nodes WERE added --
                    # likely intended as "if the iterable was empty"; confirm.
                    log.warning('Could not get a handle on job manager service to '
                                'add node {0}'.format(self.get_desc()))
                # If there are any bucket-based FSs, tell the worker to add those
                fss = self.app.manager.get_services(svc_type=ServiceType.FILE_SYSTEM)
                for fs in fss:
                    if len(fs.buckets) > 0:
                        for b in fs.buckets:
                            self.send_add_s3fs(b.bucket_name, fs.svc_roles)
                log.info("Waiting on worker instance %s to configure itself." % self.get_desc())
            elif msg_type == "NODE_READY":
                self.worker_status = "Ready"
                log.info("Instance %s ready" % self.get_desc())
                # Make sure the instance is tagged (this is also necessary to do
                # here for OpenStack because it does not allow tags to be added
                # until an instance is 'running')
                self.app.cloud_interface.add_tag(self.inst, 'clusterName', self.app.config['cluster_name'])
                self.app.cloud_interface.add_tag(self.inst, 'role', 'worker')
                self.app.cloud_interface.add_tag(self.inst, 'alias', self.alias)
                self.app.cloud_interface.add_tag(
                    self.inst, 'Name', "Worker: {0}".format(self.app.config['cluster_name']))
                self.app.manager.update_condor_host(self.public_ip)
            elif msg_type == "NODE_STATUS":
                # log.debug("Node {0} status message: {1}".format(self.get_desc(), msg))
                if not self.worker_status == 'Stopping':
                    # Positional status fields as sent by the worker
                    msplit = msg.split(' | ')
                    self.nfs_data = msplit[1]
                    self.nfs_tools = msplit[2]  # Workers currently do not update this field
                    self.nfs_indices = msplit[3]
                    self.nfs_sge = msplit[4]
                    self.get_cert = msplit[5]
                    self.sge_started = msplit[6]
                    self.load = msplit[7]
                    self.worker_status = msplit[8]
                    self.nfs_tfs = msplit[9]
                    self.slurmd_running = msplit[10]
                else:
                    log.debug("Worker {0} in state Stopping so not updating status"
                              .format(self.get_desc()))
            elif msg_type == 'NODE_SHUTTING_DOWN':
                msplit = msg.split(' | ')
                self.worker_status = msplit[1]
            else:  # Catch-all condition
                log.debug("Unknown Message: %s" % msg)
        else:
            log.error("Epic Failure, squeue not available?")
|
h5storageserver.py | ###############################################################################
# Storage array data streamer
# Urs Utzinger 2020
###############################################################################
###############################################################################
# Imports
###############################################################################
# Multi Threading
from threading import Thread
from queue import Empty, Queue
# System
import logging, time
# HDF5
import h5py
###############################################################################
# HDF5 Storage Server
###############################################################################
class h5Server(Thread):
    """
    Threaded HDF5 data-cube saver.

    Producers put ``(timestamp, data_cube)`` tuples on ``queue``; the worker
    thread writes each cube to its own dataset (named by the timestamp) in
    the HDF5 file. Status and error messages are emitted on ``log`` as
    ``(logging_level, message)`` tuples so the GUI thread can display them.
    """

    def __init__(self, filename=None):
        """Open the HDF5 output file and set up the queues.

        :param filename: path of the HDF5 file to create (required).
        :raises ValueError: if no filename is given.
        :raises Exception: re-raises whatever h5py raises if the file
            cannot be created.
        """
        # Threading queues and state
        self.queue = Queue(maxsize=32)   # incoming (time, cube) tuples
        self.log = Queue(maxsize=32)     # outgoing (level, message) tuples
        self.stopped = True
        self.measured_cps = 0.0          # measured cubes-per-second
        # Initialize HDF5 output file.
        # BUGFIX: the original did ``return False`` from __init__, which is
        # illegal (raises "TypeError: __init__() should return None"); report
        # the problem on the log queue and raise a real exception instead.
        if filename is None:
            self.log.put_nowait((logging.ERROR, "HDF5:Need to provide filename to store data!"))
            raise ValueError("h5Server requires a filename to store data")
        try:
            self.hdf5 = h5py.File(filename, 'w')
        except Exception:
            self.log.put_nowait((logging.ERROR, "HDF5:Could not create HDF5!"))
            raise
        Thread.__init__(self)

    # Thread routines #################################################
    # Start Stop and Update Thread
    ###################################################################
    def stop(self):
        """Request the worker loop to terminate (file is closed by the loop)."""
        self.stopped = True

    def start(self):
        """Set the start conditions and launch the background worker thread."""
        self.stopped = False
        T = Thread(target=self.update)
        T.daemon = True  # run in background; do not block interpreter exit
        T.start()

    # After starting the thread, this runs continuously
    def update(self):
        """Worker loop: drain the queue and store cubes until stopped."""
        last_time = time.time()
        num_cubes = 0
        while not self.stopped:
            # BUGFIX: use a timeout so stop() takes effect even when no data
            # arrives; an indefinitely blocking get() kept the thread (and the
            # open file) alive forever on an idle queue.
            try:
                (cube_time, data_cube) = self.queue.get(block=True, timeout=0.5)
            except Empty:
                continue
            self.dset = self.hdf5.create_dataset(str(cube_time), data=data_cube)  # ~11ms
            num_cubes += 1
            # Storage throughput calculation, reported every 5 seconds
            current_time = time.time()
            if (current_time - last_time) >= 5.0:
                self.measured_cps = num_cubes / 5.0
                self.log.put_nowait((logging.INFO, "HDF5:CPS:{}".format(self.measured_cps)))
                last_time = current_time
                num_cubes = 0
        self.hdf5.close()
###############################################################################
# Testing
###############################################################################
if __name__ == '__main__':
    # Smoke test: stream random data cubes into the HDF5 server while
    # showing one slice of each cube in an OpenCV window.
    import numpy as np
    from datetime import datetime
    import cv2
    display_interval = 0.01  # seconds between displayed frames
    height = 540
    width = 720
    depth = 14
    # One synthetic data cube, reused for every enqueue.
    cube = np.random.randint(0, 255, (depth, height, width), 'uint8')
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("HDF5")
    # Setting up Storage: timestamped output file
    now = datetime.now()
    filename = now.strftime("%Y%m%d%H%M%S") + ".hdf5"
    hdf5 = h5Server("C:\\temp\\" + filename)
    logger.log(logging.DEBUG, "Starting HDF5 Server")
    hdf5.start()
    window_handle = cv2.namedWindow("HDF5", cv2.WINDOW_AUTOSIZE)
    font = cv2.FONT_HERSHEY_SIMPLEX
    textLocation = (10, 20)
    fontScale = 1
    fontColor = (255, 255, 255)
    lineType = 2
    last_display = time.time()
    num_frame = 0
    # Run until the window is closed or ESC/'q' is pressed.
    while(cv2.getWindowProperty("HDF5", 0) >= 0):
        current_time = time.time()
        if (current_time - last_display) > display_interval:
            frame = cube[0, :, :].copy()
            # NOTE(review): cv2.putText's 7th positional argument is
            # ``thickness`` — ``lineType`` here lands in that slot; confirm intent.
            cv2.putText(frame, "Frame:{}".format(num_frame), textLocation, font, fontScale, fontColor, lineType)
            cv2.imshow('HDF5', frame)
            num_frame += 1
            last_display = current_time
            key = cv2.waitKey(1)
            if (key == 27) or (key & 0xFF == ord('q')):
                break
        # Hand the cube to the storage thread; drop it when the queue is full.
        try:
            hdf5.queue.put_nowait((current_time, cube))
        except:
            pass
        # Forward log messages produced by the storage thread.
        while not hdf5.log.empty():
            (level, msg) = hdf5.log.get_nowait()
            logger.log(level, "HDF5:{}".format(msg))
    hdf5.stop()
    cv2.destroyAllWindows()
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
import requests
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.graphrbac.models import (ApplicationCreateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.models import ContainerServiceServicePrincipalProfile
from azure.mgmt.containerservice.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.models import ContainerServiceStorageProfileTypes
from azure.mgmt.containerservice.v2018_03_31.models import ManagedCluster
from azure.mgmt.containerservice.v2018_03_31.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.v2018_03_31.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.v2018_03_31.models import ManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.models import OpenShiftManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.models import OpenShiftAgentPoolProfileRole
from azure.mgmt.containerservice.models import OpenShiftManagedClusterIdentityProvider
from azure.mgmt.containerservice.models import OpenShiftManagedClusterAADIdentityProvider
from azure.mgmt.containerservice.models import OpenShiftManagedCluster
from azure.mgmt.containerservice.models import OpenShiftRouterProfile
from azure.mgmt.containerservice.models import OpenShiftManagedClusterAuthProfile
from azure.mgmt.containerservice.models import NetworkProfile
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
    """Search the PATH environment variable for an executable named *binary*.

    On Windows the ``.exe`` suffix is appended and ``;`` separates entries;
    elsewhere ``:`` does. Returns the first matching full path, else None.
    """
    search_path = os.getenv('PATH')
    if platform.system() == 'Windows':
        binary = binary + '.exe'
        candidates = search_path.split(';')
    else:
        candidates = search_path.split(':')
    for directory in candidates:
        candidate = os.path.join(directory, binary)
        is_runnable = (os.path.exists(candidate) and
                       os.path.isfile(candidate) and
                       os.access(candidate, os.X_OK))
        if is_runnable:
            return candidate
    return None
def wait_then_open(url):
    """
    Waits for a bit then opens a URL.  Useful for waiting for a proxy to come
    up, and then open the URL.

    Polls *url* up to 9 times, sleeping 1 second after each failed attempt,
    then opens it in a new browser tab.
    """
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
        except URLError:
            time.sleep(1)
            continue
        # BUGFIX: the ``break`` used to execute unconditionally at the end of
        # the loop body, so only a single attempt was ever made; break only
        # once the URL actually answered.
        break
    webbrowser.open_new_tab(url)
def wait_then_open_async(url):
    """
    Spawns a daemon thread that waits for a bit then opens a URL.
    """
    # BUGFIX: ``args`` must be a tuple; the original passed the set literal
    # ``{url}``, which only worked by accident because a one-element set
    # happens to unpack to a single positional argument.
    t = threading.Thread(target=wait_then_open, args=(url,))
    t.daemon = True
    t.start()
def acs_browse(cmd, client, resource_group, name, disable_browser=False, ssh_key_file=None):
    """
    Opens a browser to the web interface for the cluster orchestrator.

    :param name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group: Name of Azure container service's resource group.
    :type resource_group: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy.
    :type disable_browser: bool
    :param ssh_key_file: If set, a path to an SSH key to use; only applies to DCOS.
    :type ssh_key_file: string
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
    _acs_browse_internal(cmd, client, cluster_info, resource_group, name,
                         disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group, name, disable_browser, ssh_key_file):
    """Dispatch the browse command to the orchestrator-specific helper."""
    orchestrator_type = acs_info.orchestrator_profile.orchestrator_type  # pylint: disable=no-member
    # Kubernetes may be indicated by the string name, the enum value, or a
    # custom profile entry.
    is_kubernetes = (
        str(orchestrator_type).lower() == 'kubernetes' or
        orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or
        (acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'))  # pylint: disable=no-member
    if is_kubernetes:
        return k8s_browse(cmd, client, name, resource_group, disable_browser, ssh_key_file=ssh_key_file)
    if str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
        return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
    raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group, disable_browser=False, ssh_key_file=None):
    """
    Launch a proxy and browse the Kubernetes web UI.

    :param disable_browser: If true, don't launch a web browser after establishing the proxy.
    :type disable_browser: bool
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
    _k8s_browse_internal(name, cluster_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
    """Fetch cluster credentials, then run ``kubectl proxy`` so the Kubernetes
    dashboard is reachable on 127.0.0.1:8001 (blocks until CTRL+C).
    """
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
    # Start from a clean kubeconfig so stale credentials are not reused.
    if os.path.exists(browse_path):
        os.remove(browse_path)
    _k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)
    logger.warning('Proxy running on 127.0.0.1:8001/ui')
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async('http://127.0.0.1:8001/ui')
    # Blocks until the user terminates the proxy.
    subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group, disable_browser=False, ssh_key_file=None):
    """
    Creates an SSH tunnel to the Azure container service, and opens the
    Mesosphere DC/OS dashboard in the browser.

    :param name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group: Name of Azure container service's resource group.
    :type resource_group: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy.
    :type disable_browser: bool
    :param ssh_key_file: Path to the SSH key to use.
    :type ssh_key_file: string
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
    _dcos_browse_internal(cluster_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
    """SSH to the DC/OS master, start the remote octarine proxy, tunnel it to
    a local port, and browse the dashboard through it (blocks until CTRL+C).
    """
    if not os.path.isfile(ssh_key_file):
        raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
    acs = acs_client.ACSClient()
    if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
                       key_filename=ssh_key_file):
        raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))
    octarine_bin = '/opt/mesosphere/bin/octarine'
    if not acs.file_exists(octarine_bin):
        raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
    # Random id so concurrent sessions get distinct proxy instances.
    proxy_id = _rand_str(16)
    proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
    acs.run(proxy_cmd, background=True)
    # Parse the output to get the remote PORT
    proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
    stdout, _ = acs.run(proxy_client_cmd)
    remote_port = int(stdout.read().decode().strip())
    local_port = acs.get_available_local_port()
    # Set the proxy so browser traffic is routed through the tunnel.
    proxy.set_http_proxy('127.0.0.1', local_port)
    logger.warning('Proxy running on 127.0.0.1:%s', local_port)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async('http://127.0.0.1')
    try:
        # Blocks until interrupted; proxy settings restored in finally.
        acs.create_tunnel(
            remote_host='127.0.0.1',
            remote_port=remote_port,
            local_port=local_port)
    finally:
        proxy.disable_http_proxy()
    return
def acs_install_cli(cmd, client, resource_group, name, install_location=None, client_version=None):
    """Install the CLI matching the cluster's orchestrator (kubectl or dcos)."""
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
    orchestrator_type = acs_info.orchestrator_profile.orchestrator_type  # pylint: disable=no-member
    kwargs = {'install_location': install_location}
    if client_version:
        kwargs['client_version'] = client_version
    # Dispatch table instead of an if/elif chain.
    installers = {
        'kubernetes': k8s_install_cli,
        'dcos': dcos_install_cli,
    }
    installer = installers.get(orchestrator_type)
    if installer is None:
        raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
    return installer(**kwargs)
def _ssl_context():
    """Return an SSL context usable with urlopen on this platform/Python."""
    needs_legacy_context = (sys.version_info < (3, 4) or
                            (in_cloud_console() and platform.system() == 'Windows'))
    if not needs_legacy_context:
        return ssl.create_default_context()
    try:
        return ssl.SSLContext(ssl.PROTOCOL_TLS)  # added in python 2.7.13 and 3.6
    except AttributeError:
        return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def _urlretrieve(url, filename):
    """Download *url* to *filename* in binary mode, using _ssl_context()
    for TLS verification.
    """
    req = urlopen(url, context=_ssl_context())
    try:
        with open(filename, "wb") as f:
            f.write(req.read())
    finally:
        # BUGFIX: the response object was previously never closed.
        req.close()
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
    """
    Downloads the dcos command line from Mesosphere.

    :param install_location: full path (directory + file name) to install to.
    :param client_version: dcos CLI version to download.
    :raises CLIError: on missing install location, unsupported OS, or
        download failure.
    """
    system = platform.system()
    if not install_location:
        raise CLIError(
            "No install location specified and it could not be determined from the current platform '{}'".format(
                system))
    base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
    if system == 'Windows':
        file_url = base_url.format('windows', client_version, 'dcos.exe')
    elif system == 'Linux':
        # TODO Support ARM CPU here
        file_url = base_url.format('linux', client_version, 'dcos')
    elif system == 'Darwin':
        file_url = base_url.format('darwin', client_version, 'dcos')
    else:
        # BUGFIX: the previous message ("Proxy server ... does not exist on
        # the cluster.") was copy-pasted from an unrelated error path.
        raise CLIError("Unsupported system '{}'.".format(system))
    logger.warning('Downloading client to %s', install_location)
    try:
        _urlretrieve(file_url, install_location)
        # Mark the downloaded binary executable for user/group/other.
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as err:
        raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None):
    """Install kubectl, a command-line interface for Kubernetes clusters.

    :param client_version: 'latest' (resolved via stable.txt) or an explicit
        version such as '1.10.0'.
    :param install_location: full path (directory + file name) to install to.
    :raises CLIError: on unsupported OS or download failure.
    """
    source_url = "https://storage.googleapis.com/kubernetes-release/release"
    cloud_name = cmd.cli_ctx.cloud.name
    # Use the Azure China mirror where googleapis.com is unreachable.
    if cloud_name.lower() == 'azurechinacloud':
        source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
    if client_version == 'latest':
        context = _ssl_context()
        version = urlopen(source_url + '/stable.txt', context=context).read()
        client_version = version.decode('UTF-8').strip()
    else:
        client_version = "v%s" % client_version
    file_url = ''
    system = platform.system()
    base_url = source_url + '/{}/bin/{}/amd64/{}'
    # ensure installation directory exists
    install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
    if not os.path.exists(install_dir):
        os.makedirs(install_dir)
    if system == 'Windows':
        file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
    elif system == 'Linux':
        # TODO: Support ARM CPU here
        file_url = base_url.format(client_version, 'linux', 'kubectl')
    elif system == 'Darwin':
        file_url = base_url.format(client_version, 'darwin', 'kubectl')
    else:
        # BUGFIX: the previous message ("Proxy server ... does not exist on
        # the cluster.") was copy-pasted from an unrelated error path.
        raise CLIError("Unsupported system '{}'.".format(system))
    logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
    try:
        _urlretrieve(file_url, install_location)
        # Mark the downloaded binary executable for user/group/other.
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as ex:
        raise CLIError('Connection error while attempting to download client ({})'.format(ex))
    if system == 'Windows':  # be verbose, as the install_location likely not in Windows's search PATHs
        env_paths = os.environ['PATH'].split(';')
        found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
        if not found:
            # pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
                           '    1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
                           'This is good for the current command session.\n'
                           '    2. Update system PATH environment variable by following '
                           '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
                           'You only need to do it once'.format(install_dir, cli))
    else:
        logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
                       install_dir, cli)
def k8s_install_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
                          location=None, service_principal=None, client_secret=None,
                          chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
    """Deploy the ACI connector chart onto an AKS cluster via Helm."""
    _k8s_install_or_upgrade_connector(
        "install", cmd, client, name, resource_group_name, connector_name,
        location, service_principal, client_secret, chart_url, os_type,
        image_tag, aci_resource_group)
def k8s_upgrade_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
                          location=None, service_principal=None, client_secret=None,
                          chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
    """Upgrade an existing ACI connector Helm release on an AKS cluster."""
    _k8s_install_or_upgrade_connector(
        "upgrade", cmd, client, name, resource_group_name, connector_name,
        location, service_principal, client_secret, chart_url, os_type,
        image_tag, aci_resource_group)
def _k8s_install_or_upgrade_connector(helm_cmd, cmd, client, name, resource_group_name, connector_name,
                                      location, service_principal, client_secret, chart_url, os_type,
                                      image_tag, aci_resource_group):
    """Shared implementation for install/upgrade of the ACI connector.

    :param helm_cmd: the Helm verb to run, "install" or "upgrade".
    Deploys one Helm release per requested OS (Linux, Windows, or both).
    """
    from subprocess import PIPE, Popen
    instance = client.get(resource_group_name, name)
    helm_not_installed = 'Helm not detected, please verify if it is installed.'
    url_chart = chart_url
    if image_tag is None:
        image_tag = 'latest'
    # Check if Helm is installed locally
    try:
        Popen(["helm"], stdout=PIPE, stderr=PIPE)
    except OSError:
        raise CLIError(helm_not_installed)
    # If SPN is specified, the secret should also be specified
    if service_principal is not None and client_secret is None:
        raise CLIError('--client-secret must be specified when --service-principal is specified')
    # Validate if the RG exists
    rg_location = _get_rg_location(cmd.cli_ctx, aci_resource_group or resource_group_name)
    # Auto assign the location
    if location is None:
        location = rg_location
    norm_location = location.replace(' ', '').lower()
    # Validate the location upon the ACI avaiable regions
    _validate_aci_location(norm_location)
    # Get the credentials from a AKS instance into a temporary kubeconfig
    _, browse_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
    subscription_id = _get_subscription_id(cmd.cli_ctx)
    # Get the TenantID
    profile = Profile(cli_ctx=cmd.cli_ctx)
    _, _, tenant_id = profile.get_login_credentials()
    # Check if we want the linux connector
    if os_type.lower() in ['linux', 'both']:
        _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
                                               client_secret, subscription_id, tenant_id, aci_resource_group,
                                               norm_location, 'Linux', instance.enable_rbac, instance.fqdn)
    # Check if we want the windows connector
    if os_type.lower() in ['windows', 'both']:
        _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
                                               client_secret, subscription_id, tenant_id, aci_resource_group,
                                               norm_location, 'Windows', instance.enable_rbac, instance.fqdn)
def _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
                                           client_secret, subscription_id, tenant_id, aci_resource_group,
                                           norm_location, os_type, use_rbac, masterFqdn):
    """Run ``helm install``/``helm upgrade`` for one ACI connector release.

    Builds the Helm ``--set`` values string from the supplied credentials and
    cluster settings, then invokes Helm.

    :raises CLIError: when the Helm command exits with a non-zero status.
    """
    rbac_install = "true" if use_rbac else "false"
    node_taint = 'azure.com/aci'
    helm_release_name = connector_name.lower() + '-' + os_type.lower() + '-' + norm_location
    node_name = 'virtual-kubelet-' + helm_release_name
    k8s_master = 'https://{}'.format(masterFqdn)
    logger.warning("Deploying the ACI connector for '%s' using Helm", os_type)
    try:
        values = 'env.nodeName={},env.nodeTaint={},env.nodeOsType={},image.tag={},rbac.install={}'.format(
            node_name, node_taint, os_type, image_tag, rbac_install)
        if service_principal:
            values += ",env.azureClientId=" + service_principal
        if client_secret:
            values += ",env.azureClientKey=" + client_secret
        if subscription_id:
            values += ",env.azureSubscriptionId=" + subscription_id
        if tenant_id:
            values += ",env.azureTenantId=" + tenant_id
        if aci_resource_group:
            values += ",env.aciResourceGroup=" + aci_resource_group
        if norm_location:
            values += ",env.aciRegion=" + norm_location
        # Currently, we need to set the master FQDN.
        # This is temporary and we should remove it when possible
        values += ",env.masterUri=" + k8s_master
        # BUGFIX: subprocess.call never raises CalledProcessError, so the
        # except clause below was dead code and Helm failures were silently
        # ignored; check_call raises on a non-zero exit status.
        if helm_cmd == "install":
            subprocess.check_call(["helm", "install", url_chart, "--name", helm_release_name, "--set", values])
        elif helm_cmd == "upgrade":
            subprocess.check_call(["helm", "upgrade", helm_release_name, url_chart, "--set", values])
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not deploy the ACI connector Chart: {}'.format(err))
def k8s_uninstall_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
                            location=None, graceful=False, os_type='Linux'):
    """Remove the ACI connector Helm release(s) and virtual-kubelet node(s)
    for the requested OS type(s).
    """
    from subprocess import PIPE, Popen
    helm_not_installed = "Error : Helm not detected, please verify if it is installed."
    # Check if Helm is installed locally
    try:
        Popen(["helm"], stdout=PIPE, stderr=PIPE)
    except OSError:
        raise CLIError(helm_not_installed)
    # Get the credentials from a AKS instance
    _, browse_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
    # Validate if the RG exists
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    # Auto assign the location
    if location is None:
        location = rg_location
    norm_location = location.replace(' ', '').lower()
    # Undeploy once per selected OS flavor (linux first, matching install order).
    requested = os_type.lower()
    for connector_os in ('linux', 'windows'):
        if requested not in (connector_os, 'both'):
            continue
        helm_release_name = connector_name.lower() + '-' + connector_os + '-' + norm_location
        node_name = 'virtual-kubelet-' + helm_release_name
        _undeploy_connector(graceful, node_name, helm_release_name)
def _undeploy_connector(graceful, node_name, helm_release_name):
    """Optionally drain, then purge one connector Helm release and delete its
    virtual-kubelet node.

    :param graceful: when True, ``kubectl drain`` the node before removal.
    :raises CLIError: when kubectl/helm are missing or any step fails.
    """
    if graceful:
        logger.warning('Graceful option selected, will try to drain the node first')
        from subprocess import PIPE, Popen
        kubectl_not_installed = 'Kubectl not detected, please verify if it is installed.'
        try:
            Popen(["kubectl"], stdout=PIPE, stderr=PIPE)
        except OSError:
            raise CLIError(kubectl_not_installed)
        try:
            drain_node = subprocess.check_output(
                ['kubectl', 'drain', node_name, '--force', '--delete-local-data'],
                universal_newlines=True)
            if not drain_node:
                raise CLIError('Could not find the node, make sure you' +
                               ' are using the correct --os-type')
        except subprocess.CalledProcessError as err:
            raise CLIError('Could not find the node, make sure you are using the correct' +
                           ' --connector-name, --location and --os-type options: {}'.format(err))
    logger.warning("Undeploying the '%s' using Helm", helm_release_name)
    try:
        # BUGFIX: subprocess.call never raises CalledProcessError, so a failed
        # ``helm del`` used to go unnoticed; check_call raises on failure.
        subprocess.check_call(['helm', 'del', helm_release_name, '--purge'])
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not undeploy the ACI connector Chart: {}'.format(err))
    try:
        subprocess.check_output(
            ['kubectl', 'delete', 'node', node_name],
            universal_newlines=True)
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not delete the node, make sure you are using the correct' +
                       ' --connector-name, --location and --os-type options: {}'.format(err))
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
    """Create an AAD application and its service principal.

    Retries SP creation up to 10 times with increasing back-off (AAD
    propagation can lag behind the application create).

    :return: the application id on success, False when all retries fail.
    """
    # use get_progress_controller
    hook = cli_ctx.get_progress_controller(True)
    # BUGFIX: keyword was misspelled ``messsage``, so this progress message
    # was never delivered as intended.
    hook.add(message='Creating service principal', value=0, total_val=1.0)
    logger.info('Creating service principal')
    # always create application with 5 years expiration
    start_date = datetime.datetime.utcnow()
    end_date = start_date + relativedelta(years=5)
    result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
                                start_date=start_date, end_date=end_date)
    service_principal = result.app_id  # pylint: disable=no-member
    for x in range(0, 10):
        hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
        try:
            create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
            break
        # TODO figure out what exception AAD throws here sometimes.
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            time.sleep(2 + 2 * x)
    else:
        # Loop exhausted without a successful create.
        return False
    hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
    logger.info('Finished service principal creation')
    return service_principal
def _add_role_assignment(cli_ctx, role, service_principal, delay=2, scope=None):
    """Assign *role* to *service_principal*, retrying while AAD propagates.

    :return: True on success (or when the assignment already exists),
        False when all 10 attempts fail.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to propagate')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
        try:
            # TODO: break this out into a shared utility library
            create_role_assignment(cli_ctx, role, service_principal, scope=scope)
            break
        except CloudError as ex:
            if ex.message == 'The role assignment already exists.':
                break
            logger.info(ex.message)
        except Exception as ex:  # pylint: disable=broad-except
            # BUGFIX: previously a bare ``except: pass`` swallowed every
            # failure silently; keep the best-effort retry but log the cause.
            logger.info(ex)
        time.sleep(delay + delay * x)
    else:
        return False
    hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
    logger.info('AAD role propagation done')
    return True
def _get_subscription_id(cli_ctx):
    """Return the subscription id associated with the current login."""
    profile = Profile(cli_ctx=cli_ctx)
    _, subscription_id, _ = profile.get_login_credentials(subscription_id=None)
    return subscription_id
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
def list_acs_locations(cmd, client):
    """Return the Azure regions where ACS is in production or in preview."""
    regions = {}
    regions["productionRegions"] = regions_in_prod
    regions["previewRegions"] = regions_in_preview
    return regions
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
                                  agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
                                  agent_ports, agent_storage_profile):
    """Build the agentPoolProfiles list, merging defaults with user profiles."""
    default_agent_pool_profile = {
        "count": int(agent_count),
        "vmSize": agent_vm_size,
        "osType": os_type,
        "dnsPrefix": dns_name_prefix + 'agent',
    }
    # The 2017-07-01 API supports additional agent configuration fields.
    if api_version == "2017-07-01":
        default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
            "count": int(agent_count),
            "vmSize": agent_vm_size,
            "osDiskSizeGB": int(agent_osdisk_size),
            "osType": os_type,
            "dnsPrefix": dns_name_prefix + 'agent',
            "vnetSubnetID": agent_vnet_subnet_id,
            "ports": agent_ports,
            "storageProfile": agent_storage_profile,
        })
    if agent_profiles is None:
        return [_update_dict(default_agent_pool_profile, {"name": "agentpool0"})]
    # override agentPoolProfiles by using the passed in agent_profiles;
    # honor a user-specified dnsPrefix, otherwise suffix with the index to
    # avoid duplicate DNS names
    return [
        _update_dict(default_agent_pool_profile,
                     _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap))
        for idx, ap in enumerate(agent_profiles)
    ]
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
               location=None, admin_username="azureuser", api_version=None, master_profile=None,
               master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
               master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
               agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
               agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
               orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
               windows=False, admin_password="", generate_ssh_keys=False,  # pylint: disable=unused-argument
               validate=False, no_wait=False):
    """Create a new Acs.
    :param resource_group_name: The name of the resource group. The name
     is case insensitive.
    :type resource_group_name: str
    :param deployment_name: The name of the deployment.
    :type deployment_name: str
    :param dns_name_prefix: Sets the Domain name prefix for the cluster.
     The concatenation of the domain name and the regionalized DNS zone
     make up the fully qualified domain name associated with the public
     IP address.
    :type dns_name_prefix: str
    :param name: Resource name for the container service.
    :type name: str
    :param ssh_key_value: Configure all linux machines with the SSH RSA
     public key string.  Your key should include three parts, for example
    'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm
    :type ssh_key_value: str
    :param admin_username: User name for the Linux Virtual Machines.
    :type admin_username: str
    :param api_version: ACS API version to use
    :type api_version: str
    :param master_profile: MasterProfile used to describe master pool
    :type master_profile: dict
    :param master_vm_size: The size of master pool Virtual Machine
    :type master_vm_size: str
    :param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
    :type master_osdisk_size: int
    :param master_count: The number of masters for the cluster.
    :type master_count: int
    :param master_vnet_subnet_id: The vnet subnet id for master pool
    :type master_vnet_subnet_id: str
    :param master_storage_profile: The storage profile used for master pool.
     Possible value could be StorageAccount, ManagedDisk.
    :type master_storage_profile: str
    :param agent_profiles: AgentPoolProfiles used to describe agent pools
    :type agent_profiles: dict
    :param agent_vm_size: The size of the Virtual Machine.
    :type agent_vm_size: str
    :param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
    :type agent_osdisk_size: int
    :param agent_vnet_subnet_id: The vnet subnet id for master pool
    :type agent_vnet_subnet_id: str
    :param agent_ports: the ports exposed on the agent pool
    :type agent_ports: list
    :param agent_storage_profile: The storage profile used for agent pool.
     Possible value could be StorageAccount, ManagedDisk.
    :type agent_storage_profile: str
    :param location: Location for VM resources.
    :type location: str
    :param orchestrator_type: The type of orchestrator used to manage the
     applications on the cluster.
    :type orchestrator_type: str or :class:`orchestratorType
     <Default.models.orchestratorType>`
    :param tags: Tags object.
    :type tags: object
    :param windows: If true, the cluster will be built for running Windows container.
    :type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
    :type admin_password: str
    :param bool raw: returns the direct response alongside the
     deserialized response
    :rtype:
    :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
     instance that returns :class:`DeploymentExtended
     <Default.models.DeploymentExtended>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Fail fast on an unusable SSH key before making any Azure calls.
    if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
        raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
    subscription_id = _get_subscription_id(cmd.cli_ctx)
    # Derive a DNS prefix from cluster/group/subscription when none was given.
    if not dns_name_prefix:
        dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location
    # if api-version is not specified, or specified in a version not supported
    # override based on location
    if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
        if location in regions_in_preview:
            api_version = "2017-07-01"  # 2017-07-01 supported in the preview locations
        else:
            api_version = "2017-01-31"  # 2017-01-31 applied to other locations
    if orchestrator_type.lower() == 'kubernetes':
        # Kubernetes clusters need a service principal; create or reuse one.
        principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
                                                  dns_name_prefix, location, name)
        client_secret = principal_obj.get("client_secret")
        service_principal = principal_obj.get("service_principal")
    elif windows:
        raise CLIError('--windows is only supported for Kubernetes clusters')
    # set location if void
    if not location:
        location = '[resourceGroup().location]'
    # set os_type
    os_type = 'Linux'
    if windows:
        os_type = 'Windows'
    # set agent_ports if void
    if not agent_ports:
        agent_ports = []
    # get windows_profile
    windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
    # The resources.properties fields should match with ContainerServices' api model
    master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
                                                       master_vm_size, master_osdisk_size, master_vnet_subnet_id,
                                                       master_first_consecutive_static_ip, master_storage_profile)
    agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
                                                        agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
                                                        agent_ports, agent_storage_profile)
    outputs = _generate_outputs(name, orchestrator_type, admin_username)
    properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
                                      agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
    # Wrap the container service resource in a minimal ARM deployment template.
    resource = {
        "apiVersion": api_version,
        "location": location,
        "type": "Microsoft.ContainerService/containerServices",
        "name": name,
        "tags": tags,
        "properties": properties,
    }
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "resources": [
            resource,
        ],
        "outputs": outputs,
    }
    params = {}
    if service_principal is not None and client_secret is not None:
        # Pass the client secret as a secureString template parameter so the
        # secret is not embedded in the template body itself.
        properties["servicePrincipalProfile"] = {
            "clientId": service_principal,
            "secret": "[parameters('clientSecret')]",
        }
        template["parameters"] = {
            "clientSecret": {
                "type": "secureString",
                "metadata": {
                    "description": "The client secret for the service principal"
                }
            }
        }
        params = {
            "clientSecret": {
                "value": client_secret
            }
        }
    # Due to SPN replication latency, we do a few retries here
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            return _invoke_deployment(cmd.cli_ctx, resource_group_name, deployment_name,
                                      template, params, validate, no_wait)
        except CloudError as ex:
            retry_exception = ex
            # Retry only on errors consistent with AAD propagation delay.
            if 'is not valid according to the validation procedure' in ex.message or \
               'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
               'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
                                file_name='acsServicePrincipal.json'):
    """Persist the service principal credentials for *subscription_id* in the CLI config dir."""
    entry = {}
    if client_secret:
        entry['client_secret'] = client_secret
    if service_principal:
        entry['service_principal'] = service_principal

    config_path = os.path.join(get_config_dir(), file_name)
    all_principals = load_service_principals(config_path=config_path) or {}
    all_principals[subscription_id] = entry

    # Create/truncate with owner-only permissions because the file holds secrets.
    fd = os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, 'w+') as sp_file:
        json.dump(all_principals, sp_file)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
    """Return the stored service principal entry for *subscription_id*, or None."""
    config_path = os.path.join(get_config_dir(), file_name)
    all_principals = load_service_principals(config_path)
    return all_principals.get(subscription_id) if all_principals else None
def load_service_principals(config_path):
    """Best-effort load of the stored service principal config file.

    :param config_path: path to the JSON config file.
    :return: the parsed content, or None when the file is missing, unreadable
        or corrupt (callers treat None as "no stored config").
    """
    if not os.path.exists(config_path):
        return None
    try:
        # Plain context-managed open replaces the previous manual
        # os.open/os.fdopen pair, which could leak the fd if fdopen failed.
        with open(config_path) as f:
            return shell_safe_json_parse(f.read())
    except Exception:  # pylint: disable=broad-except
        # Deliberate best-effort: a corrupt config is treated as absent.
        return None
def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait,
                       subscription_id=None):
    """Validate or submit an ARM template as an incremental-mode deployment."""
    from azure.mgmt.resource.resources import ResourceManagementClient
    from azure.mgmt.resource.resources.models import DeploymentProperties

    deployment_properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
    deployments = get_mgmt_service_client(cli_ctx, ResourceManagementClient,
                                          subscription_id=subscription_id).deployments
    if validate:
        logger.info('==== BEGIN TEMPLATE ====')
        logger.info(json.dumps(template, indent=2))
        logger.info('==== END TEMPLATE ====')
        return deployments.validate(resource_group_name, deployment_name, deployment_properties)
    return sdk_no_wait(no_wait, deployments.create_or_update, resource_group_name, deployment_name,
                       deployment_properties)
def k8s_get_credentials(cmd, client, name, resource_group_name,
                        path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
                        ssh_key_file=None,
                        overwrite_existing=False):
    """Download and install kubectl credentials from the cluster master.

    :param name: The name of the cluster.
    :type name: str
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param path: Where to install the kubectl config file
    :type path: str
    :param ssh_key_file: Path to an SSH key file to use
    :type ssh_key_file: str
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _k8s_get_credentials_internal(name, cluster_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
    """Copy the kube config off the cluster master over SSH and merge it into *path*."""
    if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
        raise CLIError('Private key file {} does not exist'.format(ssh_key_file))

    dns_prefix = acs_info.master_profile.dns_prefix  # pylint: disable=no-member
    location = acs_info.location  # pylint: disable=no-member
    user = acs_info.linux_profile.admin_username  # pylint: disable=no-member
    _mkdir_p(os.path.dirname(path))

    # Never clobber an existing file: probe for a free "<path>-<name>-<n>" candidate.
    path_candidate = path
    suffix = 0
    while os.path.exists(path_candidate):
        suffix += 1
        path_candidate = '{}-{}-{}'.format(path, name, suffix)

    # TODO: this only works for public cloud, need other casing for national clouds
    acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
                           '.kube/config', path_candidate, key_filename=ssh_key_file)

    # When an alternate candidate file was used, merge it back into the requested path.
    if path_candidate != path:
        try:
            merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
        except yaml.YAMLError as exc:
            logger.warning('Failed to merge credentials to kube config file: %s', exc)
            logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
if addition[key]:
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
raise CLIError('A different object named {} already exists in {}'.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
    """Parse *filename* as YAML, mapping missing-file and parse errors to CLIError."""
    try:
        with open(filename) as stream:
            return yaml.safe_load(stream)
    except (IOError, OSError) as ex:
        # Only a missing file gets the friendly message; other OS errors propagate.
        if getattr(ex, 'errno', 0) != errno.ENOENT:
            raise
        raise CLIError('{} does not exist'.format(filename))
    except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
        raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace):
    """Merge the kube config in *addition_file* into *existing_file*.

    Admin contexts from the addition get an '-admin' suffix so they don't
    shadow the user context. The merged config is written back to
    *existing_file* and the addition's current-context becomes current.

    :raises CLIError: when the addition fails to load or entries conflict.
    """
    existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)

    # Validate *before* touching the parsed data: previously a None addition
    # hit the rename loop below and crashed with AttributeError instead of
    # raising the intended CLIError.
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))

    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue

    if existing is None:
        existing = addition
    else:
        _handle_merge(existing, addition, 'clusters', replace)
        _handle_merge(existing, addition, 'users', replace)
        _handle_merge(existing, addition, 'contexts', replace)
        existing['current-context'] = addition['current-context']

    # check that ~/.kube/config is only read- and writable by its owner
    if platform.system() != 'Windows':
        existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
        if not existing_file_perms.endswith('600'):
            logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
                           existing_file, existing_file_perms)

    with open(existing_file, 'w+') as stream:
        yaml.safe_dump(existing, stream, default_flow_style=False)

    current_context = addition.get('current-context', 'UNKNOWN')
    msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
    print(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
    """
    Gets the ContainerService object from Azure REST API.

    :param name: ACS resource name
    :type name: String
    :param resource_group_name: Resource group name
    :type resource_group_name: String
    """
    return cf_container_services(cli_ctx, None).get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
    """Scale the first agent pool of an ACS cluster to *new_agent_count*."""
    instance = client.get(resource_group_name, container_service_name)
    instance.agent_pool_profiles[0].count = new_agent_count  # pylint: disable=no-member

    # Strip fields that would trip server-side validation on an update:
    # the SPN (Kubernetes only) and the Windows profile (admin password).
    if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
        instance.service_principal_profile = None
    instance.windows_profile = None

    return client.create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
    ''' List Container Services. '''
    if resource_group_name:
        services = client.list_by_resource_group(resource_group_name=resource_group_name)
    else:
        services = client.list()
    return list(services)
def show_service_principal(client, identifier):
    """Resolve *identifier* to an object id and fetch that service principal."""
    return client.get(_resolve_service_principal(client, identifier))
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None, required_resource_accesses=None):
    """Register an AAD application, translating permission failures into a helpful CLIError."""
    from azure.graphrbac.models import GraphErrorException

    password_creds, key_creds = _build_application_creds(password, key_value, key_type,
                                                         key_usage, start_date, end_date)
    create_params = ApplicationCreateParameters(
        available_to_other_tenants=available_to_other_tenants,
        display_name=display_name,
        identifier_uris=identifier_uris,
        homepage=homepage,
        reply_urls=reply_urls,
        key_credentials=key_creds,
        password_credentials=password_creds,
        required_resource_access=required_resource_accesses)
    try:
        return client.create(create_params)
    except GraphErrorException as ex:
        if 'insufficient privileges' not in str(ex).lower():
            raise
        link = 'https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal'  # pylint: disable=line-too-long
        raise CLIError("Directory permission is needed for the current user to register the application. "
                       "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
def update_application(client, object_id, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None, required_resource_accesses=None):
    """Update an AAD application's key and/or password credentials.

    NOTE(review): only the credential arguments take effect here; the profile
    arguments (display_name, homepage, ...) are accepted for signature parity
    with create_application but are not sent — confirm whether intentional.
    """
    from azure.graphrbac.models import GraphErrorException

    password_creds, key_creds = _build_application_creds(password, key_value, key_type,
                                                         key_usage, start_date, end_date)
    try:
        if key_creds:
            client.update_key_credentials(object_id, key_creds)
        if password_creds:
            client.update_password_credentials(object_id, password_creds)
        return
    except GraphErrorException as ex:
        if 'insufficient privileges' not in str(ex).lower():
            raise
        link = 'https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal'  # pylint: disable=line-too-long
        raise CLIError("Directory permission is needed for the current user to register the application. "
                       "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
def _build_application_creds(password=None, key_value=None, key_type=None,
                             key_usage=None, start_date=None, end_date=None):
    """Build the (password_creds, key_creds) pair for an AAD application.

    At most one of *password* / *key_value* may be supplied. Dates default to
    now and now + 1 year; string dates are parsed. Exactly one slot of the
    returned tuple is a single-element list, the other None (both None when
    neither credential was given).
    """
    if password and key_value:
        raise CLIError('specify either --password or --key-value, but not both.')

    def _as_datetime(value, fallback):
        # Accept None (use fallback), a parseable string, or a datetime.
        if not value:
            return fallback()
        if isinstance(value, str):
            return dateutil.parser.parse(value)
        return value

    start_date = _as_datetime(start_date, datetime.datetime.utcnow)
    end_date = _as_datetime(end_date, lambda: start_date + relativedelta(years=1))
    key_type = key_type or 'AsymmetricX509Cert'
    key_usage = key_usage or 'Verify'

    password_creds = None
    key_creds = None
    if password:
        password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
                                             key_id=str(uuid.uuid4()), value=password)]
    elif key_value:
        key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
                                   key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
    return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
    """Create a service principal for an application.

    With *resolve_app*, *identifier* may be an app id (GUID), an identifier
    URI, or an application object id; otherwise it is used as the app id as-is.
    """
    if rbac_client is None:
        rbac_client = get_graph_rbac_management_client(cli_ctx)

    app_id = identifier
    if resolve_app:
        try:
            uuid.UUID(identifier)
            matches = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
        except ValueError:
            matches = list(rbac_client.applications.list(
                filter="identifierUris/any(s:s eq '{}')".format(identifier)))
        if not matches:  # assume we get an object id
            matches = [rbac_client.applications.get(identifier)]
        app_id = matches[0].app_id

    return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None):
    """Assign *role* to *assignee* at *scope* (or resource group / subscription).

    Thin public wrapper over _create_role_assignment with assignee resolution enabled.
    """
    return _create_role_assignment(cli_ctx, role, assignee, resource_group_name, scope)
def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None, resolve_assignee=True):
    """Create a role assignment; *role* may be a definition id (GUID) or a role name."""
    from azure.cli.core.profiles import ResourceType, get_sdk

    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions

    scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
    role_id = _resolve_role_id(role, scope, definitions_client)
    principal_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee

    RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                             'RoleAssignmentCreateParameters', mod='models',
                                             operation_group='role_assignments')
    parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=principal_id)
    # A random GUID serves as the assignment name; no custom headers are needed.
    return assignments_client.create(scope, uuid.uuid4(), parameters, custom_headers=None)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
elif len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
    """Resolve *assignee* (UPN, SPN, or object id) to a graph object id."""
    client = get_graph_rbac_management_client(cli_ctx)
    matches = None
    if '@' in assignee:  # looks like a user principal name
        matches = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
    if not matches:
        matches = list(client.service_principals.list(
            filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
    if not matches:  # assume an object id, let us verify it
        matches = _get_object_stubs(client, [assignee])
    # 2+ matches should never happen, so we only check 'no match' here
    if not matches:
        raise CLIError("No matches in graph database for '{}'".format(assignee))
    return matches[0].object_id
def _get_object_stubs(graph_client, assignees):
    """Fetch directory object stubs for the given object ids."""
    query = GetObjectsParameters(include_directory_object_references=True,
                                 object_ids=assignees)
    return list(graph_client.objects.get_objects_by_object_ids(query))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def subnet_role_assignment_exists(cli_ctx, scope):
    """Return True when a Network Contributor role assignment already exists at *scope*."""
    network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
    assignments_client = get_auth_management_client(cli_ctx, scope).role_assignments
    return any(
        assignment.scope == scope and assignment.role_definition_id.endswith(network_contributor_role_id)
        for assignment in assignments_client.list_for_scope(scope=scope, filter='atScope()'))
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
               listen_address='127.0.0.1', listen_port='8001'):
    """Open the Kubernetes dashboard of an AKS cluster via `kubectl port-forward`.

    Fetches user credentials into a temporary kubeconfig, locates the
    dashboard pod, then proxies it locally (or through the Cloud Shell port
    service when running in Cloud Shell).

    :raises CLIError: when kubectl is missing or the dashboard pod can't be found.
    """
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
    _, browse_path = tempfile.mkstemp()
    # TODO: need to add an --admin option?
    aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
    # find the dashboard pod's name
    try:
        dashboard_pod = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system", "--output", "name",
             "--selector", "k8s-app=kubernetes-dashboard"],
            universal_newlines=True)
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard pod: {}'.format(err))
    if dashboard_pod:
        # remove any "pods/" or "pod/" prefix from the name
        dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
    else:
        raise CLIError("Couldn't find the Kubernetes dashboard pod.")
    # launch kubectl port-forward locally to access the remote dashboard
    if in_cloud_console():
        # TODO: better error handling here.
        response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
        result = json.loads(response.text)
        term_id = os.environ.get('ACC_TERM_ID')
        if term_id:
            response = requests.post('http://localhost:8888/openLink/{}'.format(term_id),
                                     json={"url": result['url']})
        logger.warning('To view the console, please open %s in a new tab', result['url'])
    else:
        logger.warning('Proxy running on %s', proxy_url)
        logger.warning('Press CTRL+C to close the tunnel...')
        if not disable_browser:
            wait_then_open_async(proxy_url)
    try:
        try:
            subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
                                     "port-forward", "--address", listen_address, dashboard_pod,
                                     "{0}:9090".format(listen_port)], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            # BUGFIX: bytes.find() returns -1 (truthy) when the substring is
            # absent, so the old `if err.output.find(...)` mis-detected almost
            # every failure as the missing --address flag. Test membership.
            if b'unknown flag: --address' in err.output:
                if listen_address != '127.0.0.1':
                    logger.warning('"--address" is only supported in kubectl v1.13 and later.')
                    logger.warning('The "--listen-address" argument will be ignored.')
                # Older kubectl: retry without the unsupported --address flag.
                subprocess.call(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
                                 "port-forward", dashboard_pod, "{0}:9090".format(listen_port)])
            else:
                raise
    except KeyboardInterrupt:
        # Let command processing finish gracefully after the user presses [Ctrl+C]
        pass
    finally:
        if in_cloud_console():
            # TODO: Better error handling here.
            # BUGFIX: close the port we actually opened (was hard-coded 8001),
            # and only when the Cloud Shell port service opened one.
            requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _validate_ssh_key(no_ssh_key, ssh_key_value):
    """Raise CLIError when an SSH key is required but missing or invalid."""
    if no_ssh_key:
        return
    try:
        valid = bool(ssh_key_value) and is_valid_ssh_rsa_public_key(ssh_key_value)
    except (TypeError, ValueError):
        valid = False
    if not valid:
        shortened_key = truncate_text(ssh_key_value)
        raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
# pylint: disable=too-many-statements
def aks_create(cmd, client, resource_group_name, name, ssh_key_value,  # pylint: disable=too-many-locals
               dns_name_prefix=None,
               location=None,
               admin_username="azureuser",
               kubernetes_version='',
               node_vm_size="Standard_DS2_v2",
               node_osdisk_size=0,
               node_count=3,
               nodepool_name="nodepool1",
               service_principal=None, client_secret=None,
               no_ssh_key=False,
               disable_rbac=None,
               enable_rbac=None,
               skip_subnet_role_assignment=False,
               network_plugin=None,
               network_policy=None,
               pod_cidr=None,
               service_cidr=None,
               dns_service_ip=None,
               docker_bridge_address=None,
               enable_addons=None,
               workspace_resource_id=None,
               vnet_subnet_id=None,
               max_pods=0,
               aad_client_app_id=None,
               aad_server_app_id=None,
               aad_server_app_secret=None,
               aad_tenant_id=None,
               tags=None,
               generate_ssh_keys=False,  # pylint: disable=unused-argument
               no_wait=False):
    """Create a managed Kubernetes (AKS) cluster.

    Builds the ManagedCluster model (agent pool, Linux/SSH profile, service
    principal, optional network, addon and AAD profiles) and submits it,
    retrying on AAD propagation errors.

    :raises CLIError: on an invalid SSH key, inconsistent network/RBAC
        arguments, or a non-retryable deployment error.
    """
    _validate_ssh_key(no_ssh_key, ssh_key_value)
    subscription_id = _get_subscription_id(cmd.cli_ctx)
    # Derive a DNS prefix from cluster/group/subscription when none was given.
    if not dns_name_prefix:
        dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location
    agent_pool_profile = ManagedClusterAgentPoolProfile(
        name=_trim_nodepoolname(nodepool_name),  # Must be 12 chars or less before ACS RP adds to it
        count=int(node_count),
        vm_size=node_vm_size,
        os_type="Linux",
        storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
        vnet_subnet_id=vnet_subnet_id,
        max_pods=int(max_pods) if max_pods else None
    )
    if node_osdisk_size:
        agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
    linux_profile = None
    # LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
    if not no_ssh_key:
        ssh_config = ContainerServiceSshConfiguration(
            public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
        linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
    # Create or reuse the cluster's service principal credentials.
    principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
                                                  service_principal=service_principal, client_secret=client_secret,
                                                  subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
                                                  location=location, name=name)
    service_principal_profile = ContainerServiceServicePrincipalProfile(
        client_id=principal_obj.get("service_principal"),
        secret=principal_obj.get("client_secret"),
        key_vault_secret_ref=None)
    # Grant the SPN Network Contributor on a custom vnet subnet, unless skipped
    # or an equivalent assignment already exists.
    if (vnet_subnet_id and not skip_subnet_role_assignment and
            not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
        scope = vnet_subnet_id
        if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                    service_principal_profile.client_id, scope=scope):
            logger.warning('Could not create a role assignment for subnet. '
                           'Are you an Owner on this subscription?')
    network_profile = None
    # Any advanced-networking argument requires an explicit plugin choice.
    if any([pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy]):
        if not network_plugin:
            raise CLIError('Please explicitly specify the network plugin type')
        if pod_cidr and network_plugin == "azure":
            raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
        network_profile = ContainerServiceNetworkProfile(
            network_plugin=network_plugin,
            pod_cidr=pod_cidr,
            service_cidr=service_cidr,
            dns_service_ip=dns_service_ip,
            docker_bridge_cidr=docker_bridge_address,
            network_policy=network_policy
        )
    addon_profiles = _handle_addons_args(
        cmd,
        enable_addons,
        subscription_id,
        resource_group_name,
        {},
        workspace_resource_id
    )
    monitoring = False
    if 'omsagent' in addon_profiles:
        monitoring = True
        _ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
    aad_profile = None
    if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
        # Default the AAD tenant to the tenant backing the current login.
        if aad_tenant_id is None:
            profile = Profile(cli_ctx=cmd.cli_ctx)
            _, _, aad_tenant_id = profile.get_login_credentials()
        aad_profile = ManagedClusterAADProfile(
            client_app_id=aad_client_app_id,
            server_app_id=aad_server_app_id,
            server_app_secret=aad_server_app_secret,
            tenant_id=aad_tenant_id
        )
    # Check that both --disable-rbac and --enable-rbac weren't provided
    if all([disable_rbac, enable_rbac]):
        raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
    mc = ManagedCluster(
        location=location, tags=tags,
        dns_prefix=dns_name_prefix,
        kubernetes_version=kubernetes_version,
        enable_rbac=False if disable_rbac else True,
        agent_pool_profiles=[agent_pool_profile],
        linux_profile=linux_profile,
        service_principal_profile=service_principal_profile,
        network_profile=network_profile,
        addon_profiles=addon_profiles,
        aad_profile=aad_profile)
    # Due to SPN replication latency, we do a few retries here
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            result = sdk_no_wait(no_wait,
                                 client.create_or_update,
                                 resource_group_name=resource_group_name,
                                 resource_name=name, parameters=mc)
            # add cluster spn with Monitoring Metrics Publisher role assignment to the cluster resource
            # mdm metrics supported only in azure public cloud so add the role assignment only in this cloud
            cloud_name = cmd.cli_ctx.cloud.name
            if cloud_name.lower() == 'azurecloud' and monitoring:
                from msrestazure.tools import resource_id
                cluster_resource_id = resource_id(
                    subscription=subscription_id,
                    resource_group=resource_group_name,
                    namespace='Microsoft.ContainerService', type='managedClusters',
                    name=name
                )
                if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
                                            service_principal_profile.client_id, scope=cluster_resource_id):
                    logger.warning('Could not create a role assignment for monitoring addon. '
                                   'Are you an Owner on this subscription?')
            return result
        except CloudError as ex:
            retry_exception = ex
            # Retry only on AAD propagation-delay errors; everything else is fatal.
            if 'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
    """Turn off the given comma-separated addons on a managed cluster."""
    instance = client.get(resource_group_name, name)
    subscription_id = _get_subscription_id(cmd.cli_ctx)
    instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons,
                              enable=False, no_wait=no_wait)
    # send the managed cluster representation to update the addon profiles
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
                      subnet_name=None, no_wait=False):
    """Turn on the given comma-separated addons, wiring up monitoring when requested."""
    instance = client.get(resource_group_name, name)
    subscription_id = _get_subscription_id(cmd.cli_ctx)
    service_principal_client_id = instance.service_principal_profile.client_id
    instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True,
                              workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, no_wait=no_wait)

    if 'omsagent' in instance.addon_profiles:
        _ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent'])

    # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
    if cmd.cli_ctx.cloud.name.lower() == 'azurecloud':
        from msrestazure.tools import resource_id
        cluster_resource_id = resource_id(
            subscription=subscription_id,
            resource_group=resource_group_name,
            namespace='Microsoft.ContainerService', type='managedClusters',
            name=name
        )
        if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
                                    service_principal_client_id, scope=cluster_resource_id):
            logger.warning('Could not create a role assignment for Monitoring addon. '
                           'Are you an Owner on this subscription?')

    # send the managed cluster representation to update the addon profiles
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_get_versions(cmd, client, location):
    """List Kubernetes orchestrator versions available in *location*."""
    orchestrators = client.list_orchestrators(location, resource_type='managedClusters')
    return orchestrators
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
                        path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
                        overwrite_existing=False):
    """Fetch the cluster's kubeconfig (user or admin) and merge it into *path*.

    Raises CLIError when the service returns no credentials or the kubeconfig
    payload cannot be decoded.
    """
    if admin:
        credential_results = client.list_cluster_admin_credentials(resource_group_name, name)
    else:
        credential_results = client.list_cluster_user_credentials(resource_group_name, name)

    if not credential_results:
        raise CLIError("No Kubernetes credentials found.")

    try:
        # first kubeconfig in the result set is the one we merge/print
        kubeconfig = credential_results.kubeconfigs[0].value.decode(encoding='UTF-8')
        _print_or_merge_credentials(path, kubeconfig, overwrite_existing)
    except (IndexError, ValueError):
        raise CLIError("Fail to find kubeconfig file.")
# Map of CLI-facing addon names (accepted by --enable-addons / --disable-addons)
# to the addon-profile keys used by the AKS resource provider.
ADDONS = {
    'http_application_routing': 'httpApplicationRouting',
    'monitoring': 'omsagent',
    'virtual-node': 'aciConnector'
}
def aks_list(cmd, client, resource_group_name=None):
    """List managed clusters, optionally scoped to a single resource group."""
    if resource_group_name:
        clusters = client.list_by_resource_group(resource_group_name)
    else:
        clusters = client.list()
    # strip distracting null fields before returning
    return _remove_nulls(list(clusters))
def aks_show(cmd, client, resource_group_name, name):
    """Show a single managed cluster with noisy null fields stripped."""
    managed_cluster = client.get(resource_group_name, name)
    cleaned = _remove_nulls([managed_cluster])
    return cleaned[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
                           reset_service_principal=False,
                           reset_aad=False,
                           service_principal=None,
                           client_secret=None,
                           aad_server_app_id=None,
                           aad_server_app_secret=None,
                           aad_client_app_id=None,
                           aad_tenant_id=None,
                           no_wait=False):
    """Reset either the service principal or the AAD profile of a managed cluster.

    Exactly one of reset_service_principal / reset_aad must be set; each mode
    then requires its own credential arguments (tenant ID is optional for AAD).
    """
    # exactly one reset mode must be selected (XOR via bool equality)
    if bool(reset_service_principal) == bool(reset_aad):
        raise CLIError('usage error: --reset-service-principal | --reset-aad-profile')
    if reset_service_principal:
        if service_principal is None or client_secret is None:
            raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
        return sdk_no_wait(no_wait,
                           client.reset_service_principal_profile,
                           resource_group_name,
                           name, service_principal, client_secret)
    # reset_aad path: the three app credentials are mandatory, tenant is not
    if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
        raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
                       '--aad-server-app-secret SECRET [--aad-tenant-id ID]')
    parameters = {
        'clientAppID': aad_client_app_id,
        'serverAppID': aad_server_app_id,
        'serverAppSecret': aad_server_app_secret,
        'tenantID': aad_tenant_id
    }
    return sdk_no_wait(no_wait,
                       client.reset_aad_profile,
                       resource_group_name,
                       name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
    """Scale the node count of an agent pool in a managed cluster.

    When nodepool_name is empty, the cluster's single agent pool is scaled;
    raises CLIError when no matching pool is found.
    """
    instance = client.get(resource_group_name, name)
    # TODO: change this approach when we support multiple agent pools.
    for agent_profile in instance.agent_pool_profiles:
        # match by name, or accept the sole pool when no name was given
        if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
            agent_profile.count = int(node_count)  # pylint: disable=no-member
            # null out the SP and AAD profile because otherwise validation complains
            instance.service_principal_profile = None
            instance.aad_profile = None
            return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
    raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
def aks_upgrade(cmd, client, resource_group_name, name, kubernetes_version, no_wait=False, **kwargs):  # pylint: disable=unused-argument
    """Upgrade a managed cluster to the given Kubernetes version.

    Re-submitting the current version is allowed (warned about) because it can
    help recover a cluster stuck in a Failed provisioning state.
    """
    instance = client.get(resource_group_name, name)

    if instance.kubernetes_version == kubernetes_version:
        if instance.provisioning_state == "Succeeded":
            logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
                           "will occur when upgrading to the same version if the cluster is not in a failed state.",
                           instance.kubernetes_version)
        elif instance.provisioning_state == "Failed":
            logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
                           "attempt resolution of failed cluster state.", instance.kubernetes_version)

    instance.kubernetes_version = kubernetes_version

    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None

    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
# Name and entry module of the Azure Dev Spaces CLI extension that the
# aks_use_dev_spaces / aks_remove_dev_spaces commands install and invoke.
DEV_SPACES_EXTENSION_NAME = 'dev-spaces-preview'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces_preview.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None, prompt=False):
    """
    Use Azure Dev Spaces with a managed Kubernetes cluster.

    :param name: Name of the managed cluster.
    :type name: String
    :param resource_group_name: Name of resource group. You can configure the default group. \
    Using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param update: Update to the latest Azure Dev Spaces client components.
    :type update: bool
    :param space_name: Name of the new or existing dev space to select. Defaults to an interactive selection experience.
    :type space_name: String
    :param prompt: Do not prompt for confirmation. Requires --space.
    :type prompt: bool
    """
    # NOTE(review): the 'prompt' description reads inverted relative to its
    # name -- confirm how the flag is wired in the command table.
    if _get_or_add_extension(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
        azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
        try:
            azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, prompt)
        except TypeError:
            # older extension versions have a different signature
            raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
        except AttributeError as ae:
            raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
    """
    Remove Azure Dev Spaces from a managed Kubernetes cluster.

    :param name: Name of the managed cluster.
    :type name: String
    :param resource_group_name: Name of resource group. You can configure the default group. \
    Using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param prompt: Do not prompt for confirmation.
    :type prompt: bool
    """
    if _get_or_add_extension(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
        azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
        try:
            azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
        except AttributeError as ae:
            raise CLIError(ae)
def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None,
                   subnet_name=None, no_wait=False):
    """Enable or disable addon profiles on a ManagedCluster instance (in place).

    :param addons: comma-separated CLI addon names (keys of ADDONS).
    :param enable: True to enable the listed addons, False to disable them.
    Returns the mutated instance, ready to be sent back to the service.
    """
    # parse the comma-separated addons argument
    addon_args = addons.split(',')

    addon_profiles = instance.addon_profiles or {}

    # only linux is supported for now, in the future this will be a user flag
    os_type = 'Linux'

    for addon_arg in addon_args:
        # map the CLI-facing name to the service-side profile key; fail with a
        # friendly error instead of a raw KeyError traceback for unknown names
        if addon_arg not in ADDONS:
            raise CLIError('"{}" is not a recognized addon. Available addons: {}.'.format(
                addon_arg, ', '.join(sorted(ADDONS))))
        addon = ADDONS[addon_arg]
        if addon == 'aciConnector':
            addon += os_type
        # addon name is case insensitive
        addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon)
        if enable:
            # add new addons or update existing ones and enable them
            addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
            # special config handling for certain addons
            if addon == 'omsagent':
                if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring"'
                                   'before enabling it again.')
                if not workspace_resource_id:
                    workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                        cmd,
                        subscription_id,
                        resource_group_name)
                # normalize to a canonical ARM ID: trimmed, one leading slash, no trailing slash
                workspace_resource_id = workspace_resource_id.strip()
                if not workspace_resource_id.startswith('/'):
                    workspace_resource_id = '/' + workspace_resource_id
                if workspace_resource_id.endswith('/'):
                    workspace_resource_id = workspace_resource_id.rstrip('/')
                addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
            elif addon.lower() == ('aciConnector' + os_type).lower():
                if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.')
                if not subnet_name:
                    raise CLIError('The aci-connector addon requires setting a subnet name.')
                addon_profile.config = {'SubnetName': subnet_name}
            addon_profiles[addon] = addon_profile
        else:
            if addon not in addon_profiles:
                raise CLIError("The addon {} is not installed.".format(addon))
            # disabling: drop any config before flipping the flag below
            addon_profiles[addon].config = None
        addon_profiles[addon].enabled = enable

    instance.addon_profiles = addon_profiles

    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None

    return instance
def _get_azext_module(extension_name, module_name):
    """Import and return *module_name* from an installed CLI extension.

    Raises CLIError when the extension or its module cannot be imported.
    """
    try:
        from azure.cli.core.extension.operations import add_extension_to_path
        from importlib import import_module
        # make the installed extension visible on sys.path, then load it
        add_extension_to_path(extension_name)
        return import_module(module_name)
    except ImportError as ie:
        raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('http_application_routing')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles['omsagent'] = ManagedClusterAddonProfile(
enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
def _install_dev_spaces_extension(extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(extension_name=extension_name)
except Exception: # nopa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(extension_name, extension_module):
    """Update and reload the named CLI extension.

    Returns False when the extension is missing or its module fails to load;
    CLIError from the update step is only logged (still returns True).
    """
    from azure.cli.core.extension import ExtensionNotInstalledException
    try:
        from azure.cli.core.extension import operations
        operations.update_extension(extension_name=extension_name)
        operations.reload_extension(extension_name=extension_name)
        return True
    except CLIError as err:
        # update failure is non-fatal; fall through to the success return
        logger.info(err)
    except ExtensionNotInstalledException as err:
        logger.debug(err)
        return False
    except ModuleNotFoundError as err:
        logger.debug(err)
        logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
        return False
    return True
def _get_or_add_extension(extension_name, extension_module, update=False):
    """Ensure the CLI extension is available, installing or updating as needed.

    Returns True when the extension is ready to use.
    """
    from azure.cli.core.extension import ExtensionNotInstalledException, get_extension
    try:
        get_extension(extension_name)
        if update:
            return _update_dev_spaces_extension(extension_name, extension_module)
    except ExtensionNotInstalledException:
        # not installed yet: install it now
        return _install_dev_spaces_extension(extension_name)
    return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
    """Return the default Log Analytics workspace resource ID for monitoring,
    creating the workspace (and its resource group) when it does not exist.

    The workspace region is derived from the cluster resource group's region
    via the per-cloud maps below; unmapped regions fall back to the default.
    """
    # mapping for azure public cloud
    # log analytics workspaces cannot be created in WCUS region due to capacity limits
    # so mapped to EUS per discussion with log analytics team
    AzureCloudLocationToOmsRegionCodeMap = {
        "eastus": "EUS",
        "westeurope": "WEU",
        "southeastasia": "SEA",
        "australiasoutheast": "ASE",
        "usgovvirginia": "USGV",
        "westcentralus": "EUS",
        "japaneast": "EJP",
        "uksouth": "SUK",
        "canadacentral": "CCA",
        "centralindia": "CIN",
        "eastus2euap": "EAP"
    }
    AzureCloudRegionToOmsRegionMap = {
        "australiaeast": "australiasoutheast",
        "australiasoutheast": "australiasoutheast",
        "brazilsouth": "eastus",
        "canadacentral": "canadacentral",
        "canadaeast": "canadacentral",
        "centralus": "eastus",
        "eastasia": "southeastasia",
        "eastus": "eastus",
        "eastus2": "eastus",
        "japaneast": "japaneast",
        "japanwest": "japaneast",
        "northcentralus": "eastus",
        "northeurope": "westeurope",
        "southcentralus": "eastus",
        "southeastasia": "southeastasia",
        "uksouth": "uksouth",
        "ukwest": "uksouth",
        "westcentralus": "eastus",
        "westeurope": "westeurope",
        "westus": "eastus",
        "westus2": "eastus",
        "centralindia": "centralindia",
        "southindia": "centralindia",
        "westindia": "centralindia",
        "koreacentral": "southeastasia",
        "koreasouth": "southeastasia",
        "francecentral": "westeurope",
        "francesouth": "westeurope"
    }

    # mapping for azure china cloud
    # currently log analytics supported only China East 2 region
    AzureChinaLocationToOmsRegionCodeMap = {
        "chinaeast": "EAST2",
        "chinaeast2": "EAST2",
        "chinanorth": "EAST2",
        "chinanorth2": "EAST2"
    }
    AzureChinaRegionToOmsRegionMap = {
        "chinaeast": "chinaeast2",
        "chinaeast2": "chinaeast2",
        "chinanorth": "chinaeast2",
        "chinanorth2": "chinaeast2"
    }

    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)

    default_region_name = "eastus"
    default_region_code = "EUS"
    workspace_region = default_region_name
    workspace_region_code = default_region_code

    cloud_name = cmd.cli_ctx.cloud.name
    if cloud_name.lower() == 'azurecloud':
        # Use dict.get so regions missing from the map fall back to the
        # default instead of raising KeyError: the previous
        # `Map[rg_location] if Map[rg_location] else default` indexing crashed
        # for any unmapped region before the intended fallback could apply.
        workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, default_region_name)
        workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, default_region_code)
    elif cloud_name.lower() == 'azurechinacloud':
        default_region_name = "chinaeast2"
        default_region_code = "EAST2"
        workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, default_region_name)
        workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, default_region_code)
    else:
        logger.error("AKS Monitoring addon not supported in cloud : %s", cloud_name)

    default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
    default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
    default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
        '/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
    resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
    resources = cf_resources(cmd.cli_ctx, subscription_id)

    # check if default RG exists
    if resource_groups.check_existence(default_workspace_resource_group):
        try:
            resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
            return resource.id
        except CloudError as ex:
            # 404 => the RG exists but the workspace does not; create it below
            if ex.status_code != 404:
                raise ex
    else:
        resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})

    default_workspace_params = {
        'location': workspace_region,
        'properties': {
            'sku': {
                'name': 'standalone'
            }
        }
    }
    async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
                                                    default_workspace_params)

    # poll (15s slices) until the workspace creation completes
    ws_resource_id = ''
    while True:
        result = async_poller.result(15)
        if async_poller.done():
            ws_resource_id = result.id
            break

    return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
    """Deploy the Container Insights solution onto the addon's Log Analytics
    workspace via an ARM deployment into the workspace's resource group.

    *addon* is the omsagent addon profile; its config must carry the
    workspace resource ID.
    """
    # Workaround for this addon key which has been seen lowercased in the wild.
    if 'loganalyticsworkspaceresourceid' in addon.config:
        addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid')

    workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID']

    # normalize the ID: trimmed, one leading slash, no trailing slash
    workspace_resource_id = workspace_resource_id.strip()
    if not workspace_resource_id.startswith('/'):
        workspace_resource_id = '/' + workspace_resource_id
    if workspace_resource_id.endswith('/'):
        workspace_resource_id = workspace_resource_id.rstrip('/')

    # extract subscription ID and resource group from workspace_resource_id URL
    try:
        subscription_id = workspace_resource_id.split('/')[2]
        resource_group = workspace_resource_id.split('/')[4]
    except IndexError:
        raise CLIError('Could not locate resource group in workspace-resource-id URL.')

    # region of workspace can be different from region of RG so find the location of the workspace_resource_id
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    try:
        resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
        location = resource.location
    except CloudError as ex:
        raise ex

    # millisecond timestamp keeps deployment names unique per invocation
    unix_time_in_millis = int(
        (datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)

    solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)

    # ARM template: a nested deployment that installs the ContainerInsights
    # solution into the workspace's own subscription/resource group.
    # pylint: disable=line-too-long
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "parameters": {
            "workspaceResourceId": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics Resource ID"
                }
            },
            "workspaceRegion": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics workspace region"
                }
            },
            "solutionDeploymentName": {
                "type": "string",
                "metadata": {
                    "description": "Name of the solution deployment"
                }
            }
        },
        "resources": [
            {
                "type": "Microsoft.Resources/deployments",
                "name": "[parameters('solutionDeploymentName')]",
                "apiVersion": "2017-05-10",
                "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
                "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
                "properties": {
                    "mode": "Incremental",
                    "template": {
                        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
                        "contentVersion": "1.0.0.0",
                        "parameters": {},
                        "variables": {},
                        "resources": [
                            {
                                "apiVersion": "2015-11-01-preview",
                                "type": "Microsoft.OperationsManagement/solutions",
                                "location": "[parameters('workspaceRegion')]",
                                "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                "properties": {
                                    "workspaceResourceId": "[parameters('workspaceResourceId')]"
                                },
                                "plan": {
                                    "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                    "product": "[Concat('OMSGallery/', 'ContainerInsights')]",
                                    "promotionCode": "",
                                    "publisher": "Microsoft"
                                }
                            }
                        ]
                    },
                    "parameters": {}
                }
            }
        ]
    }

    params = {
        "workspaceResourceId": {
            "value": workspace_resource_id
        },
        "workspaceRegion": {
            "value": location
        },
        "solutionDeploymentName": {
            "value": solution_deployment_name
        }
    }

    deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
    # publish the Container Insights solution to the Log Analytics workspace
    return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params,
                              validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_service_principal(cli_ctx,
                                  service_principal=None,
                                  client_secret=None,
                                  subscription_id=None,
                                  dns_name_prefix=None,
                                  location=None,
                                  name=None):
    """Resolve (load, validate, or create) the service principal for an AKS cluster.

    Credentials are cached per subscription in aksServicePrincipal.json; unlike
    _ensure_service_principal, no role assignment is added for a newly created SPN.
    Returns the stored principal object.
    """
    # AKS keeps its SP cache separate from the ACS one
    file_name_aks = 'aksServicePrincipal.json'
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not service_principal:
        # --service-principal not specified, try to load it from local disk
        principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks)
        if principal_obj:
            service_principal = principal_obj.get('service_principal')
            client_secret = principal_obj.get('client_secret')
        else:
            # Nothing to load, make one.
            if not client_secret:
                client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8')
            salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
            # the app's sign-on URL only needs to be unique; it is never served
            url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)

            service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
            if not service_principal:
                raise CLIError('Could not create a service principal with the right permissions. '
                               'Are you an Owner on this project?')
            logger.info('Created a service principal: %s', service_principal)
            # We don't need to add role assignment for this created SPN
    else:
        # --service-principal specfied, validate --client-secret was too
        if not client_secret:
            raise CLIError('--client-secret is required if --service-principal is specified')
    store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks)
    return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _ensure_osa_aad(cli_ctx,
                    aad_client_app_id=None,
                    aad_client_app_secret=None,
                    aad_tenant_id=None,
                    identifier=None,
                    name=None, update=False):
    """Build the AAD identity provider payload for an OpenShift managed cluster.

    When no client app ID is supplied, finds or creates an AAD application
    keyed on the cluster FQDN (*identifier*). *update* is True when the
    cluster does not exist yet (see openshift_create), in which case the app
    registration is created/refreshed with a new secret.
    """
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not aad_client_app_id:
        # NOTE(review): a secret is only auto-generated on the update=True
        # (new cluster) path -- confirm existing clusters never need one here.
        if not aad_client_app_secret and update:
            aad_client_app_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8')

        # the app is identified by its OAuth callback URL on the cluster FQDN
        reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier)
        # Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
        resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
                                         additional_properties=None, type="Scope")

        required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access],
                                                         additional_properties=None,
                                                         resource_app_id="00000002-0000-0000-c000-000000000000")

        list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
                                                               .format(reply_url)))
        if update:
            if list_aad_filtered:
                update_application(client=rbac_client.applications,
                                   object_id=list_aad_filtered[0].object_id,
                                   display_name=identifier,
                                   identifier_uris=[reply_url],
                                   reply_urls=[reply_url],
                                   homepage=reply_url,
                                   password=aad_client_app_secret,
                                   required_resource_accesses=[required_osa_aad_access])
                aad_client_app_id = list_aad_filtered[0].app_id
                logger.info('Updated AAD: %s', aad_client_app_id)
            else:
                result = create_application(client=rbac_client.applications,
                                            display_name=identifier,
                                            identifier_uris=[reply_url],
                                            reply_urls=[reply_url],
                                            homepage=reply_url,
                                            password=aad_client_app_secret,
                                            required_resource_accesses=[required_osa_aad_access])
                aad_client_app_id = result.app_id
                logger.info('Created an AAD: %s', aad_client_app_id)
        else:
            # cluster already exists: reuse the existing app registration
            # NOTE(review): assumes the filtered list is non-empty here
            # (IndexError otherwise), and sends a placeholder secret --
            # presumably ignored by the RP on no-op updates; verify.
            aad_client_app_id = list_aad_filtered[0].app_id
            aad_client_app_secret = 'whatever'

    # Get the TenantID
    if aad_tenant_id is None:
        profile = Profile(cli_ctx=cli_ctx)
        _, _, aad_tenant_id = profile.get_login_credentials()
    return OpenShiftManagedClusterAADIdentityProvider(
        client_id=aad_client_app_id,
        secret=aad_client_app_secret,
        tenant_id=aad_tenant_id,
        kind='AADIdentityProvider')
def _ensure_service_principal(cli_ctx,
                              service_principal=None,
                              client_secret=None,
                              subscription_id=None,
                              dns_name_prefix=None,
                              location=None,
                              name=None):
    """Resolve (load, validate, or create) the service principal for an ACS cluster.

    Newly created SPNs are granted the Contributor role before being cached
    in the per-subscription credential file. Returns the stored principal object.
    """
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not service_principal:
        # --service-principal not specified, try to load it from local disk
        principal_obj = load_acs_service_principal(subscription_id)
        if principal_obj:
            service_principal = principal_obj.get('service_principal')
            client_secret = principal_obj.get('client_secret')
        else:
            # Nothing to load, make one.
            if not client_secret:
                client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8')
            salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
            # the app's sign-on URL only needs to be unique; it is never served
            url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)

            service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
            if not service_principal:
                raise CLIError('Could not create a service principal with the right permissions. '
                               'Are you an Owner on this project?')
            logger.info('Created a service principal: %s', service_principal)
            # add role first before save it
            if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
                logger.warning('Could not create a service principal with the right permissions. '
                               'Are you an Owner on this project?')
    else:
        # --service-principal specfied, validate --client-secret was too
        if not client_secret:
            raise CLIError('--client-secret is required if --service-principal is specified')
    store_acs_service_principal(subscription_id, client_secret, service_principal)
    return load_acs_service_principal(subscription_id)
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
    """Return the Azure location of the given resource group."""
    groups = cf_resource_groups(ctx, subscription_id=subscription_id)
    # the get doubles as an existence check: it errors out if the group is missing
    group = groups.get(resource_group_name)
    return group.location
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing):
    """Merge an unencrypted kubeconfig into the file at the specified path, or print it to
    stdout if the path is "-".
    """
    # Special case for printing to stdout
    if path == "-":
        print(kubeconfig)
        return

    # ensure that at least an empty ~/.kube/config exists
    directory = os.path.dirname(path)
    if directory and not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as ex:
            # a concurrent creator is fine; anything else is a real error
            if ex.errno != errno.EEXIST:
                raise
    if not os.path.exists(path):
        # create with owner-only permissions: the file will hold credentials
        with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
            pass

    # merge the new kubeconfig into the existing one via a temp file
    fd, temp_path = tempfile.mkstemp()
    additional_file = os.fdopen(fd, 'w+t')
    try:
        additional_file.write(kubeconfig)
        additional_file.flush()
        merge_kubernetes_configurations(path, temp_path, overwrite_existing)
    except yaml.YAMLError as ex:
        logger.warning('Failed to merge credentials to kube config file: %s', ex)
    finally:
        additional_file.close()
        os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'public_hostname', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
    """List OpenShift managed clusters, optionally scoped to one resource group."""
    if resource_group_name:
        clusters = client.list_by_resource_group(resource_group_name)
    else:
        clusters = client.list()
    # strip distracting null fields before returning
    return _remove_osa_nulls(list(clusters))
def openshift_create(cmd, client, resource_group_name, name,  # pylint: disable=too-many-locals
                     fqdn,
                     location=None,
                     compute_vm_size="Standard_D4s_v3",
                     compute_count=3,
                     aad_client_app_id=None,
                     aad_client_app_secret=None,
                     aad_tenant_id=None,
                     vnet_prefix="10.0.0.0/8",
                     subnet_prefix="10.0.0.0/24",
                     vnet_peer=None,
                     tags=None,
                     no_wait=False):
    """Create an OpenShift managed cluster (OSA) with compute, infra and
    master agent pools, an AAD identity provider keyed on *fqdn*, a default
    router profile and an optional peered virtual network.
    """
    if location is None:
        # default the cluster location to the resource group's location
        location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    agent_pool_profiles = []
    agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='compute',  # Must be 12 chars or less before ACS RP adds to it
        count=int(compute_count),
        vm_size=compute_vm_size,
        os_type="Linux",
        role=OpenShiftAgentPoolProfileRole.compute,
        subnet_cidr=subnet_prefix
    )

    agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='infra',  # Must be 12 chars or less before ACS RP adds to it
        count=int(2),
        vm_size="Standard_D4s_v3",
        os_type="Linux",
        role=OpenShiftAgentPoolProfileRole.infra,
        subnet_cidr=subnet_prefix
    )

    agent_pool_profiles.append(agent_node_pool_profile)
    agent_pool_profiles.append(agent_infra_pool_profile)

    agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='master',  # Must be 12 chars or less before ACS RP adds to it
        count=int(3),
        vm_size="Standard_D4s_v3",
        os_type="Linux",
        subnet_cidr=subnet_prefix
    )
    identity_providers = []

    # Validating if the cluster is not existing since we are not supporting the AAD rotation on OSA for now
    update_aad_secret = False
    try:
        client.get(resource_group_name, name)
    except CloudError:
        # cluster does not exist yet: allow creating/refreshing the AAD app secret
        update_aad_secret = True

    osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx,
                                       aad_client_app_id=aad_client_app_id,
                                       aad_client_app_secret=aad_client_app_secret,
                                       aad_tenant_id=aad_tenant_id, identifier=fqdn,
                                       name=name, update=update_aad_secret)
    identity_providers.append(
        OpenShiftManagedClusterIdentityProvider(
            name='Azure AD',
            provider=osa_aad_identity
        )
    )
    auth_profile = OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers)

    default_router_profile = OpenShiftRouterProfile(name='default')

    if vnet_peer is not None:
        # accept either a full resource ID or a bare vnet name in the same RG
        from azure.cli.core.commands.client_factory import get_subscription_id
        from msrestazure.tools import is_valid_resource_id, resource_id
        if not is_valid_resource_id(vnet_peer):
            vnet_peer = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace='Microsoft.Network', type='virtualNetwork',
                name=vnet_peer
            )

    network_profile = NetworkProfile(vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)

    osamc = OpenShiftManagedCluster(
        location=location, tags=tags,
        open_shift_version="v3.11",
        fqdn=fqdn,
        network_profile=network_profile,
        auth_profile=auth_profile,
        agent_pool_profiles=agent_pool_profiles,
        master_pool_profile=agent_master_pool_profile,
        router_profiles=[default_router_profile])

    try:
        # long_running_operation_timeout=300
        return sdk_no_wait(no_wait, client.create_or_update,
                           resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
    except CloudError as ex:
        raise ex
def openshift_show(cmd, client, resource_group_name, name):
    """Fetch a single OpenShift managed cluster and strip null fields from it."""
    managed_cluster = client.get(resource_group_name, name)
    cleaned = _remove_osa_nulls([managed_cluster])
    return cleaned[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
    """Resize the compute agent pool of an OpenShift managed cluster.

    Only a single agent pool is supported today, so profile 0 is the one resized.
    """
    instance = client.get(resource_group_name, name)
    # TODO: change this approach when we support multiple agent pools.
    instance.agent_pool_profiles[0].count = int(compute_count)  # pylint: disable=no-member
    # Null out the AAD profile and set the master pool name explicitly,
    # otherwise server-side validation rejects the update.
    instance.auth_profile = None
    instance.master_pool_profile.name = "master"
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
|
runtest-memorysize.py | from threading import Thread
import subprocess
# Simulation configuration shared by every trial in this run.
mapnumber=3  # which map file to load
antComAlgorithm=1  # ant communication algorithm under test
numAnts=25  # default ant count (overridden further below for this test)
mapMaxX =1600  # default map width in pixels (overridden below)
mapMaxY =320  # map height in pixels
TEST="memorysize"  # test name forwarded to the game executable
def run_one_test(trial, mapnumber, antComAlgorithm, numAnts, mapMaxX, mapMaxY, TEST, antPositionMemorySize):
    """Run a single simulation trial by launching the LOVE game in './'.

    Returns the exit status of the 'love' process.  Note that subprocess.call
    returns the return code, not the in-game task-finished time, despite the
    local variable's name being kept for continuity.
    """
    print("map {} algrithm {} test {}: running..".format(mapnumber, antComAlgorithm, trial))
    # BUG FIX: the original passed a list argv together with shell=True; on
    # POSIX that executes only 'love' and hands the remaining list items to
    # the shell itself, so the simulation never received its arguments.
    # Passing the list without a shell forwards every argument to 'love'.
    taskFinishedTime = subprocess.call([
        'love', './',
        str(mapnumber),
        str(antComAlgorithm),
        str(numAnts),
        str(mapMaxX),
        str(mapMaxY),
        TEST,
        str(trial),
        str(antPositionMemorySize),
    ])
    return taskFinishedTime
def run_thread(mapMaxX, mapMaxY, numAnts, antPositionMemorySize):
    """Sequentially run trials 1..10 using the module-level map settings."""
    for trial_index in range(1, 11):
        run_one_test(trial_index, mapnumber, antComAlgorithm, numAnts,
                     mapMaxX, mapMaxY, TEST, antPositionMemorySize)
# This test overrides the defaults above: a 640x320 map with 100 ants.
mapMaxX,mapMaxY=(640,320)
numAnts =100
# One thread per memory size under test.  NOTE(review): all three threads run
# concurrently, so trials for different memory sizes overlap in time and each
# spawns its own 'love' process -- confirm the game tolerates parallel runs.
for antPositionMemorySize in [10,15,20]:
    a_thread=Thread(target=run_thread,args=[mapMaxX,mapMaxY,numAnts,antPositionMemorySize])
    a_thread.start()
|
cleanup3db.py | import os
import sys
import time
import argparse
from glob import glob
import os.path as path
from datetime import datetime
from multiprocessing import Process, Queue
from sqlalchemy import create_engine
from configparser import ConfigParser
def config(filename='database.ini', section='cah_production'):
    """Read *section* from an INI file and return its keys/values as a dict.

    Raises:
        Exception: if *section* is missing from *filename*.
    """
    ini = ConfigParser()
    ini.read(filename)
    if not ini.has_section(section):
        raise Exception('Section {0} not found in the {1} file'.format(section, filename))
    return {key: value for key, value in ini.items(section)}
def confirm_delete(engine, uuid, jobset="en"):
    """Return how many finished jobs (status > 1) exist for *uuid*.

    A count of 1 means the job's result tarball is safe to delete.

    Args:
        engine: SQLAlchemy engine; its raw DBAPI connection is used directly.
        uuid: job identifier, taken from a result filename on disk.
        jobset: "intl" selects the international jobs table, anything else
            the default English table.
    """
    jobtable = "jobs_intl" if jobset == "intl" else "jobs"
    # SECURITY FIX: uuid originates from filenames on disk; bind it as a query
    # parameter (psycopg2 %s placeholder) instead of interpolating it into the
    # SQL text, which was injectable.  The table name stays in the f-string
    # because it is chosen from a fixed internal whitelist above.
    select_stmt1 = f"select count(*) from {jobtable} where status > 1 and jobid = %s"
    conn = engine.raw_connection()
    try:
        cur = conn.cursor()
        cur.execute(select_stmt1, (uuid,))
        jobcount = int(cur.fetchone()[0])
        conn.commit()
        cur.close()
    finally:
        # Always release the pooled connection, even if the query raises.
        conn.close()
    return jobcount
def worker(engine, q: Queue, jobset = "en"):
    """Drain job UUIDs from *q*, deleting each job's tarball once the database
    confirms exactly one finished row (confirm_delete == 1).

    Runs inside a multiprocessing.Process; several workers share one queue.
    """
    jobspath = '/mnt/md0/gpujobs/'
    if jobset == "intl":
        jobspath = '/mnt/md0/gpujobsml/'
    # NOTE(review): qsize() is racy across processes -- the queue can drain
    # between the check and get_nowait(); the broad except below absorbs the
    # resulting Empty and lets the loop terminate on the next check.
    while q.qsize()>0:
        try:
            uuid = q.get_nowait()
            if confirm_delete(engine, uuid, jobset)==1:
                file = f"{jobspath}{uuid}.tar.gz"
                # Skip files modified within the last hour so jobs still being
                # written are never removed out from under a producer.
                if os.path.isfile(file) and os.path.getmtime(file) < time.time() - 60*60:
                    os.remove(file)
                    print(f"deleted {file}")
        except Exception as e:
            print (f"worker raised error {e}")
            pass
# ---- command-line entry point ----------------------------------------------
parser = argparse.ArgumentParser(prog=sys.argv[0], usage='%(prog)s -s/--set')
parser.add_argument("-s","--set",action='append',help="Choose current set (en, nolang, intl)",required=False)
args = parser.parse_args()

# Database engine shared (via fork) with the worker processes.
params = config()
engine = create_engine(f'postgresql://{params["user"]}:{params["password"]}@{params["host"]}:5432/{params["database"]}',pool_size=25, max_overflow=50)

jobset = "en"
if args.set is not None:
    jobset = args.set[0]

# Glob pattern for the tarballs belonging to the selected job set.
jobspath = '/mnt/md0/gpujobs/*.tar.gz'
if jobset == "intl":
    jobspath = '/mnt/md0/gpujobsml/*.tar.gz'

now = datetime.now().strftime("%Y/%m/%d_%H:%M")
list_of_files = glob(jobspath)
frm = len(list_of_files)  # file count before cleanup, for the summary line
start = time.time()

q = Queue()
procs = []
for i in range(10):
    procs.append(Process(target=worker, args=[engine, q, jobset]))

# Queue every candidate uuid, skipping files modified in the last 5 minutes.
for file in list_of_files:
    if time.time() - path.getmtime(file) < 300:
        continue
    # NOTE(review): assumes paths look like /mnt/md0/gpujobs/<uuid>.tar.gz
    # (the uuid is the 5th slash-separated component) -- confirm layout.
    uuid = file.split("/")[4].split(".")[0]
    q.put(uuid)

time.sleep(20)  # let the queue fill/settle before workers start draining it
for proc in procs:
    proc.start()
for proc in procs:
    proc.join()

# Re-count what is left, record it, then print a one-line summary.
list_of_files = glob(jobspath)
end = len(list_of_files)
with open("jobs.txt","wt") as f:
    for file in list_of_files:
        f.write(file + "\n")
print(f"[{now}] from {frm} to {end} \"task executed in\" {round(time.time()-start,2)} sec")
|
main.py | import random
import requests
from threading import Thread
print("Loading nHentai Searcher...")
url = "https://nhentai.net/"
# A HEAD request both checks connectivity and reports the site's status code.
print("nHentai has Status", requests.head(url).status_code)
# Candidate codes generated per batch; cleared after every check pass.
codes = []
def start_searching():
    """Endlessly probe random 6-digit gallery codes and record the hits.

    Each pass generates 9 random codes, HEAD-requests every one, and writes
    codes answering 301 (a redirect, i.e. an existing gallery) to working.txt.
    Never returns; intended to run on a worker thread.
    """
    while True:
        for i in range(9):
            s = ""
            for k in range(6):
                j = random.randrange(10)
                s += str(j)
            codes.append(s)
        # BUG FIX: mode 'w+' truncated working.txt on every pass, discarding
        # all previously found codes; append instead, and use 'with' so the
        # handle is closed even if a request raises.
        with open('working.txt', 'a') as f:
            for code in codes:
                status = requests.head(url + "g/" + code).status_code
                if status == 301:
                    print(str(code))
                    f.write(str(code) + ", ")
        codes.clear()
# Run the searcher on a worker thread; join() blocks forever because the
# search loop never returns, which keeps the main thread alive.
thread = Thread(target = start_searching)
thread.start()
thread.join()
|
cli_command_helpers.py | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import sys
from glob import glob
from pathlib import Path
import click
import colorclass
import luigi
import pkg_resources
import json
import terminaltables
from jinja2 import Template
from luigi import LuigiStatusCode
from servicecatalog_puppet import asset_helpers, manifest_utils, aws, luigi_tasks_and_targets
from servicecatalog_puppet import constants
import logging
import os
from threading import Thread
import yaml
from betterboto import client as betterboto_client
from jinja2 import Environment, FileSystemLoader
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def get_regions(default_region=None):
    """Return the list of regions stored in the puppet SSM config parameter.

    Looks the parameter up in *default_region* when given, otherwise in the
    home region recorded at bootstrap time.
    """
    logger.info("getting regions, default_region: {}".format(default_region))
    lookup_region = default_region if default_region else get_home_region()
    with betterboto_client.ClientContextManager('ssm', region_name=lookup_region) as ssm:
        parameter = ssm.get_parameter(Name=constants.CONFIG_PARAM_NAME)
        config = yaml.safe_load(parameter.get('Parameter').get('Value'))
        return config.get('regions')
def get_home_region():
    """Return the home region recorded in SSM when the puppet was bootstrapped."""
    with betterboto_client.ClientContextManager('ssm') as ssm:
        parameter = ssm.get_parameter(Name=constants.HOME_REGION_PARAM_NAME)
        return parameter.get('Parameter').get('Value')
def get_org_iam_role_arn():
    """Return the configured organization IAM role ARN, or None when unset."""
    with betterboto_client.ClientContextManager('ssm', region_name=get_home_region()) as ssm:
        try:
            parameter = ssm.get_parameter(Name=constants.CONFIG_PARAM_NAME_ORG_IAM_ROLE_ARN)
        except ssm.exceptions.ParameterNotFound:
            # Absence of the parameter simply means org features are disabled.
            logger.info("No parameter set for: {}".format(constants.CONFIG_PARAM_NAME_ORG_IAM_ROLE_ARN))
            return None
        return yaml.safe_load(parameter.get('Parameter').get('Value'))
def generate_bucket_policies_for_shares(deployment_map, puppet_account_id):
    """Collect the accounts and organizations that need bucket access.

    The puppet account itself is excluded.  Deployments expanded from an
    organization contribute their organization id; directly targeted
    deployments contribute their account id.  Duplicates are dropped while
    preserving first-seen order.
    """
    shares = {'accounts': [], 'organizations': []}
    for account_id, deployment in deployment_map.items():
        if account_id == puppet_account_id:
            continue
        if deployment.get('expanded_from') is None:
            bucket, value = 'accounts', account_id
        else:
            bucket, value = 'organizations', deployment.get('organization')
        if value not in shares[bucket]:
            shares[bucket].append(value)
    return shares
def write_share_template(portfolio_use_by_account, region, host_account_id, sharing_policies):
    """Render shares.template.yaml.j2 for *region* into the templates tree.

    Writes <TEMPLATES>/shares/<region>/shares.template.yaml, creating the
    directory if needed.  Relies on the module-level jinja2 `env` (defined
    later in this file) and calls get_home_region(), which reads SSM.
    """
    output = os.path.sep.join([constants.TEMPLATES, 'shares', region])
    if not os.path.exists(output):
        os.makedirs(output)
    with open(os.sep.join([output, "shares.template.yaml"]), 'w') as f:
        f.write(
            env.get_template('shares.template.yaml.j2').render(
                portfolio_use_by_account=portfolio_use_by_account,
                host_account_id=host_account_id,
                HOME_REGION=get_home_region(),
                sharing_policies=sharing_policies,
            )
        )
def create_share_template(deployment_map, import_map, puppet_account_id):
    """Build the per-region share templates for every configured region.

    For each region: maps portfolio display names to ids via the Service
    Catalog API, works out which accounts use which portfolios (from both
    launch deployments and spoke-local-portfolio imports), then renders the
    region's share template via write_share_template().
    """
    logger.info("deployment_map: {}".format(deployment_map))
    ALL_REGIONS = get_regions()
    for region in ALL_REGIONS:
        logger.info("starting to build shares for region: {}".format(region))
        with betterboto_client.ClientContextManager('servicecatalog', region_name=region) as servicecatalog:
            portfolio_ids = {}
            response = servicecatalog.list_portfolios_single_page()
            for portfolio_detail in response.get('PortfolioDetails'):
                portfolio_ids[portfolio_detail.get('DisplayName')] = portfolio_detail.get('Id')
            logger.info("Portfolios in use in region: {}".format(portfolio_ids))
            portfolio_use_by_account = {}
            # Accounts deploying launches in this region.
            for account_id, launch_details in deployment_map.items():
                if portfolio_use_by_account.get(account_id) is None:
                    portfolio_use_by_account[account_id] = []
                for launch_id, launch in launch_details.get('launches').items():
                    p = portfolio_ids[launch.get('portfolio')]
                    if p not in portfolio_use_by_account[account_id]:
                        portfolio_use_by_account[account_id].append(p)
            # Accounts importing spoke-local portfolios in this region.
            for account_id, import_details in import_map.items():
                if portfolio_use_by_account.get(account_id) is None:
                    portfolio_use_by_account[account_id] = []
                for spoke_local_portfolio_id, spoke_local_portfolio in import_details.get('spoke-local-portfolios').items():
                    p = portfolio_ids[spoke_local_portfolio.get('portfolio')]
                    if p not in portfolio_use_by_account[account_id]:
                        portfolio_use_by_account[account_id].append(p)
            # The portfolio ARN's 5th ':'-separated component is the account id.
            # NOTE(review): assumes every region has at least one portfolio,
            # otherwise this indexes an empty list -- confirm.
            host_account_id = response.get('PortfolioDetails')[0].get('ARN').split(":")[4]
            sharing_policies = generate_bucket_policies_for_shares(deployment_map, puppet_account_id)
            write_share_template(portfolio_use_by_account, region, host_account_id, sharing_policies)
# Module-level jinja2 environment rooted at the packaged templates directory;
# used by write_share_template() above.
template_dir = asset_helpers.resolve_from_site_packages('templates')
env = Environment(
    loader=FileSystemLoader(template_dir),
    extensions=['jinja2.ext.do'],
)
def get_puppet_account_id():
    """Return the AWS account id of the caller's credentials via STS."""
    with betterboto_client.ClientContextManager('sts') as sts:
        identity = sts.get_caller_identity()
        return identity.get('Account')
def _resolve_regions(regions, account_details, all_regions):
    """Normalise a target's `regions` value into a list of region names.

    Accepts the keywords "enabled", "default_region" (or None) and "all",
    a single region name, or a list of region names; raises for anything
    unknown or not in *all_regions*.
    """
    if regions == "enabled":
        regions = account_details.get('regions_enabled')
    elif regions == "default_region" or regions is None:
        regions = account_details.get('default_region')
    elif regions == "all":
        regions = all_regions
    elif isinstance(regions, list):
        for region in regions:
            if region not in all_regions:
                raise Exception("Unknown region: {}".format(region))
    elif isinstance(regions, str) and regions in all_regions:
        pass
    else:
        raise Exception("Unknown regions: {}".format(regions))
    if isinstance(regions, str):
        regions = [regions]
    return regions


def set_regions_for_deployment_map(deployment_map, section):
    """Resolve each launch's target regions and (for launches) product ids.

    For every account/launch pair, expands the `regions` specification from
    its deploy_to accounts or tags entry into a concrete region list, then,
    when *section* is the launches section, resolves the Service Catalog
    product/version ids per region.  Returns the mutated deployment_map.

    The resolution logic was previously duplicated verbatim between the
    account_match and tag_match branches; it now lives in _resolve_regions.
    """
    logger.info('Starting to write the templates')
    ALL_REGIONS = get_regions()
    for account_id, account_details in deployment_map.items():
        for launch_name, launch_details in account_details.get(section).items():
            logger.info('Looking at account: {} and launch: {}'.format(account_id, launch_name))
            if launch_details.get('match') == 'account_match':
                logger.info('Setting regions for account matched')
                for a in launch_details.get('deploy_to').get('accounts'):
                    if a.get('account_id') == account_id:
                        launch_details['regions'] = _resolve_regions(
                            a.get('regions'), account_details, ALL_REGIONS
                        )
            elif launch_details.get('match') == 'tag_match':
                logger.info('Setting regions for tag matched')
                for t in launch_details.get('deploy_to').get('tags'):
                    if t.get('tag') in account_details.get('tags'):
                        launch_details['regions'] = _resolve_regions(
                            t.get('regions'), account_details, ALL_REGIONS
                        )
            assert launch_details.get('regions') is not None, "Launch {} has no regions set".format(launch_name)
            launch_details['regional_details'] = {}
            if section == constants.LAUNCHES:
                # TODO move this to provision product task so this if statement is no longer needed
                for region in launch_details.get('regions'):
                    logger.info('Starting region: {}'.format(region))
                    product_id, version_id = aws.get_provisioning_artifact_id_for(
                        launch_details.get('portfolio'),
                        launch_details.get('product'),
                        launch_details.get('version'),
                        account_id,
                        region
                    )
                    launch_details['regional_details'][region] = {
                        'product_id': product_id,
                        'version_id': version_id,
                    }
    return deployment_map
def get_parameters_for_launch(required_parameters, deployment_map, manifest, launch_details, account_id, status):
    """Split the launch's required parameters into regular and SSM-backed lists.

    Precedence per parameter (first match wins); at each level an ssm-backed
    value beats a plain default:
      1. account-level ssm / default   (only when status is PROVISIONED)
      2. launch-level ssm / default
      3. manifest-level ssm / default  (only when status is PROVISIONED)

    Returns:
        (regular_parameters, ssm_parameters): regular entries are
        {'name', 'value'} dicts; ssm entries come from
        get_ssm_config_for_parameter().
    """
    regular_parameters = []
    ssm_parameters = []
    for required_parameter_name in required_parameters.keys():
        account_ssm_param = deployment_map.get(account_id).get('parameters', {}).get(required_parameter_name, {}).get(
            'ssm')
        account_regular_param = deployment_map.get(account_id).get('parameters', {}).get(required_parameter_name,
                                                                                         {}).get('default')
        launch_params = launch_details.get('parameters', {})
        launch_ssm_param = launch_params.get(required_parameter_name, {}).get('ssm')
        launch_regular_param = launch_params.get(required_parameter_name, {}).get('default')
        manifest_params = manifest.get('parameters', {})
        manifest_ssm_param = manifest_params.get(required_parameter_name, {}).get('ssm')
        manifest_regular_param = manifest_params.get(required_parameter_name, {}).get('default')
        if status == constants.PROVISIONED and account_ssm_param:
            ssm_parameters.append(
                get_ssm_config_for_parameter(account_ssm_param, required_parameter_name)
            )
        elif status == constants.PROVISIONED and account_regular_param:
            # NOTE(review): only account-level defaults are stringified here;
            # launch/manifest defaults are passed through as-is -- confirm
            # that difference is intentional.
            regular_parameters.append({
                'name': required_parameter_name,
                'value': str(account_regular_param),
            })
        elif launch_ssm_param:
            ssm_parameters.append(
                get_ssm_config_for_parameter(launch_ssm_param, required_parameter_name)
            )
        elif launch_regular_param:
            regular_parameters.append({
                'name': required_parameter_name,
                'value': launch_regular_param,
            })
        elif status == constants.PROVISIONED and manifest_ssm_param:
            ssm_parameters.append(
                get_ssm_config_for_parameter(manifest_ssm_param, required_parameter_name)
            )
        elif status == constants.PROVISIONED and manifest_regular_param:
            regular_parameters.append({
                'name': required_parameter_name,
                'value': manifest_regular_param,
            })
    return regular_parameters, ssm_parameters
def get_ssm_config_for_parameter(account_ssm_param, required_parameter_name):
    """Build the ssm-parameter input dict, carrying `region` through when given."""
    result = {
        'name': account_ssm_param.get('name'),
        'parameter_name': required_parameter_name,
    }
    region = account_ssm_param.get('region')
    if region is not None:
        result['region'] = region
    return result
def wire_dependencies(all_tasks):
    """Resolve each task's `depends_on` launch names into task references.

    Every task whose launch name matches a dependency entry is appended to the
    dependent task's `dependencies` list; `depends_on` is then removed and the
    tasks are returned as a list (in the input's insertion order).

    The original implementation rescanned all_tasks for every dependency
    (O(n^2)); tasks are now indexed by launch name in a single pass.  Several
    tasks (one per account/region) can share a launch name, so the index maps
    each name to a list.
    """
    tasks_by_launch_name = {}
    for task in all_tasks.values():
        tasks_by_launch_name.setdefault(task.get('launch_name'), []).append(task)
    tasks_to_run = []
    for task in all_tasks.values():
        for dependency in task.get('depends_on', []):
            for dependency_task in tasks_by_launch_name.get(dependency, []):
                task.get('dependencies').append(dependency_task)
        # pop (not del) so a task that never had depends_on cannot KeyError.
        task.pop('depends_on', None)
        tasks_to_run.append(task)
    return tasks_to_run
def get_puppet_version():
    """Return the installed version string of the aws-service-catalog-puppet package."""
    distribution = pkg_resources.require("aws-service-catalog-puppet")[0]
    return distribution.version
def _do_bootstrap_org_master(puppet_account_id, cloudformation, puppet_version):
    """Create/update the org-master bootstrap stack and return the expand-role ARN.

    Renders the packaged `<BOOTSTRAP_STACK_NAME>-org-master` template with the
    running puppet version, applies it via CloudFormation, then reads the
    PUPPET_ORG_ROLE_FOR_EXPANDS_ARN output from the resulting stack.

    Raises:
        Exception: if describe_stacks returns other than exactly one stack, or
            the expected output key is absent.
    """
    logger.info('Starting bootstrap of org master')
    stack_name = "{}-org-master".format(constants.BOOTSTRAP_STACK_NAME)
    template = asset_helpers.read_from_site_packages('{}.template.yaml'.format(stack_name))
    template = Template(template).render(VERSION=puppet_version)
    args = {
        'StackName': stack_name,
        'TemplateBody': template,
        'Capabilities': ['CAPABILITY_NAMED_IAM'],
        'Parameters': [
            {
                'ParameterKey': 'PuppetAccountId',
                'ParameterValue': str(puppet_account_id),
            }, {
                'ParameterKey': 'Version',
                'ParameterValue': puppet_version,
                'UsePreviousValue': False,
            },
        ],
        'Tags':[
            {
                "Key": "ServiceCatalogPuppet:Actor",
                "Value": "Framework",
            }
        ]
    }
    cloudformation.create_or_update(**args)
    # Look the stack back up to read its outputs.
    response = cloudformation.describe_stacks(StackName=stack_name)
    if len(response.get('Stacks')) != 1:
        raise Exception("Expected there to be only one {} stack".format(stack_name))
    stack = response.get('Stacks')[0]
    for output in stack.get('Outputs'):
        if output.get('OutputKey') == constants.PUPPET_ORG_ROLE_FOR_EXPANDS_ARN:
            logger.info('Finished bootstrap of org-master')
            return output.get("OutputValue")
    raise Exception(
        "Could not find output: {} in stack: {}".format(constants.PUPPET_ORG_ROLE_FOR_EXPANDS_ARN, stack_name))
def _do_bootstrap_spoke(puppet_account_id, cloudformation, puppet_version):
    """Create/update the spoke bootstrap stack in the current account.

    Renders the packaged `<BOOTSTRAP_STACK_NAME>-spoke` template with the
    running puppet version and the hub (puppet) account id, then applies it
    via CloudFormation.  Returns None.
    """
    logger.info('Starting bootstrap of spoke')
    template = asset_helpers.read_from_site_packages('{}-spoke.template.yaml'.format(constants.BOOTSTRAP_STACK_NAME))
    template = Template(template).render(VERSION=puppet_version)
    args = {
        'StackName': "{}-spoke".format(constants.BOOTSTRAP_STACK_NAME),
        'TemplateBody': template,
        'Capabilities': ['CAPABILITY_NAMED_IAM'],
        'Parameters': [
            {
                'ParameterKey': 'PuppetAccountId',
                'ParameterValue': str(puppet_account_id),
            }, {
                'ParameterKey': 'Version',
                'ParameterValue': puppet_version,
                'UsePreviousValue': False,
            },
        ],
        'Tags':[
            {
                "Key": "ServiceCatalogPuppet:Actor",
                "Value": "Framework",
            }
        ]
    }
    cloudformation.create_or_update(**args)
    logger.info('Finished bootstrap of spoke')
def _do_bootstrap(puppet_version):
    """Bootstrap the puppet (hub) account itself.

    1. Creates the regional bootstrap stack in every configured region,
       one thread per region, in parallel.
    2. Creates the main bootstrap stack in the default region.
    3. Prints the git clone command for the newly created CodeCommit repo.
    """
    click.echo('Starting bootstrap')
    ALL_REGIONS = get_regions(os.environ.get("AWS_DEFAULT_REGION"))
    with betterboto_client.MultiRegionClientContextManager('cloudformation', ALL_REGIONS) as clients:
        click.echo('Creating {}-regional'.format(constants.BOOTSTRAP_STACK_NAME))
        threads = []
        template = asset_helpers.read_from_site_packages(
            '{}.template.yaml'.format('{}-regional'.format(constants.BOOTSTRAP_STACK_NAME)))
        template = Template(template).render(VERSION=puppet_version)
        args = {
            'StackName': '{}-regional'.format(constants.BOOTSTRAP_STACK_NAME),
            'TemplateBody': template,
            'Capabilities': ['CAPABILITY_IAM'],
            'Parameters': [
                {
                    'ParameterKey': 'Version',
                    'ParameterValue': puppet_version,
                    'UsePreviousValue': False,
                },
                {
                    'ParameterKey': 'DefaultRegionValue',
                    'ParameterValue': os.environ.get('AWS_DEFAULT_REGION'),
                    'UsePreviousValue': False,
                },
            ],
            'Tags': [
                {
                    "Key": "ServiceCatalogPuppet:Actor",
                    "Value": "Framework",
                }
            ]
        }
        # Apply the same regional stack in every region concurrently.
        for client_region, client in clients.items():
            process = Thread(name=client_region, target=client.create_or_update, kwargs=args)
            process.start()
            threads.append(process)
        for process in threads:
            process.join()
        click.echo('Finished creating {}-regional'.format(constants.BOOTSTRAP_STACK_NAME))
    with betterboto_client.ClientContextManager('cloudformation') as cloudformation:
        click.echo('Creating {}'.format(constants.BOOTSTRAP_STACK_NAME))
        template = asset_helpers.read_from_site_packages('{}.template.yaml'.format(constants.BOOTSTRAP_STACK_NAME))
        template = Template(template).render(VERSION=puppet_version, ALL_REGIONS=ALL_REGIONS)
        args = {
            'StackName': constants.BOOTSTRAP_STACK_NAME,
            'TemplateBody': template,
            'Capabilities': ['CAPABILITY_NAMED_IAM'],
            'Parameters': [
                {
                    'ParameterKey': 'Version',
                    'ParameterValue': puppet_version,
                    'UsePreviousValue': False,
                },
                {
                    'ParameterKey': 'OrgIamRoleArn',
                    'ParameterValue': str(get_org_iam_role_arn()),
                    'UsePreviousValue': False,
                },
            ],
        }
        cloudformation.create_or_update(**args)
    click.echo('Finished creating {}.'.format(constants.BOOTSTRAP_STACK_NAME))
    # The bootstrap stack creates a CodeCommit repo; tell the operator how to
    # clone it with the codecommit credential helper.
    with betterboto_client.ClientContextManager('codecommit') as codecommit:
        response = codecommit.get_repository(repositoryName=constants.SERVICE_CATALOG_PUPPET_REPO_NAME)
        clone_url = response.get('repositoryMetadata').get('cloneUrlHttp')
        clone_command = "git clone --config 'credential.helper=!aws codecommit credential-helper $@' " \
                        "--config 'credential.UseHttpPath=true' {}".format(clone_url)
        click.echo(
            'You need to clone your newly created repo now and will then need to seed it: \n{}'.format(
                clone_command
            )
        )
def deploy_spoke_local_portfolios(manifest, launch_tasks):
    """Build the luigi task list for the spoke-local-portfolios section.

    For every account/portfolio/region combination this schedules, in order:
    portfolio creation, association creation, product import, and (when launch
    constraints are declared) launch-role constraint creation.  Dependencies
    declared via `depends_on` are resolved against *launch_tasks* by launch
    name.
    """
    section = constants.SPOKE_LOCAL_PORTFOLIOS
    deployment_map = manifest_utils.build_deployment_map(manifest, section)
    deployment_map = set_regions_for_deployment_map(deployment_map, section)
    tasks_to_run = []
    puppet_account_id = get_puppet_account_id()
    for account_id, deployments_for_account in deployment_map.items():
        for launch_name, launch_details in deployments_for_account.get(section).items():
            for region_name in launch_details.get('regions'):
                # NOTE(review): assumes depends_on is always a list here
                # (build_deployment_map presumably defaults it) -- confirm.
                depends_on = launch_details.get('depends_on')
                dependencies = []
                for dependency in depends_on:
                    for task_uid, task in launch_tasks.items():
                        if task.get('launch_name') == dependency:
                            dependencies.append(task)
                hub_portfolio = aws.get_portfolio_for(
                    launch_details.get('portfolio'), puppet_account_id, region_name
                )
                create_spoke_local_portfolio_task_params = {
                    'account_id': account_id,
                    'region': region_name,
                    'portfolio': launch_details.get('portfolio'),
                    'provider_name': hub_portfolio.get('ProviderName'),
                    'description': hub_portfolio.get('Description'),
                }
                create_spoke_local_portfolio_task = luigi_tasks_and_targets.CreateSpokeLocalPortfolioTask(
                    **create_spoke_local_portfolio_task_params
                )
                tasks_to_run.append(create_spoke_local_portfolio_task)
                # Shared identifying params reused by the downstream tasks.
                create_spoke_local_portfolio_task_as_dependency_params = {
                    'account_id': account_id,
                    'region': region_name,
                    'portfolio': launch_details.get('portfolio'),
                }
                create_associations_task_params = {
                    'associations': launch_details.get('associations'),
                    'puppet_account_id': puppet_account_id,
                }
                create_associations_for_portfolio_task = luigi_tasks_and_targets.CreateAssociationsForPortfolioTask(
                    **create_spoke_local_portfolio_task_as_dependency_params,
                    **create_associations_task_params,
                    dependencies=dependencies,
                )
                tasks_to_run.append(create_associations_for_portfolio_task)
                import_into_spoke_local_portfolio_task_params = {
                    'hub_portfolio_id': hub_portfolio.get('Id')
                }
                import_into_spoke_local_portfolio_task = luigi_tasks_and_targets.ImportIntoSpokeLocalPortfolioTask(
                    **create_spoke_local_portfolio_task_as_dependency_params,
                    **import_into_spoke_local_portfolio_task_params,
                )
                tasks_to_run.append(import_into_spoke_local_portfolio_task)
                # Launch constraints are optional; only schedule the task when
                # the manifest declares at least one.
                launch_constraints = launch_details.get('constraints', {}).get('launch', [])
                if len(launch_constraints) > 0:
                    create_launch_role_constraints_for_portfolio_task_params = {
                        'launch_constraints': launch_constraints,
                        'puppet_account_id': puppet_account_id,
                    }
                    create_launch_role_constraints_for_portfolio = luigi_tasks_and_targets.CreateLaunchRoleConstraintsForPortfolio(
                        **create_spoke_local_portfolio_task_as_dependency_params,
                        **import_into_spoke_local_portfolio_task_params,
                        **create_launch_role_constraints_for_portfolio_task_params,
                        dependencies=dependencies,
                    )
                    tasks_to_run.append(create_launch_role_constraints_for_portfolio)
    return tasks_to_run
def deploy_launches(manifest):
    """Build and log the full task plan for the manifest's launches section."""
    section = constants.LAUNCHES
    deployment_map = manifest_utils.build_deployment_map(manifest, section)
    deployment_map = set_regions_for_deployment_map(deployment_map, section)
    all_tasks = deploy_launches_task_builder(
        deployment_map, manifest, get_puppet_account_id(), section
    )
    logger.info(f"Deployment plan: {json.dumps(all_tasks)}")
    return all_tasks
def deploy_launches_task_builder(deployment_map, manifest, puppet_account_id, section):
    """Aggregate launch tasks for every account/launch/region combination."""
    all_tasks = {}
    for account_id, deployments_for_account in deployment_map.items():
        for launch_name, launch_details in deployments_for_account.get(section).items():
            regional = launch_details.get('regional_details')
            for region_name, regional_details in regional.items():
                all_tasks.update(
                    deploy_launches_task_builder_for_account_launch_region(
                        account_id,
                        deployment_map,
                        launch_details,
                        launch_name,
                        manifest,
                        puppet_account_id,
                        region_name,
                        regional_details,
                    )
                )
    return all_tasks
def deploy_launches_task_builder_for_account_launch_region(
        account_id, deployment_map, launch_details, launch_name, manifest,
        puppet_account_id, region_name, regional_details
):
    """Build the task dict for one account/launch/region combination.

    Assumes the spoke's PuppetRole, asks Service Catalog which parameters the
    provisioning artifact requires, resolves their values via
    get_parameters_for_launch(), and returns a single-entry dict keyed
    "<account>-<region>-<launch_name>".
    """
    all_tasks = {}
    product_id = regional_details.get('product_id')
    required_parameters = {}
    role = f"arn:aws:iam::{account_id}:role/servicecatalog-puppet/PuppetRole"
    with betterboto_client.CrossAccountClientContextManager(
            'servicecatalog', role, f'sc-{account_id}-{region_name}', region_name=region_name
    ) as service_catalog:
        response = service_catalog.describe_provisioning_parameters(
            ProductId=product_id,
            ProvisioningArtifactId=regional_details.get('version_id'),
            PathId=aws.get_path_for_product(service_catalog, product_id),
        )
        # Only the parameter names matter; values are resolved below.
        for provisioning_artifact_parameters in response.get('ProvisioningArtifactParameters', []):
            parameter_key = provisioning_artifact_parameters.get('ParameterKey')
            required_parameters[parameter_key] = True
        regular_parameters, ssm_parameters = get_parameters_for_launch(
            required_parameters,
            deployment_map,
            manifest,
            launch_details,
            account_id,
            launch_details.get('status', constants.PROVISIONED),
        )
    logger.info(f"Found a new launch: {launch_name}")
    task = {
        'launch_name': launch_name,
        'portfolio': launch_details.get('portfolio'),
        'product': launch_details.get('product'),
        'version': launch_details.get('version'),
        'product_id': regional_details.get('product_id'),
        'version_id': regional_details.get('version_id'),
        'account_id': account_id,
        'region': region_name,
        'puppet_account_id': puppet_account_id,
        'parameters': regular_parameters,
        'ssm_param_inputs': ssm_parameters,
        'depends_on': launch_details.get('depends_on', []),
        "status": launch_details.get('status', constants.PROVISIONED),
        "worker_timeout": launch_details.get('timeoutInSeconds', constants.DEFAULT_TIMEOUT),
        "ssm_param_outputs": launch_details.get('outputs', {}).get('ssm', []),
        'dependencies': [],
    }
    # Launch-level retry_count overrides the manifest-level one.
    if manifest.get('configuration'):
        if manifest.get('configuration').get('retry_count'):
            task['retry_count'] = manifest.get('configuration').get('retry_count')
    if launch_details.get('configuration'):
        if launch_details.get('configuration').get('retry_count'):
            task['retry_count'] = launch_details.get('configuration').get('retry_count')
    all_tasks[f"{task.get('account_id')}-{task.get('region')}-{task.get('launch_name')}"] = task
    return all_tasks
def run_tasks(tasks_to_run):
    """Execute the provisioning tasks with luigi and report the results.

    Prints a processing-time summary table and any recorded failures, then
    exits the process with a status code derived from the luigi run result.
    """
    # `results_type` instead of `type` -- the original shadowed the builtin.
    for results_type in ["failure", "success", "timeout", "process_failure", "processing_time", "broken_task", ]:
        # exist_ok so a rerun in the same workspace does not crash here.
        os.makedirs(Path(constants.RESULTS_DIRECTORY) / results_type, exist_ok=True)
    run_result = luigi.build(
        tasks_to_run,
        local_scheduler=True,
        detailed_summary=True,
        workers=10,
        log_level='INFO',
    )
    table_data = [
        ['Action', 'Launch', 'Account', 'Region', 'Portfolio', 'Product', 'Version', 'Duration'],
    ]
    table = terminaltables.AsciiTable(table_data)
    for filename in glob('results/processing_time/*.json'):
        # Path.read_text closes the file; the original leaked the handle.
        result = json.loads(Path(filename).read_text())
        params = result.get('params_for_results')
        table_data.append([
            result.get('task_type'),
            params.get('launch_name'),
            params.get('account_id'),
            params.get('region'),
            params.get('portfolio'),
            params.get('product'),
            params.get('version'),
            result.get('duration'),
        ])
    click.echo(table.table)
    for filename in glob('results/failure/*.json'):
        result = json.loads(Path(filename).read_text())
        click.echo(colorclass.Color("{red}" + result.get('task_type') + " failed{/red}"))
        click.echo(f"{yaml.safe_dump({'parameters':result.get('task_params')})}")
        click.echo("\n".join(result.get('exception_stack_trace')))
        click.echo('')
    exit_status_codes = {
        LuigiStatusCode.SUCCESS: 0,
        LuigiStatusCode.SUCCESS_WITH_RETRY: 0,
        LuigiStatusCode.FAILED: 1,
        LuigiStatusCode.FAILED_AND_SCHEDULING_FAILED: 2,
        LuigiStatusCode.SCHEDULING_FAILED: 3,
        LuigiStatusCode.NOT_RUN: 4,
        LuigiStatusCode.MISSING_EXT: 5,
    }
    sys.exit(exit_status_codes.get(run_result.status))
def run_tasks_for_dry_run(tasks_to_run):
    """Execute the dry-run tasks with luigi and print what would change.

    Prints any failures, then a table combining terminate and provision
    dry-run outputs, and exits with a status code derived from the luigi
    run result.
    """
    # `results_type` instead of `type` -- the original shadowed the builtin.
    for results_type in ["failure", "success", "timeout", "process_failure", "processing_time", "broken_task", ]:
        # exist_ok so a rerun in the same workspace does not crash here.
        os.makedirs(Path(constants.RESULTS_DIRECTORY) / results_type, exist_ok=True)
    run_result = luigi.build(
        tasks_to_run,
        local_scheduler=True,
        detailed_summary=True,
        workers=10,
        log_level='INFO',
    )
    for filename in glob('results/failure/*.json'):
        # Path.read_text closes the file; the original leaked the handle.
        result = json.loads(Path(filename).read_text())
        click.echo(colorclass.Color("{red}" + result.get('task_type') + " failed{/red}"))
        click.echo(f"{yaml.safe_dump({'parameters':result.get('task_params')})}")
        click.echo("\n".join(result.get('exception_stack_trace')))
        click.echo('')
    exit_status_codes = {
        LuigiStatusCode.SUCCESS: 0,
        LuigiStatusCode.SUCCESS_WITH_RETRY: 0,
        LuigiStatusCode.FAILED: 1,
        LuigiStatusCode.FAILED_AND_SCHEDULING_FAILED: 2,
        LuigiStatusCode.SCHEDULING_FAILED: 3,
        LuigiStatusCode.NOT_RUN: 4,
        LuigiStatusCode.MISSING_EXT: 5,
    }
    click.echo("Dry run results")
    table_data = [
        ['Result', 'Launch', 'Account', 'Region', 'Current Version', 'New Version', 'Notes'],
    ]
    table = terminaltables.AsciiTable(table_data)
    # Both terminate and provision dry-run outputs share one row shape.
    for task_output_dir in ('TerminateProductDryRunTask', 'ProvisionProductDryRunTask'):
        for filename in glob(f'output/{task_output_dir}/*.json'):
            result = json.loads(Path(filename).read_text())
            table_data.append([
                result.get('effect'),
                result.get('params').get('launch_name'),
                result.get('params').get('account_id'),
                result.get('params').get('region'),
                result.get('current_version'),
                result.get('new_version'),
                result.get('notes'),
            ])
    click.echo(table.table)
    sys.exit(exit_status_codes.get(run_result.status))
|
client.py | import sys
import socket
import threading
import struct
import collections
import configparser
class Client:
    """TCP client exchanging length-prefixed text messages with a server.

    Wire format: a 4-byte int length header packed with struct "i"
    (NOTE(review): native endianness/size -- both peers must run on the same
    platform convention; confirm the server packs the same way), followed by
    that many bytes of UTF-8 text.  Received messages accumulate in
    `self.data` (a deque, safe for single-producer/single-consumer use); a
    background thread runs __receive until "exit" arrives or the socket errors.
    """
    def __init__(self):
        self.message = None  # most recently received (possibly partial) message
        self.__init_socket()
        # Connection settings come from client_settings.ini, [client] section.
        parser = configparser.ConfigParser()
        parser.read("client_settings.ini")
        self.port = int(parser.get("client", "port"))
        self.host = parser.get("client", "host")
        self.data = collections.deque()
        self.__connect()
        # Reader thread; NOTE(review): non-daemon, so it keeps the process
        # alive until the connection ends.
        t1 = threading.Thread(target=self.__receive)
        t1.start()
    def __init_socket(self):
        """Create the TCP socket, exiting the process on failure."""
        try:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            print("Created Socket")
        except socket.error:
            print("Failed to create socket")
            sys.exit()
    def __connect(self):
        """Connect to the configured host/port (raises on refusal)."""
        self.sock.connect((self.host, self.port))
        print("Connected")
    def send(self, data):
        """Send *data* (str) with its 4-byte length prefix; close on error.

        NOTE(review): socket.send may transmit fewer bytes than given; a
        sendall would guarantee delivery of the whole frame -- confirm intent.
        """
        try:
            self.sock.send(struct.pack("i", len(data)) + data.encode())
        except socket.error:
            self.sock.close()
    def __receive(self):
        """Read length-prefixed messages until "exit" or a socket error.

        NOTE(review): if the peer closes cleanly, recv returns b'' and
        struct.unpack raises struct.error, which is not caught here -- verify
        that is the intended shutdown path.
        """
        while True:
            try:
                self.message = ""
                size = struct.unpack("i", self.sock.recv(struct.calcsize("i")))[0]
                # Keep reading until the full payload announced by the header
                # has arrived (recv may return partial chunks).
                while len(self.message) < size:
                    message_chunk = self.sock.recv(size - len(self.message)).decode()
                    self.message += message_chunk
                if self.message is not None and len(self.message) > 0:
                    self.data.append(self.message)
                if self.message == "exit":
                    self.sock.close()
                    return
            except socket.error:
                self.sock.close()
|
__init__.py | import requests
import datetime
import dateutil
import logging
import boto3
import gzip
import io
import csv
import time
import os
import sys
import json
import hashlib
import hmac
import base64
import re
from threading import Thread
from io import StringIO
import azure.functions as func
# All configuration comes from the Function App's application settings.
sentinel_customer_id = os.environ.get('WorkspaceID')
sentinel_shared_key = os.environ.get('WorkspaceKey')
aws_access_key_id = os.environ.get('AWSAccessKeyId')
aws_secret_acces_key = os.environ.get('AWSSecretAccessKey')
aws_s3_bucket = os.environ.get('S3Bucket')
aws_region_name = os.environ.get('AWSRegionName')
s3_folder = os.environ.get('S3Folder')
sentinel_log_type = os.environ.get('LogAnalyticsCustomLogName')
fresh_event_timestamp = os.environ.get('FreshEventTimeStamp')
logAnalyticsUri = os.environ.get('LAURI')
# Default to the workspace's public ODS endpoint when no explicit URI is set,
# then validate whatever we ended up with against the expected host pattern.
if ((logAnalyticsUri in (None, '') or str(logAnalyticsUri).isspace())):
    logAnalyticsUri = 'https://' + sentinel_customer_id + '.ods.opinsights.azure.com'
pattern = r'https:\/\/([\w\-]+)\.ods\.opinsights\.azure.([a-zA-Z\.]+)$'
match = re.match(pattern,str(logAnalyticsUri))
if(not match):
    raise Exception("Invalid Log Analytics Uri.")
# Boolean Values (NOTE(review): read as strings, never converted to bool and
# unused in the visible code -- confirm they are consumed elsewhere).
isCoreFieldsAllTable = os.environ.get('CoreFieldsAllTable')
isSplitAWSResourceTypes = os.environ.get('SplitAWSResourceTypes')
# TODO: Read Collection schedule from environment variable as CRON expression; This is also Azure Function Trigger Schedule
collection_schedule = int(fresh_event_timestamp)
def main(mytimer: func.TimerRequest) -> None:
    """Azure Function timer entry point.

    Lists S3 objects modified in the collection window, parses each file
    into events, and forwards them to Azure Sentinel via the Log Analytics
    Data Collector API, logging success/failure counts.
    """
    if mytimer.past_due:
        logging.info('The timer is past due!')
    logging.info('Starting program')
    cli = S3Client(aws_access_key_id, aws_secret_acces_key, aws_region_name, aws_s3_bucket)
    ts_from, ts_to = cli.get_time_interval()
    print("From:{0}".format(ts_from))
    print("To:{0}".format(ts_to))
    logging.info('Searching files last modified from {} to {}'.format(ts_from, ts_to))
    obj_list = cli.get_files_list(ts_from, ts_to)
    failed_sent_events_number = 0
    successfull_sent_events_number = 0
    # Flatten every parsed file into a single list of non-empty events.
    coreEvents = []
    for obj in obj_list:
        log_events = cli.process_obj(obj)
        for log in log_events:
            if len(log) > 0:
                coreEvents.append(log)
    file_events = 0
    t0 = time.time()  # NOTE(review): t0 is never used afterwards.
    logging.info('Total number of files is {}'.format(len(coreEvents)))
    # NOTE(review): a fresh connector is built per event, so the
    # queue_size=10000 batching never kicks in - each event is flushed
    # individually by the `with` exit.  Confirm whether this is intended.
    for event in coreEvents:
        sentinel = AzureSentinelConnector(logAnalyticsUri, sentinel_customer_id, sentinel_shared_key, sentinel_log_type, queue_size=10000, bulks_number=10)
        with sentinel:
            sentinel.send(event)
            file_events += 1
        failed_sent_events_number += sentinel.failed_sent_events_number
        successfull_sent_events_number += sentinel.successfull_sent_events_number
    if failed_sent_events_number:
        logging.info('{} AWS S3 files have not been sent'.format(failed_sent_events_number))
    if successfull_sent_events_number:
        logging.info('Program finished. {} AWS S3 files have been sent.'.format(successfull_sent_events_number))
    if successfull_sent_events_number == 0 and failed_sent_events_number == 0:
        logging.info('No Fresh AWS S3 files')
def convert_list_to_csv_line(ls):
    """Serialize *ls* as one CSV record (RFC-4180 quoting, CRLF terminator)."""
    buffer = StringIO()
    csv.writer(buffer).writerow(ls)
    return buffer.getvalue()
class S3Client:
    """Helper around boto3 for listing, downloading and parsing log files
    (gzipped/plain CSV, JSON and JSONL) from a single S3 bucket/prefix."""
    def __init__(self, aws_access_key_id, aws_secret_acces_key, aws_region_name, aws_s3_bucket):
        self.aws_access_key_id = aws_access_key_id
        self.aws_secret_acces_key = aws_secret_acces_key
        self.aws_region_name = aws_region_name
        # 'bucket/some/prefix' input is split into bucket name + key prefix.
        self.aws_s3_bucket = self._get_s3_bucket_name(aws_s3_bucket)
        self.aws_s3_prefix = self._get_s3_prefix(aws_s3_bucket)
        self.total_events = 0
        self.input_date_format = '%Y-%m-%d %H:%M:%S'
        self.output_date_format = '%Y-%m-%dT%H:%M:%SZ'
        self.s3 = boto3.client(
            's3',
            aws_access_key_id=self.aws_access_key_id,
            aws_secret_access_key=self.aws_secret_acces_key,
            region_name=self.aws_region_name
        )
    def _get_aws_account_id(self):
        # Resolve the account id of the supplied credentials via STS.
        self.sts = boto3.client(
            "sts",
            aws_access_key_id=self.aws_access_key_id,
            aws_secret_access_key=self.aws_secret_acces_key,
            region_name=self.aws_region_name
        )
        return self.sts.get_caller_identity()["Account"]
    def _get_s3_bucket_name(self, aws_s3_bucket):
        # First path token of the normalized string is the bucket name.
        aws_s3_bucket = self._normalize_aws_s3_bucket_string(aws_s3_bucket)
        tokens = aws_s3_bucket.split('/')
        aws_s3_bucket = tokens[0]
        return aws_s3_bucket
    def _get_s3_prefix(self, aws_s3_bucket):
        # Everything after the bucket name becomes the key prefix ('' if none),
        # normalized to end with '/'.
        aws_s3_bucket = self._normalize_aws_s3_bucket_string(aws_s3_bucket)
        tokens = aws_s3_bucket.split('/')
        if len(tokens) > 1:
            prefix = '/'.join(tokens[1:]) + '/'
        else:
            prefix = ''
        return prefix
    def _normalize_aws_s3_bucket_string(self, aws_s3_bucket):
        # Strip the s3:// scheme and any leading/trailing slashes.
        aws_s3_bucket = aws_s3_bucket.strip()
        aws_s3_bucket = aws_s3_bucket.replace('s3://', '')
        if aws_s3_bucket.startswith('/'):
            aws_s3_bucket = aws_s3_bucket[1:]
        if aws_s3_bucket.endswith('/'):
            aws_s3_bucket = aws_s3_bucket[:-1]
        return aws_s3_bucket
    def get_time_interval(self):
        """Return the (from, to) UTC collection window: the last
        `collection_schedule` minutes, ending one minute ago, truncated to
        whole minutes and made timezone-aware (to compare with S3's
        LastModified timestamps)."""
        ts_from = datetime.datetime.utcnow() - datetime.timedelta(minutes=collection_schedule + 1)
        ts_to = datetime.datetime.utcnow() - datetime.timedelta(minutes=1)
        ts_from = ts_from.replace(tzinfo=datetime.timezone.utc, second=0, microsecond=0)
        ts_to = ts_to.replace(tzinfo=datetime.timezone.utc, second=0, microsecond=0)
        return ts_from, ts_to
    def _make_objects_list_request(self, marker='', prefix=''):
        """Fetch one page of list_objects; raises on any non-200 response."""
        response = self.s3.list_objects(
            Bucket=self.aws_s3_bucket,
            Marker=marker,
            Prefix=prefix
        )
        try:
            response_code = response.get('ResponseMetadata', {}).get('HTTPStatusCode', None)
            if response_code == 200:
                return response
            else:
                raise Exception('HTTP Response Code - {}'.format(response_code))
        except Exception as err:
            logging.error('Error while getting objects list - {}'.format(err))
            raise Exception
    def get_files_list(self, ts_from, ts_to):
        """Collect objects under each top-level folder whose LastModified is
        in [ts_from, ts_to), sorted oldest-first.

        The start marker is built from a timestamp one hour before ts_from
        so pagination can skip older keys - presumably keys embed a
        '/YYYY-MM-DD/YYYY-MM-DD-HH-MM' date stamp; confirm against the
        actual bucket layout.
        NOTE(review): folders.get('CommonPrefixes') is None for an empty
        listing, which would make the for-loop raise TypeError.
        """
        files = []
        folders = self.s3.list_objects(Bucket=self.aws_s3_bucket, Prefix=self.aws_s3_prefix, Delimiter='/')
        marker_end = (ts_from - datetime.timedelta(minutes=60)).strftime("/%Y-%m-%d/%Y-%m-%d-%H-%M")
        for o in folders.get('CommonPrefixes'):
            marker = o.get('Prefix') + s3_folder + marker_end
            folder = o.get('Prefix') + s3_folder
            while True:
                response = self._make_objects_list_request(marker=marker, prefix=folder)
                for file_obj in response.get('Contents', []):
                    if ts_to > file_obj['LastModified'] >= ts_from:
                        files.append(file_obj)
                if response['IsTruncated'] is True:
                    # Continue from the last key of this page.
                    marker = response['Contents'][-1]['Key']
                else:
                    break
        return self.sort_files_by_date(files)
    def download_obj(self, key):
        """Fetch an object's raw bytes; returns None (and logs) on failure."""
        logging.info('Started downloading {}'.format(key))
        res = self.s3.get_object(Bucket=self.aws_s3_bucket, Key=key)
        try:
            response_code = res.get('ResponseMetadata', {}).get('HTTPStatusCode', None)
            if response_code == 200:
                body = res['Body']
                data = body.read()
                logging.info('File {} downloaded'.format(key))
                return data
            else:
                logging.error('Error while getting object {}. HTTP Response Code - {}'.format(key, response_code))
        except Exception as err:
            logging.error('Error while getting object {} - {}'.format(key, err))
    def unpack_file(self, downloaded_obj, key):
        """Decompress/decode raw bytes based on the key's extension.

        Returns a str for .csv.gz/.jsonl.gz, a GzipFile for .json.gz, and a
        BytesIO for plain .json.
        NOTE(review): a key matching none of the patterns leaves
        extracted_file unassigned and the return raises UnboundLocalError
        (then swallowed by the except, returning None).
        """
        try:
            file_obj = io.BytesIO(downloaded_obj)
            if '.csv.gz' in key.lower():
                extracted_file = gzip.GzipFile(fileobj=file_obj).read().decode()
            elif '.json.gz' in key.lower():
                extracted_file = gzip.GzipFile(fileobj=file_obj)
            elif '.jsonl.gz' in key.lower():
                extracted_file = gzip.GzipFile(fileobj=file_obj).read().decode('utf-8')
            elif '.json' in key.lower():
                extracted_file = file_obj
            return extracted_file
        except Exception as err:
            logging.error('Error while unpacking file {} - {}'.format(key, err))
    @staticmethod
    def format_date(date_string, input_format, output_format):
        # Best-effort reformat; returns the input unchanged on parse failure.
        try:
            date = datetime.datetime.strptime(date_string, input_format)
            date_string = date.strftime(output_format)
        except Exception:
            pass
        return date_string
    @staticmethod
    def sort_files_by_date(ls):
        # Oldest first by S3 LastModified.
        return sorted(ls, key=lambda k: k['LastModified'])
    def process_obj(self, obj):
        """Download and parse one listed S3 object into an iterable of events.

        .json.gz is treated as a CloudTrail-style document ('Records' key,
        sorted by eventTime); .jsonl.gz is split into raw lines; .csv.gz is
        parsed row-by-row; plain .json is returned as a BytesIO.
        NOTE(review): a key matching no branch leaves sortedLogEvents
        undefined and raises UnboundLocalError at the return.
        """
        key = obj['Key']
        if '.json.gz' in key.lower():
            downloaded_obj = self.download_obj(key)
            json_file = self.unpack_file(downloaded_obj, key)
            logEvents = json.load(json_file)['Records']
            sortedLogEvents = sorted(logEvents, key=lambda r: r['eventTime'])
        elif '.jsonl.gz' in key.lower():
            downloaded_obj = self.download_obj(key)
            json_file = self.unpack_file(downloaded_obj, key)
            sortedLogEvents = json_file.split('\n')
        elif '.csv.gz' in key.lower():
            downloaded_obj = self.download_obj(key)
            csv_file = self.unpack_file(downloaded_obj, key)
            sortedLogEvents = self.parse_csv_file(csv_file)
        elif '.json' in key.lower():
            downloaded_obj = self.download_obj(key)
            sortedLogEvents = self.unpack_file(downloaded_obj, key)
        return sortedLogEvents
    def parse_csv_file(self, csv_file):
        """Yield {'message': <csv line>} events for rows with more than one
        column.

        NOTE(review): convert_empty_string_to_null_values is not defined in
        this part of the file - confirm it exists elsewhere in the module.
        """
        csv_reader = csv.reader(csv_file.split('\n'), delimiter=',')
        for row in csv_reader:
            if len(row) > 1:
                event = {"message": convert_list_to_csv_line(row)}
                event = self.convert_empty_string_to_null_values(event)
                yield event
class AzureSentinelConnector:
    """Batches events and POSTs them to the Azure Log Analytics HTTP Data
    Collector API.

    Events accumulate in an internal queue; every `queue_size` events the
    queue is moved onto a bulk list, and once `bulks_number` bulks pile up
    (or on flush/context exit) all bulks are sent concurrently, recursively
    splitting any payload that serializes over `queue_size_bytes`.
    """
    def __init__(self, log_analytics_uri, customer_id, shared_key, log_type, queue_size=200, bulks_number=10, queue_size_bytes=25 * (2**20)):
        self.log_analytics_uri = log_analytics_uri
        self.customer_id = customer_id
        self.shared_key = shared_key
        self.log_type = log_type
        self.queue_size = queue_size
        self.bulks_number = bulks_number
        self.queue_size_bytes = queue_size_bytes
        self._queue = []
        self._bulks_list = []
        self.successfull_sent_events_number = 0
        self.failed_sent_events_number = 0
    def send(self, event):
        """Queue one event; triggers a lazy flush when the queue is full."""
        self._queue.append(event)
        if len(self._queue) >= self.queue_size:
            self.flush(force=False)
    def flush(self, force=True):
        """Move the queue into the bulk list; POST immediately when `force`
        is True, otherwise only once enough bulks have accumulated."""
        self._bulks_list.append(self._queue)
        if force:
            self._flush_bulks()
        else:
            if len(self._bulks_list) >= self.bulks_number:
                self._flush_bulks()
        self._queue = []
    def _flush_bulks(self):
        # POST each pending (non-empty) bulk on its own thread; oversized
        # bulks are halved recursively to stay under the payload limit.
        jobs = []
        for queue in self._bulks_list:
            if queue:
                queue_list = self._split_big_request(queue)
                for q in queue_list:
                    jobs.append(Thread(target=self._post_data, args=(self.customer_id, self.shared_key, q, self.log_type, )))
        for job in jobs:
            job.start()
        for job in jobs:
            job.join()
        self._bulks_list = []
    def __enter__(self):
        # Bug fix: previously returned None (bare `pass`), so
        # `with AzureSentinelConnector(...) as c:` bound c to None.
        # Returning self follows the context-manager protocol and is
        # backward-compatible with `with sentinel:` usage.
        return self
    def __exit__(self, type, value, traceback):
        # Send whatever is still queued when the `with` block ends.
        self.flush()
    def _build_signature(self, customer_id, shared_key, date, content_length, method, content_type, resource):
        """Build the HMAC-SHA256 'SharedKey' Authorization header value
        required by the Data Collector API."""
        x_headers = 'x-ms-date:' + date
        string_to_hash = method + "\n" + str(content_length) + "\n" + content_type + "\n" + x_headers + "\n" + resource
        bytes_to_hash = bytes(string_to_hash, encoding="utf-8")
        decoded_key = base64.b64decode(shared_key)
        encoded_hash = base64.b64encode(hmac.new(decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest()).decode()
        authorization = "SharedKey {}:{}".format(customer_id, encoded_hash)
        return authorization
    def _post_data(self, customer_id, shared_key, body, log_type):
        """POST one batch of events and update the success/failure counters.

        NOTE(review): the regex substitutions below strip ALL backslashes
        and turn quoted objects into raw JSON; any legitimate escape
        sequence in event data is destroyed.  Left unchanged here to
        preserve the existing wire format - confirm intent.
        """
        events_number = len(body)
        body = json.dumps(body)
        body = re.sub(r'\\', '', body)
        body = re.sub(r'"{', '{', body)
        body = re.sub(r'}"', '}', body)
        method = 'POST'
        content_type = 'application/json'
        resource = '/api/logs'
        rfc1123date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
        content_length = len(body)
        signature = self._build_signature(customer_id, shared_key, rfc1123date, content_length, method, content_type, resource)
        uri = self.log_analytics_uri + resource + '?api-version=2016-04-01'
        headers = {
            'content-type': content_type,
            'Authorization': signature,
            'Log-Type': log_type,
            'x-ms-date': rfc1123date
        }
        response = requests.post(uri, data=body, headers=headers)
        if (response.status_code >= 200 and response.status_code <= 299):
            logging.info('{} events have been successfully sent to Azure Sentinel'.format(events_number))
            self.successfull_sent_events_number += events_number
        else:
            logging.error("Error during sending events to Azure Sentinel. Response code: {}".format(response.status_code))
            self.failed_sent_events_number += events_number
    def _check_size(self, queue):
        # True when the serialized queue fits under the payload byte limit.
        data_bytes_len = len(json.dumps(queue).encode())
        return data_bytes_len < self.queue_size_bytes
    def _split_big_request(self, queue):
        """Recursively halve `queue` until every part serializes under
        queue_size_bytes; returns a list of sub-queues in original order."""
        if self._check_size(queue):
            return [queue]
        else:
            middle = int(len(queue) / 2)
            queues_list = [queue[:middle], queue[middle:]]
            return self._split_big_request(queues_list[0]) + self._split_big_request(queues_list[1])
|
test.py | from threading import Thread
import time, sys
class val:
    """Module-level holder for a single shared integer value."""
    val = 0
def printing(testing):
    # Worker loop: print the given value once per second, forever.
    # (Started on daemon threads by myDec, so it dies with the program.)
    while True:
        print(testing)
        time.sleep(1)
def myDec(testing):
    """Decorator factory: wrap a function so that two daemon printer
    threads run alongside it, then wait until both threads finish.

    Since `printing` loops forever, the wait only ends via
    KeyboardInterrupt/SystemExit (handled below).
    """
    def wtf(func):
        def func_wrapper():
            try:
                print1 = Thread(target=printing, args=(testing,))
                print2 = Thread(target=printing, args=(testing,))
                print1.daemon = True
                print2.daemon = True
                print1.start()
                print2.start()
                func()
                # Bug fix: Thread.isAlive() was removed in Python 3.9;
                # use is_alive().  Also sleep between checks instead of
                # the previous 100%-CPU busy-wait (`pass`).
                while print1.is_alive() or print2.is_alive():
                    time.sleep(0.1)
            except (KeyboardInterrupt, SystemExit):
                print("main thread terminating...")
                sys.exit()
        return func_wrapper
    return wtf
@myDec(1)
def test():
    # Body runs once, between the decorator's thread start-up and its
    # wait-for-threads loop.
    print("two thread has ran! and I executed my code!")
    return
# Module-level invocation: starts the printer threads and then blocks
# until interrupted, since the printer loops never exit on their own.
test()
app.py | import gevent.monkey
gevent.monkey.patch_all()
from flask import Flask, request, jsonify
from utils import get_logger
from extractor import ActivityExtractor, ActivityExtractorException
from generator import ActivityGenerator
from strava.generator import StravaActivityGenerator
from strava.extractor import StravaActivityExtractor
from garmin.extractor import GarminActivityExtractor
from file.extractor import FitFileActivityExtractor
import producer
import time
import os
from threading import Thread
app = Flask(__name__)
logger = get_logger()
@app.route('/donate-activities')
def donate():
    """Extract activities from the named provider (or an uploaded file)
    and publish them via the producer.

    Query/Form:
        app: provider name, default 'strava'.
        file: optional uploaded activity file (takes precedence).
    Returns JSON with `success` and either `donated_activities` or `error`.
    """
    # NOTE(review): local `app` shadows the module-level Flask app and
    # `file` shadows a builtin name - works, but worth renaming.
    app = request.args.get('app', 'strava')
    file = request.files.get('file')
    try:
        if file:
            extractor = ActivityExtractor.get_provider(provider_name=app, file_stream=file)
        else:
            extractor = ActivityExtractor.get_provider(provider_name=app, creds=request.args)
        activities = extractor.get_activities()
        donated_activity_ids = extractor.get_activity_ids(activities)
        logger.info(f'Extracted activities to be donated and processed: {donated_activity_ids}')
    except ActivityExtractorException as e:
        # Extractor failures carry their own HTTP status.
        return jsonify(success=False, error=e.message), e.status
    try:
        producer.produce(activities)
    except Exception as e:
        logger.error('Producing activity records failed with:' + str(e))
        return jsonify(success=False, error=str(e)), 500
    return jsonify(success=True, donated_activities=donated_activity_ids)
@app.route('/generate-data/stop')
def stop_generation():
    """Flag the named provider's generation loop - or every provider's -
    as stopped via the `<provider>_generation` environment variable."""
    requested = request.args.get('app', 'all')
    if requested == 'all':
        apps = ActivityGenerator.get_provider_names()
    else:
        apps = [requested]
    for provider in apps:
        os.environ[f'{provider}_generation'] = 'stopped'
    logger.info(f"Following activity data generation processes are set to 'stopped': {apps}")
    return jsonify(success=True)
@app.route('/generate-data/start')
def start_generation():
    """Start a daemon thread generating dummy activities for a provider.

    Query params:
        app: provider name (default 'strava').
        interval: seconds between generated activities (default 1).
    """
    try:
        app = request.args.get('app', 'strava')
        # Bug fix: query-string values arrive as strings, and the previous
        # code passed that string straight to time.sleep() in the worker,
        # raising TypeError.  Let Flask coerce it to a number instead
        # (falls back to the default on a malformed value).
        interval = request.args.get('interval', 1, type=float)
        generator = ActivityGenerator.get_provider(provider_name=app)
    except Exception as e:
        return jsonify(success=False, error=str(e)), 404
    os.environ[f'{app}_generation'] = 'running'
    logger.info(f'Creating Thread for activity generator and producer {generator}')
    t = Thread(target=generate_and_produce, args=[generator, interval])
    t.daemon = True
    t.start()
    return jsonify(success=True)
def generate_and_produce(generator, interval):
    """Worker loop: generate one dummy activity per iteration and publish
    it, sleeping `interval` seconds, until the `<provider>_generation`
    environment flag is no longer 'running'."""
    logger.debug('Generate and produce activities..')
    provider = generator.PROVIDER_NAME
    while os.getenv(f'{provider}_generation') == 'running':
        # Bug fix: this message was missing the f-prefix and logged the
        # literal text '{provider}' instead of the provider name.
        logger.debug(f'{provider}: running - generating activity..')
        activities = [generator.generate_dummy_activity()]
        try:
            producer.produce(activities)
        except Exception as e:
            # Best-effort: log and keep generating.
            logger.warning('Producing generated activity records failed with:' + str(e))
        time.sleep(interval)
if __name__ == '__main__':
    # Dev entry point: serve on all interfaces, port 7778.
    app.run(port=7778, host='0.0.0.0')
|
__init__.py |
###############################################################################
# DO NOT MODIFY THIS FILE #
###############################################################################
import inspect
import logging
import sys
import textwrap
import time
from collections import namedtuple
from enum import Enum
from multiprocessing import Process, Pipe
from queue import Empty
from .isolation import Isolation, DebugState
__all__ = ['Isolation', 'DebugState', 'Status', 'play', 'fork_get_action']
logger = logging.getLogger(__name__)
Agent = namedtuple("Agent", "agent_class name")
PROCESS_TIMEOUT = 5 # time to interrupt agent search processes (in seconds)
GAME_INFO = """\
Initial game state: {}
First agent: {!s}
Second agent: {!s}
"""
ERR_INFO = """\
Error playing game: {!s}
Initial state: {}
First agent: {!s}
Second agent: {!s}
Final state: {}
Action history: {!s}
"""
RESULT_INFO = """\
Status: {}
Final State: {}
History: {}
Winner: {}
Loser: {}
"""
class Status(Enum):
    # Outcome codes for a completed (or aborted) match - see _play().
    NORMAL = 0
    EXCEPTION = 1     # agent raised during get_action()
    TIMEOUT = 2       # action queue empty after the process timeout
    INVALID_MOVE = 3  # agent returned an action not in state.actions()
    GAME_OVER = 4     # game reached a terminal state normally
class StopSearch(Exception): pass  # Exception class used to halt search
class TimedQueue:
    """Modified queue class to block .put() after a time limit expires,
    and to include both a context object & action choice in the queue.

    Backed by a multiprocessing Pipe (receiver/sender ends) rather than a
    real queue; only the most recent item is kept.
    """
    def __init__(self, receiver, sender, time_limit):
        self.__sender = sender
        self.__receiver = receiver
        self.__time_limit = time_limit / 1000  # time_limit arrives in ms
        self.__stop_time = None
        self.agent = None
    def start_timer(self):
        # Deadline measured with a monotonic clock.
        self.__stop_time = self.__time_limit + time.perf_counter()
    def put(self, item, block=True, timeout=None):
        # Reject puts after the deadline by raising StopSearch into the
        # agent's search.
        if self.__stop_time and time.perf_counter() > self.__stop_time:
            print("Stop Search Raised. Too much time taken")
            raise StopSearch
        # Drain any stale entry so the pipe holds only the newest
        # (context, action) pair.
        if self.__receiver.poll():
            self.__receiver.recv()
        self.__sender.send((getattr(self.agent, "context", None), item))
    def put_nowait(self, item):
        self.put(item, block=False)
    def get(self, block=True, timeout=None):
        return self.__receiver.recv()
    def get_nowait(self):
        return self.get(block=False)
    def qsize(self): return int(self.__receiver.poll())
    # NOTE(review): `~` is bitwise NOT, so ~True == -2 and ~False == -1 -
    # both truthy, making empty() always "true" in a boolean context.
    # Presumably `not self.__receiver.poll()` was intended (file header
    # says DO NOT MODIFY, so left as-is).
    def empty(self): return ~self.__receiver.poll()
    def full(self): return self.__receiver.poll()
# Shim so a ThreadPool.map() over argument tuples can call _play().
def play(args): return _play(*args)  # multithreading ThreadPool.map doesn't expand args
def _play(agents, game_state, time_limit, match_id, debug=False):
    """ Run a match between two agents by alternately soliciting them to
    select a move and applying it to advance the game state.

    Parameters
    ----------
    agents : tuple
        agents[i] is an instance of isolation.Agent class (namedtuple)
    game_state : Isolation
        an instance of Isolation.Isolation in the initial game state;
        assumes that agents[game_state.ply_count % 2] is the active
        player in the initial state
    time_limit : numeric
        The maximum number of milliseconds to allow before timeout during
        each turn (see notes)
    match_id : any
        Opaque identifier for this match, returned unchanged.
    debug : bool
        When True, searches run in-process instead of a child process.

    Returns
    -------
    (Agent, list, any)
        The winning agent, the list of actions applied to the initial
        state, and match_id.
    """
    initial_state = game_state
    game_history = []
    winner = None
    status = Status.NORMAL
    players = [a.agent_class(player_id=i) for i, a in enumerate(agents)]
    logger.info(GAME_INFO.format(initial_state, *agents))
    while not game_state.terminal_test():
        active_idx = game_state.player()
        # any problems during get_action means the active player loses
        winner, loser = agents[1 - active_idx], agents[active_idx]
        try:
            action = fork_get_action(game_state, players[active_idx], time_limit, debug)
        except Empty:
            status = Status.TIMEOUT
            # NOTE(review): the format string has two '{}' placeholders but
            # three arguments - the first placeholder receives the player
            # object and time_limit is dropped.  Left as-is (DO NOT MODIFY
            # header on this file).
            logger.warning(textwrap.dedent("""\
                The queue was empty after get_action() was called. This means that either
                the queue.put() method was not called by the get_action() method, or that
                the queue was empty after the procedure was killed due to timeout {} seconds
                after the move time limit of {} milliseconds had expired.
            """.format(players[active_idx], PROCESS_TIMEOUT, time_limit)).replace("\n", " "))
            break
        except Exception as err:
            status = Status.EXCEPTION
            logger.error(ERR_INFO.format(
                err, initial_state, agents[0], agents[1], game_state, game_history
            ))
            break
        if action not in game_state.actions():
            status = Status.INVALID_MOVE
            break
        game_state = game_state.result(action)
        game_history.append(action)
    else:
        # Loop ran to natural completion: the game reached a terminal state.
        status = Status.GAME_OVER
        if game_state.utility(active_idx) > 0:
            winner, loser = loser, winner  # swap winner/loser if active player won
    logger.info(RESULT_INFO.format(status, game_state, game_history, winner, loser))
    return winner, game_history, match_id
def fork_get_action(game_state, active_player, time_limit, debug=False):
    """Solicit one action from the agent through a TimedQueue.

    In debug mode the search runs in-process; otherwise in a child process
    that is terminated after PROCESS_TIMEOUT + time_limit.  Raises
    queue.Empty (via get_nowait) if the agent never queued an action.
    """
    receiver, sender = Pipe()
    action_queue = TimedQueue(receiver, sender, time_limit)
    if debug:  # run the search in the main process and thread
        # Deepcopy with queue detached (pipes can't be deep-copied), then
        # reattach so the copy can't mutate the caller's player object.
        from copy import deepcopy
        active_player.queue = None
        active_player = deepcopy(active_player)
        active_player.queue = action_queue
        _request_action(active_player, action_queue, game_state)
        time.sleep(time_limit / 1000)
    else:  # spawn a new process to run the search function
        try:
            p = Process(target=_request_action, args=(active_player, action_queue, game_state))
            p.start()
            p.join(timeout=PROCESS_TIMEOUT + time_limit / 1000)
        finally:
            # NOTE(review): if Process(...) itself raised, `p` is unbound
            # here and this line raises NameError (DO NOT MODIFY file).
            if p and p.is_alive(): p.terminate()
    new_context, action = action_queue.get_nowait()  # raises Empty if agent did not respond
    # Carry the agent's search context forward to its next turn.
    active_player.context = new_context
    return action
def _request_action(agent, queue, game_state):
    """ Augment agent instances with a countdown timer on every method before
    calling the get_action() method and catch countdown timer exceptions.

    StopSearch (raised by TimedQueue.put after the deadline) is swallowed:
    the best action queued so far remains available to the caller.
    """
    agent.queue = queue
    queue.agent = agent
    try:
        queue.start_timer()
        agent.get_action(game_state)
    except StopSearch:
        print("TOO MUCH TIME PASSED")
        pass
|
state.py | import pygame
import random
import os
from display.constants import *
from display.characters import ZUCC, Human
from display.background import Background
from display.text import Text
from display.projectiles import Projectile
from display.menu import Menu
from display.statecode import StateCode
from display.projectilemaker import ProjectileMaker
from display.datamanager import DataManager
from display.particles import Particle
from comms.InputEvent import InputEvent
from threading import Thread
import random
import time
class StateMachine:
    """Game-flow controller for the ZUCC game: each play* method runs one
    screen's loop and returns the StateCode to transition to next."""
    def __init__(self):
        self.state = StateCode.LOGO # Set initial state
    @staticmethod
    def playLogo(screen):
        """Splash screen: show the faded team logo while the theme plays
        once, then move on to the menu."""
        screen.fill((0, 0, 0))
        logo = pygame.image.load(os.path.join("assets", "logos", "team.png"))
        logo = pygame.transform.scale(logo, (screen_width, screen_height))
        logo.set_alpha(20)
        screen.blit(logo, (0,0))
        pygame.display.flip()
        pygame.mixer.music.load(os.path.join("assets", "sounds", "theme.ogg"))
        # MUSIC_DEATH fires when the track finishes; that ends the splash.
        pygame.mixer.music.set_endevent(MUSIC_DEATH)
        pygame.mixer.music.play()
        waiting = True
        while waiting:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    return StateCode.END
                elif event.type == MUSIC_DEATH:
                    waiting = False
        return StateCode.MENU
    @staticmethod
    def playMenu(screen):
        """Loop the background music and hand control to the Menu screen."""
        pygame.mixer.music.load(os.path.join("assets", "sounds", "music.mp3"))
        pygame.mixer.music.play(-1)
        menu = Menu(screen)
        return menu.run()
    @staticmethod
    def playIntro(screen):
        """Intro animation: drop the ZUCC sprite in from above until it
        reaches its resting position, then start the game."""
        welcomeText = Text((550,400), (255,255,255))
        welcomeText.text = "zucc"
        welcomeText.font = welcomeText.make_font(['Lucida Console'], 128)
        introRunning = True
        bg = Background(screen)
        clock = pygame.time.Clock()
        sprites = pygame.sprite.Group()
        zucc = ZUCC()
        sprites.add(zucc)
        # Remember the resting y, then start the sprite off-screen above.
        groundLevel = zucc.rect.y
        zucc.rect.y = -500
        while introRunning:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    return StateCode.END
            zucc.moveY(3)
            if zucc.rect.y >= groundLevel - 3:
                introRunning = False
            bg.render()
            sprites.update()
            welcomeText.render(screen)
            sprites.draw(screen)
            pygame.display.flip()
            clock.tick(60)
        return StateCode.PLAYING
    @staticmethod
    def playGame(screen, keyboard):
        """Main gameplay loop.

        `keyboard` selects input mode: pygame keys when True, otherwise an
        InputEvent thread posts GETINPUT events.  Returns the StateCode for
        the outcome (ZUCC_WIN, HUMAN_WIN, or END on quit).
        """
        sprites = pygame.sprite.Group()
        zucc = ZUCC()
        sprites.add(zucc)
        human = Human()
        sprites.add(human)
        bg = Background(screen)
        clock = pygame.time.Clock()
        game_playing = True
        if not keyboard:
            # NOTE(review): game_playing is a bool passed by value - the
            # InputEvent thread never observes it becoming False; confirm
            # how InputEvent.run terminates.
            input_event = InputEvent(game_playing)
            input_thread = Thread(target=input_event.run)
            input_thread.start()
        projectile_event = ProjectileMaker()
        projectile_thread = Thread(target=projectile_event.run)
        projectile_thread.start()
        data_manager = DataManager()
        start_ticks = pygame.time.get_ticks()
        day_past = 0
        evolveTicker = False
        while game_playing:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    game_playing = False
                if event.type == SENDPROJECTILE:
                    # Spawn a random 'app' projectile posted by ProjectileMaker.
                    proj = Projectile(icons[random.randint(0,len(icons) - 1)], random.randint(1,5), random.randint(25, 150), random.randint(2 ,20), 5, 'app')
                    sprites.add(proj)
                if keyboard:
                    if event.type == pygame.KEYDOWN:
                        if event.key == pygame.K_e:
                            zucc.evolve()
                else:
                    if event.type == GETINPUT:
                        zucc.ySpeed = event.zucc
                        human.ySpeed = event.human
            if keyboard:
                # Continuous up/down movement from held arrow keys.
                keys = pygame.key.get_pressed()
                if keys[pygame.K_UP]:
                    zucc.ySpeed = -3
                elif keys[pygame.K_DOWN]:
                    zucc.ySpeed = 3
                else:
                    zucc.ySpeed = 0
            for sprite in sprites:
                if type(sprite).__name__ == "Projectile":
                    # 'app' projectile hits the human: they drop a piece of
                    # personal data that flies back the other way.
                    if sprite.type == 'app' and sprite.rect.colliderect(human.collision_rect):
                        sprite.kill()
                        personal_data = data_manager.drop()
                        data = Projectile('data', random.randrange(-4, -8, -1), random.randrange(50, 100), random.randrange(1, 9), 5, 'data', False, human.collision_rect.x, human.collision_rect.y, data=personal_data)
                        sprites.add(data)
                    # 'data' projectile reaches ZUCC: absorb it; every
                    # second pickup triggers an evolve.
                    if sprite.type == 'data' and sprite.rect.colliderect(zucc.collision_rect):
                        sprite.kill()
                        if data_manager.pickup(sprite.data):
                            return StateCode.ZUCC_WIN
                        elif evolveTicker:
                            zucc.evolve()
                            evolveTicker = False
                        else:
                            evolveTicker = True
                    # Size power-up: different override values per side.
                    if sprite.type == 'power_size' and sprite.rect.colliderect(human.collision_rect):
                        sprite.kill()
                        Projectile.override = True
                        Projectile.override_value = 2
                        Projectile.override_frames = 500
                    if sprite.type == 'power_size' and sprite.rect.colliderect(zucc.collision_rect):
                        sprite.kill()
                        Projectile.override = True
                        Projectile.override_value = 8
                        Projectile.override_frames = 500
            # Count down an active size override.
            if(Projectile.override):
                Projectile.override_frames-=1
                if(Projectile.override_frames == 0):
                    Projectile.override = False
            # Rarely spawn a power-up with a burst of gold particles.
            if(random.randrange(0, 1500) == 1):
                power = Projectile('power_size', random.choice( [random.randrange(-15, -5), random.randrange(5, 15)] ), random.randrange(50, 300), random.randrange(2, 12), 4, 'power_size', False, screen_width / 2, screen_height / 2)
                sprites.add(power)
                for p in Particle.makeParticleFamily(25, screen_width / 2, screen_height / 2, 'gold'):
                    sprites.add(p)
            # Ambient particle trails behind both characters, every frame.
            for p in Particle.makeParticleFamily(5, human.collision_rect.x + human.collision_rect.width/2 - 20, human.collision_rect.y + human.collision_rect.height, 'orange'):
                sprites.add(p)
            for p in Particle.makeParticleFamily(5, zucc.collision_rect.x + zucc.collision_rect.width/2 - 20, zucc.collision_rect.y + zucc.collision_rect.height, 'facebook'):
                sprites.add(p)
            sprites.update()
            bg.render()
            # Show player data
            data = data_manager.getRemaining()
            x = 400
            zucc_label = Text((x, 0), (37,55,140))
            zucc_label.text = "ZUCC has absorbed:"
            zucc_label.font = zucc_label.make_font(['Lucida Console'], 36)
            zucc_label.render(screen)
            y = 38
            for data_name, is_good in data.items():
                if not is_good:
                    zucc_data = Text((x, y), (255,255,255))
                    zucc_data.text = data_name
                    zucc_data.font = zucc_data.make_font(['Lucida Console'], 36)
                    zucc_data.render(screen)
                    y += 38
            x = screen_width - 300
            user_label = Text((x, 0), (54,125,33))
            user_label.text = "Your remaining info:"
            user_label.font = zucc_label.make_font(['Lucida Console'], 36)
            user_label.render(screen)
            y = 38
            for data_name, is_good in data.items():
                if is_good:
                    user_data = Text((x, y), (255, 255, 255))
                    user_data.text = data_name
                    user_data.font = user_data.make_font(['Lucida Console'], 36)
                    user_data.render(screen)
                    y += 38
            # Show countdown to GDPR
            # One in-game "day" elapses every 9000 ms; humans win at day 20.
            if day_past < 20:
                day_past = (pygame.time.get_ticks() - start_ticks) / 9000
            else:
                return StateCode.HUMAN_WIN
            countdown_gdpr = Text((screen_width / 2 - 100, 0), (255,255,255))
            countdown_gdpr.text = '{:02d} days till GDPR'.format(int(20 - day_past))
            countdown_gdpr.font = countdown_gdpr.make_font(['Lucida Console'], 36)
            countdown_gdpr.render(screen)
            # Show progress bar
            pygame.draw.rect(screen, (75,102,173), pygame.Rect(screen_width / 2 - 200,48,400,10))
            pygame.draw.rect(screen, (255,255,255), pygame.Rect(screen_width / 2 - 200,48, 400 * (day_past/20),10))
            sprites.draw(screen)
            pygame.display.flip()
            clock.tick(60)
        return StateCode.END
    @staticmethod
    def zucc_win(screen):
        """Victory screen shown when ZUCC collects all the data."""
        showWinner = True
        welcomeText = Text((550,400), (255,255,255))
        welcomeText.text = "long live zucc"
        welcomeText.font = welcomeText.make_font(['Lucida Console'], 128)
        bg = Background(screen)
        clock = pygame.time.Clock()
        sprites = pygame.sprite.Group()
        while showWinner:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    return StateCode.END
            bg.render()
            welcomeText.render(screen)
            sprites.draw(screen)
            pygame.display.flip()
            clock.tick(60)
        return StateCode.END
    @staticmethod
    def human_win(screen):
        """Victory screen shown when the GDPR countdown runs out."""
        showWinner = True
        welcomeText = Text((550,400), (255,255,255))
        welcomeText.text = "long live gdpr"
        welcomeText.font = welcomeText.make_font(['Lucida Console'], 128)
        bg = Background(screen)
        clock = pygame.time.Clock()
        sprites = pygame.sprite.Group()
        while showWinner:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    return StateCode.END
            bg.render()
            welcomeText.render(screen)
            sprites.draw(screen)
            pygame.display.flip()
            clock.tick(60)
        return StateCode.END
|
sand_clock.py | import time
from tkinter import *
from multiprocessing import Process
import threading
info = {
'total_time': 60
}
flag = [False]
def make_app():
    """Build the sand-clock Tk window: label, start/reset/pause buttons and
    two entries, addressed later via app.children['lb'/'btn'/...]."""
    _font = ['Arial', 25, 'bold']
    app = Tk()
    Label(name='lb', text='sand clock', fg='yellow', bg='blue', font=_font).pack()
    Button(name='btn', text='start', command=time_counts).pack()
    Button(name='bts', text='reset', command=time_reset).pack()
    Entry(name='ipt').pack()
    Button(name='btp', text='pause', command=time_pause).pack()
    Entry(name='ipp').pack()  # NOTE(review): 'ipp' is never read anywhere.
    app.geometry('300x300')
    return app
def time_counts():
    """Start button handler: launch a 'timer' thread that decrements
    info['total_time'] once per second until it reaches 0 or flag[0]
    is set (by reset/pause)."""
    flag[0] = False
    def _counts():
        while info['total_time']:
            if flag[0]:
                break
            info['total_time'] -= 1
            print(info['total_time'])
            time.sleep(1)
    # NOTE(review): the thread is non-daemon, so a running timer keeps the
    # process alive after the window closes.
    t = threading.Thread(target=_counts, name='timer')
    t.start()
def time_reset():
    # Reset button handler: stop the timer thread, then restore the default.
    # (Order matters: the flag must be raised before total_time is rewritten.)
    flag[0] = True
    info['total_time'] = 60
def time_pause():
    # Pause button handler: stops the timer, but only while the 'ipt'
    # entry is empty.
    # NOTE(review): this reads 'ipt' (the duration entry), while the 'ipp'
    # entry packed next to the pause button is never used - presumably one
    # of the two names is a mix-up; confirm intended behavior.
    ipt = app.children['ipt']
    if not ipt.get():
        flag[0] = True
def ui_watcher():
    """Background watcher: twice a second, sync button states and the
    countdown label with the timer thread, and adopt a user-entered
    duration while no timer runs.

    NOTE(review): this touches Tk widgets from a worker thread; Tkinter is
    not thread-safe, so scheduling these updates with app.after() would be
    the robust approach.
    """
    def _update_button():
        # 'start' disabled while a timer thread runs; 'pause' the opposite.
        btn = app.children['btn']
        btp = app.children['btp']
        timer = [t for t in threading.enumerate() if t.name == 'timer']
        if timer:
            btn['state'] = 'disabled'
            btp['state'] = 'normal'
        else:
            btn['state'] = 'normal'
            # Bug fix: 'disable' is not a valid Tk state and raises
            # _tkinter.TclError; the correct value is 'disabled'.
            btp['state'] = 'disabled'
    def _get_time():
        # Adopt a user-entered duration only while no timer is running.
        ipt = app.children['ipt']
        timer = [t for t in threading.enumerate() if t.name == 'timer']
        if not timer and ipt.get():
            info['total_time'] = int(ipt.get())
    def _update_time():
        # Mirror the remaining seconds into the label.
        lb = app.children['lb']
        lb['text'] = info['total_time']
    def _main():
        while True:
            print("tik toc")
            print(threading.enumerate())
            _update_button()
            _get_time()
            _update_time()
            time.sleep(0.5)
    t = threading.Thread(target=_main, name='watcher')
    t.start()
if __name__ == '__main__':
    # Build the UI, kick off the watcher thread once the loop starts,
    # and enter the Tk main loop.
    app = make_app()
    app.after(0, ui_watcher)
    app.mainloop()
|
object_detection_visulization.py | #!/usr/bin/env python
# --------------------------------------------------------
# 3D object detection train file
#
# -------------------------------------------------------
import pdb, traceback
import argparse
import math
import h5py
import numpy as np
import tensorflow as tf
import socket
import time
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR,'utils'))
sys.path.append(os.path.join(ROOT_DIR,'utils_xyz'))
sys.path.append(os.path.join(ROOT_DIR,'models'))
sys.path.append(os.path.join(ROOT_DIR,'config'))
from pointnet2_obj_detection_tf4 import placeholder_inputs,get_model,get_loss
#import provider
import get_dataset
from evaluation import EvaluationMetrics
from kitti_data_net_provider import kitti_data_net_provider #Normed_H5f,Net_Provider
from config import cfg
import multiprocessing as mp
from bbox_transform import bbox_transform_inv
from nms_3d import nms_3d
from evaluation_3d import evaluation_3d
from ply_util import create_ply, gen_box_pl
ISDEBUG = False    # enable one-off profiler tracing in train_one_epoch
ISSUMMARY = False  # enable TensorBoard summary writing

# ----- command-line configuration -----
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_name', default='rawh5_kitti', help='rawh5_kitti')
#parser.add_argument('--all_fn_globs', type=str,default='stride_1_step_2_8192_normed/',\
#        help='The file name glob for both training and evaluation')
parser.add_argument('--feed_elements', default='xyz_raw', help='xyz_1norm,xyz_midnorm,color_1norm')
parser.add_argument('--batch_size', type=int, default= 32, help='Batch Size during training [default: 24]')
parser.add_argument('--eval_fnglob_or_rate', default='train', help='file name str glob or file number rate: scan1*.nh5 0.2')
parser.add_argument('--num_point', type=int, default=2**15, help='Point number [default: 2**15]')
parser.add_argument('--max_epoch', type=int, default=50, help='Epoch to run [default: 50]')
parser.add_argument('--num_batches', type=int, default= 2, help='decides how many visulization data you want to generate')
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--learning_rate', type=float, default=0.01, help='Initial learning rate [default: 0.01]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=300000, help='Decay step for lr decay [default: 300000]')
parser.add_argument('--decay_rate', type=float, default=0.5, help='Decay rate for lr decay [default: 0.5]')
parser.add_argument('--max_test_file_num', type=int, default=None, help='Which area to use for test, option: 1-6 [default: 6]')
parser.add_argument('--only_evaluate',action='store_true',help='do not train')
parser.add_argument('--finetune',action='store_true',help='do not train')
parser.add_argument('--model_epoch', type=int, default=10, help='the epoch of model to be restored')
parser.add_argument('--auto_break',action='store_true',help='If true, auto break when error occurs')

FLAGS = parser.parse_args()

# Module-level shorthands mirroring the parsed flags.
DATASET_NAME = FLAGS.dataset_name
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
NUM_BATCH = FLAGS.num_batches
# Interpret --eval_fnglob_or_rate either as a numeric split rate or as a
# file-name glob used to select evaluation files.
try:
    FLAGS.eval_fnglob_or_rate = float(FLAGS.eval_fnglob_or_rate)
    log_eval_fn_glob = ''
    print('FLAGS.eval_fnglob_or_rate is eval file number rate')
# BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt /
# SystemExit; only a failed float() conversion should fall through here.
except ValueError:
    log_eval_fn_glob = FLAGS.eval_fnglob_or_rate.split('*')[0]
    print('FLAGS.eval_fnglob_or_rate is eval name glob. log_eval_fn_glob:%s'%(log_eval_fn_glob))
# Evaluation-only runs do a single pass; training runs honour --max_epoch.
if FLAGS.only_evaluate:
    MAX_EPOCH = 1
    log_name = 'log_Test.txt'
else:
    MAX_EPOCH = FLAGS.max_epoch
    log_name = 'log_Train.txt'

# Encode the key hyper-parameters into the log directory name.
FLAGS.log_dir = FLAGS.log_dir+'-B'+str(BATCH_SIZE)+'-'+\
                FLAGS.feed_elements+'-'+str(NUM_POINT)+'-'+FLAGS.dataset_name+'-eval_'+log_eval_fn_glob
FLAGS.feed_elements = FLAGS.feed_elements.split(',')
LOG_DIR = os.path.join(ROOT_DIR,'train_res/object_detection_result/'+FLAGS.log_dir)
MODEL_PATH = os.path.join(LOG_DIR,'model.ckpt-'+str(FLAGS.model_epoch))
LOG_DIR_FUSION = os.path.join(ROOT_DIR,'train_res/object_detection_result/fusion_log.txt')
if not os.path.exists(LOG_DIR): os.makedirs(LOG_DIR)
# Back up the model definition and training script next to the logs.
os.system('cp %s/models/pointnet2_obj_detection.py %s' % (ROOT_DIR,LOG_DIR)) # bkp of model def
os.system('cp %s/train_obj_detection.py %s' % (BASE_DIR,LOG_DIR)) # bkp of train procedure

# Append to the existing log when finetuning, otherwise start fresh.
if FLAGS.finetune:
    LOG_FOUT = open(os.path.join(LOG_DIR, log_name), 'a')
else:
    LOG_FOUT = open(os.path.join(LOG_DIR, log_name), 'w')
LOG_FOUT_FUSION = open(LOG_DIR_FUSION, 'a')
LOG_FOUT.write(str(FLAGS)+'\n\n')

# Batch-norm decay schedule constants (consumed by get_bn_decay).
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
#BN_DECAY_DECAY_STEP = float(DECAY_STEP * 2)
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99

HOSTNAME = socket.gethostname()

# Load Data
# FLAGS.all_fn_globs = FLAGS.all_fn_globs.split(',')
#net_provider = Net_Provider(dataset_name=FLAGS.dataset_name, \
#                            all_filename_glob=FLAGS.all_fn_globs, \
#                            eval_fnglob_or_rate=FLAGS.eval_fnglob_or_rate,\
#                            only_evaluate = FLAGS.only_evaluate,\
#                            num_point_block = NUM_POINT,
#                            feed_elements=FLAGS.feed_elements)
data_provider = kitti_data_net_provider(DATASET_NAME,BATCH_SIZE)
NUM_CHANNELS = cfg.TRAIN.NUM_CHANNELS # x, y, z
NUM_CLASSES = cfg.TRAIN.NUM_CLASSES # bg, fg
NUM_REGRESSION = cfg.TRAIN.NUM_REGRESSION

START_TIME = time.time()
def log_string(out_str):
    """Append one line to the run log file and echo it to stdout."""
    LOG_FOUT.write('%s\n' % out_str)
    LOG_FOUT.flush()
    print(out_str)
def get_learning_rate(batch):
    """Exponentially decayed learning rate for the current global step.

    Decays BASE_LEARNING_RATE by DECAY_RATE every DECAY_STEP processed
    samples (staircase), clipped below at 1e-5.
    """
    learning_rate = tf.train.exponential_decay(
        BASE_LEARNING_RATE,  # base learning rate
        batch * BATCH_SIZE,  # current index into the dataset
        DECAY_STEP,          # decay step
        DECAY_RATE,          # decay rate
        staircase=True)
    # BUG FIX: the clipped tensor was previously assigned to a misspelled
    # name ('learing_rate') and discarded, so the clip never took effect.
    learning_rate = tf.maximum(learning_rate, 0.00001)  # CLIP THE LEARNING RATE!!
    return learning_rate
def get_bn_decay(batch):
    """Batch-norm decay schedule: momentum decays exponentially with the
    global step and the resulting decay is capped at BN_DECAY_CLIP."""
    momentum = tf.train.exponential_decay(
        BN_INIT_DECAY,
        batch * BATCH_SIZE,
        BN_DECAY_DECAY_STEP,
        BN_DECAY_DECAY_RATE,
        staircase=True)
    return tf.minimum(BN_DECAY_CLIP, 1 - momentum)
def train_eval(train_feed_buf_q, eval_feed_buf_q):
    """Build the detection graph and run the evaluation/visualisation loop.

    train_feed_buf_q / eval_feed_buf_q: optional multiprocessing queues of
    pre-fetched batches; None means batches are pulled directly from the
    global data_provider.

    NOTE(review): despite the name, this visualisation variant never calls
    train_one_epoch -- each epoch only restores the checkpoint at MODEL_PATH
    and runs eval_one_epoch.  train_log_str is assigned but unused.
    """
    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(GPU_INDEX)):
            pointclouds_pl, labels_pl, smpws_pl = placeholder_inputs(BATCH_SIZE, NUM_POINT, NUM_CHANNELS, NUM_REGRESSION)
            is_training_pl = tf.placeholder(tf.bool, shape=())
            # Global step counter, incremented by optimizer.minimize below.
            batch = tf.Variable(0)
            bn_decay = get_bn_decay(batch)
            tf.summary.scalar('bn_decay', bn_decay)

            # Get model and loss
            end_points, pred_class, pred_box, xyz_pl = get_model(pointclouds_pl, is_training_pl, NUM_CLASSES, bn_decay=bn_decay)
            loss, classification_loss, regression_loss, pred_prob = get_loss(BATCH_SIZE, pred_class, pred_box, labels_pl, smpws_pl, xyz_pl)
            tf.summary.scalar('loss', loss)

            # Get training operator
            learning_rate = get_learning_rate(batch)
            tf.summary.scalar('learning_rate', learning_rate)
            if OPTIMIZER == 'momentum':
                optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
            elif OPTIMIZER == 'adam':
                optimizer = tf.train.AdamOptimizer(learning_rate)
            train_op = optimizer.minimize(loss, global_step=batch)

            saver = tf.train.Saver(max_to_keep=50)

        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        # Add summary writers
        merged = tf.summary.merge_all()
        if not FLAGS.only_evaluate:
            train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
                                                 sess.graph)
            test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
        else:
            test_writer = None

        # Init variables
        init = tf.global_variables_initializer()
        sess.run(init, {is_training_pl: True})

        # define operations: name -> tensor/op lookup shared by the epoch
        # functions (train_one_epoch / eval_one_epoch).
        ops = {'pointclouds_pl': pointclouds_pl,
               'labels_pl': labels_pl,
               'is_training_pl': is_training_pl,
               'pred_class': pred_class,
               'pred_box': pred_box,
               'xyz_pl': xyz_pl,
               'loss': loss,
               'classification_loss': classification_loss,
               'regression_loss': regression_loss,
               'pred_prob': pred_prob,
               'train_op': train_op,
               'merged': merged,
               'step': batch,
               'smpws_pl': smpws_pl}

        if FLAGS.finetune:
            saver.restore(sess, MODEL_PATH)
            log_string('finetune, restored model from: \n\t%s' % MODEL_PATH)

        log_string(data_provider.data_summary_str)

        # Continue epoch numbering after the restored checkpoint when finetuning.
        epoch_start = 0
        if FLAGS.finetune:
            epoch_start += (FLAGS.model_epoch + 1)
        for epoch in range(epoch_start, epoch_start + MAX_EPOCH):
            log_string('**** EPOCH %03d ****' % (epoch))
            sys.stdout.flush()
            #if train_feed_buf_q == None:
            #    net_provider.update_train_eval_shuffled_idx()
            train_log_str = ''
            # Restore the same checkpoint every epoch, then evaluate only.
            saver.restore(sess, MODEL_PATH)
            log_string('only evaluate, restored model from: \n\t%s' % MODEL_PATH)
            eval_log_str = eval_one_epoch(sess, ops, test_writer, epoch, eval_feed_buf_q)
def add_log(tot, epoch, batch_idx, loss_batch, c_TP_FN_FP, total_seen, t_batch_ls, SimpleFlag=0):
    """Format one accuracy/timing line, emit it via log_string, return it.

    SimpleFlag > 0 appends the averaged per-class accuracy string;
    SimpleFlag > 1 additionally appends the full per-class breakdown.
    """
    ave_whole_acc, class_acc_str, ave_acc_str = EvaluationMetrics.get_class_accuracy(
        c_TP_FN_FP, total_seen)
    if t_batch_ls:
        per_batch = np.mean(np.concatenate(t_batch_ls, axis=1), axis=1)
        per_block = per_batch / BATCH_SIZE
        t_per_block_str = np.array2string(
            per_block * 1000, formatter={'float_kind': lambda x: "%0.1f" % x})
    else:
        t_per_block_str = "no-t"
    log_str = '%s [%d - %d] \t t_block(d,c):%s\tloss: %0.3f \tacc: %0.3f' % (
        tot, epoch, batch_idx, t_per_block_str, loss_batch, ave_whole_acc)
    if SimpleFlag > 0:
        log_str += ave_acc_str
    if SimpleFlag > 1:
        log_str += class_acc_str
    log_string(log_str)
    return log_str
def train_one_epoch(sess, ops, train_writer, epoch, train_feed_buf_q, pctx, opts):
    """Run one training epoch.

    ops: dict mapping names to tf tensors/ops (built in train_eval).
    train_feed_buf_q: optional queue of pre-fetched (point_cloud_data,
        label_data) batches; None pulls batches from data_provider directly.
    Returns a (possibly empty) log string.
    """
    is_training = True
    num_blocks = data_provider.num_train_data
    if num_blocks is not None:
        num_batches = num_blocks // BATCH_SIZE
        if num_batches == 0:
            return ''
    else:
        num_batches = None

    print('total batch num = ', num_batches)
    batch_idx = -1
    t_batch_ls = []
    train_logstr = ''
    # BUG FIX: the None test must come first -- the original evaluated
    # `num_batches-1` before checking for None, raising TypeError whenever
    # the provider's size was unknown.
    while (num_batches is None) or (batch_idx < num_batches - 1):
        t0 = time.time()
        batch_idx += 1
        if train_feed_buf_q is None:
            point_cloud_data, label_data = data_provider._get_next_minibatch()
        else:
            if train_feed_buf_q.qsize() == 0:
                print('train_feed_buf_q.qsize == 0')
                break
            point_cloud_data, label_data = train_feed_buf_q.get()
        # BUG FIX: check the end-of-data sentinel *before* touching .shape;
        # the original computed cur_smp_weights first and crashed on None.
        if point_cloud_data is None:
            break  # all data reading finished
        # Uniform per-point sample weights.
        cur_smp_weights = np.ones((point_cloud_data.shape[0], point_cloud_data.shape[1]))
        t1 = time.time()

        feed_dict = {ops['pointclouds_pl']: point_cloud_data,
                     ops['labels_pl']: label_data,
                     ops['is_training_pl']: is_training,
                     ops['smpws_pl']: cur_smp_weights}
        fetches = [ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                   ops['pred_class'], ops['classification_loss'],
                   ops['regression_loss']]
        if ISDEBUG and epoch == 0 and batch_idx == 5:
            # One-off profiler trace of a single training step.
            pctx.trace_next_step()
            pctx.dump_next_step()
            summary, step, _, loss_val, pred_class_val, classification_loss_val, \
                regression_loss_val = sess.run(fetches, feed_dict=feed_dict)
            pctx.profiler.profile_operations(options=opts)
        else:
            summary, step, _, loss_val, pred_class_val, classification_loss_val, \
                regression_loss_val = sess.run(fetches, feed_dict=feed_dict)
        # (data-read time, compute time) per batch, for add_log reporting.
        t_batch_ls.append(np.reshape(np.array([t1 - t0, time.time() - t1]), (2, 1)))
        if ISSUMMARY:
            train_writer.add_summary(summary, step)
        if batch_idx % 80 == 0:
            print('the training batch is {}, the loss value is {}'.format(batch_idx, loss_val))
            print('the classificaiton loss is {}, the regression loss is {}'.format(classification_loss_val, regression_loss_val))
        # NOTE: the original contained an `if False and ...` in-epoch
        # evaluation block that referenced an undefined name (cur_label);
        # it was dead code and has been removed.
        if batch_idx == 200:
            os.system('nvidia-smi')
    return train_logstr
def limit_eval_num_batches(epoch, num_batches):
    """Cap the evaluation length at 31 batches on epochs that are not
    multiples of 5; full-length evaluation otherwise."""
    if epoch % 5 == 0:
        return num_batches
    return min(num_batches, 31)
def eval_one_epoch(sess, ops, test_writer, epoch, eval_feed_buf_q):
    """ ops: dict mapping from string to tf ops

    Runs NUM_BATCH evaluation batches and, for each batch, writes three
    .ply files under data/visulization/: raw + downsampled point cloud,
    ground-truth boxes, and NMS-filtered predicted boxes.  Returns 1.
    """
    is_training = False
    total_seen = 0.00001  # accumulator kept from the training variant; unused here
    log_string('----')
    num_batches = NUM_BATCH  # --num_batches: how many visualisations to emit
    eval_logstr = ''
    t_batch_ls = []
    # NOTE(review): these accumulators are never appended to in this
    # function -- left over from the non-visualisation variant.
    all_gt_box = []
    all_pred_class_val = []
    all_pred_box_val = []
    all_xyz = []
    batch_idx = -1
    # label
    while (batch_idx < num_batches-1) or (num_batches==None):
        t0 = time.time()
        batch_idx += 1
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx+1) * BATCH_SIZE
        if eval_feed_buf_q == None:
            point_cloud_data, label_data, gt_box = data_provider._get_evaluation_minibatch(start_idx, end_idx) #cur_data,cur_label,cur_smp_weights = net_provider.get_eval_batch(start_idx,end_idx)
        else:
            if eval_feed_buf_q.qsize() == 0:
                print('eval_feed_buf_q.qsize == 0')
                break
            # NOTE(review): this branch never defines gt_box, which the
            # ground-truth visualisation below requires -- NameError if the
            # queue path is ever taken.  Confirm before enabling queues.
            point_cloud_data, label_data, epoch_buf = eval_feed_buf_q.get()
            #assert batch_idx == batch_idx_buf and epoch== epoch_buf
        # Uniform per-point sample weights.
        cur_smp_weights = np.ones((point_cloud_data.shape[0], point_cloud_data.shape[1]))
        t1 = time.time()
        print('time of reading is {}'.format(t1-t0))
        if type(point_cloud_data) == type(None):
            print('batch_idx:%d, get None, reading finished'%(batch_idx))
            break # all data reading finished
        feed_dict = {ops['pointclouds_pl']: point_cloud_data,
                     ops['labels_pl']: label_data,
                     ops['is_training_pl']: is_training,
                     ops['smpws_pl']: cur_smp_weights }
        # xyz_pl below is the evaluated numpy value of ops['xyz_pl']
        # (the downsampled point coordinates), not the placeholder itself.
        summary, step, loss_val, pred_class_val, pred_prob_val, pred_box_val, xyz_pl, classification_loss_val, regression_loss_val = sess.run([ops['merged'], ops['step'], ops['loss'], ops['pred_class'], ops['pred_prob'], ops['pred_box'], ops['xyz_pl'], ops['classification_loss'], ops['regression_loss']],
                                                                                                                                             feed_dict=feed_dict)

        ## generating the raw point cloud and downsampled point cloud
        color_1 = np.array([[0, 0, 0]])       # black: raw input points
        color_1 = np.tile(color_1, (point_cloud_data.shape[1],1))
        raw_point_cloud_color = np.concatenate((point_cloud_data[0], color_1),1)
        color_2 = np.array([[255, 255, 255]]) # white: downsampled points
        color_2 = np.tile(color_2, (xyz_pl.shape[1], 1))
        # Small offset so the downsampled points are visible next to raw ones.
        xyz_pl[0] = xyz_pl[0] + np.array([[0.05, 0.05, 0.05]])
        xyz_pl_color = np.concatenate((xyz_pl[0], color_2),1)
        xyz_color = np.concatenate((raw_point_cloud_color, xyz_pl_color),0)
        path_vis = os.path.join(ROOT_DIR,'data/visulization/','raw_xyz_'+str(batch_idx)+'.ply')
        create_ply(xyz_color, path_vis)
        t2 = time.time()
        print('time of generating is {}'.format(t2 - t1))
        #create_ply(xyz_pl_color, path_vis)

        ## generating the raw point cloud and ground truth bounding boxes
        # gt_box[0] columns 1:8 hold [l, w, h, theta, x, y, z] for sample 0.
        gt_box_ = get_box_coordinate(gt_box[0][:,1:8])
        path_vis = os.path.join(ROOT_DIR,'data/visulization/','raw_gt_box_'+str(batch_idx)+'.ply')
        gen_box_pl(path_vis, gt_box_, point_cloud_data[0])
        t3 = time.time()
        print('time of ground truth box is {}'.format(t3 - t2))

        ## generating the raw point cloud and predicted bounding boxes
        num_anchors = cfg.TRAIN.NUM_ANCHORS
        num_class = cfg.TRAIN.NUM_CLASSES
        pred_box_ = bbox_transform_inv(pred_box_val[0], xyz_pl[0])
        # Per-anchor foreground scores, flattened to one column.
        pred_class = np.array([pred_class_val[0, :,(x*num_class+1):((x+1)*num_class)] for x in range(num_anchors)]).transpose(1, 0, 2) ##shape: 512 x num_anchors x 1
        pred_class = pred_class.reshape(-1, 1)
        pred_box_ = np.concatenate(( pred_box_, pred_class), axis=1)
        # Keep only boxes whose score (column 7) clears 0.2, then NMS.
        pred_box_ = pred_box_[ np.where( pred_box_[:,7] >= 0.2)[0], :]
        if pred_box_.shape[0]>0:
            pred_box_ = nms_3d( pred_box_, cfg.TEST.NMS)
            pred_box_ = get_box_coordinate(pred_box_[:,0:7])
            path_vis = os.path.join(ROOT_DIR,'data/visulization/','raw_pred_box_'+str(batch_idx)+'.ply')
            gen_box_pl(path_vis, pred_box_, point_cloud_data[0])
        t4 = time.time()
        print('time of predicting box is {}'.format(t4 - t3))

        if batch_idx%40 == 0:
            print('the test batch is {}, the loss value is {}'.format(batch_idx, loss_val))
            print('the classificaiton loss is {}, the regression loss is {}'.format(classification_loss_val, regression_loss_val))
    print('Done!!')
    return 1
def get_box_coordinate(all_boxes):
    """Convert (N, 7) boxes [l, w, h, theta, x, y, z] into corner points.

    For each box the 4 bottom corners (z = 0) come first, then the 4 top
    corners (z = h); the footprint is rotated by -theta around z and
    translated to (x, y, z).  Returns an (8*N, 3) array.
    """
    assert all_boxes.shape[0] > 0
    assert all_boxes.shape[1] == 7
    corners = []
    for box in all_boxes:
        l, w, h = box[0], box[1], box[2]
        # Axis-aligned footprint corners in the box frame.
        xy = np.array([[-w/2.0, l/2.0], [w/2.0, l/2.0],
                       [w/2.0, -l/2.0], [-w/2.0, -l/2.0]])
        theta = -box[3]
        T = np.array([[np.cos(theta), np.sin(theta)],
                      [-np.sin(theta), np.cos(theta)]])
        a_xy = np.dot(xy, T)
        bottom = np.concatenate((a_xy, np.zeros((xy.shape[0], 1))), 1)
        top = np.concatenate((a_xy, np.full((xy.shape[0], 1), h)), 1)
        # Translate the 8 corners to the box centre (x, y, z).
        corners.append(np.concatenate((bottom, top), 0) + box[4:7])
    # PERF FIX: one concatenate instead of the original's O(N^2) np.append
    # loop seeded with a dummy zero row that was sliced off at the end.
    return np.concatenate(corners, axis=0)
def boxes_assemble_filter(all_pred_class_val, all_pred_box_val, all_xyz, all_gt_box, thresh=0.05):
    """Assemble per-batch network outputs into per-sample box lists.

    For every sample, flattens the per-anchor foreground scores, converts
    box regressions to absolute boxes via bbox_transform_inv, keeps boxes
    whose score (column 7) >= thresh, applies 3-D NMS, and returns
    (list_of_pred_box_arrays, list_of_gt_boxes), one entry per sample.
    """
    #all_pred_boxes = np.zeros([1,8]) #l, w, h, theta, x, y, z, score
    all_pred_boxes = [] # saved in list
    num_batch = len(all_pred_class_val)
    batch_size = all_pred_class_val[0].shape[0]
    gt_box_ = []
    num_anchors = cfg.TRAIN.NUM_ANCHORS
    num_class = cfg.TRAIN.NUM_CLASSES
    num_regression = cfg.TRAIN.NUM_REGRESSION
    # generate, (num_samples x num_point) x 8
    for i in range(num_batch):
        for j in range(batch_size):
            # Flat sample index; only consumed by the commented-out variant below.
            index = i*batch_size + j
            # Foreground score per point per anchor, flattened to one column.
            temp_pred_class = np.array([all_pred_class_val[i][j,:,(x*num_class+1):((x+1)*num_class)] for x in range(num_anchors)]).transpose(1, 0, 2) ##shape: 512 x num_anchors x 1
            temp_pred_class = temp_pred_class.reshape(-1, 1) # shape: n x 1
            '''
            # l, w, h, alpha, x, y ,z
            temp_pred_box_l = np.array([ np.exp(all_pred_box_val[i][j,:,(x*num_regression)])*anchor_length for x in range(num_anchors)])
            temp_pred_box_l = temp_pred_box_l.reshape(-1,1)
            temp_pred_box_w = np.array([ np. exp(all_pred_box_val[i][j,:,(x*num_regression+1)])*anchor_width for x in range(num_anchors)])
            temp_pred_box_w = temp_pred_box_w.reshape(-1,1)
            temp_pred_box_h = np.array([ np.exp(all_pred_box_val[i][j,:,(x*num_regression+2)])*anchor_height for x in range(num_anchors)])
            temp_pred_box_h = temp_pred_box_h.reshape(-1,1)
            temp_pred_box_alpha = np.array([ all_pred_box_val[i][j,:,(x*num_regression+3)]*np.pi/4+anchor_alpha[x,0] for x in range(num_anchors)])
            temp_pred_box_alpha = temp_pred_box_alpha.reshape(-1,1)
            temp_pred_box_x = np.array([ all_pred_box_val[i][j,:,(x*num_regression+4)]*anchor_length + all_xyz[i][j,:,0] for x in range(num_anchors) ])
            temp_pred_box_x = temp_pred_box_x.reshape(-1,1)
            temp_pred_box_y = np.array([ all_pred_box_val[i][j,:,(x*num_regression+5)]*anchor_width + all_xyz[i][j,:,1] for x in range(num_anchors) ])
            temp_pred_box_y = temp_pred_box_y.reshape(-1,1)
            temp_pred_box_z = np.array([ all_pred_box_val[i][j,:,(x*num_regression+6)]*anchor_height + all_xyz[i][j,:,3] for x in range(num_anchors) ])
            temp_pred_box_z = temp_pred_box_z.reshape(-1,1)
            '''
            # temp_pred_box = np.array([all_pred_box_val[i][j,:,(x*num_regression):((x+1)*num_regression)] for x in range(num_anchors)]).transpose(1,0,2) ## shape: 512 x num_anchors x 7
            # temp_pred_box = temp_pred_box.reshape(-1, num_regression) # shape: n x 7
            ## transform the prediction into real num
            temp_all_box = bbox_transform_inv(all_pred_box_val[i][j,:,:], all_xyz[i][j,:,:])
            #temp_index = np.full((temp_pred_class.shape[0],1), index) # shape: n x 1
            # temp_all_ = np.concatenate((temp_index, temp_pred_box_l, temp_pred_box_w, temp_pred_box_h, temp_pred_box_alpha, temp_pred_box_x, temp_pred_box_y, temp_pred_box_z, temp_pred_class),axis=1) # shape: n x 9
            temp_all_ = np.concatenate(( temp_all_box,temp_pred_class), axis=1)
            ## getting box whose confidence is over thresh
            temp_all_ = temp_all_[ np.where( temp_all_[:,7] >= thresh)[0], :] ## temp_all_ shape: n x 8
            ## useing nms
            if temp_all_.shape[0] > 0: ## there is no prediction box whose prediction is over thresh
                temp_all_ = nms_3d(temp_all_, cfg.TEST.NMS)
            all_pred_boxes.append(temp_all_)
            gt_box_.append(all_gt_box[i][j])
    # all_pred_boxes = np.delete(all_pred_boxes, 0, 0)
    # all_pred_boxes = all_pred_boxes[ np.where( all_pred_boxes[:,8] >= thresh)[0], :]
    return all_pred_boxes, gt_box_
def add_train_feed_buf(train_feed_buf_q):
    """Producer process: pre-fetch training mini-batches into a queue.

    Puts [point_cloud_data, label_data] pairs, matching the 2-tuple that
    train_one_epoch pops from the queue; a None batch is enqueued as the
    end-of-epoch sentinel.
    """
    with tf.device('/cpu:0'):
        max_buf_size = 20
        num_blocks = data_provider.num_train_data
        if num_blocks is not None:
            num_batches = num_blocks // BATCH_SIZE
        else:
            num_batches = None
        epoch_start = 0
        if FLAGS.finetune:
            epoch_start += (FLAGS.model_epoch + 1)
        for epoch in range(epoch_start, epoch_start + MAX_EPOCH):
            batch_idx = -1
            # None test first: `num_batches-1` raises TypeError on None.
            while (num_batches is None) or (batch_idx < num_batches - 1):
                if train_feed_buf_q.qsize() < max_buf_size:
                    batch_idx += 1
                    point_cloud_data, label_data = data_provider._get_next_minibatch()
                    # BUG FIX: the original enqueued undefined names
                    # (cur_data, cur_label, cur_smp_weights) -> NameError,
                    # and a 5-tuple where the consumer unpacks 2 values.
                    train_feed_buf_q.put([point_cloud_data, label_data])
                    if point_cloud_data is None:
                        print('add_train_feed_buf: get None data from net_provider, all data put finished. epoch= %d, batch_idx= %d' % (epoch, batch_idx))
                        break  # all data reading finished
                else:
                    # Queue full: back off proportionally to batch size.
                    time.sleep(0.1 * BATCH_SIZE * max_buf_size / 3)
        print('add_train_feed_buf: data reading finished. epoch= %d, batch_idx= %d' % (epoch, batch_idx))
def add_eval_feed_buf(eval_feed_buf_q):
    """Producer process: pre-fetch evaluation mini-batches into a queue.

    Puts [point_cloud_data, label_data, epoch] triples, matching the
    3-tuple that eval_one_epoch pops from the queue; a None batch is the
    end-of-epoch sentinel.
    """
    with tf.device('/cpu:1'):
        max_buf_size = 20
        num_blocks = data_provider.evaluation_num
        if num_blocks is not None:
            raw_num_batches = num_blocks // BATCH_SIZE
        else:
            raw_num_batches = None
        epoch_start = 0
        if FLAGS.finetune:
            epoch_start += (FLAGS.model_epoch + 1)
        for epoch in range(epoch_start, epoch_start + MAX_EPOCH):
            batch_idx = -1
            num_batches = limit_eval_num_batches(epoch, raw_num_batches)
            # None test first: `num_batches-1` raises TypeError on None.
            while (num_batches is None) or (batch_idx < num_batches - 1):
                if eval_feed_buf_q.qsize() < max_buf_size:
                    batch_idx += 1
                    start_idx = batch_idx * BATCH_SIZE
                    end_idx = (batch_idx + 1) * BATCH_SIZE
                    # BUG FIX: _get_evaluation_minibatch returns three values
                    # (see eval_one_epoch); the original unpacked only two.
                    point_cloud_data, label_data, gt_box = data_provider._get_evaluation_minibatch(start_idx, end_idx)
                    # BUG FIX: the original enqueued undefined names
                    # (cur_data, cur_label, cur_smp_weights) -> NameError;
                    # the consumer unpacks (data, labels, epoch).
                    eval_feed_buf_q.put([point_cloud_data, label_data, epoch])
                    if point_cloud_data is None:
                        print('add_eval_feed_buf: get None data from net_provider, all data put finished. epoch= %d, batch_idx= %d' % (epoch, batch_idx))
                        break  # all data reading finished
                else:
                    # Queue full: back off proportionally to batch size.
                    time.sleep(0.1 * BATCH_SIZE * max_buf_size / 3)
        print('add_eval_feed_buf: data reading finished. epoch= %d, batch_idx= %d' % (epoch, batch_idx))
def main():
    """Entry point: run train_eval directly, or (when the hard-disabled
    flag below is flipped) with separate producer processes feeding the
    train/eval queues."""
    # Multiprocessing feed is hard-disabled ('False and ...') and is also
    # skipped under --auto_break so pdb can attach in a single process.
    IsFeedData_MultiProcessing = False and (not FLAGS.auto_break)
    if not IsFeedData_MultiProcessing:
        train_eval(None, None)
        return
    train_feed_buf_q = mp.Queue()
    eval_feed_buf_q = mp.Queue()
    processes = {
        'add_train_buf': mp.Process(target=add_train_feed_buf, args=(train_feed_buf_q,)),
        'add_eval_buf': mp.Process(target=add_eval_feed_buf, args=(eval_feed_buf_q,)),
        'train_eval': mp.Process(target=train_eval, args=(train_feed_buf_q, eval_feed_buf_q,)),
    }
    for proc in processes.values():
        proc.start()
    for proc in processes.values():
        proc.join()
if __name__ == "__main__":
    if FLAGS.auto_break:
        # --auto_break: drop into pdb post-mortem on an uncaught exception.
        try:
            main()
            LOG_FOUT.close()
        # BUG FIX: was a bare `except:` whose unpacking also shadowed the
        # builtin `type` for the rest of the block.
        except Exception:
            exc_type, exc_value, exc_tb = sys.exc_info()
            traceback.print_exc()
            pdb.post_mortem(exc_tb)
    else:
        main()
        #train_eval(None,None)
        LOG_FOUT.close()
|
plugin.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import string,cgi,time
from os import curdir, sep
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
from urlparse import urlparse
from cgi import parse_qs
import simplejson as json
import threading
import fnmatch
def updateVar(name, value):
    """Create the Indigo variable if it does not exist, else update it."""
    if name in indigo.variables:
        indigo.variable.updateValue(name, value)
    else:
        indigo.variable.create(name, value=value)
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """HTTP server that handles each request in a separate thread
    (ThreadingMixIn dispatches every incoming connection to its own thread)."""
class httpHandler(BaseHTTPRequestHandler):
    """Request handler for geofencing webhooks (Locative, Geofency, Beecon,
    Geohopper, and a user-configurable 'Custom' format).

    Each POST is parsed into (sender, location, event) and routed to the
    matching Indigo 'beacon' device, optionally creating devices and
    Indigo variables on the fly.  Python 2 code (has_key, iteritems).
    """

    def __init__(self, plugin, *args):
        # Keep a reference to the owning plugin for logging/config access.
        self.plugin = plugin
        self.plugin.debugLog(u"New httpHandler thread: " + threading.currentThread().getName() + ", total threads: " + str(threading.activeCount()))
        BaseHTTPRequestHandler.__init__(self, *args)

    def deviceUpdate(self, device, deviceAddress, event):
        """Apply an enter/exit/test event to a beacon device and fire triggers.

        deviceAddress is 'sender@@location' (lower-cased by parseResult).
        """
        self.plugin.debugLog(u"deviceUpdate called")
        # Optionally mirror the last event into three Indigo variables.
        if (self.plugin.createVar):
            updateVar("Beacon_deviceID", str(device.id))
            updateVar("Beacon_name", deviceAddress.split('@@')[0])
            updateVar("Beacon_location", deviceAddress.split('@@')[1])
        # Accepted spellings per event kind cover the different webhook apps
        # plus the user-configured custom enter/exit tokens.
        if event == "LocationEnter" or event == "enter" or event == "1" or event == self.plugin.customEnter:
            indigo.server.log("Enter location notification received from sender/location " + deviceAddress)
            device.updateStateOnServer("onOffState", True)
            device.updateStateImageOnServer(indigo.kStateImageSel.MotionSensorTripped)
            self.triggerEvent("statePresent", deviceAddress)
        elif event == "LocationExit" or event == "exit" or event == "0" or event == self.plugin.customExit:
            indigo.server.log("Exit location notification received from sender/location " + deviceAddress)
            device.updateStateOnServer("onOffState", False)
            device.updateStateImageOnServer(indigo.kStateImageSel.MotionSensor)
            self.triggerEvent("stateAbsent", deviceAddress)
        elif event == "LocationTest" or event == "test":
            indigo.server.log("Test location notification received from sender/location " + deviceAddress)
            # Test events only change state when the user enabled it in config.
            if self.plugin.testTrigger:
                indigo.server.log("Trigger action on test is enabled, triggeraction: " + self.plugin.testTriggeraction)
                if self.plugin.testTriggeraction == "enter":
                    device.updateStateOnServer("onOffState", True)
                    device.updateStateImageOnServer(indigo.kStateImageSel.MotionSensorTripped)
                elif self.plugin.testTriggeraction == "exit":
                    device.updateStateOnServer("onOffState", False)
                    device.updateStateImageOnServer(indigo.kStateImageSel.MotionSensor)
                elif self.plugin.testTriggeraction == "toggle":
                    device.updateStateOnServer("onOffState", not device.onState)
                    if (device.onState):
                        device.updateStateImageOnServer(indigo.kStateImageSel.MotionSensorTripped)
                    else:
                        device.updateStateImageOnServer(indigo.kStateImageSel.MotionSensor)
        # Fired for every recognised event, regardless of kind.
        self.triggerEvent("stateChange", deviceAddress)

    def triggerEvent(self, eventType, deviceAddress):
        """Execute all plugin triggers of eventType whose address matches.

        'manualAddress' triggers always fire; otherwise the trigger's
        deviceAddress is matched as a case-insensitive fnmatch glob.
        """
        self.plugin.debugLog(u"triggerEvent called")
        for trigger in self.plugin.events[eventType]:
            if (self.plugin.events[eventType][trigger].pluginProps["manualAddress"]):
                indigo.trigger.execute(trigger)
            elif (fnmatch.fnmatch(deviceAddress.lower(), self.plugin.events[eventType][trigger].pluginProps["deviceAddress"].lower())):
                indigo.trigger.execute(trigger)

    def deviceCreate(self, sender, location):
        """Create a new 'beacon' Indigo device named 'sender@@location',
        cache it in the plugin's deviceList, and return its id."""
        self.plugin.debugLog(u"deviceCreate called")
        deviceName = sender + "@@" + location
        device = indigo.device.create(address=deviceName, deviceTypeId="beacon", name=deviceName, protocol=indigo.kProtocol.Plugin)
        self.plugin.deviceList[device.id] = {'ref': device, 'name': device.name, 'address': device.address.lower()}
        self.plugin.debugLog(u"Created new device, " + deviceName)
        device.updateStateOnServer("onOffState", False)
        device.updateStateImageOnServer(indigo.kStateImageSel.MotionSensor)
        return device.id

    def parseResult(self, sender, location, event):
        """Route a parsed (sender, location, event) to the matching device,
        creating the device first if enabled in the plugin config."""
        self.plugin.debugLog(u"parseResult called")
        deviceAddress = sender.lower() + "@@" + location.lower()
        foundDevice = False
        if self.plugin.deviceList:
            for b in self.plugin.deviceList:
                if (self.plugin.deviceList[b]['address'] == deviceAddress):
                    self.plugin.debugLog(u"Found userLocation device: " + self.plugin.deviceList[b]['name'])
                    self.deviceUpdate(self.plugin.deviceList[b]['ref'], deviceAddress, event)
                    foundDevice = True
        if foundDevice == False:
            self.plugin.debugLog(u"No device found")
            indigo.server.log("Received " + event + " from " + deviceAddress + " but no corresponding device exists", isError=True)
            if self.plugin.createDevice:
                newdev = self.deviceCreate(sender, location)
                self.deviceUpdate(self.plugin.deviceList[newdev]['ref'], deviceAddress, event)

    def do_POST(self):
        """Accept a webhook POST, identify the sending app by user-agent /
        content-type, extract (sender, location, event), and dispatch it.

        Always answers 200 immediately so the mobile app does not retry.
        """
        global rootnode  # NOTE(review): never assigned or read here; appears vestigial
        foundDevice = False
        self.plugin.debugLog(u"Received HTTP POST")
        self.plugin.debugLog(u"Sending HTTP 200 response")
        self.send_response(200)
        self.end_headers()
        try:
            ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
            uagent = str(self.headers.getheader('user-agent'))
            self.plugin.debugLog(u"User-agent: %s, Content-type: %s" % (uagent, ctype))
            data = self.rfile.read(int(self.headers['Content-Length']))
            data = data.decode('utf-8')
            self.plugin.debugLog(u"Data (UTF-8 decoded): %s" % data)
            # Custom
            # Custom format is checked first and, if it matches, handled
            # exclusively (note the early return below).
            if ((self.plugin.custom) and (ctype == 'application/x-www-form-urlencoded')):
                pdata = parse_qs(data)
                p = {}
                for key, value in pdata.iteritems():
                    p.update({key: value[0]})
                if all(p.has_key(name) for name in (self.plugin.customSender, self.plugin.customLocation, self.plugin.customAction)):
                    self.plugin.debugLog(u"Recognised Custom")
                    if ((p[self.plugin.customAction] == self.plugin.customEnter) or (p[self.plugin.customAction] == self.plugin.customExit)):
                        self.parseResult(p[self.plugin.customSender], p[self.plugin.customLocation], p[self.plugin.customAction])
                    else:
                        indigo.server.log(u"Received Custom data, but value of Action parameter wasn't recognised", isError=True)
                    return
            # Locative
            if (('Geofancy' in uagent) or ('Locative' in uagent)):
                self.plugin.debugLog(u"Recognised Locative")
                if (self.plugin.geofancy):
                    if (ctype == 'application/x-www-form-urlencoded'):
                        pdata = parse_qs(data)
                        p = {}
                        for key, value in pdata.iteritems():
                            p.update({key: value[0]})
                        if all(p.has_key(name) for name in ('device', 'id', 'trigger')):
                            self.parseResult(p["device"], p["id"], p["trigger"])
                        else:
                            indigo.server.log(u"Received Locative data, but one or more parameters are missing", isError=True)
                    else:
                        indigo.server.log(u"Recognised Locative, but received data was wrong content-type: %s" % ctype, isError=True)
                else:
                    indigo.server.log(u"Received Locative data, but Locative is disabled in plugin config")
            # Geofency
            elif ('Geofency' in uagent):
                self.plugin.debugLog(u"Recognised Geofency")
                if (self.plugin.geofency):
                    if (ctype == 'application/json'):
                        p = json.loads(data)
                        if all(p.has_key(name) for name in ('name', 'entry', 'device')):
                            self.parseResult(p["device"], p["name"], p["entry"])
                        else:
                            indigo.server.log(u"Received Geofency data, but one or more parameters are missing", isError=True)
                    else:
                        indigo.server.log(u"Recognised Geofency, but received data was wrong content-type: %s" % ctype, isError=True)
                else:
                    indigo.server.log(u"Received Geofency data, but Geofency is disabled in plugin config")
            #Beecon
            elif ('Beecon' in uagent):
                self.plugin.debugLog(u"Recognised Beecon")
                if (self.plugin.beecon):
                    pdata = parse_qs(data)
                    p = {}
                    for key, value in pdata.iteritems():
                        p.update({key: value[0]})
                    if all(p.has_key(name) for name in ('region', 'action')):
                        # Beecon sends no sender field; use a fixed sender name.
                        self.parseResult("Beecon", p["region"], p["action"])
                    else:
                        indigo.server.log(u"Received Beecon data, but one or more parameters are missing", isError=True)
                else:
                    indigo.server.log(u"Received Beecon data, but Beecon is disabled in plugin config")
            # Geohopper
            # Fallback: any other JSON payload is treated as Geohopper.
            elif ctype == 'application/json':
                self.plugin.debugLog(u"Received JSON data (possible Geohopper)")
                if (self.plugin.geohopper):
                    p = json.loads(data)
                    if all(p.has_key(name) for name in ('sender', 'location', 'event')):
                        self.parseResult(p["sender"], p["location"], p["event"])
                    else:
                        indigo.server.log(u"Received Geohopper data, but one or more parameters are missing", isError=True)
                else:
                    indigo.server.log(u"Received Geohopper data, but Geohopper is disabled in plugin config")
            else:
                indigo.server.log(u"Didn't recognise received data. (User-agent: %s, Content-type: %s)" % (uagent, ctype), isError=True)
        except Exception as e:
            # Best-effort handler: the 200 was already sent, so just log.
            indigo.server.log(u"Exception: %s" % str(e), isError=True)
            pass
class Plugin(indigo.PluginBase):
def __init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs):
    """Initialise plugin state: device cache and per-event trigger maps."""
    indigo.PluginBase.__init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs)
    self.deviceList = {}
    self.events = {}
    for eventType in ("stateChange", "statePresent", "stateAbsent"):
        self.events[eventType] = {}
def __del__(self):
    """Delegate teardown to the Indigo plugin base class."""
    indigo.PluginBase.__del__(self)
def startup(self):
    """Load preferences and start the background HTTP listener thread."""
    self.loadPluginPrefs()
    self.debugLog(u"Startup called")
    # Daemon thread so the listener dies with the plugin process.
    self.myThread = threading.Thread(target=self.listenHTTP, args=())
    self.myThread.daemon = True
    self.myThread.start()
def deviceCreated(self, device):
    """Cache a freshly created device in deviceList, keyed by device id."""
    self.debugLog(device.name + ": Created device of type \"%s\"" % device.deviceTypeId)
    entry = {'ref': device, 'name': device.name, 'address': device.address.lower()}
    self.deviceList[device.id] = entry
def deviceStartComm(self, device):
    """Register a starting device; legacy 'userLocation' devices are
    rejected and must be recreated as 'beacon' devices."""
    self.debugLog(device.name + ": Starting device")
    if device.deviceTypeId == u'userLocation':
        indigo.server.log("Device "+device.name+" needs to be deleted and recreated.",isError=True)
        return
    self.deviceList[device.id] = {'ref': device, 'name': device.name, 'address': device.address.lower()}
def deviceStopComm(self, device):
    """Drop a stopping beacon device from the cache."""
    self.debugLog(device.name + ": Stopping device")
    if device.deviceTypeId == u'beacon':
        self.deviceList.pop(device.id)
def shutdown(self):
self.debugLog(u"Shutdown called")
def triggerStartProcessing(self, trigger):
self.debugLog(u"Start processing trigger " + unicode(trigger.name))
self.events[trigger.pluginTypeId][trigger.id] = trigger
def triggerStopProcessing(self, trigger):
self.debugLog(u"Stop processing trigger " + unicode(trigger.name))
if trigger.pluginTypeId in self.events:
if trigger.id in self.events[trigger.pluginTypeId]:
del self.events[trigger.pluginTypeId][trigger.id]
def actionControlSensor(self, action, device):
self.debugLog(u"Manual sensor state change request: " + device.name)
if device.pluginProps['AllowOnStateChange']:
if action.sensorAction == indigo.kSensorAction.TurnOn:
device.updateStateOnServer("onOffState", True)
device.updateStateImageOnServer(indigo.kStateImageSel.MotionSensorTripped)
elif action.sensorAction == indigo.kSensorAction.TurnOff:
device.updateStateOnServer("onOffState", False)
device.updateStateImageOnServer(indigo.kStateImageSel.MotionSensor)
elif action.sensorAction == indigo.kSensorAction.Toggle:
device.updateStateOnServer("onOffState", not device.onState)
if (device.onState):
device.updateStateImageOnServer(indigo.kStateImageSel.MotionSensorTripped)
else:
device.updateStateImageOnServer(indigo.kStateImageSel.MotionSensor)
else:
self.debugLog(u"ignored request (sensor is read-only)")
def validatePrefsConfigUi(self, valuesDict):
self.debugLog(u"validating Prefs called")
port = int(valuesDict[u'listenPort'])
if (port <= 0 or port>65535):
errorMsgDict = indigo.Dict()
errorMsgDict[u'port'] = u"Port number needs to be a valid TCP port (1-65535)."
return (False, valuesDict, errorMsgDict)
if (valuesDict[u'custom']):
if (valuesDict[u'customSender'] == ""):
errorMsgDict = indigo.Dict()
errorMsgDict[u'customSender'] = u"Sender field can't be empty"
return (False, valuesDict, errorMsgDict)
if (valuesDict[u'customLocation'] == ""):
errorMsgDict = indigo.Dict()
errorMsgDict[u'customLocation'] = u"Location field can't be empty"
return (False, valuesDict, errorMsgDict)
if (valuesDict[u'customAction'] == ""):
errorMsgDict = indigo.Dict()
errorMsgDict[u'customAction'] = u"Action field can't be empty"
return (False, valuesDict, errorMsgDict)
if (valuesDict[u'customEnter'] == ""):
errorMsgDict = indigo.Dict()
errorMsgDict[u'customEnter'] = u"Enter field can't be empty"
return (False, valuesDict, errorMsgDict)
if (valuesDict[u'customExit'] == ""):
errorMsgDict = indigo.Dict()
errorMsgDict[u'customExit'] = u"Exit field can't be empty"
return (False, valuesDict, errorMsgDict)
if (valuesDict[u'customEnter'] == valuesDict[u'customExit']):
errorMsgDict = indigo.Dict()
errorMsgDict[u'customExit'] = u"Enter and Exit fields can't have same value"
return (False, valuesDict, errorMsgDict)
if (valuesDict[u'customSender'] == valuesDict[u'customLocation']):
errorMsgDict = indigo.Dict()
errorMsgDict[u'customLocation'] = u"Sender and Location fields can't have same value"
return (False, valuesDict, errorMsgDict)
return (True, valuesDict)
def closedPrefsConfigUi ( self, valuesDict, UserCancelled):
if UserCancelled is False:
indigo.server.log ("Preferences were updated.")
if not (self.listenPort == int(self.pluginPrefs['listenPort'])):
indigo.server.log("New listen port configured, reload plugin for change to take effect",isError=True)
self.loadPluginPrefs()
def loadPluginPrefs(self):
self.debugLog(u"loadpluginPrefs called")
self.debug = self.pluginPrefs.get('debugEnabled',False)
self.createDevice = self.pluginPrefs.get('createDevice',True)
self.listenPort = int(self.pluginPrefs.get('listenPort',6192))
self.beecon = self.pluginPrefs.get('beecon',True)
self.geofancy = self.pluginPrefs.get('geofancy',True)
self.geohopper = self.pluginPrefs.get('geohopper',True)
self.geofency = self.pluginPrefs.get('geofency',True)
self.createVar = self.pluginPrefs.get('createVar',False)
self.custom = self.pluginPrefs.get('custom',False)
self.customSender = self.pluginPrefs.get('customSender','sender')
self.customLocation = self.pluginPrefs.get('customLocation','location')
self.customAction = self.pluginPrefs.get('customAction','action')
self.customEnter = self.pluginPrefs.get('customEnter','enter')
self.customExit = self.pluginPrefs.get('customExit','exit')
self.testTrigger = self.pluginPrefs.get('testTrigger',False)
self.testTriggeraction = self.pluginPrefs.get('testTriggeraction','toggle')
def listenHTTP(self):
self.debugLog(u"Starting HTTP listener thread")
indigo.server.log(u"Listening on TCP port " + str(self.listenPort))
self.server = ThreadedHTTPServer(('', self.listenPort), lambda *args: httpHandler(self, *args))
self.server.serve_forever()
def runConcurrentThread(self):
while True:
self.sleep(1)
|
pyTest.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# ---------- ---------- ---------- ---------- ---------- ---------- ---------- #
# @file pyTest #
# @author Hanno Sternberg <hanno@almostintelligent.de> #
# #
# This file contains the class defining a test case. #
# #
# @license MIT #
# #
# This software is licensed under the MIT License #
# #
# Copyright (c) 2012-2015 Hanno Sternberg #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to #
# deal in the Software without restriction, including without limitation the #
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or #
# sell copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS #
# IN THE SOFTWARE. #
# ---------- ---------- ---------- ---------- ---------- ---------- ---------- #
import os
import re
import sys
import signal
import difflib
import subprocess
from threading import Thread
from pyTestUtils import isLambda, TermColor, logger
class TestState:
    """Enumeration of the possible states of a test case."""

    Success = 0      # The test was successful
    Fail = 1         # The test has failed
    Error = 2        # The test has produced an error
    Assertion = 3    # The test has produced an assertion
    SegFault = 4     # The test has produced a segmentation fault
    InfoOnly = 5     # Display only the test information
    Timeout = 6      # The test has timed out
    Waiting = 7      # The test awaits execution
    Disabled = 8     # The test is disabled
    Clean = 10       # Bad word scan came back clean
    BadWord = 11     # A bad word was detected

    @staticmethod
    def toString(state):
        """
        Converts the enumeration value into a coloured text label.
        @type state: int
        @param state: Enumeration value
        """
        # Lazily-evaluated dispatch table: colorText runs only for the
        # state that actually matched, exactly as the old if-chain did.
        renderers = {
            TestState.Waiting: lambda: TermColor.colorText(" WAITING ", TermColor.White),
            TestState.Success: lambda: TermColor.colorText(" SUCCESS ", fg=TermColor.Black, bg=TermColor.Green),
            TestState.Fail: lambda: TermColor.colorText(" FAIL ", fg=TermColor.Black, bg=TermColor.Red),
            TestState.Error: lambda: TermColor.colorText(" ERROR ", fg=TermColor.Black, bg=TermColor.Red, style=TermColor.Bold),
            TestState.SegFault: lambda: TermColor.colorText(" SEGFAULT ", fg=TermColor.Black, bg=TermColor.Yellow),
            TestState.Assertion: lambda: TermColor.colorText(" ASSERTION ", fg=TermColor.Black, bg=TermColor.Yellow, style=TermColor.Bold),
            TestState.InfoOnly: lambda: TermColor.colorText(" INFO ", TermColor.White),
            TestState.Timeout: lambda: TermColor.colorText(" TIMEOUT ", TermColor.Purple),
            TestState.Disabled: lambda: TermColor.colorText(" DISABLED ", TermColor.Blue),
            TestState.Clean: lambda: TermColor.colorText(" CLEAN ", fg=TermColor.White, bg=TermColor.Green, style=TermColor.Bold),
            TestState.BadWord: lambda: TermColor.colorText(" BADWORD ", fg=TermColor.Yellow, bg=TermColor.Red, style=TermColor.Bold),
        }
        render = renderers.get(state, lambda: TermColor.colorText(" UNKNOWN ", TermColor.Yellow))
        return render()
class Command():
    """Runs a shell command on a worker thread with a kill-on-timeout guard."""

    def __init__(self, cmd):
        """
        Initialises the command
        @type cmd: str
        @param cmd: Command line to execute
        """
        self.cmd = cmd
        self.proc = None       # subprocess handle, set by commandFunc
        self.thread = None     # worker thread, set by execute
        self.out = ""          # captured stdout
        self.err = ""          # captured stderr
        self.ret = 0           # exit code
        self.killed = False

    def commandFunc(self):
        """Worker body: spawn the process and capture its streams and exit code."""
        self.proc = subprocess.Popen(
            self.cmd,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            universal_newlines=True,
            shell=True,
            cwd=os.getcwd(),
        )
        self.out, self.err = self.proc.communicate()
        self.ret = self.proc.wait()

    def execute(self, timeout):
        """
        Executes the command, killing it if it exceeds the timeout.
        @type timeout: float
        @param timeout: Timeout in seconds
        """
        worker = Thread(target=self.commandFunc)
        self.thread = worker
        worker.start()
        worker.join(timeout)
        if self.proc is None or self.proc.poll() is not None:
            # Finished (or never started) within the timeout window.
            return TestState.Success
        # Timed out: tear the process (tree) down.
        if sys.platform == "win32":
            subprocess.Popen(['taskkill', '/F', '/T', '/PID', str(self.proc.pid)]).communicate()
        else:
            # shell=True means self.proc is the shell; kill its child first.
            pgrepOut = subprocess.check_output("pgrep -P {}".format(self.proc.pid), shell=True, universal_newlines=True)
            os.kill(int(pgrepOut.strip()), signal.SIGKILL)
            if self.proc.poll() is None:
                os.kill(self.proc.pid, signal.SIGTERM)
        return TestState.Timeout
class Expectation(object):
    """Base class for output expectations; the default accepts anything."""
    def __call__(self):
        return True


class ExpectFile(Expectation):
    """Expectation that compares output against the raw bytes of a file."""
    def __init__(self, fname):
        """
        @type fname: str
        @param fname: Path of the file holding the expected output
        """
        # Fix: close the file deterministically instead of leaking the handle.
        with open(fname, "rb") as f:
            self.exp = f.read()

    def __call__(self, out):
        return self.exp == out

    def __str__(self):
        # NOTE(review): self.exp is bytes (opened "rb"); under Python 3 this
        # would not produce a str -- fine under the file's Python 2 target.
        return self.exp
class Stringifier(object):
    """Normalises an expectation and an output into comparable line lists."""
    def __init__(self, expectation):
        """
        @type expectation: str
        @param expectation: The expected output
        """
        self.exp = expectation.encode("utf8", errors="ignore")

    def __call__(self, output):
        """Return (expectedLines, outputLines), both stripped and split."""
        out = output.encode("utf8", errors="ignore")
        return self.exp.strip().splitlines(), out.strip().splitlines()

    def __str__(self):
        # NOTE(review): self.exp is the encoded form; under Python 3 this
        # would return bytes -- fine under the file's Python 2 target.
        return self.exp


class StringifiedFile(Stringifier):
    """Stringifier whose expectation is read from a text file."""
    def __init__(self, fname):
        # Fix: close the file deterministically instead of leaking the handle.
        with open(fname) as f:
            Stringifier.__init__(self, f.read())
class CompareFiles(Expectation):
    """Expectation that compares two files byte for byte."""
    def __init__(self, expect_file, out_file):
        """
        @param expect_file: Path of the file with the expected content
        @param out_file: Path of the file produced by the test
        """
        self.expect = expect_file
        self.out = out_file

    def __call__(self, whatever):
        # Since we want to compare files, actual output is ignored.
        # Fix: close both files deterministically instead of leaking handles.
        with open(self.expect, "rb") as f:
            expect = f.read()
        with open(self.out, "rb") as f:
            out = f.read()
        return expect == out
class Test(object):
    """A single test case: runs a command and checks stdout/stderr/return code."""

    def __init__(self,
                 DUT=None,
                 name="",
                 description="",
                 command=None,
                 stdout=None,
                 stderr=None,
                 returnCode=None,
                 timeout=5.0,
                 outputOnFail=False,
                 pipe=False,
                 diff=None,
                 state=TestState.Waiting,
                 binary=False):
        """
        Initialises a test
        @type DUT: str
        @param DUT: The path to the Device Under Test
        @type name: str
        @param name: The name of the test case
        @type description: str
        @param description: The description of the test case
        @type command: str
        @param command: The command to be executed by the test case
        @type stdout: str
        @param stdout: The expected output on stdout
        @type stderr: str
        @param stderr: The expected output on stderr
        @type returnCode: int
        @param returnCode: The expected return code
        @type timeout: float
        @param timeout: The time out before the DUT gets killed
        @type pipe: Boolean
        @param pipe: Flag, set if the output streams should be piped
        @type outputOnFail: Boolean
        @param outputOnFail: Flag, set if the output streams should be piped on failed test
        @type diff: Boolean
        @param diff: Flag, show a unified diff for string comparisons
        @type binary: Boolean
        @param binary: Flag, keep captured output as raw bytes
        """
        self.name = name                    # The name of the test
        self.descr = description            # The description of the test
        self.cmd = command                  # The command(s) to be executed
        self.expectStdout = stdout          # The expected output on stdout
        self.expectStderr = stderr          # The expected output on stderr
        self.expectRetCode = returnCode     # The expected return code
        # Fix: self.timeout was assigned twice with the same value; once is enough.
        self.timeout = timeout              # Timeout after which the DUT gets killed
        self.DUT = DUT                      # The Device Under Test - could be None
        self.output = ""                    # The captured stdout
        self.error = ""                     # The captured stderr
        self.retCode = 0                    # The captured return code
        # NOTE(review): the 'state' parameter is accepted but not stored;
        # every test starts as Waiting -- confirm this is intended.
        self.state = TestState.Waiting      # The current state of the test
        self.pipe = pipe                    # Pipe the output streams
        self.outputOnFail = outputOnFail    # Pipe the output streams on failure only
        self.diff = diff                    # Show diff between output and expectation
        self.linesep = os.linesep           # Force a specific line ending
        self.ignoreEmptyLines = False       # Drop empty lines before diffing
        self.pipeLimit = 2000               # Max number of bytes piped to the console
        self.binary = binary                # Work in binary mode

    def lineComparison(self, expLines, outLines, stream=""):
        """
        Compare two line lists with a unified diff; optionally print it.
        @return: True if both lists are identical
        """
        same = True
        if self.ignoreEmptyLines:
            while expLines.count("") > 0:
                expLines.remove("")
            while outLines.count("") > 0:
                outLines.remove("")
        for line in difflib.unified_diff(expLines, outLines, stream, "expectation"):
            col = TermColor.White
            uline = unicode(line, encoding="utf-8", errors="ignore") if type(line) is not unicode else line
            # Bug fix: unified_diff marks additions with a bare "+" prefix,
            # not " + ", so added lines were never recognised or coloured.
            if uline.startswith("+"):
                same = False
                col = TermColor.Green
            elif uline.startswith("-"):
                same = False
                col = TermColor.Red
            elif uline.startswith("@"):
                same = False
                col = TermColor.Cyan
            if self.diff:
                logger.log(TermColor.colorText(uline.rstrip(), col))
        return same

    def check(self, exp, out, stream="returnCode"):
        """
        Test an expectation against an output.
        If it's a lambda function, it will be executed with the output.
        If it's a string, it will be treated as a regular expression.
        @type exp: String, lambda
        @param exp: Expected result
        @type out: String
        @param out: The output
        @rtype: Boolean
        @return: Result of the comparison
        """
        if exp is None:
            # No expectation configured -> always passes.
            return True
        elif isLambda(exp) or isinstance(exp, Expectation):
            return exp(out)
        elif isinstance(exp, Stringifier):
            return self.lineComparison(*(exp(out)), stream=stream)
        elif isinstance(exp, int) and isinstance(out, int):
            return exp == out
        elif isinstance(exp, list):
            return self.checkList(exp, out)
        elif isinstance(exp, set):
            return self.checkSet(exp, out)
        elif isinstance(exp, str) or isinstance(exp, unicode):
            if exp.startswith("lambda"):
                # SECURITY: eval of the expectation string; acceptable only
                # because test definitions are trusted input.
                f = eval(exp)
                return f(out)
            if exp.startswith("regex:"):
                patCode = re.compile(exp[6:].replace("$n", self.linesep), re.IGNORECASE)
                return (patCode.match(str(out)) is not None)
            else:
                expLines = exp.replace("$n", self.linesep).splitlines()
                outLines = str(out).rstrip().splitlines()
                return self.lineComparison(expLines, outLines, stream)
        return False

    def checkList(self, lst, out):
        """
        Tests a list of expectations against an output.
        All elements in the list must match to be successful.
        @type lst: List
        @param lst: List with expectations
        @type out: String, Int
        @param out: The output
        @rtype: Boolean
        @return: Result of the comparison
        """
        for exp in lst:
            if not self.check(exp, out):
                return False
        return True

    def checkSet(self, st, out):
        """
        Tests a set of expectations against an output.
        One element in the set must match to be successful.
        @type st: Set
        @param st: Set with expectations
        @type out: String, Int
        @param out: The output
        @rtype: Boolean
        @return: Result of the comparison
        """
        for exp in st:
            if self.check(exp, out):
                return True
        return False

    def pipeOutputStream(self, stream, lines, color):
        """Write lines to a stream, coloured, capped at self.pipeLimit bytes."""
        # Fix: renamed the counter; it used to shadow the 'bytes' builtin.
        byteCount = 0
        for line in lines:
            byteCount += len(line)
            stream.write(TermColor.colorText(line + " ", fg=color) + "\n")
            if byteCount > self.pipeLimit:
                stream.write(TermColor.colorText("Stopped after {} Bytes".format(byteCount), fg=TermColor.Yellow) + "\n")
                break

    def runCmd(self, command):
        """Run one command string and derive the test state from its result."""
        if "$DUT" in command:
            if self.DUT is None:
                # The command needs a DUT but none was configured.
                self.state = TestState.Error
                return
            else:
                _cmd = Command(cmd=str(command).replace("$DUT", self.DUT))
        else:
            _cmd = Command(cmd=str(command))
        cmdRet = _cmd.execute(self.timeout)
        if cmdRet == TestState.Success:
            if self.binary:
                self.output = _cmd.out
                self.error = _cmd.err
            else:
                self.output = _cmd.out.decode(encoding="utf8", errors="ignore")
                self.error = _cmd.err.decode(encoding="utf8", errors="ignore")
            self.retCode = _cmd.ret
            if (self.check(self.expectRetCode, self.retCode) and
                    self.check(self.expectStdout, self.output, "stdout") and
                    self.check(self.expectStderr, self.error, "stderr")):
                self.state = TestState.Success
            else:
                # Classify the failure from the error stream / return code.
                if 'Assertion' in self.error or 'assertion' in self.error:
                    self.state = TestState.Assertion
                elif "stackdump" in self.error or "coredump" in self.error or "Segmentation Fault" in self.error or self.retCode < 0:
                    self.state = TestState.SegFault
                else:
                    self.state = TestState.Fail
            # Idiom fix: compare state values with ==, not identity.
            if (self.pipe) or (self.outputOnFail and self.state == TestState.Fail):
                sys.stdout.write(TermColor.colorText("{}".format(self.retCode), fg=TermColor.Yellow) + " ")
                self.pipeOutputStream(sys.stdout, self.output.splitlines(), TermColor.Green)
                self.pipeOutputStream(sys.stderr, self.error.splitlines(), TermColor.Red)
        else:
            self.state = cmdRet

    def run(self):
        """Runs the test and returns the resulting state."""
        if self.state == TestState.Disabled:
            return TestState.Disabled
        if self.state == TestState.InfoOnly:
            if self.descr is None:
                print("{}".format(self.name))
            else:
                print("{} - {}".format(self.name, self.descr))
            return TestState.InfoOnly
        if self.name == "Badword":
            # Bad Word Detection Mode:
            #  - description holds the matching file pattern
            #  - command is treated as the list of bad words
            #  - the DUT's directory is searched recursively
            # Bug fix: build a list instead of map() so 'words' survives being
            # iterated once per scanned line (a map iterator would be
            # exhausted after the first line under Python 3).
            words = [re.compile(s) for s in self.cmd]
            searchpath = os.path.abspath(os.path.dirname(self.DUT))
            searchpattern = re.compile(self.descr)
            hits = []
            for dirpath, dirnames, filenames in os.walk(searchpath):
                for fileName in filenames:
                    if searchpattern.match(fileName) is not None:
                        fname = os.path.join(dirpath, fileName)
                        # Fix: close each scanned file deterministically.
                        with open(fname, "rb") as fHnd:
                            for nr, line in enumerate(fHnd.readlines()):
                                for word in words:
                                    if word.search(line) is not None:
                                        hits.append((os.path.relpath(fname), nr, line.rstrip(), word.pattern))
            if len(hits) > 0:
                for hitFile, lineno, text, pattern in hits:
                    logger.log("{} {}[{}]: '{}' matches '{}'".format(TestState.toString(TestState.BadWord), hitFile, lineno, text, pattern))
                self.state = TestState.BadWord
            else:
                self.state = TestState.Clean
            return self.state
        if self.cmd is not None:
            if isinstance(self.cmd, list):
                for cmd_ in self.cmd:
                    self.runCmd(cmd_)
            else:
                self.runCmd(self.cmd)
        else:
            self.state = TestState.Error
        return self.state

    def __str__(self):
        return self.toString(prefix="")

    def toString(self, prefix="\t"):
        """
        Creates a textual representation of the test.
        The output can be saved to a file.
        @rtype: String
        """
        fields = []
        fields.append("{}\tname = '{:s}'".format(prefix, self.name))
        if self.descr is not None and self.descr != "":
            fields.append("{}\tdescription = '{:s}'".format(prefix, self.descr))
        # NOTE(review): '{:s}' assumes cmd is a string; a list of commands
        # would raise here -- confirm against how suites are serialised.
        fields.append("{}\tcommand = '{:s}'".format(prefix, self.cmd))
        if self.expectStdout is not None:
            fields.append("{}\tstdout = \"\"\"{}\"\"\"".format(prefix, self.expectStdout))
        if self.expectStderr is not None:
            fields.append("{}\tstderr = \"\"\"{}\"\"\"".format(prefix, self.expectStderr))
        if self.expectRetCode is not None:
            fields.append("{}\treturnCode = \"{}\"".format(prefix, self.expectRetCode))
        if self.timeout is not None:
            fields.append("{}\ttimeout = {:.1f}".format(prefix, self.timeout))
        return "Test (\n{}\n{})".format(",\n".join(fields), prefix)
|
hypothesis_test.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import copy
import time
from functools import partial, reduce
from future.utils import viewitems, viewkeys
from hypothesis import assume, given, settings, HealthCheck
import hypothesis.strategies as st
import unittest
import os
from caffe2.python import core, workspace, tt_core, dyndep
import caffe2.python.hypothesis_test_util as hu
from caffe2.proto import caffe2_pb2
dyndep.InitOpsLibrary('@/caffe2/caffe2/fb/optimizers:sgd_simd_ops')
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x); works on scalars and numpy arrays."""
    neg_exp = np.exp(-x)
    return 1.0 / (1.0 + neg_exp)
@st.composite
def _tensor_and_prefix(draw, dtype, elements, min_dim=1, max_dim=4, **kwargs):
    """Draw a full tensor plus a second tensor shaped like a suffix of its dims."""
    leading_dims = draw(
        st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim))
    trailing_dims = draw(
        st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim))
    # Keep the combined rank below max_dim.
    assume(len(leading_dims) + len(trailing_dims) < max_dim)
    full_tensor = draw(hu.arrays(leading_dims + trailing_dims, dtype, elements))
    suffix_tensor = draw(hu.arrays(trailing_dims, dtype, elements))
    return (full_tensor, suffix_tensor)
def _tensor_and_indices(min_dim=1, max_dim=4, dtype=np.float32,
                        elements=None, **kwargs):
    """Generate (original dim, data tensor, int64 indices into a larger tensor of same dim)."""
    shape_strategy = st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim)
    dim_strategy = st.integers(min_value=2, max_value=10)

    def build(pair):
        dims, original = pair[0], pair[1]
        index_elements = st.integers(min_value=0, max_value=original - 1)
        return st.tuples(
            st.just(original),                  # original dimension
            hu.arrays(dims, dtype, elements),   # data tensor
            hu.arrays(dims[0], dtype=np.int64, elements=index_elements),
        )

    return st.tuples(shape_strategy, dim_strategy).flatmap(build)
# Maps numpy scalar types to the corresponding caffe2 DataType enum values.
# NOTE(review): `np.bool` is a deprecated alias removed in numpy >= 1.24;
# on modern numpy this dict literal raises AttributeError at import time.
# Presumably `np.bool_` is intended -- confirm before changing the key type.
_NUMPY_TYPE_TO_ENUM = {
    np.float32: core.DataType.FLOAT,
    np.int32: core.DataType.INT32,
    np.bool: core.DataType.BOOL,
    np.uint8: core.DataType.UINT8,
    np.int8: core.DataType.INT8,
    np.uint16: core.DataType.UINT16,
    np.int16: core.DataType.INT16,
    np.int64: core.DataType.INT64,
    np.float64: core.DataType.DOUBLE,
}
def _dtypes(dtypes=None):
    """Strategy sampling from the given dtypes (default: int32, int64, float32)."""
    # Truthiness check on purpose: an empty list also falls back to the default.
    if dtypes:
        candidates = dtypes
    else:
        candidates = [np.int32, np.int64, np.float32]
    return st.sampled_from(candidates)
def _test_binary(name, ref, filter_=None, gcs=hu.gcs,
                 test_gradient=False, allow_inplace=False, dtypes=_dtypes):
    """Build a hypothesis test method for a binary elementwise operator.

    name: caffe2 operator name; ref: python reference returning a tuple of
    outputs. The returned function is meant to be invoked as f(self) from a
    HypothesisTestCase method.
    """
    @given(
        inputs=dtypes().flatmap(
            lambda dtype: hu.tensors(
                n=2, dtype=dtype,
                elements=hu.elements_of_type(dtype, filter_=filter_))),
        # When in-place is allowed, the output may alias either input.
        out=st.sampled_from(('Y', 'X1', 'X2') if allow_inplace else ('Y',)),
        **gcs)
    @settings(max_examples=3, timeout=100)
    def test_binary(self, inputs, out, gc, dc):
        op = core.CreateOperator(name, ["X1", "X2"], [out])
        X1, X2 = inputs
        self.assertDeviceChecks(dc, op, [X1, X2], [0])
        # We only do gradient check with float32 types.
        if test_gradient and X1.dtype == np.float32:
            self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
        self.assertReferenceChecks(gc, op, [X1, X2], ref)
    return test_binary
def _test_binary_broadcast(name, ref, filter_=None,
                           gcs=hu.gcs, allow_inplace=False, dtypes=_dtypes):
    """Build a hypothesis test for a binary operator with broadcast=1.

    The second input is drawn as a suffix-shaped tensor of the first (see
    _tensor_and_prefix). The returned function is invoked as f(self).
    """
    @given(
        inputs=dtypes().flatmap(lambda dtype: _tensor_and_prefix(
            dtype=dtype,
            elements=hu.elements_of_type(dtype, filter_=filter_))),
        in_place=(st.booleans() if allow_inplace else st.just(False)),
        **gcs)
    @settings(max_examples=3, timeout=100)
    def test_binary_broadcast(self, inputs, in_place, gc, dc):
        op = core.CreateOperator(
            name, ["X1", "X2"], ["X1" if in_place else "Y"], broadcast=1)
        X1, X2 = inputs
        self.assertDeviceChecks(dc, op, [X1, X2], [0])

        # Cast the reference result back to the input dtype, since the
        # broadcasted numpy result may have been promoted.
        def cast_ref(x, y):
            return (np.array(ref(x, y)[0], dtype=x.dtype), )

        # gradient not implemented yet
        # self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
        self.assertReferenceChecks(gc, op, [X1, X2], cast_ref)
    return test_binary_broadcast
class TestOperators(hu.HypothesisTestCase):
def test_comparison_ops(self):
    """Check LT/LE/GT/GE against their numpy references, plain and broadcast."""
    comparisons = {
        "LT": lambda a, b: [a < b],
        "LE": lambda a, b: [a <= b],
        "GT": lambda a, b: [a > b],
        "GE": lambda a, b: [a >= b],
    }
    for op_name, reference in viewitems(comparisons):
        _test_binary(op_name, reference, gcs=hu.gcs_cpu_only)(self)
        _test_binary_broadcast(op_name, reference, gcs=hu.gcs_cpu_only)(self)
@given(inputs=hu.tensors(n=2), in_place=st.booleans(), **hu.gcs)
def test_sum(self, inputs, in_place, gc, dc):
    """Sum of two tensors: device consistency and gradient of input 0."""
    op = core.CreateOperator("Sum", ["X1", "X2"],
                             ["Y" if not in_place else "X1"])
    X1, X2 = inputs
    self.assertDeviceChecks(dc, op, [X1, X2], [0])
    self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
@given(inputs=hu.tensors(n=2, min_dim=2, max_dim=2), **hu.gcs_cpu_only)
def test_row_mul(self, inputs, gc, dc):
    """RowMul: multiply each row of X1 by the matching scalar from X2."""
    op = core.CreateOperator("RowMul", ["X1", "X2"], ["Y"])
    X1, Xtmp = inputs
    # X2 is the first column of the second tensor: one scalar per row of X1.
    X2 = Xtmp[:, 0]

    def ref(x, y):
        # Row-wise scaling reference.
        ret = np.zeros(shape=x.shape, dtype=x.dtype)
        for i in range(y.size):
            ret[i, ] = x[i, ] * y[i]
        return [ret]

    self.assertDeviceChecks(dc, op, [X1, X2], [0])
    for i in range(2):
        self.assertGradientChecks(gc, op, [X1, X2], i, [0])
    self.assertReferenceChecks(gc, op, [X1, X2], ref)
@given(inputs=hu.tensors(n=2), **hu.gcs_cpu_only)
def test_max(self, inputs, gc, dc):
    """Elementwise Max against np.maximum, with gradients for both inputs."""
    op = core.CreateOperator("Max", ["X1", "X2"], ["Y"])
    X1, X2 = inputs
    # Make X1 and X2 far from each other, since X1=X2 is not differentiable
    # and the step size of gradient checker is 0.05
    X1[np.logical_and(X1 >= X2 - 0.05, X1 <= X2)] -= 0.05
    X1[np.logical_and(X1 <= X2 + 0.05, X1 >= X2)] += 0.05
    self.assertDeviceChecks(dc, op, [X1, X2], [0])
    for i in range(2):
        self.assertGradientChecks(gc, op, [X1, X2], i, [0])

    def elementwise_max(X, Y):
        return [np.maximum(X, Y)]
    self.assertReferenceChecks(gc, op, [X1, X2], elementwise_max)
def test_add(self):
    """Elementwise Add against x + y, guarding integers against overflow."""
    def no_overflow(value):
        # Integers must stay well inside the 32-bit range; floats are fine.
        return isinstance(value, float) or abs(value) < (1 << 30) - 1

    def reference(x, y):
        return (x + y, )

    _test_binary("Add", reference, filter_=no_overflow, test_gradient=True)(self)
    _test_binary_broadcast("Add", reference, filter_=no_overflow)(self)
def test_sub(self):
    """Elementwise Sub against x - y."""
    def reference(a, b):
        return (a - b, )

    # TODO(jiayq): enable gradient test when implemented.
    _test_binary("Sub", reference, test_gradient=True)(self)
    _test_binary_broadcast("Sub", reference)(self)
def test_mul(self):
    """Elementwise Mul against x * y, guarding integers against overflow."""
    def no_overflow(value):
        # Products must stay inside 32 bits, so factors are capped at ~2^15.
        return isinstance(value, float) or abs(value) < (1 << 15) - 1

    def reference(x, y):
        return (x * y, )

    _test_binary("Mul", reference, filter_=no_overflow, test_gradient=True)(self)
    _test_binary_broadcast("Mul", reference, filter_=no_overflow)(self)
def test_div(self):
    """Elementwise Div against x / y on float dtypes, avoiding tiny divisors."""
    def ref(x, y):
        return (x / y, )

    def non_zero(x):
        # Keep divisors away from zero so the quotient stays numerically stable.
        return abs(x) > 1e-2

    def div_dtypes():
        return st.sampled_from([np.float32, np.float64])

    # CPU-only run with gradient checking...
    _test_binary(
        "Div", ref, filter_=non_zero, test_gradient=True,
        dtypes=div_dtypes, gcs=hu.gcs_cpu_only
    )(self)
    # ...and a device-checked run without gradients.
    _test_binary(
        "Div", ref, filter_=non_zero, test_gradient=False,
        dtypes=div_dtypes
    )(self)
    _test_binary_broadcast(
        "Div", ref, filter_=non_zero, dtypes=div_dtypes)(self)
@given(X=hu.tensor(), in_place=st.booleans(), **hu.gcs)
def test_negative(self, X, in_place, gc, dc):
    """Negative operator: device consistency and gradient, optionally in place."""
    op = core.CreateOperator("Negative", ["X"],
                             ["Y" if not in_place else "X"])
    self.assertDeviceChecks(dc, op, [X], [0])
    self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), **hu.gcs)
def test_tanh(self, X, gc, dc):
    """Tanh operator: device consistency and gradient."""
    # NOTE(review): inputs/outputs are passed as bare strings here while the
    # sibling tests use lists -- presumably CreateOperator accepts both forms.
    op = core.CreateOperator("Tanh", "X", "Y")
    self.assertDeviceChecks(dc, op, [X], [0])
    self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), **hu.gcs)
def test_averaged_loss(self, X, gc, dc):
    """AveragedLoss operator: device consistency and gradient."""
    op = core.CreateOperator("AveragedLoss", ["X"], ["loss"])
    self.assertDeviceChecks(dc, op, [X], [0])
    self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), inplace=st.booleans(), **hu.gcs)
def test_softsign(self, X, inplace, gc, dc):
    """Softsign x / (1 + |x|): reference check; gradient must reject in-place."""
    op = core.CreateOperator("Softsign", ["X"], ["X" if inplace else "Y"])

    def softsign(X):
        return (X / (1 + np.abs(X)),)

    self.assertDeviceChecks(dc, op, [X], [0])
    self.assertReferenceChecks(gc, op, [X], softsign)
    if inplace:
        # The in-place gradient is expected to be unsupported and raise.
        with self.assertRaises(Exception):
            self.assertGradientChecks(gc, op, [X], 0, [0])
    else:
        self.assertGradientChecks(gc, op, [X], 0, [0])
@given(
    device_options=st.lists(
        min_size=2,
        max_size=4,
        elements=st.sampled_from(hu.expanded_device_options)),
    set_seed=st.booleans())
def test_random_seed_behaviour(self, device_options, set_seed):
    """XavierFill must be reproducible iff the same random seed is set."""
    # Assume we are always operating on CUDA or CPU, since RNG is
    # inconsistent between CPU and GPU.
    device_options = copy.deepcopy(device_options)
    assume(len({do.device_type for do in device_options}) == 1)
    if set_seed:
        for do in device_options:
            do.random_seed = 1000

    def run(do):
        # Reset each time because 'Y' may already exist in the workspace
        # on a different device
        workspace.ResetWorkspace()
        ws = workspace.C.Workspace()
        op = core.CreateOperator(
            "XavierFill", [], ["Y"],
            device_option=do,
            shape=[2])
        ws.run(op)
        return ws.blobs["Y"].fetch()

    ys = [run(do) for do in device_options]
    for y in ys[1:]:
        if set_seed:
            np.testing.assert_array_equal(ys[0], y)
        else:
            # Without a fixed seed the fills should (almost surely) differ.
            with self.assertRaises(AssertionError):
                np.testing.assert_array_equal(ys[0], y)
@given(axis=st.integers(min_value=1, max_value=4),
       num_output=st.integers(min_value=4, max_value=8),
       engine=st.sampled_from(["", "PACKED"]),
       **hu.gcs)
def test_fully_connected_axis(self, axis, num_output, engine, gc, dc):
    """FC with a non-default axis: output shape plus gradients of X, W, b."""
    np.random.seed(1)
    X = np.random.randn(1, 2, 3, 2, 1).astype(np.float32)

    def prod(xs):
        # Product of an iterable of ints.
        p = 1
        for x in xs:
            p *= x
        return p

    # K collapses all of X's dimensions from `axis` onwards.
    K = prod(list(X.shape)[axis:])
    N = num_output
    W = np.random.randn(N, K).astype(np.float32)
    b = np.random.randn(N).astype(np.float32)
    op = core.CreateOperator(
        "FC",
        ["X", "W", "b"],
        ["Y"],
        engine=engine,
        axis=axis)
    for name, param in [("X", X), ("W", W), ("b", b)]:
        self.ws.create_blob(name).feed(param)
    self.ws.run(op)
    Y = self.ws.blobs["Y"].fetch()
    # Output keeps the leading dims of X and appends N.
    self.assertEqual(list(Y.shape), list(X.shape)[:axis] + [N])
    inputs = [X, W, b]
    self.assertDeviceChecks(dc, op, inputs, [0])
    for param, _ in enumerate(inputs):
        self.assertGradientChecks(gc, op, inputs, param, [0])
@unittest.skipIf(not workspace.has_gpu_support,
                 "Skipping test due to no gpu present.")
@given(hidden_size=st.integers(min_value=1, max_value=3),
       num_layers=st.integers(min_value=1, max_value=3),
       bidirectional=st.booleans(),
       rnn_mode=st.sampled_from(["lstm"]),  # TODO: "gru"
       input_mode=st.sampled_from(["linear"]),
       dropout=st.floats(min_value=1.0, max_value=1.0),
       T=st.integers(min_value=2, max_value=6),
       N=st.integers(min_value=1, max_value=4),
       D=st.integers(min_value=1, max_value=4))
def test_recurrent(self, hidden_size, num_layers, bidirectional, rnn_mode,
                   input_mode, dropout, T, N, D):
    """CuDNN/MIOpen Recurrent op: gradient checks for every LSTM input."""
    # there's a bug in miopen for N=1 which would be resolved in the next release.
    if workspace.has_hip_support:
        assume(N > 1)
    # Random seed, this one happens to pass
    seed = 1234
    np.random.seed(seed)
    # set device option
    if workspace.has_hip_support:
        device_option = hu.hip_do
        engine = 'MIOPEN'
    else:
        device_option = hu.gpu_do
        engine = 'CUDNN'
    # Size the flat WEIGHT blob: per-layer input and recurrent weights plus
    # the two bias vectors, scaled by 4 (the factor below) and by direction.
    input_weight_size = hidden_size * D
    upper_layer_input_weight_size = hidden_size * hidden_size
    if bidirectional:
        upper_layer_input_weight_size *= 2
    recurrent_weight_size = hidden_size * hidden_size
    input_bias_size = hidden_size
    recurrent_bias_size = hidden_size
    num_directions = 2 if bidirectional else 1
    first_layer_sz = input_weight_size + recurrent_weight_size + \
        input_bias_size + recurrent_bias_size
    upper_layer_sz = upper_layer_input_weight_size + \
        recurrent_weight_size + input_bias_size + \
        recurrent_bias_size
    total_sz = 4 * (first_layer_sz + (num_layers - 1) * upper_layer_sz)
    total_sz *= num_directions
    W = np.random.rand(total_sz).astype(np.float32)
    self.ws.create_blob("WEIGHT").feed(W, device_option=device_option)
    op = core.CreateOperator(
        "Recurrent",
        ["INPUT", "HIDDEN_INPUT", "CELL_INPUT", "WEIGHT"],
        ["OUTPUT", "HIDDEN_OUTPUT", "CELL_OUTPUT",
         "RNN_SCRATCH", "DROPOUT_STATES"],
        hidden_size=hidden_size,
        bidirectional=bidirectional,
        rnn_mode=rnn_mode,
        dropout=dropout,
        input_mode=input_mode,
        num_layers=num_layers,
        seed=seed,
        engine=engine)
    X = np.random.randn(T, N, D).astype(np.float32)
    self.ws.create_blob("INPUT").feed(X, device_option=device_option)
    W = self.ws.blobs["WEIGHT"].fetch()
    H = np.random.randn(
        num_layers, N, hidden_size * num_directions).astype(
        np.float32)
    C = np.random.randn(
        num_layers, N, hidden_size * num_directions).astype(
        np.float32) if rnn_mode == "lstm" else \
        np.empty((1,)).astype(np.float32)  # unused in GRU
    inputs = [X, H, C, W]
    input_idxs = [i for (i, _) in enumerate(inputs)] \
        if rnn_mode == "lstm" else [0, 1, 3]  # ignore C
    for input_idx in input_idxs:
        self.assertGradientChecks(
            device_option, op, inputs, input_idx, [0],
            stepsize=0.01, threshold=0.01)
@given(ndim=st.integers(1, 4),
       axis=st.integers(0, 3),
       add_axis=st.integers(0, 1),
       num_inputs=st.integers(2, 4), **hu.gcs)
def test_depth_concat(self, ndim, axis, add_axis, num_inputs, gc, dc):
    """Concat along an axis (optionally a freshly added one) vs numpy."""
    assume(axis < ndim)
    input_names = ['X0', 'X1', 'X2', 'X3'][:num_inputs]
    shape = [2, 3, 5, 7][:ndim]
    individual_dims = [1, 2, 3, 4, 5][:num_inputs]
    inputs = []
    for i in range(num_inputs):
        if add_axis == 0:
            # Sets a unique dim and create the input.
            shape[axis] = individual_dims[i]
        inputs.append(np.random.randn(*shape).astype(np.float32))
    op = core.CreateOperator("Concat", input_names, ["Y", "Y_dims"],
                             axis=axis, add_axis=add_axis)
    self.assertDeviceChecks(dc, op, inputs, [0])
    for i in range(num_inputs):
        self.assertGradientChecks(gc, op, inputs, i, [0])

    # Reference
    def depth_concat(*inputs):
        inputs = list(inputs)
        if add_axis:
            # Insert the new axis before concatenating.
            for i in range(len(inputs)):
                inputs[i] = np.expand_dims(inputs[i], axis)
        input_dims = np.array([np.shape(x)[axis] for x in inputs])
        return [np.concatenate(inputs, axis=axis), input_dims]
    self.assertReferenceChecks(gc, op, inputs, depth_concat)
    @given(num_inputs=st.integers(2, 4),
           order=st.sampled_from([("NCHW", 1), ("NHWC", 3)]),
           **hu.gcs)
    def test_depth_concat_with_order(self, num_inputs, order, gc, dc):
        """Concat selected by `order` string instead of an explicit axis.

        `order` is a (name, axis) pair: NCHW concatenates along the channel
        axis 1, NHWC along axis 3.
        """
        input_names = ['X0', 'X1', 'X2', 'X3'][:num_inputs]
        shape = [2, 3, 5, 7]
        individual_dims = [1, 2, 3, 4][:num_inputs]
        inputs = []
        for i in range(num_inputs):
            # Sets a unique dim and create the input.
            shape[order[1]] = individual_dims[i]
            inputs.append(np.random.rand(*shape).astype(np.float32))
        op = core.CreateOperator("Concat", input_names, ["Y", "Y_dims"],
                                 order=order[0])
        self.assertDeviceChecks(dc, op, inputs, [0])
        for i in range(num_inputs):
            self.assertGradientChecks(gc, op, inputs, i, [0])
        # Reference
        def depth_concat_with_order(*inputs):
            inputs = list(inputs)
            axis = order[1]
            input_dims = np.array([np.shape(x)[axis] for x in inputs])
            return [np.concatenate(inputs, axis=axis), input_dims]
        self.assertReferenceChecks(gc, op, inputs, depth_concat_with_order)
    @given(X=hu.arrays(dims=[5, 2],
                       elements=st.floats(min_value=1.0, max_value=10.0)),
           **hu.gcs_cpu_only)
    def test_last_n_windows(self, X, gc, dc):
        """LastNWindowCollector keeps the last `num_to_collect` rows fed in.

        Runs the collector net twice over a 5-row input (10 rows total) and
        rebuilds the expected 7-slot ring-buffer contents in NumPy.
        """
        workspace.FeedBlob('input', X)
        workspace.FeedBlob('next', np.array(0, dtype=np.int32))
        workspace.CreateBlob('output')
        collect_net = core.Net('collect_net')
        collect_net.LastNWindowCollector(
            ['output', 'next', 'input'],
            ['output', 'next'],
            num_to_collect=7,
        )
        plan = core.Plan('collect_data')
        plan.AddStep(core.execution_step('collect_data',
                                         [collect_net], num_iter=2))
        workspace.RunPlan(plan)
        output = workspace.FetchBlob('output')
        inputs = workspace.FetchBlob('input')
        # Emulate the circular buffer: streamed row i lands in slot i % 7,
        # so later rows overwrite the oldest slots.
        new_output = np.zeros([7, inputs.shape[1]])
        for i in range(inputs.shape[0] * 2):
            new_output[i % 7] = inputs[i % inputs.shape[0]]
        import numpy.testing as npt
        npt.assert_almost_equal(output, new_output, decimal=5)
@given(dtype=st.sampled_from([np.float32, np.float64, np.int32, np.bool]))
def test_print(self, dtype):
data = np.random.permutation(6).astype(dtype)
self.ws.create_blob("data").feed(data)
op = core.CreateOperator("Print", "data", [])
self.ws.run(op)
    @given(inputs=hu.tensors(n=2),
           in_place=st.booleans(),
           momentum=st.floats(min_value=0.1, max_value=0.9),
           nesterov=st.booleans(),
           lr=st.floats(min_value=0.1, max_value=0.9),
           **hu.gcs)
    def test_momentum_sgd(
            self, inputs, in_place, momentum, nesterov, lr, gc, dc):
        """MomentumSGD: device check plus a NumPy reference for both the
        classic and the Nesterov momentum update, optionally in-place."""
        grad, m = inputs
        lr = np.asarray([lr], dtype=np.float32)
        op = core.CreateOperator(
            "MomentumSGD",
            ["grad", "m", "lr"],
            ["grad" if in_place else "grad_o",
             "m" if in_place else "m_o"],
            momentum=momentum,
            nesterov=int(nesterov),
            device_option=gc)
        self.assertDeviceChecks(
            dc, op, [grad, m, lr], [0])
        # Reference
        def momentum_sgd(grad, m, lr):
            lr = lr[0]
            if not nesterov:
                # classic momentum: returned grad doubles as new momentum
                adjusted_gradient = lr * grad + momentum * m
                return (adjusted_gradient, adjusted_gradient)
            else:
                m_new = momentum * m + lr * grad
                return ((1 + momentum) * m_new - momentum * m, m_new)
        self.assertReferenceChecks(gc, op, [grad, m, lr], momentum_sgd)
    @given(inputs=hu.tensors(n=3),
           in_place=st.booleans(),
           decay=st.floats(min_value=0.1, max_value=0.9),
           momentum=st.floats(min_value=0.1, max_value=0.9),
           lr=st.floats(min_value=0.1, max_value=0.9),
           epsilon=st.floats(min_value=1e-5, max_value=1e-2),
           **hu.gcs)
    def test_rmsprop_sgd(self, inputs, in_place, decay, momentum, lr, epsilon,
                         gc, dc):
        """RmsProp: device check plus a NumPy reference of the update."""
        grad, ms, mom = inputs
        # mean-square accumulator must be positive before the update
        ms = np.abs(ms) + 0.01
        lr = np.asarray([lr], dtype=np.float32)
        op = core.CreateOperator(
            "RmsProp",
            ["grad", "ms", "mom", "lr"],
            ["grad" if in_place else "grad_o",
             "ms" if in_place else "ms_o",
             "mom" if in_place else "mom_o"],
            momentum=momentum, decay=decay, epsilon=epsilon, device_option=gc)
        self.assertDeviceChecks(dc, op, [grad, ms, mom, lr], [0])
        def rmsprop(grad, ms, mom, lr):
            lr = lr[0]
            ms_o = ms + (1. - decay) * (np.square(grad) - ms)
            mom_o = momentum * mom + lr * grad / np.sqrt(epsilon + ms_o)
            grad_o = mom_o
            return (grad_o, ms_o, mom_o)
        self.assertReferenceChecks(gc, op, [grad, ms, mom, lr], rmsprop)
# Reference
@staticmethod
def _dense_ftrl(alpha, beta, lambda1, lambda2, w, nz, g):
if isinstance(alpha, np.ndarray):
alpha = np.asscalar(alpha)
n = np.take(nz, 0, axis=-1)
z = np.take(nz, 1, axis=-1)
# python port of Sigrid's implementation
g2 = g * g
sigma = (np.sqrt(n + g2) - np.sqrt(n)) / alpha
z += g - sigma * w
n += g2
w = (np.sign(z) * lambda1 - z) / (
(beta + np.sqrt(n)) / alpha + lambda2)
w[np.abs(z) <= lambda1] = 0
return (w, np.stack([n, z], axis=-1))
    @given(inputs=hu.tensors(n=4),
           in_place=st.booleans(),
           alpha=st.floats(min_value=0.01, max_value=0.1),
           beta=st.floats(min_value=0.1, max_value=0.9),
           lambda1=st.floats(min_value=0.001, max_value=0.1),
           lambda2=st.floats(min_value=0.001, max_value=0.1),
           engine=st.sampled_from([None, "SIMD"]),
           **hu.gcs_cpu_only)
    def test_ftrl_sgd(self, inputs, in_place, alpha, beta, lambda1, lambda2,
                      engine, gc, dc):
        """Dense Ftrl op vs. the `_dense_ftrl` NumPy reference."""
        var, n, z, grad = inputs
        # the n accumulator must be non-negative (it is a sum of squares)
        n = np.abs(n)
        nz = np.stack([n, z], axis=-1)
        op = core.CreateOperator(
            "Ftrl",
            ["var", "nz", "grad"],
            ["var" if in_place else "var_o",
             "nz" if in_place else "nz_o"],
            alpha=alpha, beta=beta, lambda1=lambda1, lambda2=lambda2,
            engine=engine,
            device_option=gc)
        self.assertDeviceChecks(
            dc, op, [var, nz, grad], [0])
        self.assertReferenceChecks(
            gc, op, [var, nz, grad],
            partial(self._dense_ftrl, alpha, beta, lambda1, lambda2))
# Reference
@staticmethod
def _dense_gftrl(alpha, beta, lambda1, lambda2, w, nz, g):
if isinstance(alpha, np.ndarray):
alpha = np.asscalar(alpha)
old_shape = g.shape
n = np.take(nz, 0, axis=-1)
z = np.take(nz, 1, axis=-1)
output_dim = g.shape[0]
w = w.reshape(output_dim, -1)
g = g.reshape(output_dim, -1)
n = n.reshape(output_dim, -1)
z = z.reshape(output_dim, -1)
input_dim = g.shape[1]
g2 = g * g
sigma = (np.sqrt(n + g2) - np.sqrt(n)) / alpha
z += g - sigma * w
n += g2
z_norms = np.linalg.norm(z, 2, axis=0)
z_norms = z_norms + 1e-6
w = z * ((lambda1 * np.sqrt(output_dim)) / z_norms - 1) / \
((beta + np.sqrt(n)) / alpha + lambda2)
for i in range(input_dim):
if z_norms[i] <= lambda1 * np.sqrt(output_dim):
w[:, i] = 0
w = w.reshape(old_shape)
n = n.reshape(old_shape)
z = z.reshape(old_shape)
return (w, np.stack([n, z], axis=-1))
    @given(inputs=hu.tensors(n=4),
           in_place=st.booleans(),
           alpha=st.floats(min_value=0.01, max_value=0.1),
           beta=st.floats(min_value=0.1, max_value=0.9),
           lambda1=st.floats(min_value=0.001, max_value=0.1),
           lambda2=st.floats(min_value=0.001, max_value=0.1),
           engine=st.sampled_from([None, "SIMD"]),
           **hu.gcs_cpu_only)
    def test_gftrl_sgd(self, inputs, in_place, alpha, beta, lambda1, lambda2,
                       engine, gc, dc):
        """Group-FTRL op vs. the `_dense_gftrl` NumPy reference."""
        var, n, z, grad = inputs
        # the n accumulator must be non-negative (it is a sum of squares)
        n = np.abs(n)
        nz = np.stack([n, z], axis=-1)
        op = core.CreateOperator(
            "GFtrl",
            ["var", "nz", "grad"],
            ["var" if in_place else "var_o",
             "nz" if in_place else "nz_o"],
            alpha=alpha, beta=beta, lambda1=lambda1, lambda2=lambda2,
            engine=engine,
            device_option=gc)
        self.assertDeviceChecks(
            dc, op, [var, nz, grad], [0])
        self.assertReferenceChecks(
            gc, op, [var, nz, grad],
            partial(self._dense_gftrl, alpha, beta, lambda1, lambda2))
    @given(inputs=hu.tensors(n=4),
           alpha=st.floats(min_value=0.01, max_value=0.1),
           beta=st.floats(min_value=0.1, max_value=0.9),
           lambda1=st.floats(min_value=0.001, max_value=0.1),
           lambda2=st.floats(min_value=0.001, max_value=0.1),
           engine=st.sampled_from([None, "SIMD"]),
           **hu.gcs_cpu_only)
    def test_sparse_ftrl_sgd(self, inputs, alpha, beta, lambda1, lambda2,
                             engine, gc, dc):
        """SparseFtrl updates only the rows named by `indices`; the
        reference applies `_dense_ftrl` to just those rows."""
        var, n, z, grad = inputs
        # generate fake subset manually because hypothesis is too complicated :)
        indices = np.arange(var.shape[0])
        indices = indices[indices % 2 == 0]  # every other row
        grad = grad[indices]
        n = np.abs(n)
        nz = np.stack([n, z], axis=-1)
        op = core.CreateOperator(
            "SparseFtrl",
            ["var", "nz", "indices", "grad"],
            ["var", "nz"],
            alpha=alpha, beta=beta, lambda1=lambda1, lambda2=lambda2,
            engine=engine,
            device_option=gc)
        self.assertDeviceChecks(
            dc, op, [var, nz, indices, grad], [0])
        # Reference
        def ftrl(w, nz, i, g):
            sw, snz = self._dense_ftrl(alpha, beta, lambda1, lambda2,
                                       w[i], nz[i], g)
            w[i] = sw
            nz[i] = snz
            return (w, nz)
        self.assertReferenceChecks(gc, op, [var, nz, indices, grad], ftrl)
# Reference
@staticmethod
def _dense_ftrl_send_alpha_by_input(beta, lambda1, lambda2, w, nz, g, alpha):
return TestOperators._dense_ftrl(alpha, beta, lambda1, lambda2, w, nz,
g)
    @given(inputs=hu.tensors(n=4),
           in_place=st.booleans(),
           alpha=st.floats(min_value=0.01, max_value=0.1),
           beta=st.floats(min_value=0.1, max_value=0.9),
           lambda1=st.floats(min_value=0.001, max_value=0.1),
           lambda2=st.floats(min_value=0.001, max_value=0.1),
           engine=st.sampled_from([None, "SIMD"]),
           **hu.gcs_cpu_only)
    def test_ftrl_sgd_send_alpha_by_input(self, inputs, in_place, alpha, beta,
                                          lambda1, lambda2, engine, gc, dc):
        """Ftrl variant where alpha arrives as a 4th input blob instead of
        an operator argument."""
        var, n, z, grad = inputs
        # the n accumulator must be non-negative (it is a sum of squares)
        n = np.abs(n)
        nz = np.stack([n, z], axis=-1)
        alpha = np.array(alpha).astype(np.float32)
        op = core.CreateOperator(
            "Ftrl",
            ["var", "nz", "grad", "alpha"],
            ["var" if in_place else "var_o",
             "nz" if in_place else "nz_o"],
            beta=beta, lambda1=lambda1, lambda2=lambda2,
            engine=engine,
            device_option=gc)
        self.assertDeviceChecks(
            dc, op, [var, nz, grad, alpha], [0])
        self.assertReferenceChecks(
            gc, op, [var, nz, grad, alpha],
            partial(self._dense_ftrl_send_alpha_by_input, beta, lambda1, lambda2))
    @given(inputs=hu.tensors(n=4),
           alpha=st.floats(min_value=0.01, max_value=0.1),
           beta=st.floats(min_value=0.1, max_value=0.9),
           lambda1=st.floats(min_value=0.001, max_value=0.1),
           lambda2=st.floats(min_value=0.001, max_value=0.1),
           engine=st.sampled_from([None, "SIMD"]),
           **hu.gcs_cpu_only)
    def test_sparse_ftrl_sgd_send_alpha_by_input(self, inputs, alpha, beta,
                                                 lambda1, lambda2, engine, gc,
                                                 dc):
        """SparseFtrl variant where alpha arrives as a 5th input blob."""
        var, n, z, grad = inputs
        # generate fake subset manually because hypothesis is too complicated :)
        indices = np.arange(var.shape[0])
        indices = indices[indices % 2 == 0]  # every other row
        grad = grad[indices]
        n = np.abs(n)
        nz = np.stack([n, z], axis=-1)
        alpha = np.array(alpha).astype(np.float32)
        op = core.CreateOperator(
            "SparseFtrl",
            ["var", "nz", "indices", "grad", "alpha"],
            ["var", "nz"],
            beta=beta, lambda1=lambda1, lambda2=lambda2,
            engine=engine,
            device_option=gc)
        self.assertDeviceChecks(
            dc, op, [var, nz, indices, grad, alpha], [0])
        # Reference
        def ftrl(w, nz, i, g, alpha):
            sw, snz = self._dense_ftrl_send_alpha_by_input(beta, lambda1,
                                                           lambda2, w[i], nz[i],
                                                           g, alpha)
            w[i] = sw
            nz[i] = snz
            return (w, nz)
        self.assertReferenceChecks(gc, op, [var, nz, indices, grad, alpha],
                                   ftrl)
    @given(input=hu.tensor(max_value=20,
                           max_dim=1,
                           dtype=np.int32,
                           elements=st.integers(min_value=0, max_value=10)),
           with_remapping=st.booleans(),
           **hu.gcs)
    def test_unique(self, input, with_remapping, gc, dc):
        """Unique: validates set semantics rather than exact output order.

        The optional second output maps each input element back to its
        position in the unique output.
        """
        op = core.CreateOperator(
            "Unique",
            ["input"],
            ["unique"] + (["remapping"] if with_remapping else []),
            device_option=gc)
        self.assertDeviceChecks(dc, op, [input], [0])
        # Validator
        def unique_valid(input, unique, remapping=None):
            self.assertEqual(unique.size, len(set(input)))
            self.assertEqual(sorted(unique), sorted(set(input)))
            if with_remapping:
                self.assertEqual(remapping.shape, input.shape)
                # remapping must reconstruct the original input exactly
                remapped = [unique[remapping[i]] for i in range(len(input))]
                np.testing.assert_array_equal(remapped, input)
        self.assertValidationChecks(gc, op, [input], unique_valid)
    @given(prediction=hu.arrays(dims=[10, 3],
                                elements=st.floats(allow_nan=False,
                                                   allow_infinity=False,
                                                   min_value=0,
                                                   max_value=1)),
           labels=hu.arrays(dims=[10],
                            dtype=np.int32,
                            elements=st.integers(min_value=0,
                                                 max_value=3 - 1)),
           top_k=st.integers(min_value=1, max_value=3),
           **hu.gcs)
    def test_accuracy(self, prediction, labels, top_k, gc, dc):
        """Accuracy op vs. a NumPy top-k reference over 10 samples."""
        if(top_k > 1):
            # NOTE(review): top_k > 1 is forced onto CPU here — presumably
            # the GPU kernel only supports top_k == 1; confirm upstream.
            gc = hu.cpu_do
        op = core.CreateOperator(
            "Accuracy",
            ["prediction", "labels"],
            ["accuracy"],
            top_k=top_k,
            device_option=gc
        )
        def op_ref(prediction, labels, top_k):
            N = prediction.shape[0]
            correct = 0
            for i in range(0, len(prediction)):
                # sort class scores descending, keeping original indices
                pred_sorted = sorted(
                    ([item, j] for j, item in enumerate(prediction[i])),
                    key=lambda x: x[0],
                    reverse=True
                )
                max_ids = [x[1] for x in pred_sorted[0:top_k]]
                for m in max_ids:
                    if m == labels[i]:
                        correct += 1
            accuracy = correct / N
            return (accuracy,)
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[prediction, labels, top_k],
            reference=op_ref)
    @given(target_probabilities=hu.arrays(
        dims=[10], elements=st.floats(allow_nan=False,
                                      allow_infinity=False,
                                      min_value=0.01,
                                      max_value=1)),
           **hu.gcs)
    def test_perplexity(self, target_probabilities, gc, dc):
        """Perplexity = geometric mean of inverse target probabilities."""
        op = core.CreateOperator(
            "Perplexity",
            ["target_probabilities"],
            ["perplexity"]
        )
        def op_ref(target_probabilities):
            N = target_probabilities.shape[0]
            # product of p_i^(-1/N); `reduce` is presumably imported from
            # functools at the top of the file.
            perplexities = np.power(target_probabilities, -1.0 / N)
            perplexity = reduce(lambda x, y: x * y, perplexities)
            return (perplexity,)
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[target_probabilities],
            reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_lengths_to_segment_ids(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsToSegmentIds",
["lengths"],
["segment_ids"])
def op_ref(lengths):
sids = []
for i, l in enumerate(lengths):
sids.extend(l * [i])
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_lengths_range_fill(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsRangeFill",
["lengths"],
["increasing_seq"])
def op_ref(lengths):
sids = []
for _, l in enumerate(lengths):
sids.extend(list(range(l)))
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
    @given(**hu.gcs_cpu_only)
    def test_segment_ids_to_ranges(self, gc, dc):
        """SegmentIdsToRanges on a fixed lengths vector (includes an empty
        segment) — each output row is (start_offset, length)."""
        lengths = [4, 6, 3, 2, 0, 4]
        op = core.CreateOperator(
            "SegmentIdsToRanges",
            ["segment_ids"],
            ["ranges"])
        def op_ref(segment_ids):
            ranges = [np.array([0, 0], dtype=np.int32)]
            prev = 0
            for i, sid in enumerate(segment_ids):
                # open a new (possibly empty) range for each skipped id
                while sid != prev:
                    prev += 1
                    ranges.append(np.array([i, 0], dtype=np.int32))
                ranges[-1][1] += 1
            return (np.array(ranges, dtype=np.int32), )
        def lengths_to_segment_ids(lengths):
            sids = []
            for i, l in enumerate(lengths):
                sids.extend(l * [i])
            return (np.array(sids, dtype=np.int32), )
        # NOTE(review): lengths_to_segment_ids returns a 1-tuple which is
        # wrapped in np.array as the op's input list — verify intentional.
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=np.array(lengths_to_segment_ids(lengths), dtype=np.int32),
            reference=op_ref)
    @given(lengths=st.lists(st.integers(min_value=0, max_value=10),
                            min_size=0,
                            max_size=10),
           **hu.gcs_cpu_only)
    def test_lengths_to_ranges(self, lengths, gc, dc):
        """LengthsToRanges: lengths -> (start_offset, length) pairs, with
        offsets the exclusive prefix sum of the lengths."""
        op = core.CreateOperator(
            "LengthsToRanges",
            ["lengths"],
            ["ranges"])
        def op_ref(x):
            if not x.size:
                # empty input still yields a (0, 2)-shaped output
                return (x.reshape((0, 2)), )
            return (np.column_stack((np.concatenate(([0], np.cumsum(x)[:-1])),
                                     x)), )
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[np.array(lengths, dtype=np.int32)],
            reference=op_ref)
    @given(prediction=hu.arrays(dims=[10, 3],
                                elements=st.floats(allow_nan=False,
                                                   allow_infinity=False,
                                                   min_value=0,
                                                   max_value=1)),
           labels=hu.arrays(dims=[10],
                            dtype=np.int32,
                            elements=st.integers(min_value=0,
                                                 max_value=3 - 1)),
           **hu.gcs)
    def test_multi_class_accuracy(self, prediction, labels, gc, dc):
        """MultiClassAccuracy: per-class accuracy plus per-class sample
        counts, checked against an argmax-based NumPy reference."""
        op = core.CreateOperator(
            "MultiClassAccuracy",
            ["prediction", "labels"],
            ["accuracies", "amounts"]
        )
        def op_ref(prediction, labels):
            N = prediction.shape[0]
            D = prediction.shape[1]
            accuracies = np.empty(D, dtype=float)
            accuracies.fill(0)
            amounts = np.empty(D, dtype=int)
            amounts.fill(0)
            max_ids = np.argmax(prediction, axis=1)
            for i in range(0, N):
                max_id = max_ids[i]
                label_id = labels[i]
                if max_id == label_id:
                    accuracies[label_id] += 1
                amounts[label_id] += 1
            # normalize hit counts into per-class accuracy; classes with
            # no samples keep accuracy 0
            for i in range(0, D):
                amount = amounts[i]
                if amount:
                    accuracies[i] /= amount
            return (accuracies, amounts,)
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[prediction, labels],
            reference=op_ref)
    @given(lengths=st.lists(st.integers(min_value=0, max_value=10),
                            min_size=0,
                            max_size=10),
           **hu.gcs_cpu_only)
    def test_segment_ids_to_lengths(self, lengths, gc, dc):
        """SegmentIdsToLengths round-trip: expand lengths to segment ids,
        then recover the lengths (zero-length segments produce skipped ids
        that must be filled back in as 0)."""
        op = core.CreateOperator(
            "SegmentIdsToLengths",
            ["segment_ids"],
            ["lengths"])
        def lengths_to_ids(lengths):
            sids = []
            for i, l in enumerate(lengths):
                sids.extend(l * [i])
            return sids
        segment_ids = lengths_to_ids(lengths)
        def ids_to_lengths(ids):
            ids_length = len(ids)
            if ids_length == 0:
                return (np.array([], dtype=np.int32),)
            lengths = []
            # segment id starts with 0
            prev_id = -1
            tmp_length = 0
            for idx in range(ids_length):
                cur_id = ids[idx]
                if cur_id != prev_id:
                    if idx != 0:
                        lengths.append(tmp_length)
                    # emit a 0 for every id skipped over (empty segments)
                    while prev_id + 1 != cur_id:
                        lengths.append(0)
                        prev_id += 1
                    prev_id = cur_id
                    tmp_length = 0
                tmp_length += 1
            lengths.append(tmp_length)
            return (np.array(lengths, dtype=np.int32),)
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[np.array(segment_ids, dtype=np.int32)],
            reference=ids_to_lengths)
    @given(lengths=st.lists(st.integers(min_value=1, max_value=10),
                            min_size=0,
                            max_size=10),
           power=st.sampled_from([0.5, 1.0, 1.5, 2.0]),
           **hu.gcs_cpu_only)
    def test_lengths_to_weights(self, lengths, power, gc, dc):
        """LengthsToWeights: each element of a length-l segment gets the
        weight 1 / l**power (lengths are >= 1, so no division by zero)."""
        op = core.CreateOperator(
            "LengthsToWeights",
            ["lengths"],
            ["weights"],
            power=power)
        def lengths_to_weights(lengths):
            weighted_length = []
            for l in lengths:
                weighted_length.extend(l * [1 / pow(l, power)])
            return (np.array(weighted_length, dtype=float),)
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[np.array(lengths, dtype=np.int32)],
            reference=lengths_to_weights)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(allow_nan=False,
allow_infinity=False)),
**hu.gcs)
def test_abs(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Abs",
["input"],
["output"]
)
def abs_ref(input_tensor):
return (np.abs(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=abs_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(min_value=-10,
max_value=10)),
**hu.gcs)
def test_cos(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Cos",
["input"],
["output"]
)
def cos_ref(input_tensor):
return (np.cos(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=cos_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(min_value=-10,
max_value=10)),
**hu.gcs)
def test_sin(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Sin",
["input"],
["output"]
)
def sin_ref(input_tensor):
return (np.sin(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=sin_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(allow_nan=False,
allow_infinity=False)),
**hu.gcs)
def test_exp(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Exp",
["input"],
["output"]
)
def exp_ref(input_tensor):
return (np.exp(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=exp_ref)
    @given(input_tensor=hu.arrays(
        dims=[10], elements=st.floats(min_value=1,
                                      max_value=10000)),
           **hu.gcs_cpu_only)
    def test_log(self, input_tensor, gc, dc):
        """Elementwise Log vs np.log, plus a gradient check; inputs are
        kept >= 1 so the log and its gradient are well defined."""
        op = core.CreateOperator(
            "Log",
            ["input"],
            ["output"]
        )
        def log_ref(input_tensor):
            return (np.log(input_tensor),)
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[input_tensor],
            reference=log_ref)
        self.assertGradientChecks(gc, op, [input_tensor], 0, [0])
    def test_blobs_dequeue_timeout(self):
        """DequeueBlobs on an empty queue must raise after timeout_secs.

        Asserts both that a RuntimeError is raised and that at least
        (roughly) the requested 0.2s elapsed before it fired.
        """
        op = core.CreateOperator(
            "CreateBlobsQueue",
            [],
            ["queue"],
            capacity=5,
            num_blobs=1)
        self.ws.run(op)
        t = time.time()
        op = core.CreateOperator(
            "DequeueBlobs",
            ["queue"],
            ["out"],
            timeout_secs=0.2)
        self.assertRaises(RuntimeError, lambda: self.ws.run(op))
        t = time.time() - t
        # small slack (0.19 vs 0.2) to absorb timer granularity
        self.assertGreater(t, 0.19)
    @given(num_threads=st.integers(1, 10),  # noqa
           num_elements=st.integers(1, 100),
           capacity=st.integers(1, 5),
           num_blobs=st.integers(1, 3),
           do=st.sampled_from(hu.device_options))
    def test_blobs_queue_threading(self, num_threads, num_elements,
                                   capacity, num_blobs, do):
        """
        - Construct matrices of size N x D
        - Start K threads
        - Push all N rows into the queue of capacity C
        - Pull all N rows out of the queue.
        - Verify that the output matrices are permutation of the rows of the
        original matrices.
        """
        import threading
        try:
            import queue
        except ImportError:
            # Python 2 fallback: the stdlib module was named Queue there.
            import Queue as queue
        op = core.CreateOperator(
            "CreateBlobsQueue",
            [],
            ["queue"],
            capacity=capacity,
            num_blobs=num_blobs,
            device_option=do)
        self.ws.run(op)
        xs = [np.random.randn(num_elements, 5).astype(np.float32)
              for _ in range(num_blobs)]
        q = queue.Queue()
        for i in range(num_elements):
            q.put([x[i] for x in xs])
        def enqueue(t):
            # worker: drain the Python queue, pushing each row group into
            # the caffe2 BlobsQueue until nothing is left
            while True:
                feed_blobs = ["x_{}_{}".format(i, t) for i in range(num_blobs)]
                op = core.CreateOperator(
                    "EnqueueBlobs",
                    ["queue"] + feed_blobs,
                    feed_blobs,
                    device_option=do)
                try:
                    elems = q.get_nowait()
                    for elem, feed_blob in zip(elems, feed_blobs):
                        self.ws.create_blob(feed_blob).feed(
                            elem, device_option=do)
                    self.ws.run(op)
                except queue.Empty:
                    return
        # Create all blobs before racing on multiple threads
        # (blob creation is not threadsafe)
        for t in range(num_threads):
            for i in range(num_blobs):
                self.ws.create_blob("x_{}_{}".format(i, t))
        threads = [threading.Thread(target=enqueue, args=(t,))
                   for t in range(num_threads)]
        for thread in threads:
            thread.start()
        for n in range(num_elements):
            dequeue_blobs = ["y_{}_{}".format(i, n) for i in range(num_blobs)]
            op = core.CreateOperator(
                "DequeueBlobs",
                ["queue"],
                dequeue_blobs,
                device_option=do)
            self.ws.run(op)
        for thread in threads:
            thread.join()
        op = core.CreateOperator("CloseBlobsQueue", ["queue"], [])
        self.ws.run(op)
        ys = [np.vstack([self.ws.blobs["y_{}_{}".format(i, n)].fetch()
                         for n in range(num_elements)])
              for i in range(num_blobs)]
        for i in range(num_blobs):
            self.assertEqual(ys[i].shape, xs[i].shape)
            for j in range(num_elements):
                # Verify that the rows of the returned blob are a
                # permutation. The order may be different due to
                # different threads racing.
                self.assertTrue(
                    any(np.array_equal(xs[i][j], ys[i][k])
                        for k in range(num_elements)))
    @given(num_producers=st.integers(1, 10),
           num_consumers=st.integers(1, 10),
           capacity=st.integers(1, 5),
           num_blobs=st.integers(1, 3),
           do=st.sampled_from(hu.device_options))
    def test_safe_blobs_queue(self, num_producers, num_consumers,
                              capacity, num_blobs, do):
        """Multi-producer / multi-consumer SafeEnqueue/SafeDequeue.

        Producer i enqueues (i+1)*10 items; consumers count dequeues until
        the queue is closed. The total consumed must equal the total
        produced.
        """
        init_net = core.Net('init_net')
        queue = init_net.CreateBlobsQueue(
            [], 1, capacity=capacity, num_blobs=num_blobs)
        producer_steps = []
        truth = 0
        for i in range(num_producers):
            name = 'producer_%d' % i
            net = core.Net(name)
            blobs = [net.ConstantFill([], 1, value=1.0, run_once=False)
                     for times in range(num_blobs)]
            status = net.NextName()
            net.SafeEnqueueBlobs([queue] + blobs, blobs + [status])
            count = (i + 1) * 10
            step = core.execution_step(name, net, num_iter=count)
            truth += count
            producer_steps.append(step)
        # closing the queue is what unblocks and stops the consumers
        producer_exit_net = core.Net('producer_exit_net')
        producer_exit_net.CloseBlobsQueue([queue], 0)
        producer_step = core.execution_step('producer', [
            core.execution_step(
                'producers', producer_steps, concurrent_substeps=True),
            core.execution_step('producer_exit', producer_exit_net)]
        )
        consumer_steps = []
        counters = []
        const_1 = init_net.ConstantFill([], 1, value=1.0)
        for i in range(num_consumers):
            name = 'consumer_%d' % i
            net1 = core.Net(name)
            blobs = net1.SafeDequeueBlobs([queue], num_blobs + 1)
            status = blobs[-1]
            # each successful dequeue increments this consumer's counter
            net2 = core.Net(name + '_counter')
            counter = init_net.ConstantFill([], 1, value=0.0)
            counters.append(counter)
            net2.Add([counter, const_1], counter)
            consumer_steps.append(core.execution_step(
                name, [net1, net2], should_stop_blob=status))
        consumer_step = core.execution_step(
            'consumer', consumer_steps, concurrent_substeps=True)
        init_step = core.execution_step('init', init_net)
        worker_step = core.execution_step(
            'worker', [consumer_step, producer_step], concurrent_substeps=True)
        plan = core.Plan('test')
        plan.AddStep(init_step)
        plan.AddStep(worker_step)
        self.ws.run(plan)
        v = 0
        for counter in counters:
            v += self.ws.blobs[str(counter)].fetch().tolist()
        self.assertEqual(v, truth)
    @given(num_queues=st.integers(1, 5),
           num_iter=st.integers(5, 10),
           capacity=st.integers(1, 5),
           num_blobs=st.integers(1, 3))
    def test_weighted_sample_blobs_queue(
        self, num_queues, num_iter, capacity, num_blobs
    ):
        """WeightedSampleDequeueBlobs draws from several queues according
        to random weights; checks the consumer observes between num_iter+1
        and num_iter*num_queues+1 dequeue statuses before all queues close.
        """
        # Create BlobsQueue for each input queue
        print("num_queues", num_queues)
        init_net = core.Net('init_net')
        queues = [
            init_net.CreateBlobsQueue(
                [], 1, capacity=capacity, num_blobs=num_blobs
            ) for _ in range(num_queues)
        ]
        # Create multiple producer nets and one producer exist net
        producer_steps = []
        producer_exit_nets = []
        for i in range(num_queues):
            name = 'producer_%d' % i
            net = core.Net(name)
            blobs = [net.ConstantFill([], 1, value=1.0, run_once=False)
                     for _ in range(num_blobs)]
            status = net.NextName()
            net.SafeEnqueueBlobs([queues[i]] + blobs, blobs + [status])
            exit_net = core.Net('producer_exit_%d' % i)
            exit_net.CloseBlobsQueue(queues[i], 0)
            producer_exit_nets.append(exit_net)
            step = core.execution_step(
                name, [
                    core.execution_step(
                        'producer_%d' % i, [net], num_iter=num_iter
                    ),
                    core.execution_step('producer_exit_%d' % i, [exit_net]),
                ]
            )
            producer_steps.append(step)
        producer_step = core.execution_step(
            'producer', [
                core.execution_step(
                    'producers',
                    producer_steps,
                    concurrent_substeps=True,
                ),
            ]
        )
        status_lst = []
        # Python-op callback records every dequeue status the consumer sees
        def append(ins, outs):
            status_lst.append(ins)
        # Create one consumer dequeue net and one consumer exist net
        consumer_net = core.Net('weight_sample_dequeue_net')
        table_idx_blob = np.random.randint(low=-1, high=num_blobs, size=1)
        blobs = consumer_net.WeightedSampleDequeueBlobs(
            queues,
            num_blobs + 1,
            weights=np.random.uniform(low=0.0, high=1.0, size=(num_queues,)),
            table_idx_blob=table_idx_blob[0],
        )
        status = blobs[-1]
        consumer_net.Python(append)(status)
        consumer_step = core.execution_step(
            'consumer',
            [
                core.execution_step(
                    'consumer', [consumer_net], should_stop_blob=status
                ),
                core.execution_step('producer_exit', producer_exit_nets)
            ]
        )
        init_step = core.execution_step('init', init_net)
        worker_step = core.execution_step(
            'worker', [producer_step, consumer_step], concurrent_substeps=True)
        plan = core.Plan('test')
        plan.AddStep(init_step)
        plan.AddStep(worker_step)
        self.ws.run(plan)
        assert len(status_lst) >= num_iter + 1
        assert len(status_lst) <= num_iter * num_queues + 1
    @given(
        data=hu.tensor(),
        **hu.gcs_cpu_only)
    def test_squeeze_expand_dims(self, data, gc, dc):
        """ExpandDims forward vs np.expand_dims; Squeeze is used as the
        gradient reference (the two ops are inverses)."""
        # dims contains a duplicate 0 on purpose; both references dedup
        # via set() below, matching the op's handling.
        dims = [0, 0]
        if len(data.shape) > 2:
            dims.append(2)
        op = core.CreateOperator(
            "ExpandDims",
            ["data"],
            ["expanded"],
            dims=dims)
        def expand_dims_ref(data, *args, **kw):
            inc_dims = list(set(dims))
            inc_dims.sort()
            r = data
            for dim in inc_dims:
                r = np.expand_dims(r, axis=dim)
            return (r, )
        def squeeze_ref(data, *args, **kw):
            dec_dims = list(set(dims))
            dec_dims.sort(reverse=True)
            r = data
            for dim in dec_dims:
                r = np.squeeze(r, axis=dim)
            return (r, )
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[data],
            reference=expand_dims_ref,
            output_to_grad='expanded',
            grad_reference=squeeze_ref)
    @given(**hu.gcs_cpu_only)
    def test_tt_layer(self, gc, dc):
        """TT (tensor-train) layer vs a fixed golden output.

        Seeded, fixed-size run: a 16-element input through TT cores
        initialized by tt_core.init_tt_cores must reproduce `golden`.
        """
        seed = 1234
        np.random.seed(seed)
        inp_sizes = [2, 2, 2, 2]
        out_sizes = [2, 2, 2, 2]
        tt_ranks = [1, 3, 3, 3, 1]
        op = core.CreateOperator(
            "TT",
            ["X", "b", "cores"],
            ["Y"],
            inp_sizes=inp_sizes,
            out_sizes=out_sizes,
            tt_ranks=tt_ranks,
        )
        X = np.expand_dims(
            np.random.rand(16).astype(np.float32), axis=0)
        b = np.array([0] * 16).astype(np.float32)
        cores = tt_core.init_tt_cores(inp_sizes, out_sizes, tt_ranks)
        self.ws.create_blob("X").feed(X)
        self.ws.create_blob("b").feed(b)
        self.ws.create_blob("cores").feed(cores)
        self.ws.run(op)
        Y = self.ws.blobs[("Y")].fetch()
        Y = Y.reshape([16])
        golden = np.array([-9.51763490e-07, -1.28442286e-06,
                           -2.86281141e-07, 2.28865644e-07,
                           -1.96180017e-06, -1.78920531e-06,
                           9.31094666e-07, -2.04273989e-07,
                           1.70017107e-06, 1.64845711e-06,
                           -1.06099132e-06, -4.69111137e-07,
                           6.57552358e-08, -1.28942040e-08,
                           -2.29114004e-07, -1.04262714e-06])
        # This golden array is dependent on the specified inp_sizes, out_sizes,
        # tt_ranks, and seed. Changing these will cause the test to fail.
        self.assertAlmostEqual(np.linalg.norm(golden - Y), 0, delta=1e-10)
    @given(num_workers=st.integers(1, 10),
           net_type=st.sampled_from(
               ["simple", "dag"] +
               (["async_dag"] if workspace.has_gpu_support else [])),
           **hu.gcs)
    def test_dag_net_forking(self, net_type, num_workers, gc, dc):
        """Build a binary tree of FC layers and run it repeatedly under
        different net executors: results must be bit-identical across runs
        (i.e. parallel execution must be deterministic)."""
        from caffe2.python.model_helper import ModelHelper
        from caffe2.python import brew
        m = ModelHelper(name="test_model")
        n = 10
        d = 2
        depth = 2
        iters = 5
        np.random.seed(1701)
        # Build a binary tree of FC layers, summing at each node.
        for i in reversed(range(depth)):
            for j in range(2 ** i):
                bottom_1 = "{}_{}".format(i + 1, 2 * j)
                bottom_2 = "{}_{}".format(i + 1, 2 * j + 1)
                mid_1 = "{}_{}_m".format(i + 1, 2 * j)
                mid_2 = "{}_{}_m".format(i + 1, 2 * j + 1)
                top = "{}_{}".format(i, j)
                brew.fc(
                    m,
                    bottom_1, mid_1,
                    dim_in=d, dim_out=d,
                    weight_init=('ConstantFill', dict(value=np.random.randn())),
                    bias_init=('ConstantFill', dict(value=np.random.randn())))
                brew.fc(
                    m,
                    bottom_2, mid_2,
                    dim_in=d, dim_out=d,
                    weight_init=('ConstantFill', dict(value=np.random.randn())),
                    bias_init=('ConstantFill', dict(value=np.random.randn())))
                m.net.Sum([mid_1, mid_2], top)
        m.net.SquaredL2Distance(["0_0", "label"], "xent")
        m.net.AveragedLoss("xent", "loss")
        input_to_grad = m.AddGradientOperators(["loss"])
        m.Proto().device_option.CopyFrom(gc)
        m.param_init_net.Proto().device_option.CopyFrom(gc)
        m.Proto().type = net_type
        m.Proto().num_workers = num_workers
        self.ws.run(m.param_init_net)
        print(str(m.Proto()))
        def run():
            # reseed so every run feeds identical inputs
            import numpy as np
            np.random.seed(1701)
            input_blobs = ["{}_{}".format(depth, j) for j in range(2 ** depth)]
            for input_blob in input_blobs:
                self.ws.create_blob(input_blob).feed(
                    np.random.randn(n, d).astype(np.float32),
                    device_option=gc)
                self.ws.create_blob("label").feed(
                    np.random.randn(n, d).astype(np.float32),
                    device_option=gc)
            self.ws.run(m.net)
            gradients = [
                self.ws.blobs[str(input_to_grad[input_blob])].fetch()
                for input_blob in input_blobs]
            return gradients
        outputs = [run() for _ in range(iters)]
        for output in outputs[1:]:
            np.testing.assert_array_equal(outputs[0], output)
            # 91.81752 is the expected gradient norm for this seed/topology
            self.assertAlmostEqual(np.sum(np.square(output)), 91.81752,
                                   delta=1e-2)
@given(input=hu.tensor(min_dim=2, max_dim=6),
slice_dim=st.integers(),
a=st.integers(),
b=st.integers(),
is_empty=st.booleans(),
**hu.gcs_cpu_only)
def test_slice(self, input, slice_dim, a, b, is_empty, gc, dc):
slice_dim = slice_dim % len(input.shape)
if (is_empty):
input = np.random.rand(*([0] + list(input.shape))).astype(np.int32)
slice_dim += 1
a = a % input.shape[slice_dim]
b = b % input.shape[slice_dim] + 1
start_vec = np.zeros(len(input.shape), dtype=np.int32)
end_vec = np.ones(len(input.shape), dtype=np.int32) * -1
start_vec[slice_dim] = min(a, b)
end_vec[slice_dim] = max(a, b)
op = core.CreateOperator(
"Slice",
["input", "start", "end"],
["output"])
def slice_ref(x, s, e):
if len(s.shape) == 0:
return x
slc = [slice(si, None if ei == -1 else ei) for si, ei in zip(s, e)]
return (x[slc], )
self.assertReferenceChecks(gc, op, [input, start_vec, end_vec],
slice_ref)
self.assertGradientChecks(gc, op, [input, start_vec, end_vec], 0, [0])
@given(data=hu.tensor(), **hu.gcs_cpu_only)
def test_shape(self, data, gc, dc):
op = core.CreateOperator("Shape", ["data"], ["shape"])
self.assertReferenceChecks(gc, op, [data], lambda x: (x.shape, ))
@given(data=hu.tensor(), **hu.gcs_cpu_only)
def test_shape_with_axes(self, data, gc, dc):
def shape_ref(x, y):
return ([x.shape[i] for i in y],)
axes = np.random.randint(len(data.shape), size=10).tolist()
op = core.CreateOperator("Shape", ["data"], ["shape"], axes=axes)
self.assertReferenceChecks(gc, op, [data, axes], shape_ref)
@given(data=hu.tensor(), **hu.gcs_cpu_only)
def test_has_elements(self, data, gc, dc):
op = core.CreateOperator("HasElements", ["data"], ["has_elements"])
self.assertReferenceChecks(gc, op, [data], lambda x: (len(x) > 0, ))
op = core.CreateOperator("IsEmpty", ["data"], ["is_empty"])
self.assertReferenceChecks(gc, op, [data], lambda x: (len(x) == 0, ))
    @given(initial_iters=st.integers(0, 100),
           max_iters=st.integers(0, 100))
    def test_should_stop_as_criteria_net_execution_step(
            self, initial_iters, max_iters):
        """An execution step with a criteria net stops once the criteria
        blob (iter >= num_iters) becomes true; the final iter count must
        be max(initial_iters, max_iters)."""
        net = core.Net("net")
        net.Iter(["iter"], ["iter"])
        self.ws.create_blob("iter").feed(
            np.asarray([initial_iters]).astype(np.int64))
        self.ws.create_blob("num_iters").feed(
            np.asarray([max_iters]).astype(np.int64))
        # criteria net runs before the body net on every step iteration
        criteria_net = core.Net("criteria")
        criteria_net.GE(["iter", "num_iters"], ["stop"])
        criteria_net.Proto().external_output.extend(["stop"])
        plan = core.Plan('plan')
        plan.AddStep(core.execution_step(
            'step', [criteria_net, net],
            should_stop_blob=core.BlobReference("stop")))
        self.ws.run(plan)
        iters = self.ws.blobs[("iter")].fetch()
        self.assertEqual(iters.dtype, np.int64)
        self.assertEqual(iters[0], max(initial_iters, max_iters))
def test_disabled_execution_step(self):
def createNets(i, disabled):
should_stop = 'should_stop_{}'.format(i)
output = 'output_{}'.format(i)
# init content and stop signal
init = core.Net("init_{}".format(i))
init.ConstantFill(
[],
[output],
shape=[1],
value=0.0
)
init.Cast([output], [should_stop], to='bool')
# decide if disabled or not
criterion = core.Net("criterion_{}".format(i))
tmp = criterion.ConstantFill(
[],
shape=[1],
value=1.0 if disabled else 0.0
)
criterion.Cast([tmp], [should_stop], to='bool')
criterion.Proto().external_output.extend([should_stop])
# the body net is just to turn a 0 blob to 1
net = core.Net("net_{}".format(i))
net.ConstantFill(
[],
[output],
shape=[1],
value=1.0
)
# always end the loop
ender = core.Net("ender_{}".format(i))
tmp = ender.ConstantFill(
[],
shape=[1],
value=1.0
)
ender.Cast([tmp], [should_stop], to='bool')
ender.Proto().external_output.extend([should_stop])
return [init, criterion, net, ender]
nets = [createNets(1, False),
createNets(2, True),
createNets(3, False)]
steps = [
core.execution_step(
'step_1', nets[0],
should_stop_blob=core.BlobReference('should_stop_1')),
core.execution_step(
'step_2', nets[1],
should_stop_blob=core.BlobReference('should_stop_2')),
core.execution_step('step_3', nets[2])
]
expected = [1.0, 0.0, 1.0]
plan = core.Plan('plan')
plan.AddStep(core.execution_step('all_steps', steps, num_iter=3))
self.ws.run(plan)
for i, _ in enumerate(nets):
self.assertEqual(
self.ws.blobs['output_{}'.format(i + 1)].fetch()[0],
expected[i])
@given(initial_iters=st.integers(0, 100),
num_iters=st.integers(0, 100))
def test_iter_count_with_execution_step(self, initial_iters, num_iters):
net = core.Net("net")
net.Iter(["iter"], ["iter"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
step = core.ExecutionStep("step", [net])
step.SetIter(num_iters)
plan = core.Plan("plan")
plan.AddStep(step)
self.ws.run(plan)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], initial_iters + num_iters)
@given(initial_iters=st.integers(0, 100),
num_iters=st.integers(0, 100),
num_nets=st.integers(0, 5))
def test_atomic_iter_with_concurrent_steps(self, initial_iters, num_iters,
num_nets):
init_net = core.Net("init_net")
iter_mutex = init_net.CreateMutex([], ["iter_mutex"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
concurrent_steps = core.ExecutionStep("concurrent_steps",
num_iter=num_iters)
for i in range(num_nets):
net = core.Net("net_{}".format(i))
net.AtomicIter([iter_mutex, "iter"], ["iter"])
step = core.ExecutionStep("step", [net])
concurrent_steps.AddSubstep(step)
concurrent_steps.SetConcurrentSubsteps(True)
plan = core.Plan("plan")
plan.AddStep(concurrent_steps)
stats_net = core.Net("stats_net")
stats_net.StatRegistryExport([], ["stats_key", "stats_val", "stats_ts"])
self.ws.run(init_net)
self.ws.run(plan)
self.ws.run(stats_net)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], initial_iters + num_iters * num_nets)
if num_iters * num_nets > 0:
stats_key = self.ws.blobs[("stats_key")].fetch()
atomic_iter_key = b'atomic_iter/stats/iter/num_iter'
self.assertTrue(atomic_iter_key in stats_key)
stat_val = self.ws.blobs[("stats_val")].fetch()
self.assertEqual(num_iters * num_nets, stat_val[list(stats_key).index(atomic_iter_key)])
@given(a=hu.tensor(),
src=st.sampled_from(list(viewkeys(_NUMPY_TYPE_TO_ENUM))),
dst=st.sampled_from(list(viewkeys(_NUMPY_TYPE_TO_ENUM))),
use_name=st.booleans(),
**hu.gcs)
def test_cast(self, a, src, dst, use_name, gc, dc):
a = a.astype(src)
# Casting from a float type outside the range of the integral
# type is UB.
ftypes = [np.float32, np.float64]
if src in ftypes and dst not in ftypes and dst is not np.bool:
info = np.iinfo(dst)
a = np.clip(a, info.min, info.max)
def ref(data):
return [data.astype(dst)]
to = _NUMPY_TYPE_TO_ENUM[dst]
if use_name:
to = caffe2_pb2.TensorProto.DataType.Name(to).lower()
op = core.CreateOperator('Cast', ["X"], ["Y"], to=to)
self.assertDeviceChecks(dc, op, [a], [0])
out, = self.assertReferenceChecks(gc, op, [a], ref)
self.assertEqual(dst, out.dtype)
@given(a=hu.tensor(),
eps=st.floats(min_value=1e-4, max_value=1e-2),
a_grad=hu.tensor(elements=st.floats(min_value=0.01, max_value=0.99)),
eps_grad=st.floats(min_value=1e-4, max_value=1e-3),
**hu.gcs)
def test_logit(self, a, eps, a_grad, eps_grad, gc, dc):
def ref(data):
data = np.clip(data, eps, 1.0 - eps)
return (np.log(data / (1 - data)), )
# forward testing carried out in the full range of input
# to ensure original test coverage.
# gradient test carried out with reduced input range
# because the sharp increase of the logit curve at 0 and 1
# error increases dramtically when input is close to 0 or 1
# and it will fail the test.
# So we only run gradient test in the range of (0.01, 0.99)
# very occationally, test may fail due to random accumulated error
# reduce test range to (0.02, 0.98) will improve test stability
op = core.CreateOperator('Logit', ["X"], ["Y"], eps=eps)
self.assertDeviceChecks(dc, op, [a], [0])
self.assertReferenceChecks(gc, op, [a], ref)
op_grad = core.CreateOperator('Logit', ["X"], ["Y"], eps=eps_grad)
self.assertGradientChecks(gc, op_grad, [a_grad], 0, [0],
threshold=0.04, stepsize=2e-3)
@given(a=hu.tensor(elements=st.floats(allow_nan=True)),
value=st.floats(min_value=-10, max_value=10),
**hu.gcs)
def test_replace_nan(self, a, value, gc, dc):
def ref(data):
out = np.copy(data)
out[np.isnan(data)] = value
return (out, )
op = core.CreateOperator('ReplaceNaN', ["X"], ["Y"], value=value)
self.assertDeviceChecks(dc, op, [a], [0])
self.assertReferenceChecks(gc, op, [a], ref)
@given(data=_dtypes(dtypes=[np.int32, np.int64, np.float32, np.bool]).
flatmap(lambda dtype: hu.tensor(
min_dim=1, dtype=dtype, elements=hu.elements_of_type(dtype))),
has_input=st.booleans(),
has_extra_shape=st.booleans(),
extra_shape=st.lists(
min_size=1, max_size=5, elements=st.integers(1, 5)),
**hu.gcs)
def test_constant_fill(self, data, has_input, has_extra_shape, extra_shape,
gc, dc):
dtype = data.dtype.type
# in opt mode, np.bool is converted into np.bool_
if data.dtype == np.dtype(np.bool):
dtype = np.bool
value = data.item(0)
gt_shape = data.shape
inputs = [data]
enum_type = _NUMPY_TYPE_TO_ENUM[dtype]
if has_input:
if has_extra_shape:
op = core.CreateOperator('ConstantFill', ["X"], ["Y"],
dtype=enum_type,
extra_shape=extra_shape,
value=value)
gt_shape += tuple(extra_shape)
else:
op = core.CreateOperator('ConstantFill', ["X"], ["Y"],
dtype=enum_type,
value=value)
else:
op = core.CreateOperator('ConstantFill', [], ["Y"],
dtype=enum_type,
value=value,
shape=list(gt_shape))
inputs = []
def ref(inputs=None):
outputs = np.full(shape=gt_shape, fill_value=value, dtype=dtype)
return [outputs]
self.assertDeviceChecks(dc, op, inputs, [0])
out, = self.assertReferenceChecks(gc, op, inputs, ref)
self.assertEqual(dtype, out.dtype)
@given(t=st.integers(1, 5),
n=st.integers(1, 5),
d=st.integers(1, 5))
def test_elman_recurrent_network(self, t, n, d):
from caffe2.python import model_helper, brew
np.random.seed(1701)
step_net = model_helper.ModelHelper(name="Elman")
# TODO: name scope external inputs and outputs
step_net.Proto().external_input.extend(
["input_t", "seq_lengths", "timestep",
"hidden_t_prev", "gates_t_w", "gates_t_b"])
step_net.Proto().type = "simple"
step_net.Proto().external_output.extend(["hidden_t", "gates_t"])
brew.fc(step_net,
"hidden_t_prev", "gates_t", dim_in=d, dim_out=d, axis=2)
step_net.net.Sum(["gates_t", "input_t"], ["gates_t"])
step_net.net.Sigmoid(["gates_t"], ["hidden_t"])
# Initialize params for step net in the parent net
for op in step_net.param_init_net.Proto().op:
workspace.RunOperatorOnce(op)
backward_ops, backward_mapping = core.GradientRegistry.GetBackwardPass(
step_net.Proto().op, {"hidden_t": "hidden_t_grad"})
backward_mapping = {
str(k): str(v) for k, v in viewitems(backward_mapping)
}
backward_step_net = core.Net("ElmanBackward")
del backward_step_net.Proto().op[:]
backward_step_net.Proto().op.extend(backward_ops)
assert backward_mapping["input_t"] == "gates_t_grad"
links = [
("hidden_t_prev", "hidden", 0),
("hidden_t", "hidden", 1),
("input_t", "input", 0),
]
link_internal, link_external, link_offset = zip(*links)
backward_links = [
("hidden_t_prev_grad", "hidden_grad", 0),
("hidden_t_grad", "hidden_grad", 1),
("gates_t_grad", "input_grad", 0),
]
backward_link_internal, backward_link_external, backward_link_offset = \
zip(*backward_links)
backward_step_net.Proto().external_input.extend(["hidden_t_grad"])
backward_step_net.Proto().external_input.extend(
step_net.Proto().external_input)
backward_step_net.Proto().external_input.extend(
step_net.Proto().external_output)
inputs = ["input", "seq_lengths", "gates_t_w", "gates_t_b", "hidden_input"]
recurrent_inputs = ["hidden_input"]
op = core.CreateOperator(
"RecurrentNetwork",
inputs,
["output", "hidden", "hidden_output", "step_workspaces"],
alias_src=["hidden", "hidden"],
alias_dst=["output", "hidden_output"],
alias_offset=[1, -1],
recurrent_states=["hidden"],
initial_recurrent_state_ids=[
inputs.index(i) for i in recurrent_inputs
],
link_internal=link_internal,
link_external=link_external,
link_offset=link_offset,
backward_link_internal=backward_link_internal,
backward_link_external=backward_link_external,
backward_link_offset=backward_link_offset,
param=[inputs.index(p) for p in step_net.params],
step_net=step_net.Proto(),
backward_step_net=backward_step_net.Proto(),
outputs_with_grads=[0],
)
workspace.FeedBlob(
"input", np.random.randn(t, n, d).astype(np.float32))
workspace.FeedBlob(
"hidden_input", np.random.randn(1, n, d).astype(np.float32))
workspace.FeedBlob(
"seq_lengths", np.random.randint(0, t, size=(n,)).astype(np.int32))
def reference(input, seq_lengths, gates_w, gates_b, hidden_input):
T = input.shape[0]
N = input.shape[1]
D = input.shape[2]
hidden = np.zeros(shape=(T + 1, N, D))
assert hidden.shape[0] == T + 1
assert hidden.shape[1] == N
assert hidden.shape[2] == D
hidden[0, :, :] = hidden_input
for t in range(T):
input_t = input[t].reshape(1, N, D)
hidden_t_prev = hidden[t].reshape(1, N, D)
gates = np.dot(hidden_t_prev, gates_w.T)
gates = gates.reshape(1, N, D) + input_t.reshape(1, N, D)
hidden[t + 1] = sigmoid(gates)
return hidden[1:], hidden, hidden[-1].reshape(1, N, D)
self.assertReferenceChecks(
hu.cpu_do,
op,
[workspace.FetchBlob(name)
for name in ["input", "seq_lengths", "gates_t_w", "gates_t_b",
"hidden_input"]],
reference,
outputs_to_check=[0, 1, 2])
for param in [0, 2, 3]:
self.assertGradientChecks(
hu.cpu_do,
op,
[workspace.FetchBlob(name)
for name in ["input", "seq_lengths", "gates_t_w", "gates_t_b",
"hidden_input"]],
param,
[0])
@settings(suppress_health_check=[HealthCheck.filter_too_much])
@given(n=st.integers(1, 5),
c=st.integers(1, 5),
h=st.integers(1, 5),
w=st.integers(1, 5),
pad=st.integers(0, 2),
block_size=st.integers(2, 3),
**hu.gcs)
def test_space_to_batch(self, n, c, h, w, pad, block_size, gc, dc):
assume((h + 2 * pad) % block_size == 0)
assume((w + 2 * pad) % block_size == 0)
X = np.random.randn(n, c, h, w).astype(np.float32)
op = core.CreateOperator("SpaceToBatch", ["X"], ["Y"],
pad=pad, block_size=block_size)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@settings(suppress_health_check=[HealthCheck.filter_too_much])
@given(n=st.integers(1, 5),
c=st.integers(1, 5),
h=st.integers(1, 5),
w=st.integers(1, 5),
pad=st.integers(0, 2),
block_size=st.integers(2, 3),
**hu.gcs)
def test_batch_to_space(self, n, c, h, w, pad, block_size, gc, dc):
assume((h + 2 * pad) % block_size == 0)
assume((w + 2 * pad) % block_size == 0)
X = np.random.randn(
n * block_size * block_size,
c,
(h + 2 * pad) // block_size,
(w + 2 * pad) // block_size).astype(np.float32)
op = core.CreateOperator("BatchToSpace", ["X"], ["Y"],
pad=pad, block_size=block_size)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(),
in_place=st.booleans(),
scale=st.floats(min_value=-2.0, max_value=2.0),
**hu.gcs)
def test_scale(self, X, in_place, scale, gc, dc):
op = core.CreateOperator(
"Scale", ["X"], ["Y" if not in_place else "X"],
scale=scale)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(s=st.text())
def test_string_serde(self, s):
s = s.encode('ascii', 'ignore')
self.ws.create_blob("a").feed(s)
serialized = self.ws.blobs["a"].serialize("a")
self.ws.create_blob("b").deserialize(serialized)
self.assertEqual(s, self.ws.blobs[("a")].fetch())
self.assertEqual(s, self.ws.blobs[("b")].fetch())
@given(pad=st.integers(0, 3),
size=st.integers(1, 10),
input_channels=st.integers(1, 5),
batch_size=st.integers(1, 5),
order=st.sampled_from(["NCHW", "NHWC"]),
mode=st.sampled_from(["constant", "reflect", "edge"]),
**hu.gcs)
def test_same_pad_image(self, pad, size, input_channels, batch_size, order,
mode, gc, dc):
assume(size > pad)
op = core.CreateOperator(
"PadImage",
["X"],
["Y"],
pad=pad,
mode=mode,
order=order,
)
if order == "NHWC":
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (pad, pad), (pad, pad), (0, 0)), mode),)
else:
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode),)
self.assertReferenceChecks(gc, op, [X], numpy_pad_ref)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(pad_t=st.integers(0, 3),
pad_l=st.integers(0, 3),
pad_b=st.integers(0, 3),
pad_r=st.integers(0, 3),
size=st.integers(1, 10),
input_channels=st.integers(1, 5),
batch_size=st.integers(1, 5),
order=st.sampled_from(["NCHW", "NHWC"]),
mode=st.sampled_from(["constant", "reflect", "edge"]),
**hu.gcs)
def test_pad_image(self, pad_t, pad_l, pad_b, pad_r, size, input_channels,
batch_size, order, mode, gc, dc):
assume(size > max(pad_b, pad_r, pad_t, pad_l))
op = core.CreateOperator(
"PadImage",
["X"],
["Y"],
pad_t=pad_t,
pad_l=pad_l,
pad_b=pad_b,
pad_r=pad_r,
mode=mode,
order=order,
)
if order == "NHWC":
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (pad_t, pad_b), (pad_l, pad_r), (0, 0)),
mode),)
else:
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)),
mode),)
self.assertReferenceChecks(gc, op, [X], numpy_pad_ref)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW", "NHWC"]),
epsilon=st.floats(min_value=1e-4, max_value=1e-2),
**hu.gcs_cpu_only)
def test_instance_norm(self, size, input_channels, batch_size, order,
epsilon, gc, dc):
op = core.CreateOperator(
"InstanceNorm",
["X", "scale", "bias"],
["Y"],
order=order,
epsilon=epsilon,
)
np.random.seed(1701)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
if order == "NHWC":
X = X.swapaxes(1, 2).swapaxes(2, 3)
def ref_nchw(x, scale, bias):
x = x.reshape(batch_size * input_channels, size * size)
y = (x - x.mean(1)[:, np.newaxis])
y /= np.sqrt(x.var(1) + epsilon)[:, np.newaxis]
y = y.reshape(batch_size, input_channels, size, size)
y = y * scale.reshape(1, input_channels, 1, 1)
y = y + bias.reshape(1, input_channels, 1, 1)
return (y, )
def ref_nhwc(x, scale, bias):
x = x.swapaxes(2, 3).swapaxes(1, 2)
y = ref_nchw(x, scale, bias)[0]
return (y.swapaxes(1, 2).swapaxes(2, 3), )
self.assertReferenceChecks(
gc, op, [X, scale, bias],
ref_nchw if order == "NCHW" else ref_nhwc)
# TODO(jiayq): when there are backward and GPU implementations, enable
# these two.
# self.assertDeviceChecks(dc, op, [X, scale, bias], [0])
# self.assertGradientChecks(gc, op, [X, scale, bias], 0, [0])
ws = workspace.C.Workspace()
feeds = [("X", X), ("scale", scale), ("bias", bias)]
for blob, arr in feeds:
ws.create_blob(blob).feed(arr)
for _ in range(100):
ws.run(op)
for blob, arr in feeds:
np.testing.assert_array_equal(ws.blobs[blob].fetch(), arr)
@given(inp=_dtypes().flatmap(lambda dt: _tensor_and_indices(
elements=st.floats(min_value=0, max_value=1), dtype=dt)),
**hu.gcs)
def test_sparse_to_dense(self, inp, gc, dc):
first_dim, X, I = inp
if X.dtype != np.dtype('float32') and gc.device_type in {caffe2_pb2.CUDA, caffe2_pb2.HIP} :
# Cuda only support 32 bit float
print("Bailout {}".format(X.dtype))
return
if gc.device_type in {caffe2_pb2.CUDA, caffe2_pb2.HIP}:
# Cuda version only support int32
I = I.astype(np.int32)
# values don't matter
D = np.zeros((first_dim,) + X.shape[1:]).astype(X.dtype)
op = core.CreateOperator("SparseToDense", ["I", "X", "D"], ["Y"])
def sparse_to_dense(I, X, D):
O = np.zeros(D.shape)
for i, p in enumerate(I):
O[p] += X[i]
return [O]
self.assertReferenceChecks(gc, op, [I, X, D], sparse_to_dense)
X = X.astype(np.float32)
self.assertGradientChecks(gc, op, [I, X, D], 1, [0])
@given(inputs=hu.tensors(n=2, min_dim=2, max_dim=2), **hu.gcs_cpu_only)
def test_dot_product(self, inputs, gc, dc):
X, Y = inputs
op = core.CreateOperator("DotProduct", ["X", "Y"], 'out')
def dotproduct(X, Y):
return (np.sum(X * Y, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10),
K=st.integers(min_value=2, max_value=10),
pad_value=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs_cpu_only)
def test_dot_product_with_padding(self, N, M, K, pad_value, gc, dc):
X = np.random.rand(N, M).astype(np.float32) - 0.5
Y = np.random.rand(N, K).astype(np.float32) - 0.5
op = core.CreateOperator("DotProductWithPadding", ["X", "Y"], 'out',
pad_value=pad_value)
def dotproduct(X, Y):
Z = np.ones((N, max(M, K))).astype(np.float32) * pad_value
if M < K:
Z[:, :M] = X
return (np.sum(Z * Y, axis=1), )
else:
Z[:, :K] = Y
return (np.sum(Z * X, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10),
pad_value=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs_cpu_only)
def test_dot_product_with_rep_padding(self, N, M, pad_value, gc, dc):
K = 2 * M
X = np.random.rand(N, M).astype(np.float32) - 0.5
Y = np.random.rand(N, K).astype(np.float32) - 0.5
op = core.CreateOperator("DotProductWithPadding", ["X", "Y"], 'out',
replicate=True,
pad_value=pad_value)
def dotproduct(X, Y):
import numpy.matlib as npm
if M < K:
Z = npm.repmat(X, 1, K // M)
return (np.sum(Z * Y, axis=1), )
else:
Z = npm.repmat(Y, 1, M // K)
return (np.sum(Z * X, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10), **hu.gcs_cpu_only)
def test_ensure_dense(self, N, M, gc, dc):
# in place
X = np.random.rand(N, M).astype(np.float32) - 0.5
op = core.CreateOperator("EnsureDense", ["X"], "X")
self.assertReferenceChecks(gc, op, [X], lambda x: [x])
self.assertDeviceChecks(dc, op, [X], [0])
# or not
X = np.random.rand(N, M).astype(np.float32) - 0.5
op = core.CreateOperator("EnsureDense", ["X"], "out")
self.assertReferenceChecks(gc, op, [X], lambda x: [x])
self.assertDeviceChecks(dc, op, [X], [0])
@given(N=st.integers(min_value=10, max_value=100),
M=st.integers(min_value=2, max_value=10),
num_buckets=st.integers(min_value=1, max_value=5),
**hu.gcs_cpu_only)
def test_accumulate_histogram_op(self, N, M, num_buckets, gc, dc):
X = np.random.rand(N, M).astype(np.float32)
lower_bound, upper_bound = 0.1, 0.9
op = core.CreateOperator("AccumulateHistogram", ["X"],
['cur_hist', 'acc_hist'],
lower_bound=lower_bound,
upper_bound=upper_bound,
num_buckets=num_buckets)
def histogram(X):
hist = np.zeros((num_buckets + 2, ), dtype=np.int32)
segment = (upper_bound - lower_bound) / num_buckets
Y = np.zeros((N, M), dtype=np.int32)
Y[X < lower_bound] = 0
Y[X >= upper_bound] = num_buckets + 1
Y[(X >= lower_bound) & (X < upper_bound)] = \
((X[(X >= lower_bound) & (X < upper_bound)] - lower_bound) /
segment + 1).astype(np.int32)
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
hist[Y[i][j]] += 1
cur_hist, acc_hist = hist, hist
return [cur_hist, acc_hist]
self.assertDeviceChecks(dc, op, [X], [0, 1])
self.assertReferenceChecks(gc, op, [X], histogram)
if __name__ == "__main__":
unittest.main()
|
DPPO.py | """
A simple version of OpenAI's Proximal Policy Optimization (PPO). [http://adsabs.harvard.edu/abs/2017arXiv170706347S]
Distributing workers in parallel to collect data, then stop worker's roll-out and train PPO on collected data.
Restart workers once PPO is updated.
The global PPO updating rule is adopted from DeepMind's paper (DPPO):
Emergence of Locomotion Behaviours in Rich Environments (Google Deepmind): [http://adsabs.harvard.edu/abs/2017arXiv170702286H]
View more on my tutorial website: https://morvanzhou.github.io/tutorials
Dependencies:
tensorflow r1.2
gym 0.9.2
"""
import tensorflow as tf
from tensorflow.contrib.distributions import Normal
import numpy as np
import matplotlib.pyplot as plt
import threading, queue
from experiments.Robot_arm.arm_env import ArmEnv
EP_MAX = 2000
EP_LEN = 300
N_WORKER = 4 # parallel workers
GAMMA = 0.9 # reward discount factor
A_LR = 0.0001 # learning rate for actor
C_LR = 0.0005 # learning rate for critic
MIN_BATCH_SIZE = 64 # minimum batch size for updating PPO
UPDATE_STEP = 5 # loop update operation n-steps
EPSILON = 0.2 # Clipped surrogate objective
MODE = ['easy', 'hard']
n_model = 1
env = ArmEnv(mode=MODE[n_model])
S_DIM = env.state_dim
A_DIM = env.action_dim
A_BOUND = env.action_bound[1]
class PPO(object):
def __init__(self):
self.sess = tf.Session()
self.tfs = tf.placeholder(tf.float32, [None, S_DIM], 'state')
# critic
l1 = tf.layers.dense(self.tfs, 100, tf.nn.relu)
self.v = tf.layers.dense(l1, 1)
self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
self.advantage = self.tfdc_r - self.v
self.closs = tf.reduce_mean(tf.square(self.advantage))
self.ctrain_op = tf.train.AdamOptimizer(C_LR).minimize(self.closs)
# actor
pi, pi_params = self._build_anet('pi', trainable=True)
oldpi, oldpi_params = self._build_anet('oldpi', trainable=False)
self.sample_op = tf.squeeze(pi.sample(1), axis=0) # choosing action
self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(pi_params, oldpi_params)]
self.tfa = tf.placeholder(tf.float32, [None, A_DIM], 'action')
self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')
# ratio = tf.exp(pi.log_prob(self.tfa) - oldpi.log_prob(self.tfa))
ratio = pi.prob(self.tfa) / (oldpi.prob(self.tfa) + 1e-5)
surr = ratio * self.tfadv # surrogate loss
self.aloss = -tf.reduce_mean(tf.minimum(
surr,
tf.clip_by_value(ratio, 1. - EPSILON, 1. + EPSILON) * self.tfadv))
self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)
self.sess.run(tf.global_variables_initializer())
def update(self):
global GLOBAL_UPDATE_COUNTER
while not COORD.should_stop():
if GLOBAL_EP < EP_MAX:
UPDATE_EVENT.wait() # wait until get batch of data
self.sess.run(self.update_oldpi_op) # old pi to pi
data = [QUEUE.get() for _ in range(QUEUE.qsize())]
data = np.vstack(data)
s, a, r = data[:, :S_DIM], data[:, S_DIM: S_DIM + A_DIM], data[:, -1:]
adv = self.sess.run(self.advantage, {self.tfs: s, self.tfdc_r: r})
[self.sess.run(self.atrain_op, {self.tfs: s, self.tfa: a, self.tfadv: adv}) for _ in range(UPDATE_STEP)]
[self.sess.run(self.ctrain_op, {self.tfs: s, self.tfdc_r: r}) for _ in range(UPDATE_STEP)]
UPDATE_EVENT.clear() # updating finished
GLOBAL_UPDATE_COUNTER = 0 # reset counter
ROLLING_EVENT.set() # set roll-out available
def _build_anet(self, name, trainable):
with tf.variable_scope(name):
l1 = tf.layers.dense(self.tfs, 200, tf.nn.relu, trainable=trainable)
mu = A_BOUND * tf.layers.dense(l1, A_DIM, tf.nn.tanh, trainable=trainable)
sigma = tf.layers.dense(l1, A_DIM, tf.nn.softplus, trainable=trainable)
norm_dist = Normal(loc=mu, scale=sigma)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return norm_dist, params
def choose_action(self, s):
s = s[np.newaxis, :]
a = self.sess.run(self.sample_op, {self.tfs: s})[0]
return np.clip(a, -2, 2)
def get_v(self, s):
if s.ndim < 2: s = s[np.newaxis, :]
return self.sess.run(self.v, {self.tfs: s})[0, 0]
class Worker(object):
def __init__(self, wid):
self.wid = wid
self.env = ArmEnv(mode=MODE[n_model])
self.ppo = GLOBAL_PPO
def work(self):
global GLOBAL_EP, GLOBAL_RUNNING_R, GLOBAL_UPDATE_COUNTER
while not COORD.should_stop():
s = self.env.reset()
ep_r = 0
buffer_s, buffer_a, buffer_r = [], [], []
for t in range(EP_LEN):
if not ROLLING_EVENT.is_set(): # while global PPO is updating
ROLLING_EVENT.wait() # wait until PPO is updated
buffer_s, buffer_a, buffer_r = [], [], [] # clear history buffer
a = self.ppo.choose_action(s)
s_, r, done = self.env.step(a)
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r) # normalize reward, find to be useful
s = s_
ep_r += r
GLOBAL_UPDATE_COUNTER += 1 # count to minimum batch size
if t == EP_LEN - 1 or GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
v_s_ = self.ppo.get_v(s_)
discounted_r = [] # compute discounted reward
for r in buffer_r[::-1]:
v_s_ = r + GAMMA * v_s_
discounted_r.append(v_s_)
discounted_r.reverse()
bs, ba, br = np.vstack(buffer_s), np.vstack(buffer_a), np.array(discounted_r)[:, np.newaxis]
buffer_s, buffer_a, buffer_r = [], [], []
QUEUE.put(np.hstack((bs, ba, br)))
if GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
ROLLING_EVENT.clear() # stop collecting data
UPDATE_EVENT.set() # globalPPO update
if GLOBAL_EP >= EP_MAX: # stop training
COORD.request_stop()
break
# record reward changes, plot later
if len(GLOBAL_RUNNING_R) == 0: GLOBAL_RUNNING_R.append(ep_r)
else: GLOBAL_RUNNING_R.append(GLOBAL_RUNNING_R[-1]*0.9+ep_r*0.1)
GLOBAL_EP += 1
print('{0:.1f}%'.format(GLOBAL_EP/EP_MAX*100), '|W%i' % self.wid, '|Ep_r: %.2f' % ep_r,)
if __name__ == '__main__':
GLOBAL_PPO = PPO()
UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event()
UPDATE_EVENT.clear() # no update now
ROLLING_EVENT.set() # start to roll out
workers = [Worker(wid=i) for i in range(N_WORKER)]
GLOBAL_UPDATE_COUNTER, GLOBAL_EP = 0, 0
GLOBAL_RUNNING_R = []
COORD = tf.train.Coordinator()
QUEUE = queue.Queue()
threads = []
for worker in workers: # worker threads
t = threading.Thread(target=worker.work, args=())
t.start()
threads.append(t)
# add a PPO updating thread
threads.append(threading.Thread(target=GLOBAL_PPO.update,))
threads[-1].start()
COORD.join(threads)
# plot reward change and testing
plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
plt.xlabel('Episode'); plt.ylabel('Moving reward'); plt.ion(); plt.show()
env.set_fps(30)
while True:
s = env.reset()
for t in range(400):
env.render()
s = env.step(GLOBAL_PPO.choose_action(s))[0] |
hotkey.py | from enum import Enum
from threading import Thread
from typing import Dict, Callable, Optional
import keyboard
from playsound import playsound
from fishy.helper import helper
class Key(Enum):
F9 = "f9"
F10 = "f10"
F8 = "f8"
F7 = "f7"
UP = "up"
DOWN = "down"
LEFT = "left"
RIGHT = "right"
_hotkeys: Dict[Key, Optional[Callable]] = {}
def _get_callback(k):
def callback():
if not _hotkeys[k]:
return
playsound(helper.manifest_file("beep.wav"), False)
Thread(target=_hotkeys[k]).start()
return callback
def initalize():
for k in Key:
_hotkeys[k] = None
keyboard.add_hotkey(k.value, _get_callback(k))
def set_hotkey(key: Key, func: Optional[Callable]):
_hotkeys[key] = func
def free_key(k: Key):
set_hotkey(k, None)
|
email.py | #coding:utf-8
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from app import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['WR_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['WR_MAIL_SENDER'],
recipients=list(set(to)))
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
|
consumers.py | import zmq
from .sockets import ClientConnection
from .constants import *
import threading
def consumer(address,callback,message_type):
"""
Creates a consumer binding to the given address pull messages.
The callback is invoked for every reply received.
Args:
- address: the address to bind the PULL socket to.
- callback: the callback to invoke for every message. Must accept 1 variables - the message
- message_type: the type of message to receive
"""
return Consumer(address,callback,message_type)
class Consumer(ClientConnection):
"""
Requestor that that can send requests of given type
Args:
- address: the address to bind to
- callback: the callback to invoke for every reply
- message_type: the type of request to send
"""
def __init__(self,address,callback,message_type):
self._active = True
self._callback = callback
self._message_type = message_type
super(Consumer,self).__init__(address,zmq.PULL)
def _consume(self):
while self._active:
try:
topic, message=super(Consumer,self).receive(self._message_type)
#process the message
self._callback(message)
except zmq.ZMQError:
pass
def start(self):
"""
Start a thread that consumes the replies and invokes the callback
"""
t=threading.Thread(target=self._consume)
t.start()
def stop(self):
"""
Stop the consumer thread
"""
self._active = False
|
test_pyiec61850.py | #!/usr/bin/python
import sys
import time
import threading
import traceback
import signal
import sys
sys.path.append('..')
import iec61850
def signal_handler(signal, frame):
global running
running = 0
print('You pressed Ctrl+C!')
# sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
tcpPort = 8102
running = 1
class myIECServer():
    """Minimal IEC 61850 server exposing two temperature objects for the test."""

    def __init__(self):
        # Build the data model: one logical device ("SENSORS") with LLN0 plus
        # a TTMP1 node carrying a sampled value (SAV) and a setpoint (ASG).
        self.__model = iec61850.IedModel_create("testmodel")
        device = iec61850.LogicalDevice_create("SENSORS", self.__model)
        iec61850.LogicalNode_create("LLN0", device)
        ttmp1 = iec61850.LogicalNode_create("TTMP1", device)
        iec61850.CDC_SAV_create("TmpSv", iec61850.toModelNode(ttmp1), 0, False)
        iec61850.CDC_ASG_create("TmpSp", iec61850.toModelNode(ttmp1), 0, False)
        self.__iedServer = iec61850.IedServer_create(self.__model)
        iec61850.IedServer_start(self.__iedServer, tcpPort)
        if not iec61850.IedServer_isRunning(self.__iedServer):
            print("Starting server failed! Exit.\n")
            iec61850.IedServer_destroy(self.__iedServer)
            sys.exit(-1)

    def run(self):
        """Spin until the global run flag clears, then shut the server down."""
        global running
        while running:
            time.sleep(0.1)
        self.stop()

    def stop(self):
        """Stop the server and release the server and data-model resources."""
        iec61850.IedServer_stop(self.__iedServer)
        iec61850.IedServer_destroy(self.__iedServer)
        iec61850.IedModel_destroy(self.__model)
def testClient():
    """Exercise read/write access to the server's SAV and ASG float values."""
    con = iec61850.IedConnection_create()
    error = iec61850.IedConnection_connect(con, "127.0.0.1", tcpPort)
    if (error == iec61850.IED_ERROR_OK):
        # Accessing to SAV values
        theVal = "testmodelSENSORS/TTMP1.TmpSv.instMag.f"
        theValType = iec61850.IEC61850_FC_MX
        temperatureValue = iec61850.IedConnection_readFloatValue(con, theVal, theValType)
        assert (temperatureValue[1] == 0)
        newValue = temperatureValue[0] + 10
        # Writing a measured (MX) value must be rejected by the server
        # (error code 21 -- NOTE(review): confirm against IedClientError enum).
        err = iec61850.IedConnection_writeFloatValue(con, theVal, theValType, newValue)
        assert (err == 21)
        # Accessing to ASG values
        theVal = "testmodelSENSORS/TTMP1.TmpSp.setMag.f"
        theValType = iec61850.IEC61850_FC_SP
        temperatureSetpoint = iec61850.IedConnection_readFloatValue(con, theVal, theValType)
        print(temperatureSetpoint)
        # BUG FIX: the original re-checked temperatureValue here (copy-paste
        # from the SAV section); the *setpoint* read result is what must be
        # validated and incremented.
        assert (temperatureSetpoint[1] == 0)
        newValue = temperatureSetpoint[0] + 10
        err = iec61850.IedConnection_writeFloatValue(con, theVal, theValType, newValue)
        assert (err == 0)
        temperatureSetpoint = iec61850.IedConnection_readFloatValue(con, theVal, theValType)
        print(temperatureSetpoint)
        assert (temperatureSetpoint[0] == newValue)
        iec61850.IedConnection_close(con)
    else:
        print("Connection error")
        sys.exit(-1)
    iec61850.IedConnection_destroy(con)
    print("client ok")
# Start the server on a background thread; the commented calls show how the
# client test and shutdown were driven manually.
try:
    srv = myIECServer()
    srvThread = threading.Thread(target=srv.run)
    srvThread.start()
    # testClient()
    # running = 0
    # signal.pause()
except:
    running = 0
    print("Error :")
    traceback.print_exc(file=sys.stdout)
    sys.exit(-1)
|
git-csv-data.py | import re
import random
import io
from threading import Thread
import requests
import json
class CSVData:
    """One CSV row describing a commit and its repository statistics."""

    def __init__(self, msg="", id="", stars="", forks="", watchers=""):
        # Everything is coerced to str so joining into a row never fails.
        self.msg = str(msg)
        self.id = str(id)
        self.stars = str(stars)
        self.forks = str(forks)
        self.watchers = str(watchers)

    def getCSVRepresentation(self):
        """Return the row formatted as 'msg, id, stars, forks, watchers'."""
        return ", ".join((self.msg, self.id, self.stars, self.forks, self.watchers))
# Shared pools populated from local text files at startup.
proxies = []
userAgents = []

def loadProxies():
    """Fill the global proxy pool from proxies.txt (one proxy per line)."""
    global proxies
    with open("proxies.txt", "r") as f:
        proxies.extend(line.strip() for line in f)
def loadUserAgents():
    """Fill the global user-agent pool from user-agents.txt (one UA per line)."""
    global userAgents
    with open("user-agents.txt", "r") as f:
        for ua in f:
            userAgents.append(ua.strip())
def randomUserAgent():
    """Return a requests-style headers dict with a random pooled User-Agent."""
    return {"User-Agent": random.choice(userAgents)}
def randomProxy():
    """Pick random proxies from the pool until one answers a probe request,
    then return that working proxy mapping."""
    while True:
        candidate = {}
        candidate["https"] = "https://" + random.choice(proxies)
        try:
            # Probe the proxy; any failure (timeout, refusal) tries another one.
            requests.get("https://myexternalip.com/raw", proxies=candidate, headers=randomUserAgent(), timeout=3)
        except Exception:
            continue
        print("choosing proxy: ", candidate)
        return candidate
# Populate the pools once, then scrape; THREAD_COUNT workers fetch commit pages.
loadProxies()
loadUserAgents()
THREAD_COUNT = 30;
allUsersRepo = ["https://api.github.com/users/facebook/repos?per_page=100"]
#commit message, repo-id, stars, forks, watchers, additions, deletions
def fetchData(startPageClean, repoData, i):
    """Worker: fetch one page of commits for *repoData* and stash the CSV rows.

    Rows are stored in the shared `results[i]` slot so the main thread can
    flush them to disk after join().
    :param startPageClean: fully-built commits page URL to fetch
    :param repoData: repo JSON object (stars/forks/watchers/id are read)
    :param i: index of this worker's slot in the global `results` list
    """
    print("scraping commit page: " + startPageClean)
    proxy = randomProxy()
    headers = randomUserAgent()
    try:
        moreCommits = requests.get(startPageClean, proxies=proxy, headers=headers, timeout=5)
        commit_data_json = moreCommits.json()
        totalCommitData = []
        for commit_data in commit_data_json:
            # Commas/newlines would corrupt the CSV row, so strip them out.
            curmsg = commit_data["commit"]["message"].replace('\n', ' ').replace('\r', '').replace(",", "")
            curstars = repoData["stargazers_count"]
            curforks = repoData["forks"]
            curwatchers = repoData["watchers_count"]
            curid = repoData["id"]
            tmpData = CSVData(
                msg=curmsg, stars=curstars, forks=curforks,
                watchers=curwatchers, id=curid)
            totalCommitData.append(tmpData.getCSVRepresentation())
        results[i] = totalCommitData
    except Exception as e:
        print("ERROR IN THREADING CALL: ", repr(e))
        # BUG FIX: the original called e.__traceback__() -- __traceback__ is an
        # attribute, not a callable, so the handler itself raised TypeError and
        # killed the worker. Print the traceback properly instead.
        import traceback
        traceback.print_exc()
# Main scraping loop: stream commit CSV rows for every repository of every
# configured user into facebook-commits.csv.
f = io.open('facebook-commits.csv', 'w', encoding='utf-8')
for repos in allUsersRepo:
    proxy = randomProxy()
    headers = randomUserAgent()
    r = requests.get(repos, proxies=proxy, headers=headers, timeout=5);
    print (r.text)
    jsonDataRepos = r.json();
    repoDataCount = 0
    for repoData in jsonDataRepos:
        repoDataCount+=1;
        #tmpData = CSVData(stars=repoData["stargazers_count"], forks=repoData["forks"], watchers=repoData["watchers_count"])
        # Strip the "{/sha}" template suffix from the commits_url template.
        cleanCommitUrl = repoData["commits_url"];
        cleanCommitUrl = cleanCommitUrl[0:cleanCommitUrl.index("{")]
        cleanCommitUrl += "?per_page=100"
        proxy = randomProxy()
        headers = randomUserAgent()
        try:
            commit_data_request = requests.get(cleanCommitUrl, proxies=proxy, headers=headers, timeout=5);
        except:
            continue;
        commit_data_json = commit_data_request.json();
        # First page of commits is written synchronously.
        for commit_data in commit_data_json:
            try:
                tmpData = CSVData(msg=commit_data["commit"]["message"].replace('\n', ' ').replace('\r', '').replace(",",""), stars=repoData["stargazers_count"], forks=repoData["forks"],
                                  watchers=repoData["watchers_count"], id=repoData["id"])
                f.write(tmpData.getCSVRepresentation()+'\n')
            except:
                continue;
        try:
            # Parse the pagination bounds out of the Link response header, then
            # fetch the remaining pages in batches of THREAD_COUNT threads.
            pagesURLDirty = commit_data_request.headers["Link"]
            startPageClean = pagesURLDirty[pagesURLDirty.find("<")+1:pagesURLDirty.find(">")];
            end = pagesURLDirty.find(">");
            lastPageClean = pagesURLDirty[pagesURLDirty.find("<", end+1)+1:pagesURLDirty.find(">", end+1)]
            #print ("FIRST PAGE: ", startPageClean);
            print ("LPC: ", lastPageClean)
            startPageInt = int(startPageClean[-1])  # NOTE(review): assumes a single-digit first page number -- confirm
            lastPageInt = int(lastPageClean.split("&page=")[1])
            print ("LAST PAGE NUMBER IS: " + str(lastPageInt))
            while (startPageInt <= lastPageInt):
                threads = [None] * THREAD_COUNT
                results = [None] * THREAD_COUNT
                for i in range(len(threads)):
                    threads[i] = Thread(target=fetchData, args=(startPageClean, repoData, i))
                    threads[i].start()
                    startPageInt += 1;
                    startPageClean = startPageClean.split("&page=",1)[0] + "&page=" + str(startPageInt)
                    if (startPageInt > lastPageInt):
                        break;
                # Wait for the batch, then flush each worker's rows in order.
                for thread in threads:
                    if thread != None:
                        thread.join();
                for result in results:
                    if result != None:
                        for commit in result:
                            f.write(commit+'\n');
            # print ("scraping commit page: " + str(startPageInt))
            # proxy = randomProxy()
            # headers = randomUserAgent()
            # try :
            #     moreCommits = requests.get(startPageClean, proxies=proxy, headers=headers, timeout=5);
            #     commit_data_json = moreCommits.json();
            #
            #     for commit_data in commit_data_json:
            #         tmpData = CSVData(
            #             msg=commit_data["commit"]["message"].replace('\n', ' ').replace('\r', '').replace(",", ""),
            #             stars=repoData["stargazers_count"], forks=repoData["forks"],
            #             watchers=repoData["watchers_count"], id=repoData["id"])
            #
            #         f.write(tmpData.getCSVRepresentation()+'\n')
            #
            #     startPageInt += 1;
            #     startPageClean = startPageClean.split("=")[0] + "="+str(startPageInt)
            #     print("new page: " + startPageClean)
            #
            # except Exception as e:
            #     print (e)
        except UnicodeEncodeError:
            print ('something')
        except Exception as e:
            print("ERROR IN FETCH NEXT PG DATA: ", repr(e))
            continue;
f.close(); |
TimeHandler.py |
import time
import threading
class TimeTracker:
    """Closes a websocket that stays connected without identifying itself in time."""

    def __init__(self):
        # Flag polled by the monitor loop; cleared to stop tracking.
        self.running = True

    def check_elasped_time(self, websocket, timeout=20):
        """Poll until *timeout* seconds elapse, then close *websocket*.

        BUG FIXES vs. the original:
        - time.time() takes no arguments; the original passed the websocket
          and raised TypeError on the first call.
        - the elapsed time was measured against an undefined global
          ``clock.start_time`` and never refreshed inside the loop; the start
          is now captured once and the clock re-read every iteration.
        - ``timeout`` is a parameter now (default 20, the original limit).
        """
        # Only allowed to stay connected to the proxy
        # without stating who you are for `timeout` seconds!
        start_time = time.time()
        while self.running:
            elasped_time = int(time.time() - start_time)
            if elasped_time > timeout:
                websocket.close()
                self.stop_timer()
            else:
                time.sleep(0.5)  # avoid a busy-wait burning CPU

    def stop_timer(self):
        """Stop the monitoring loop."""
        self.running = False

    def start_timer(self, websocket):
        """Start monitoring *websocket* on a background thread."""
        thread = threading.Thread(target=self.monitor_timeout, args=(websocket,))
        thread.start()

    def monitor_timeout(self, websocket):
        """Thread entry point wrapping check_elasped_time."""
        self.check_elasped_time(websocket)
|
Semaphore_studikasus.py | import logging
import threading
import time
import random
# Log format includes the thread name so interleaved output stays readable.
LOG_FORMAT = '%(asctime)s %(threadName)-17s %(levelname)-8s %(message)s'
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
semaphore = threading.Semaphore(0)  # starts at 0: customers block until CS releases
item = 0  # queue number currently being served; written by cs()
def nasabah():
    """Customer thread: wait until a queue number is called, then proceed."""
    logging.info('Nasabah Menunggu nomor Antriannya dipanggil')
    semaphore.acquire()  # block until cs() releases one permit
    logging.info('Nasabah dengan nomor antrian %d menuju Customer Service' % (item))
def cs():
    """Customer-service thread: draw a queue number after a delay, then wake one customer."""
    global item
    time.sleep(3)  # simulate service preparation time
    item = random.randint(0, 100)
    logging.info('Customer Service memanggil nomor antrian {}'.format(item))
    semaphore.release()
def main():
    """Run five rounds; each round pairs one customer with one CS thread."""
    for _ in range(5):
        customer = threading.Thread(target=nasabah)
        service = threading.Thread(target=cs)
        customer.start()
        service.start()
        customer.join()
        service.join()

if __name__ == "__main__":
    main()
|
B.py | import sys
import threading
# The DFS below recurses once per vertex: raise the recursion limit and give
# worker threads a large (100 MB) stack before spawning solve().
sys.setrecursionlimit(10**6+1)
threading.stack_size(10**8)
def solve():
    """Read a directed graph and a start vertex from stdin, then print which
    player wins the token-moving game starting at that vertex."""
    n_v, m_e, s = map(int, input().split())
    s -= 1  # switch to 0-based vertex ids
    adj = {}
    for _ in range(m_e):
        u, v = map(int, input().split())
        adj.setdefault(u - 1, []).append(v - 1)
    dp = [0] * n_v          # dp[u] == 1 -> the player to move at u wins
    visited = [False] * n_v

    def dfs(u):
        dp[u] = 0
        visited[u] = True
        for v in adj.get(u, ()):
            if not visited[v]:
                dfs(v)
            # A move into a losing position makes u a winning position.
            if dp[v] == 0:
                dp[u] = 1
                break

    dfs(s)
    if dp[s] == 1:
        print('First player wins')
    else:
        print('Second player wins')
# Run solve() on a worker thread so the enlarged stack_size applies
# (threading.stack_size only affects threads created after the call).
t = threading.Thread(target=solve)
t.start()
t.join()
|
inspector_websocket.py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import socket
import threading
import time
from telemetry.core.backends.chrome import websocket
class InspectorWebsocket(object):
    """Websocket wrapper for talking to a DevTools Inspector.

    Supports synchronous requests, blocking notification dispatch, and an
    optional background thread for asynchronous notification dispatch.
    """

    def __init__(self, notification_handler=None, error_handler=None):
        """Create a websocket handler for communicating with Inspectors.

        Args:
          notification_handler: A callback for notifications received as a
              result of calling DispatchNotifications() or
              StartAsyncDispatchNotifications(). Must accept a single JSON
              object containing the Inspector's notification. May return True
              to indicate the stop async dispatching.
          error_handler: A callback for errors in communicating with the
              Inspector. Must accept a single numeric parameter indicated the
              time elapsed before the error.
        """
        self._socket = None
        self._thread = None  # async dispatcher thread; None when not dispatching
        self._cur_socket_timeout = 0  # last timeout applied to the socket
        self._next_request_id = 0  # monotonically increasing request id
        self._notification_handler = notification_handler
        self._error_handler = error_handler

    @property
    def is_dispatching_async_notifications(self):
        """True while the async dispatcher thread is installed."""
        # FIX: identity comparison with None (PEP 8) instead of `!= None`.
        return self._thread is not None

    def Connect(self, url, timeout=10):
        """Open the websocket connection to *url*; must not already be connected."""
        assert not self._socket
        self._socket = websocket.create_connection(url, timeout=timeout)
        self._cur_socket_timeout = 0
        self._next_request_id = 0

    def Disconnect(self):
        """Close the connection, if any."""
        if self._socket:
            self._socket.close()
            self._socket = None

    def SendAndIgnoreResponse(self, req):
        """Assign the next request id to *req* and send it without waiting."""
        req['id'] = self._next_request_id
        self._next_request_id += 1
        data = json.dumps(req)
        self._socket.send(data)
        logging.debug('sent [%s]', data)

    def SyncRequest(self, req, timeout=10):
        """Send *req* and block until the response with the matching id arrives."""
        assert not self._thread, 'Cannot be used during async dispatching.'
        self.SendAndIgnoreResponse(req)
        while self._socket:
            res = self._Receive(timeout)
            if 'id' in res and res['id'] == req['id']:
                return res

    def DispatchNotifications(self, timeout=10):
        """Receive one message and route it through the notification handler."""
        assert not self._thread, 'Cannot be used during async dispatching.'
        self._Receive(timeout)

    def StartAsyncDispatchNotifications(self):
        """Spawn a daemon thread that keeps dispatching notifications."""
        assert not self._thread, 'Cannot be started twice.'
        self._thread = threading.Thread(target=self._AsyncDispatcher)
        self._thread.daemon = True
        self._thread.start()

    def StopAsyncDispatchNotifications(self):
        """Join the dispatcher thread; raises RuntimeError after 30s."""
        self._thread.join(timeout=30)
        if self._thread.is_alive():
            raise RuntimeError('Timed out waiting for async dispatch notifications.')
        self._thread = None

    def _AsyncDispatcher(self):
        """Thread body: receive until the socket closes or a handler stops us."""
        while self._socket:
            try:
                if not self._Receive():
                    break
            except websocket.WebSocketTimeoutException:
                pass  # timeouts are expected while idle; keep polling

    def _SetTimeout(self, timeout):
        # Only touch the socket when the timeout actually changes.
        if self._cur_socket_timeout != timeout:
            self._socket.settimeout(timeout)
            self._cur_socket_timeout = timeout

    def _Receive(self, timeout=10):
        """Receive one message. Notifications ('method' key) go to the
        notification handler; returns None when the handler asks to stop,
        otherwise the decoded message.

        NOTE(review): on socket errors this calls error_handler with the
        elapsed time -- it must be set when errors can occur, or this raises
        TypeError.
        """
        self._SetTimeout(timeout)
        start_time = time.time()
        try:
            while self._socket:
                data = self._socket.recv()
                res = json.loads(data)
                logging.debug('got [%s]', data)
                if 'method' in res and self._notification_handler(res):
                    return None
                return res
        except (socket.error, websocket.WebSocketException):
            elapsed_time = time.time() - start_time
            self._error_handler(elapsed_time)
|
zmirror.py | #!/usr/bin/env python3
# coding=utf-8
import os
# noinspection PyUnresolvedReferences
from itertools import count
if os.path.dirname(__file__) != '':
os.chdir(os.path.dirname(__file__))
import traceback
import pickle
from datetime import datetime, timedelta
import re
import base64
import zlib
import random
import sched
import copy
from time import time, sleep
import queue
from fnmatch import fnmatch
from html import escape as html_escape
from urllib.parse import urljoin, urlsplit, urlunsplit, quote_plus
import requests
from flask import Flask, request, make_response, Response, redirect
from ColorfulPyPrint import * # TODO: Migrate logging tools to the stdlib
__VERSION__ = '0.23.1-dev'
__author__ = 'Aploium <i@z.codes>'
infoprint('zmirror version: ', __VERSION__, 'from', __author__)
infoprint('Github: https://github.com/Aploium/zmirror')
try:
import threading
except ImportError: # 在某些罕见的系统环境下,threading包可能失效,用dummy代替
import dummy_threading as threading
try: # 用于检测html的文本编码, cchardet是chardet的c语言实现, 非常快
from cchardet import detect as c_chardet
except:
cchardet_available = False
else:
cchardet_available = True
try: # lru_cache的c语言实现, 比Python内置lru_cache更快
from fastcache import lru_cache # lru_cache用于缓存函数的执行结果
except:
from functools import lru_cache
warnprint('package fastcache not found, fallback to stdlib lru_cache, no FUNCTION is effected, only maybe a bit slower. '
'Considering install it using "pip3 install fastcache"')
else:
infoprint('lru_cache loaded successfully from fastcache')
try: # 加载默认设置
from config_default import *
except:
traceback.print_exc()
errprint('the config_default.py is missing, this program may not works normally\n'
'config_default.py 文件丢失, 这会导致配置文件不向后兼容, 请重新下载一份 config_default.py')
raise # v0.23.1+ 当 config_default.py 不存在时, 程序会终止运行
try: # 加载用户自定义配置文件, 覆盖掉默认配置的同名项
from config import *
except:
traceback.print_exc()
errprint(
'the config_default.py is missing, fallback to default configs(if we can), '
'please COPY the config_default.py to config.py, and change it\'s content, '
'or use the configs in the more_configs folder\n'
'自定义配置文件 config.py 丢失或存在错误, 将使用默认设置, 请将 config_default.py 复制一份为 config.py, '
'并根据自己的需求修改里面的设置'
'(或者使用 more_configs 中的配置文件)'
)
raise # v0.23.1+ 当config文件存在错误或不存在时, 程序会终止运行
else:
infoprint('config file found')
# Optional local file cache; disabled automatically if it cannot be created.
if local_cache_enable:
    try:
        from cache_system import FileCache, get_expire_from_mime
        cache = FileCache()
    except Exception as e:
        traceback.print_exc()
        errprint('Can Not Create Local File Cache: ', e, ' local file cache is disabled automatically.')
        local_cache_enable = False
    else:
        infoprint('Local file cache enabled')
# ########## Basic Init #############
# Start loading settings from the config file; when reading the code you can
# skip this part and start from main_function().
ColorfulPyPrint_set_verbose_level(verbose_level)
my_host_name_no_port = my_host_name  # local domain without the port number
if my_host_port is not None:
    my_host_name += ':' + str(my_host_port)  # local domain with explicit port (omitted for standard ports)
    my_host_name_urlencoded = quote_plus(my_host_name)  # urlencoded form
else:
    my_host_name_urlencoded = my_host_name
static_file_extensions_list = set(static_file_extensions_list)
external_domains_set = set(external_domains or [])
allowed_domains_set = external_domains_set.copy()
allowed_domains_set.add(target_domain)
for _domain in external_domains:  # for support domain with port
    allowed_domains_set.add(urlsplit('http://' + _domain).hostname)
# Domains treated as the main domain, e.g. www.google.com and google.com can
# both be regarded as the main domain.
domain_alias_to_target_set = set()
domain_alias_to_target_set.add(target_domain)
domains_alias_to_target_domain = list(domains_alias_to_target_domain)
if domains_alias_to_target_domain:
    for _domain in domains_alias_to_target_domain:
        allowed_domains_set.add(_domain)
        domain_alias_to_target_set.add(_domain)
    domains_alias_to_target_domain.append(target_domain)
else:
    domains_alias_to_target_domain = [target_domain]
my_host_scheme_escaped = my_host_scheme.replace('/', r'\/')
myurl_prefix = my_host_scheme + my_host_name  # http(s)://www.my-mirror-site.com without trailing slash
myurl_prefix_escaped = myurl_prefix.replace('/', r'\/')
cdn_domains_number = len(CDN_domains)
allowed_remote_response_headers = {
    'content-type', 'date', 'expires', 'cache-control', 'last-modified', 'server', 'location',
    'accept-ranges',
    'access-control-allow-origin', 'access-control-allow-headers', 'access-control-allow-methods',
    'access-control-expose-headers', 'access-control-max-age', 'access-control-allow-credentials',
    'timing-allow-origin',
}
allowed_remote_response_headers.update(custom_allowed_remote_headers)
# ## Get Target Domain and MyHostName's Root Domain ##
# Parse the root domain of the target and of this host: www.foobar.com has
# root foobar.com, but www.aaa.foobar.com is treated as having root
# aaa.foobar.com. Two-level TLDs such as www.white.ac.cn are supported.
temp = target_domain.split('.')
if len(temp) <= 2 or len(temp) == 3 and temp[1] in ('com', 'net', 'org', 'co', 'edu', 'mil', 'gov', 'ac'):
    target_domain_root = target_domain
else:
    target_domain_root = '.'.join(temp[1:])
# Root domain of the local host name (same rules as target_domain_root above).
temp = my_host_name.split('.')
if len(temp) <= 2 or len(temp) == 3 and temp[1] in ('com', 'net', 'org', 'co', 'edu', 'mil', 'gov', 'ac'):
    # BUG FIX: this branch previously assigned target_domain (copy-paste from
    # the target_domain_root block above); the root of the *local* host name
    # must come from my_host_name itself.
    my_host_name_root = my_host_name
else:
    my_host_name_root = '.'.join(temp[1:])
# Keep-alive connection pool: one keep-alive connection per domain,
# piggybacking on requests' automatic keep-alive within a single Session.
connection_pool_per_domain = {}
if enable_keep_alive_per_domain:
    for _domain in allowed_domains_set:
        connection_pool_per_domain[_domain] = {'session': requests.Session(),}
# Marker used by cdn_redirect_encode_query_str_into_url to delimit query
# strings that were encoded into the url path.
cdn_url_query_encode_salt = 'zm24'
_url_salt = re.escape(cdn_url_query_encode_salt)
# ## thread local var ##
# Similar to flask's request object: stores parsed per-request information
# that is consulted throughout the program.
this_request = threading.local()
this_request.start_time = None  # unix time the request handling started
this_request.content_type = ''  # content_type from the remote response headers
this_request.mime = ''  # MIME of the remote response
this_request.cache_control = ''  # cache_control of the remote response
this_request.temporary_domain_alias = None  # plain-text domain aliases, see `plain_replace_domain_alias`
this_request.remote_domain = ''  # remote domain for the current request
this_request.is_https = ''  # whether the remote domain must be fetched over https
this_request.remote_url = ''  # url on the remote server
this_request.remote_path = ''  # corresponding remote path
this_request.remote_path_query = ''  # corresponding remote path + query string
this_request.remote_response = None  # remote response, requests.Response
# task_scheduler
task_scheduler = sched.scheduler(time, sleep)
# ########## Handle dependencies #############
# Force-disable options whose prerequisites are switched off.
if not enable_static_resource_CDN:
    mime_based_static_resource_CDN = False
    disable_legacy_file_recognize_method = True
if not mime_based_static_resource_CDN:
    cdn_redirect_code_if_cannot_hard_rewrite = 0  # record incoming urls if we should use cdn on it
url_to_use_cdn = {}
if not cdn_redirect_code_if_cannot_hard_rewrite:
    cdn_redirect_encode_query_str_into_url = False
if not isinstance(target_static_domains, set):
    target_static_domains = set()
if not enable_stream_content_transfer:
    steamed_mime_keywords = ()
if not url_custom_redirect_enable:
    url_custom_redirect_list = {}
    url_custom_redirect_regex = ()
    shadow_url_redirect_regex = ()
    plain_replace_domain_alias = ()
if not enable_stream_content_transfer:
    enable_stream_transfer_async_preload = False
if not enable_automatic_domains_whitelist:
    domains_whitelist_auto_add_glob_list = tuple()
if not enable_individual_sites_isolation:
    isolated_domains = set()
else:
    for isolated_domain in isolated_domains:
        if isolated_domain not in external_domains_set:
            warnprint('An isolated domain:', isolated_domain,
                      'would not have effect because it did not appears in the `external_domains` list')
if enable_custom_access_cookie_generate_and_verify:
    human_ip_verification_whitelist_from_cookies = False
if not is_use_proxy:
    requests_proxies = None
if human_ip_verification_enabled:
    import ipaddress
    buff = []
    for network in human_ip_verification_default_whitelist_networks:
        buff.append(ipaddress.ip_network(network, strict=False))
    human_ip_verification_default_whitelist_networks = tuple(buff)
    for question in human_ip_verification_questions:
        human_ip_verification_answers_hash_str += question[1]
else:
    identity_verify_required = False
    human_ip_verification_whitelist_from_cookies = False
    must_verify_cookies = False
if not human_ip_verification_whitelist_from_cookies and not enable_custom_access_cookie_generate_and_verify:
    must_verify_cookies = False
url_rewrite_cache = {}  # an VERY Stupid and VERY Experimental Cache
url_rewrite_cache_hit_count = 0
url_rewrite_cache_miss_count = 0
# ########### PreCompile Regex ###############
# Advanced url rewriter, see function response_text_rewrite()
# #### This regex is the heart of the program: it extracts url-like substrings
# from html/css/js. A match is not guaranteed to be a url; it is validated
# further in regex_url_reassemble(). Read it under an IDE's regex
# highlighting (e.g. PyCharm) if you need to modify it. ####
regex_adv_url_rewriter = re.compile(  # TODO: Add non-standard port support
    # prefix, must be one of 'action=' (form) 'href=' (link) 'src=' 'url(' (css) '@import' (css) '":' (js/json "key":"value")
    # \s matches whitespace such as space and tab
    r"""(?P<prefix>\b((action|href|src)\s*=|url\s*\(|@import\s*|"\s*:)\s*)""" +  # prefix, eg: src=
    # left quote, optional (url() allows none); for the others the rewrite
    # function checks that the left and right quotes match
    r"""(?P<quote_left>["'])?""" +  # quote "'
    # domain and scheme, optional. http:// https:// // http:\/\/ (json) https:\/\/ (json) \/\/ (json)
    r"""(?P<domain_and_scheme>(?P<scheme>(https?:)?\\?/\\?/)(?P<domain>([-a-z0-9]+\.)+[a-z]+(?P<port>:\d{1,5})?))?""" +
    # url path including parameters, optional
    r"""(?P<path>[^\s;+$?#'"\{}]*?""" +  # full path(with query string) /foo/bar.js?love=luciaZ
    # extension in the url, only enabled with legacy extension-based static file recognition
    (r"""(\.(?P<ext>[-_a-z0-9]+?))?""" if not disable_legacy_file_recognize_method else '') +  # file ext
    # query string, optional
    r"""(?P<query_string>\?[^\s?#'"]*?)?)""" +  # query string ?love=luciaZ
    # right quote (may also be a closing paren), required
    r"""(?P<quote_right>["'\)])(?P<right_suffix>\W)""",  # right quote "'
    flags=re.IGNORECASE
)
regex_extract_base64_from_embedded_url = re.compile(
    r'_' + _url_salt + r'(?P<gzip>z?)_\.(?P<b64>[a-zA-Z0-9-_]+=*)\._' + _url_salt + r'_\.[a-zA-Z\d]+\b')
# Response Cookies Rewriter, see response_cookie_rewrite()
regex_cookie_rewriter = re.compile(r'\bdomain=(\.?([\w-]+\.)+\w+)\b', flags=re.IGNORECASE)
regex_cookie_path_rewriter = re.compile(r'(?P<prefix>[pP]ath)=(?P<path>[\w\._/-]+?;)')
# Request Domains Rewriter, see client_requests_text_rewrite()
if my_host_port is not None:
    temp = r'(' + re.escape(my_host_name) + r'|' + re.escape(my_host_name_no_port) + r')'
else:
    temp = re.escape(my_host_name)
regex_request_rewriter = re.compile(
    temp + r'(/|(%2F))extdomains(/|(%2F))(https-)?(?P<origin_domain>\.?([\w-]+\.)+\w+)\b',
    flags=re.IGNORECASE)
# Flask main app
app = Flask(__name__)
# ########## Begin Utils #############
def cache_clean(is_force_flush=False):
    """
    Clean up garbage accumulated while the program runs; called periodically.
    Covers the various rewrite caches, the local file cache, etc.
    By default only expired entries are removed.
    :param is_force_flush: ignore TTLs and clear every cache
    """
    global url_rewrite_cache, cache, url_to_use_cdn, connection_pool_per_domain
    if len(url_rewrite_cache) > 16384:
        url_rewrite_cache.clear()
    if len(url_to_use_cdn) > 40960:
        url_to_use_cdn.clear()
    if enable_keep_alive_per_domain:
        connection_pool_per_domain.clear()
    try:
        if local_cache_enable:
            cache.check_all_expire(force_flush_all=is_force_flush)
    except:
        errprint('ErrorWhenCleaningLocalCache, is_force_flush=', is_force_flush)
        traceback.print_exc()
    if is_force_flush:
        # Also drop every function-level lru_cache.
        try:
            is_domain_match_glob_whitelist.cache_clear()
            is_content_type_streamed.cache_clear()
            extract_real_url_from_embedded_url.cache_clear()
            embed_real_url_to_embedded_url.cache_clear()
            check_global_ua_pass.cache_clear()
            is_mime_represents_text.cache_clear()
            extract_mime_from_content_type.cache_clear()
            is_content_type_using_cdn.cache_clear()
            is_ua_in_whitelist.cache_clear()
            verify_ip_hash_cookie.cache_clear()
            is_denied_because_of_spider.cache_clear()
            is_ip_not_in_allow_range.cache_clear()
            # client_requests_text_rewrite.cache_clear()
            # extract_url_path_and_query.cache_clear()
        except:
            errprint('ErrorWhenCleaningFunctionLruCache')
            traceback.print_exc()
def cron_task_container(task_dict, add_task_only=False):
    """
    Cron task container: invoke the target callable, then schedule its next run.
    :param task_dict: task parameters, dict
        { "target": target callable (a callable object, not a name string)  required,
          "interval": delay between runs in seconds  optional,
          "priority": priority  optional,
          "name": display name of the task  optional,
          "args": positional args (arg1, arg2)  optional,
          "kwargs": keyword args {key: value,}  optional }
    :param add_task_only: only schedule the task, do not execute it now
    """
    global task_scheduler
    if not add_task_only:
        # run the task
        try:
            infoprint('CronTask:', task_dict.get('name', str(task_dict['target'])), 'Target:', str(task_dict['target']))
            target_func = task_dict.get('target')
            if target_func is None:
                raise ValueError("target is not given in " + str(task_dict))
            target_func(
                *(task_dict.get('args', ())),  # unpack and forward the stored arguments
                **(task_dict.get('kwargs', {}))
            )
        except:
            errprint('ErrorWhenProcessingCronTasks', task_dict)
            traceback.print_exc()
    # schedule the next run
    task_scheduler.enter(
        task_dict.get('interval', 300),
        task_dict.get('priority', 999),
        cron_task_container,
        (task_dict,)
    )
def cron_task_host():
    """Cron host: once a minute, run any scheduled tasks whose time has come."""
    while True:
        sleep(60)
        try:
            task_scheduler.run()
        except:
            errprint('ErrorDuringExecutingCronTasks')
            traceback.print_exc()
# noinspection PyShadowingNames
def calc_domain_replace_prefix(_domain):
    """Build every scheme/escaping/urlencoding variant of *_domain* that the
    rewriters substitute for, keyed by variant name."""
    slash = '//' + _domain
    http = 'http://' + _domain
    https = 'https://' + _domain
    double_quoted = '"%s"' % _domain
    single_quoted = "'%s'" % _domain

    def esc(text):
        # JSON-style escaped slashes: '/' -> '\/'
        return text.replace('/', r'\/')

    return {
        # normal
        'slash': slash,
        'http': http,
        'https': https,
        'double_quoted': double_quoted,
        'single_quoted': single_quoted,
        # hex
        'hex_lower': slash.replace('/', r'\x2f'),
        'hex_upper': slash.replace('/', r'\x2F'),
        # escape slash
        'slash_esc': esc(slash),
        'http_esc': esc(http),
        'https_esc': esc(https),
        'double_quoted_esc': '\\"%s\\"' % _domain,
        'single_quoted_esc': "\\'%s\\'" % _domain,
        # double escape slash
        'slash_double_esc': slash.replace('/', r'\\\/'),
        'http_double_esc': http.replace('/', r'\\\/'),
        'https_double_esc': https.replace('/', r'\\\/'),
        # urlencoded
        'slash_ue': quote_plus(slash),
        'http_ue': quote_plus(http),
        'https_ue': quote_plus(https),
        'double_quoted_ue': quote_plus(double_quoted),
        'single_quoted_ue': quote_plus(single_quoted),
        # escaped and urlencoded
        'slash_esc_ue': quote_plus(esc(slash)),
        'http_esc_ue': quote_plus(esc(http)),
        'https_esc_ue': quote_plus(esc(https)),
    }
def add_temporary_domain_alias(source_domain, replaced_to_domain):
    """
    Register a per-request plain-text domain substitution.
    Used by the plain-text replacer, see the `plain_replace_domain_alias` option.
    :param source_domain: domain to be replaced
    :param replaced_to_domain: domain it is replaced with
    """
    current = this_request.temporary_domain_alias
    aliases = [] if current is None else list(current)
    aliases.append((source_domain, replaced_to_domain))
    this_request.temporary_domain_alias = tuple(aliases)
    dbgprint('A domain', source_domain, 'to', replaced_to_domain, 'added to temporary_domain_alias',
             this_request.temporary_domain_alias)
@lru_cache(maxsize=1024)
def is_domain_match_glob_whitelist(domain):
    """
    True if *domain* matches any glob pattern configured in
    `domains_whitelist_auto_add_glob_list`.
    """
    return any(fnmatch(domain, pattern)
               for pattern in domains_whitelist_auto_add_glob_list)
@lru_cache(maxsize=128)
def is_content_type_streamed(_content_type):
    """
    Decide from the content-type whether the response should be streamed
    (forwarded to the user while still downloading). Binary content such as
    video/audio/images is streamed by default.
    """
    return any(keyword in _content_type for keyword in steamed_mime_keywords)
# noinspection PyGlobalUndefined
def try_match_and_add_domain_to_rewrite_white_list(domain, force_add=False):
    """
    If *domain* matches a glob in `domains_whitelist_auto_add_glob_list`,
    add it to the external_domains list; domains in external_domains get the
    url-rewrite machinery applied to them.
    Used to add domains to external_domains dynamically while the program
    runs; may also be called from external code (custom_func.py).
    See the docs in default_config.py for more details on external_domains.
    """
    global external_domains, external_domains_set, allowed_domains_set, prefix_buff
    if domain is None or not domain:
        return False
    if domain in allowed_domains_set:
        return True
    if not force_add and not is_domain_match_glob_whitelist(domain):
        return False
    else:
        infoprint('A domain:', domain, 'was added to external_domains list')
        _buff = list(external_domains)  # external_domains is a tuple; convert before appending
        _buff.append(domain)
        external_domains = tuple(_buff)  # back to tuple for its small performance edge
        external_domains_set.add(domain)
        allowed_domains_set.add(domain)
        prefix_buff[domain] = calc_domain_replace_prefix(domain)
        # write log
        try:
            with open('automatic_domains_whitelist.log', 'a', encoding='utf-8') as fp:
                fp.write(domain + '\n')
        except:
            traceback.print_exc()
        return True
def current_line_number():
    """Returns the current line number in our program."""
    import inspect
    caller_frame = inspect.currentframe().f_back
    return caller_frame.f_lineno
@lru_cache(maxsize=1024)
def extract_real_url_from_embedded_url(embedded_url):
    """
    Convert a url encoded by embed_real_url_to_embedded_url() back to the
    original url carrying its query string.
    The `cdn_redirect_encode_query_str_into_url` option depends on this
    function; see that option's section in the config file for details.
    eg: https://cdn.domain.com/a.php_zm24_.cT1zb21ldGhpbmc=._zm24_.css
      ---> https://foo.com/a.php?q=something (assume it returns an css) (base64 only)
    eg2: https://cdn.domain.com/a/b/_zm24_.bG92ZT1saXZl._zm24_.jpg
      ---> https://foo.com/a/b/?love=live (assume it returns an jpg) (base64 only)
    eg3: https://cdn.domain.com/a/b/_zm24z_.[some long long base64 encoded string]._zm24_.jpg
      ---> https://foo.com/a/b/?love=live[and a long long query string] (assume it returns an jpg) (gzip + base64)
    eg4: https://cdn.domain.com/a (no change)
      ---> (no query string): https://foo.com/a (assume it returns an png) (no change)
    :param embedded_url: embedded_url
    :return: real url or None
    """
    if '._' + cdn_url_query_encode_salt + '_.' not in embedded_url[-15:]:  # check url mark
        return None
    m = regex_extract_base64_from_embedded_url.search(embedded_url)
    b64 = get_group('b64', m)
    if not b64:
        return None
    # 'https://cdn.domain.com/a.php_zm24_.cT1zb21ldGhpbmc=._zm24_.css'
    # real_request_url_no_query ---> 'https://cdn.domain.com/a.php'
    real_request_url_no_query = embedded_url[:m.span()[0]]
    try:
        query_string_byte = base64.urlsafe_b64decode(b64)
        is_gzipped = get_group('gzip', m)
        if is_gzipped:
            query_string_byte = zlib.decompress(query_string_byte)
        query_string = query_string_byte.decode(encoding='utf-8')
    except:
        traceback.print_exc()
        return None
    result = urljoin(real_request_url_no_query, '?' + query_string)
    # dbgprint('extract:', embedded_url, 'to', result)
    return result
@lru_cache(maxsize=1024)
def embed_real_url_to_embedded_url(real_url_raw, url_mime, escape_slash=False):
    """
    Encode a url's query string (?q=some&foo=bar) into the url path and append
    a file extension, which reduces errors on CDNs that handle query strings
    poorly.
    The `cdn_redirect_encode_query_str_into_url` option depends on this
    function; see the corresponding section of the config file for details.
    Decoding is done by extract_real_url_from_embedded_url(); see that
    function for matching examples.
    """
    # dbgprint(real_url_raw, url_mime, escape_slash)
    if escape_slash:
        real_url = real_url_raw.replace(r'\/', '/')
    else:
        real_url = real_url_raw
    url_sp = urlsplit(real_url)
    if not url_sp.query:  # no query, needn't rewrite
        return real_url_raw
    try:
        byte_query = url_sp.query.encode()
        if len(byte_query) > 128:  # gzip-compress overly long query strings
            gzip_label = 'z'  # compressed queries get a 'z' added to the marker section
            byte_query = zlib.compress(byte_query)
        else:
            gzip_label = ''
        b64_query = base64.urlsafe_b64encode(byte_query).decode()
        # dbgprint(url_mime)
        mixed_path = url_sp.path + '_' + _url_salt + gzip_label + '_.' \
                     + b64_query \
                     + '._' + _url_salt + '_.' + mime_to_use_cdn[url_mime]
        result = urlunsplit((url_sp.scheme, url_sp.netloc, mixed_path, '', ''))
    except:
        traceback.print_exc()
        return real_url_raw
    else:
        if escape_slash:
            result = result.replace('/', r'\/')
        # dbgprint('embed:', real_url_raw, 'to:', result)
        return result
def decode_mirror_url(mirror_url=None):
    """
    Parse a mirror url (which may contain /extdomains/) and extract the
    original remote-url information.
    It does not need to be a complete url -- the path part alone suffices
    (a query string may also be present).
    If the parameter is left empty, the url currently requested by the user is used.
    Supports json (handles the \/ and \. escapes).
    :param mirror_url: mirror url or path to decode; None = current request
    :return: dict(domain, is_https, path, path_query)
    :rtype: {'domain':str, 'is_https':bool, 'path':str, 'path_query':str}
    """
    _is_escaped_dot = False
    _is_escaped_slash = False
    result = {}
    if mirror_url is None:
        input_path_query = extract_url_path_and_query()  # use the current request's url
    else:
        if r'\/' in mirror_url:  # if \/ appears, unescape first and re-escape after processing
            _is_escaped_slash = True
            mirror_url = mirror_url.replace(r'\/', '/')
        if r'\.' in mirror_url:  # same for the \. escape
            _is_escaped_dot = True
            mirror_url = mirror_url.replace(r'\.', '.')
        input_path_query = extract_url_path_and_query(mirror_url)
    if input_path_query[:12] == '/extdomains/':
        # 12 == len('/extdomains/')
        # NOTE(review): find() returns -1 when no '/' follows the domain part;
        # presumably callers always pass a path with a trailing slash after the
        # domain, otherwise the domain's last char would be cut off -- verify.
        domain_end_pos = input_path_query.find('/', 12)
        real_domain = input_path_query[12:domain_end_pos]
        real_path_query = input_path_query[domain_end_pos:]
        if real_domain[:6] == 'https-':
            # the 'https-' prefix marks a remote site accessed over https
            real_domain = real_domain[6:]
            _is_https = True
        else:
            _is_https = False
        real_path_query = client_requests_text_rewrite(real_path_query)
        # restore the json-style escaping that was removed above, if any
        if _is_escaped_dot: real_path_query = real_path_query.replace('.', r'\.')
        if _is_escaped_slash: real_path_query = real_path_query.replace('/', r'\/')
        result['domain'] = real_domain
        result['is_https'] = _is_https
        result['path_query'] = real_path_query
        result['path'] = urlsplit(result['path_query']).path
        return result
    # no /extdomains/ prefix: the url belongs to the main target domain
    input_path_query = client_requests_text_rewrite(input_path_query)
    if _is_escaped_dot: input_path_query = input_path_query.replace('.', r'\.')
    if _is_escaped_slash: input_path_query = input_path_query.replace('/', r'\/')
    result['domain'] = target_domain
    result['is_https'] = (target_scheme == 'https://')
    result['path_query'] = input_path_query
    result['path'] = urlsplit(result['path_query']).path
    return result
# Function alias, kept for backward compatibility with early-version config files
extract_from_url_may_have_extdomains = decode_mirror_url
# noinspection PyShadowingNames
def encode_mirror_url(raw_url_or_path, remote_domain=None, is_scheme=None, is_escape=False):
    """Convert a url from its remote form to the mirror-site form.

    :param raw_url_or_path: url (or bare path) found in remote content
    :param remote_domain: remote domain override; defaults to the url's own
        netloc, then the current request's remote domain, then target_domain
    :param is_scheme: True forces the mirror scheme+host prefix, False forbids
        it, None auto-detects from the input url
    :param is_escape: input/output use json-style escaped slashes (\\/)
    :return: the mirror url (re-escaped when is_escape is set), or the input
        unchanged when it is already mirrored or not a proxied domain
    """
    if is_escape:
        # fix: was .replace('r\/', r'/') -- the raw-string prefix was misplaced,
        # so it only matched the literal three chars "r\/" and json-escaped
        # urls were never actually unescaped
        _raw_url_or_path = raw_url_or_path.replace(r'\/', '/')
    else:
        _raw_url_or_path = raw_url_or_path
    sp = urlsplit(_raw_url_or_path)
    if '/extdomains/' == sp.path[:12]:
        # already in mirror form, nothing to do
        return raw_url_or_path
    domain = remote_domain or sp.netloc or this_request.remote_domain or target_domain
    if domain not in allowed_domains_set:
        # not a domain we proxy -- leave untouched
        return raw_url_or_path
    # keep an explicit scheme+host prefix only when the input had one (or caller forces it)
    if is_scheme or ((sp.scheme or _raw_url_or_path[:2] == '//') and is_scheme is not False):
        our_prefix = myurl_prefix
    else:
        our_prefix = ''
    if domain not in domain_alias_to_target_set:
        # external domain: mirrored under /extdomains/[https-]<domain>
        remote_scheme = get_ext_domain_inurl_scheme_prefix(domain)
        middle_part = '/extdomains/' + remote_scheme + domain
    else:
        middle_part = ''
    result = urljoin(our_prefix + middle_part + '/',
                     extract_url_path_and_query(_raw_url_or_path).lstrip('/'))
    if is_escape:
        result = result.replace('/', r'\/')
    # fix: was `return response_text_rewrite(result)` -- running a single,
    # already-assembled mirror url through the whole-response text rewriter
    # double-rewrites it; the result is final here
    return result
# Function alias, kept for backward compatibility with early-version config files
convert_to_mirror_url = encode_mirror_url
def get_ext_domain_inurl_scheme_prefix(ext_domain, force_https=None):
    """Return the scheme prefix ('https-' or '') this external domain uses
    inside mirror urls.

    :param ext_domain: the external domain to look up
    :param force_https: when not None, overrides the force_https_domains config
    """
    # explicit caller override wins over configuration
    if force_https is not None:
        return 'https-' if force_https else ''
    if force_https_domains == 'NONE':
        return ''
    if force_https_domains == 'ALL' or ext_domain in force_https_domains:
        return 'https-'
    return ''
def add_ssrf_allowed_domain(domain):
    """Add a domain to the SSRF whitelist (wildcards are not supported)."""
    global allowed_domains_set
    allowed_domains_set.add(domain)
# noinspection PyGlobalUndefined
def set_request_for_debug(dummy_request):
    """Replace the module-level `request` object with a dummy one.
    Intended for offline debugging / testing only."""
    global request
    request = dummy_request
def strx(*args, sep=' '):
    """Join the str() of every argument with *sep*, without a trailing separator.

    Fix: the original appended *sep* after every item and then discarded the
    result of ``output.rstrip(sep)`` (strings are immutable, the return value
    was never assigned), so the intended strip never took effect.

    :param args: any objects; each is converted with str()
    :param sep: separator placed between items (keyword-only, default ' ')
    :return: the joined string ('' for no arguments)
    """
    return sep.join(str(arg) for arg in args)
@lru_cache(maxsize=1024)
def check_global_ua_pass(ua_str):
    """Return True when the user-agent satisfies the global UA whitelist.

    :param ua_str: raw User-Agent header value, may be None
    """
    # no UA given, or no whitelist configured -> never passes
    if ua_str is None or not global_ua_white_name:
        return False
    return global_ua_white_name in ua_str.lower()
@lru_cache(maxsize=128)
def is_mime_represents_text(input_mime):
    """
    Determine whether an mime is text (eg: text/html: True, image/png: False)
    :param input_mime: str
    :return: bool
    """
    lowered = input_mime.lower()
    # text-like iff any configured keyword appears in the mime string
    return any(keyword in lowered for keyword in text_like_mime_keywords)
@lru_cache(maxsize=128)
def extract_mime_from_content_type(_content_type):
    """Extract the bare mime from a Content-Type header value.
    e.g. 'text/html; encoding=utf-8' --> 'text/html'
    """
    # partition returns the whole string unchanged when no ';' is present
    mime, _sep, _params = _content_type.partition(';')
    return mime
@lru_cache(maxsize=128)
def is_content_type_using_cdn(_content_type):
    """Decide from the content-type whether this resource should go through CDN.

    :return: the mime string when CDN applies, otherwise False
    """
    mime = extract_mime_from_content_type(_content_type)
    # dbgprint(content_type, 'Should Use CDN' if mime in mime_to_use_cdn else 'Should NOT CDN')
    return mime if mime in mime_to_use_cdn else False
def generate_simple_resp_page(errormsg=b'We Got An Unknown Error', error_code=500):
    """Build a minimal flask error response with the given body and status code."""
    return make_response(errormsg, error_code)
def generate_html_redirect_page(target_url, msg='', delay_sec=1):
    """Generate an HTML redirect page.
    Some browsers do not accept cookies on 301/302 responses, so an html
    redirect page is needed to deliver the cookie."""
    resp_content = r"""<!doctype html>
<html lang="zh-CN">
<head>
    <meta charset="UTF-8">
    <title>重定向 (Page Redirect)</title>
    <meta http-equiv="refresh" content="%d; url=%s">
    <script>setTimeout(function(){location.href="%s"} , %d000);</script>
</head>
<body>
<pre>%s</pre>
<hr />
You are now redirecting to <a href="%s">%s</a>, if it didn't redirect automatically, please click that link.
</body>
</html>""" % (
        delay_sec, html_escape(target_url), html_escape(target_url), delay_sec + 1,
        html_escape(msg), html_escape(target_url), html_escape(target_url)
    )
    resp_content = resp_content.encode('utf-8')
    return Response(response=resp_content)
def generate_304_response(_content_type=None):
    """Build an empty 304 (Not Modified) response marked as a local-cache hit."""
    not_modified = Response(content_type=_content_type, status=304)
    not_modified.headers.add('X-Cache', 'FileHit-304')
    return not_modified
def generate_ip_verify_hash(input_dict):
    """
    Generate a hash identifying the user.
    Used by the human_ip_verification feature.
    The hash has two halves: hash(first half + salt) = second half,
    which is how it gets verified later.
    NOTE(review): each half is padded to *at least* 7 hex chars, but adler32
    usually yields 8, so the total is normally 16 chars; the matching
    verify_ip_hash_cookie() slices at position 8 -- confirm the two stay in sync.
    :param input_dict: the user's verification answers (str keys and values)
    :return: the combined hash string
    """
    strbuff = human_ip_verification_answers_hash_str
    for key in input_dict:
        # mix in randomness so each generated hash is unique
        strbuff += key + input_dict[key] + str(random.randint(0, 9000000))
    input_key_hash = hex(zlib.adler32(strbuff.encode(encoding='utf-8')))[2:]
    while len(input_key_hash) < 7:
        input_key_hash += '0'
    output_hash = hex(zlib.adler32((input_key_hash + human_ip_verification_answers_hash_str).encode(encoding='utf-8')))[2:]
    while len(output_hash) < 7:
        output_hash += '0'
    return input_key_hash + output_hash
@lru_cache(maxsize=1024)
def verify_ip_hash_cookie(hash_cookie_value):
    """
    Decide from the hash in the cookie whether the user may access.
    Used by the human_ip_verification feature.
    The hash has two halves: hash(first half + salt) = second half,
    verified here (see generate_ip_verify_hash()).
    :type hash_cookie_value: str
    :return: True when the cookie hash is self-consistent
    """
    try:
        input_key_hash = hash_cookie_value[:8]
        output_hash = hash_cookie_value[8:]
        calculated_hash = hex(zlib.adler32(
            (input_key_hash + human_ip_verification_answers_hash_str).encode(encoding='utf-8')
        ))[2:]
        if output_hash == calculated_hash:
            return True
        else:
            return False
    except:
        # malformed cookie (wrong type, too short, ...) -> treat as not verified
        return False
def update_content_in_local_cache(url, content, method='GET'):
    """Update a resource cached in local_cache by attaching its content.
    Used in stream mode."""
    if local_cache_enable and method == 'GET' and cache.is_cached(url):
        info_dict = cache.get_info(url)
        resp = cache.get_obj(url)
        resp.set_data(content)
        # without_content is set to True when the stored resource lacks a complete body.
        # Such a cache entry is not effective; it only becomes effective once content is added.
        # In stream mode the http headers arrive first and the body afterwards, so a
        # headers-only situation occurs: the program first caches the headers-only
        # response, then appends the body here once it has been fully received.
        info_dict['without_content'] = False
        if verbose_level >= 4: dbgprint('LocalCache_UpdateCache', url, content[:30], len(content))
        cache.put_obj(
            url,
            resp,
            obj_size=len(content),
            expires=get_expire_from_mime(this_request.mime),
            last_modified=info_dict.get('last_modified'),
            info_dict=info_dict,
        )
def put_response_to_local_cache(url, _our_resp, without_content=False):
    """
    put our response object(headers included) to local cache
    :param without_content: for stream mode use (body not yet received)
    :param url: client request url
    :param _our_resp: our response(flask response object) to client, would be stored
    :return: None
    """
    # Only cache GET method, and only when remote returns 200(OK) status
    if local_cache_enable and request.method == 'GET' and this_request.remote_response.status_code == 200:
        if without_content:
            # shallow-copy so we can drop the (streaming) body iterator
            # without affecting the response being sent to the client
            our_resp = copy.copy(_our_resp)
            our_resp.response = None  # delete iterator
        else:
            our_resp = _our_resp
        # the header's character cases are different in flask/apache(win)/apache(linux)
        last_modified = this_request.remote_response.headers.get('last-modified', None) \
                        or this_request.remote_response.headers.get('Last-Modified', None)
        dbgprint('PuttingCache:', url)
        cache.put_obj(
            url,
            our_resp,
            expires=get_expire_from_mime(this_request.mime),
            obj_size=0 if without_content else len(this_request.remote_response.content),
            last_modified=last_modified,
            info_dict={'without_content': without_content,
                       'last_modified': last_modified,
                       },
        )
def try_get_cached_response(url, client_header=None):
    """
    Try to serve the response from the local cache.
    :param url: real url with query string
    :type client_header: dict
    :return: cached flask Response (full 200 or empty 304), or None on miss
    """
    # Only use cache when client use GET
    if local_cache_enable and request.method == 'GET' and cache.is_cached(url):
        # honour conditional requests: answer 304 when unchanged
        if client_header is not None and 'if-modified-since' in client_header and \
                cache.is_unchanged(url, client_header.get('if-modified-since', None)):
            dbgprint('FileCacheHit-304', url)
            return generate_304_response()
        else:
            cached_info = cache.get_info(url)
            if cached_info.get('without_content', False):
                # for an explanation of without_content, see update_content_in_local_cache()
                return None
            # dbgprint('FileCacheHit-200')
            resp = cache.get_obj(url)
            assert isinstance(resp, Response)
            resp.headers.set('x-zmirror-cache', 'FileHit')
            return resp
    else:
        return None
def get_group(name, match_obj):
    """Return the text of the named capture group, or '' when unavailable.

    '' is returned when the pattern has no group called *name* (IndexError),
    when *match_obj* is None because the search failed (AttributeError), or
    when the group exists but did not participate in the match (None value).

    Fix: the original bare ``except:`` also swallowed KeyboardInterrupt /
    SystemExit; narrowed to the two exceptions that can actually occur here.

    :param name: capture-group name
    :param match_obj: match object of stdlib re, or None
    :return: matched text or ''
    """
    try:
        obj = match_obj.group(name)
    except (IndexError, AttributeError):
        return ''
    if obj is not None:
        return obj
    else:
        return ''
def regex_url_reassemble(match_obj):
    """
    Reassemble url parts split by the regex (regex_adv_url_rewriter).
    Called as the replacement function of re.sub(); rewrites one matched
    url occurrence to its mirror / CDN form, with a module-level result cache.
    :param match_obj: match object of stdlib re
    :return: re assembled url string (included prefix(url= etc..) and suffix.)
    """
    if match_obj.group() in url_rewrite_cache:  # Read Cache
        global url_rewrite_cache_hit_count
        url_rewrite_cache_hit_count += 1
        return url_rewrite_cache[match_obj.group()]
    else:
        global url_rewrite_cache_miss_count
        prefix = get_group('prefix', match_obj)
        quote_left = get_group('quote_left', match_obj)
        quote_right = get_group('quote_right', match_obj)
        path = get_group('path', match_obj)
        match_domain = get_group('domain', match_obj)
        scheme = get_group('scheme', match_obj)
        whole_match_string = match_obj.group()
        # dbgprint('prefix', prefix, 'quote_left', quote_left, 'quote_right', quote_right,
        #          'path', path, 'match_domain', match_domain, 'scheme', scheme, 'whole', whole_match_string)
        # json-escaped input: unescape now, re-escape just before returning
        if r"\/" in path or r"\/" in scheme:
            require_slash_escape = True
            path = path.replace(r"\/", "/")
            # domain_and_scheme = domain_and_scheme.replace(r"\/", "/")
        else:
            require_slash_escape = False
        # path must be not blank
        if (not path  # path is blank
                # only url(something) and @import are allowed to be unquoted
                or ('url' not in prefix and 'import' not in prefix) and (not quote_left or quote_right == ')')
                # for "key":"value" type replace, we must have at least one '/' in url path (for the value to be regard as url)
                or (':' in prefix and '/' not in path)
                # if we have quote_left, it must equals to the right
                or (quote_left and quote_left != quote_right)
                # in javascript, those 'path' contains one or only two slash, should not be rewrited (for potential error)
                # or (this_request.mime == 'application/javascript' and path.count('/') < 2)
                # in javascript, we only rewrite those with explicit scheme ones.
                # v0.21.10+ in "key":"value" format, we should ignore those path without scheme
                or (not scheme and ('javascript' in this_request.mime or '"' in prefix))
                ):
            # dbgprint('returned_un_touch', whole_match_string)
            return whole_match_string
        # v0.19.0+ Automatic Domains Whitelist (Experimental)
        if enable_automatic_domains_whitelist:
            try_match_and_add_domain_to_rewrite_white_list(match_domain)
        # dbgprint('remote_path:', remote_path, 'remote_domain:', remote_domain, 'match_domain', match_domain, v=5)
        # dbgprint(match_obj.groups(), v=5)
        # fall back to the current request's remote domain when the match has none
        domain = match_domain or this_request.remote_domain
        # dbgprint('rewrite match_obj:', match_obj, 'domain:', domain, v=5)
        # skip if the domain are not in our proxy list
        if domain not in allowed_domains_set:
            # dbgprint('return untouched because domain not match', domain, whole_match_string)
            return match_obj.group()  # return raw, do not change
        # this resource's absolute url path to the domain root.
        # dbgprint('match path', path, v=5)
        path = urljoin(this_request.remote_path, path)
        # dbgprint('middle path', path, v=5)
        if ':' not in this_request.remote_domain:  # the python's builtin urljoin has a bug, cannot join domain with port correctly
            url_no_scheme = urljoin(domain + '/', path.lstrip('/'))
        else:
            url_no_scheme = domain + '/' + path.lstrip('/')
        # dbgprint('url_no_scheme', url_no_scheme)
        # add extdomains prefix in path if need
        if domain in external_domains_set:
            scheme_prefix = get_ext_domain_inurl_scheme_prefix(domain)
            path = '/extdomains/' + scheme_prefix + url_no_scheme
        # dbgprint('final_path', path, v=5)
        # look up whether we have already learned this url's CDN-ability
        if mime_based_static_resource_CDN and url_no_scheme in url_to_use_cdn:
            # dbgprint('We Know:', url_no_scheme,v=5)
            _we_knew_this_url = True
            _this_url_mime_cdn = url_to_use_cdn[url_no_scheme][0]
        else:
            # dbgprint('We Don\'t know:', url_no_scheme,v=5)
            _we_knew_this_url = False
            _this_url_mime_cdn = False
        # Apply CDN domain
        if _this_url_mime_cdn \
                or (not disable_legacy_file_recognize_method and get_group('ext', match_obj) in static_file_extensions_list):
            # pick an cdn domain due to the length of url path
            # an advantage of choose like this (not randomly), is this can make higher CDN cache hit rate.
            # CDN rewrite, rewrite static resources to cdn domains.
            # A lot of cases included, the followings are just the most typical examples.
            # http(s)://target.com/img/love_lucia.jpg --> http(s)://your.cdn.domains.com/img/love_lucia.jpg
            # http://external.com/css/main.css --> http(s)://your.cdn.domains.com/extdomains/external.com/css/main.css
            # https://external.pw/css/main.css --> http(s)://your.cdn.domains.com/extdomains/https-external.pw/css/main.css
            replace_to_scheme_domain = my_host_scheme + CDN_domains[zlib.adler32(path.encode()) % cdn_domains_number]
        # else:  # this_request.mime == 'application/javascript':
        #     replace_to_scheme_domain = ''  # Do not use explicit url prefix in js, to prevent potential error
        elif not scheme:
            replace_to_scheme_domain = ''
        else:
            replace_to_scheme_domain = myurl_prefix
        reassembled_url = urljoin(replace_to_scheme_domain, path)
        if _this_url_mime_cdn and cdn_redirect_encode_query_str_into_url:
            # fold the query string into the url path for CDN friendliness
            reassembled_url = embed_real_url_to_embedded_url(
                reassembled_url,
                url_mime=url_to_use_cdn[url_no_scheme][1],
                escape_slash=require_slash_escape
            )
        if require_slash_escape:
            reassembled_url = reassembled_url.replace("/", r"\/")
        # reassemble!
        # prefix: src=  quote_left: "
        # path: /extdomains/target.com/foo/bar.js?love=luciaZ
        reassembled = prefix + quote_left + reassembled_url + quote_right + get_group('right_suffix', match_obj)
        # write the adv rewrite cache only if we disable CDN or we known whether this url is CDN-able
        if not mime_based_static_resource_CDN or _we_knew_this_url:
            url_rewrite_cache[match_obj.group()] = reassembled  # write cache
            url_rewrite_cache_miss_count += 1
        # dbgprint('---------------------', v=5)
        return reassembled
@lru_cache(maxsize=256)
def is_ua_in_whitelist(ua_str):
    """
    When a robot's/spider's request is banned, check whether it falls inside
    the allowed whitelist. Called by is_denied_because_of_spider().
    :type ua_str: str
    """
    lowered = ua_str.lower()
    # the global UA whitelist also grants spider access
    if global_ua_white_name in lowered:
        return True
    return any(allowed_ua in lowered for allowed_ua in spider_ua_white_list)
@lru_cache(maxsize=256)
def is_denied_because_of_spider(ua_str):
    """Check whether this user-agent must be banned for being a spider/bot."""
    ua_str = ua_str.lower()
    # not a spider at all -> never denied
    if 'spider' not in ua_str and 'bot' not in ua_str:
        return False
    if is_ua_in_whitelist(ua_str):
        infoprint("A Spider/Bot's access was granted", ua_str)
        return False
    infoprint('A Spider/Bot was denied, UA is:', ua_str)
    return True
def load_ip_whitelist_file():
    """Load the IP whitelist from file (one IP per line).

    Fix: the original called ``fp.readline()`` once and therefore loaded only
    the FIRST line, although append_ip_whitelist_file() appends one IP per
    line -- every previously whitelisted IP after the first was lost on
    restart. Now every non-blank line is loaded.

    :return: set of whitelisted IP strings (empty when the file is absent)
    """
    set_buff = set()
    if os.path.exists(human_ip_verification_whitelist_file_path):
        with open(human_ip_verification_whitelist_file_path, 'r', encoding='utf-8') as fp:
            for line in fp:
                ip = line.strip()
                if ip:  # skip blank lines
                    set_buff.add(ip)
    return set_buff
def append_ip_whitelist_file(ip_to_allow):
    """Append one ip to the whitelist file (one IP per line), best-effort."""
    try:
        with open(human_ip_verification_whitelist_file_path, 'a', encoding='utf-8') as fp:
            fp.write(ip_to_allow + '\n')
    except:
        # persisting is best-effort: log and continue, the in-memory set still works
        errprint('Unable to write whitelist file')
        traceback.print_exc()
def ip_whitelist_add(ip_to_allow, info_record_dict=None):
    """Add an ip to the whitelist and persist it to file.

    :param ip_to_allow: the ip address string to whitelist
    :param info_record_dict: optional verification info, only written to the log
    """
    if ip_to_allow in single_ip_allowed_set:
        return
    dbgprint('ip white added', ip_to_allow, 'info:', info_record_dict)
    single_ip_allowed_set.add(ip_to_allow)
    # invalidate the lru_cache so the newly added ip takes effect immediately
    is_ip_not_in_allow_range.cache_clear()
    append_ip_whitelist_file(ip_to_allow)
    # dbgprint(single_ip_allowed_set)
    try:
        with open(human_ip_verification_whitelist_log, 'a', encoding='utf-8') as fp:
            fp.write(datetime.now().strftime('%Y-%m-%d %H:%M:%S') + " " + ip_to_allow
                     + " " + str(request.user_agent)
                     + " " + repr(info_record_dict) + "\n")
    except:
        # logging is best-effort; the whitelist entry itself is already saved
        errprint('Unable to write log file', os.path.abspath(human_ip_verification_whitelist_log))
        traceback.print_exc()
@lru_cache(maxsize=256)
def is_ip_not_in_allow_range(ip_address):
    """Return True when *ip_address* is in NEITHER the per-ip whitelist NOR
    the default whitelist networks."""
    if ip_address in single_ip_allowed_set:
        return False
    addr = ipaddress.ip_address(ip_address)
    return not any(addr in network for network in human_ip_verification_default_whitelist_networks)
# ########## End utils ###############
# ################# Begin Server Response Handler #################
def preload_streamed_response_content_async(requests_response_obj, buffer_queue):
    """
    In stream mode, pre-read the remote response's content.
    Producer side: runs in a worker thread and feeds chunks into buffer_queue;
    a final None marks end-of-stream for the consumer.
    :param requests_response_obj: the streaming requests response
    :type buffer_queue: queue.Queue
    """
    for particle_content in requests_response_obj.iter_content(stream_transfer_buffer_size):
        try:
            buffer_queue.put(particle_content, timeout=10)
        except queue.Full:
            # consumer took nothing for 10s -- give up on this transfer
            traceback.print_exc()
            buffer_queue = None  # free it -- would this reduce memory leaks? I don't know either (Ap)
            # NOTE(review): exit() raises SystemExit, which here ends only this worker thread
            exit()
        if verbose_level >= 3: dbgprint('BufferSize', buffer_queue.qsize())
    buffer_queue.put(None, timeout=10)  # None = end-of-stream sentinel
    exit()
def iter_streamed_response_async():
    """Async streaming: read the remote response in a worker thread while
    simultaneously yielding the received chunks to the user.
    Consumer side of preload_streamed_response_content_async()."""
    total_size = 0
    _start_time = time()
    _content_buffer = b''
    _disable_cache_temporary = False
    buffer_queue = queue.Queue(maxsize=stream_transfer_async_preload_max_packages_size)
    t = threading.Thread(
        target=preload_streamed_response_content_async,
        args=(this_request.remote_response, buffer_queue),
        daemon=True,
    )
    t.start()
    while True:
        try:
            particle_content = buffer_queue.get(timeout=15)
        except queue.Empty:
            # the producer thread delivered nothing for 15s -- abort the transfer
            warnprint('WeGotAnSteamTimeout')
            traceback.print_exc()
            try:
                # noinspection PyProtectedMember
                # NOTE(review): Thread._stop() is a private CPython detail and raises
                # on modern Python versions; the bare except below hides that.
                t._stop()
            except:
                pass
            return
        buffer_queue.task_done()
        if particle_content is not None:
            # because of stream mode's nature the content gets consumed,
            # so it must be stored separately (for the local cache)
            if local_cache_enable and not _disable_cache_temporary:
                if len(_content_buffer) > 8 * 1024 * 1024:  # 8MB
                    # too large to buffer in memory -- give up caching this response
                    _disable_cache_temporary = True
                    _content_buffer = None
                else:
                    _content_buffer += particle_content
            yield particle_content
        else:
            # None sentinel: the remote body is complete -- finalize the cache entry
            if local_cache_enable and not _disable_cache_temporary:
                update_content_in_local_cache(this_request.remote_url, _content_buffer,
                                              method=this_request.remote_response.request.method)
            return
        if verbose_level >= 4:
            total_size += len(particle_content)
            dbgprint('total_size:', total_size, 'total_speed(KB/s):', total_size / 1024 / (time() - _start_time))
def iter_streamed_response():
    """Non-async streaming: read a small part of the remote response, send it
    to the user, then read the next part. No longer recommended
    (see iter_streamed_response_async for the preferred variant)."""
    total_size = 0
    _start_time = time()
    _content_buffer = b''
    _disable_cache_temporary = False
    for particle_content in this_request.remote_response.iter_content(stream_transfer_buffer_size):
        if verbose_level >= 4:
            total_size += len(particle_content)
            dbgprint('total_size:', total_size, 'total_speed(KB/s):', total_size / 1024 / (time() - _start_time))
        if particle_content is not None:
            # because of stream mode's nature the content gets consumed,
            # so it must be stored separately (for the local cache)
            if local_cache_enable and not _disable_cache_temporary:
                if len(_content_buffer) > 8 * 1024 * 1024:  # 8MB
                    # too large to buffer in memory -- give up caching this response
                    _disable_cache_temporary = True
                    _content_buffer = None
                else:
                    _content_buffer += particle_content
            yield particle_content
    if local_cache_enable and not _disable_cache_temporary:
        update_content_in_local_cache(this_request.remote_url, _content_buffer,
                                      method=this_request.remote_response.request.method)
def copy_response(content=None, is_streamed=False):
    """
    Copy and parse remote server's response headers, generate our flask response object
    :type is_streamed: bool
    :param content: pre-rewrited response content, bytes; None means the body
        is fetched/rewritten here (or streamed)
    :return: (flask response object, seconds spent receiving the response body)
        -- note the tuple; callers unpack two values
    """
    if content is None:
        if is_streamed:
            req_time_body = 0  # body time cannot be measured when streaming
            if not enable_stream_transfer_async_preload:
                dbgprint('TransferUsingStreamMode(basic):', this_request.remote_response.url, this_request.mime)
                content = iter_streamed_response()
            else:
                dbgprint('TransferUsingStreamMode(async):', this_request.remote_response.url, this_request.mime)
                content = iter_streamed_response_async()
        else:
            content, req_time_body = response_content_rewrite()
    else:
        req_time_body = 0
    if verbose_level >= 3: dbgprint('RemoteRespHeaders', this_request.remote_response.headers)
    resp = Response(content, status=this_request.remote_response.status_code)
    for header_key in this_request.remote_response.headers:
        header_key_lower = header_key.lower()
        # Add necessary response headers from the origin site, drop other headers
        if header_key_lower in allowed_remote_response_headers:
            if header_key_lower == 'location':
                _location = this_request.remote_response.headers[header_key]
                # try to apply custom rewrite function
                try:
                    if custom_text_rewriter_enable:
                        _loc_rewrite = custom_response_text_rewriter(_location, 'mwm/headers-location', this_request.remote_url)
                        if isinstance(_loc_rewrite, str):
                            _location = _loc_rewrite
                except Exception as _e:  # just print err and fallback to normal rewrite
                    # fix: message previously misspelled '(LCOATION)'
                    errprint('(LOCATION) Custom Rewrite Function ERROR', _e)
                    traceback.print_exc()
                # redirect target must itself be converted to a mirror url
                resp.headers[header_key] = encode_mirror_url(_location)
            elif header_key_lower == 'content-type':
                # force add utf-8 to content-type if it is text
                if is_mime_represents_text(this_request.mime) and 'utf-8' not in this_request.content_type:
                    resp.headers[header_key] = this_request.mime + '; charset=utf-8'
                else:
                    resp.headers[header_key] = this_request.remote_response.headers[header_key]
            elif header_key_lower in ('access-control-allow-origin', 'timing-allow-origin'):
                if custom_allowed_origin is None:
                    resp.headers[header_key] = myurl_prefix
                elif custom_allowed_origin == '_*_':
                    # '_*_' sentinel: echo back the requesting origin (permissive CORS)
                    _origin = request.headers.get('origin') or request.headers.get('Origin') or myurl_prefix
                    resp.headers[header_key] = _origin
                else:
                    resp.headers[header_key] = custom_allowed_origin
            else:
                resp.headers[header_key] = this_request.remote_response.headers[header_key]
        # If we have the Set-Cookie header, we should extract the raw ones
        # and then change the cookie domain to our domain
        if header_key_lower == 'set-cookie':
            for cookie_string in response_cookies_deep_copy():
                try:
                    resp.headers.add('Set-Cookie', response_cookie_rewrite(cookie_string))
                except:
                    traceback.print_exc()
    if verbose_level >= 3: dbgprint('OurRespHeaders:\n', resp.headers)
    return resp, req_time_body
# noinspection PyProtectedMember
def response_cookies_deep_copy():
    """
    It's a BAD hack to get RAW cookies headers, but so far, we don't have better way.
    We'd go DEEP inside the urllib's private method to get raw headers
    raw_headers example:
    [('Cache-Control', 'private'),
    ('Content-Length', '48234'),
    ('Content-Type', 'text/html; Charset=utf-8'),
    ('Server', 'Microsoft-IIS/8.5'),
    ('Set-Cookie','BoardList=BoardID=Show; expires=Mon, 02-May-2016 16:00:00 GMT; path=/'),
    ('Set-Cookie','aspsky=abcefgh; expires=Sun, 24-Apr-2016 16:00:00 GMT; path=/; HttpOnly'),
    ('Set-Cookie', 'ASPSESSIONIDSCSSDSSQ=OGKMLAHDHBFDJCDMGBOAGOMJ; path=/'),
    ('X-Powered-By', 'ASP.NET'),
    ('Date', 'Tue, 26 Apr 2016 12:32:40 GMT')]
    :return: list of raw Set-Cookie header values (rewritten for the mirror)
    """
    raw_headers = this_request.remote_response.raw._original_response.headers._headers
    header_cookies_string_list = []
    for name, value in raw_headers:
        if name.lower() == 'set-cookie':
            if my_host_scheme == 'http://':
                # mirror is served over plain http: drop the Secure attribute,
                # otherwise the browser would never send the cookie back to us
                value = value.replace('Secure;', '')
                value = value.replace(';Secure', ';')
                value = value.replace('; Secure', ';')
            if 'httponly' in value.lower():
                if enable_aggressive_cookies_path_rewrite:
                    # aggressive cookie path rewrite: rewrite every path to /
                    value = regex_cookie_path_rewriter.sub('path=/;', value)
                elif enable_aggressive_cookies_path_rewrite is not None:
                    # rewrite the HttpOnly cookie's path to live under the current mirror url
                    # eg(/extdomains/https-a.foobar.com): path=/verify; -> path=/extdomains/https-a.foobar.com/verify
                    if this_request.remote_domain not in domain_alias_to_target_set:  # do not rewrite main domains
                        _scheme_prefix = get_ext_domain_inurl_scheme_prefix(this_request.remote_domain,
                                                                            force_https=this_request.is_https)
                        value = regex_cookie_path_rewriter.sub(
                            '\g<prefix>=/extdomains/' + _scheme_prefix + this_request.remote_domain + '\g<path>', value)
            header_cookies_string_list.append(value)
    return header_cookies_string_list
def response_content_rewrite():
    """
    Rewrite requests response's content's url. Auto skip binary (based on MIME).
    :return: (bytes, float) -- the (possibly rewritten) body, and the seconds
        spent receiving the body from the remote server
    """
    _start_time = time()
    # accessing .content blocks until the whole body has been received
    _content = this_request.remote_response.content
    req_time_body = time() - _start_time
    if this_request.mime and is_mime_represents_text(this_request.mime):
        # Do text rewrite if remote response is text-like (html, css, js, xml, etc..)
        if verbose_level >= 3: dbgprint('Text-like', this_request.content_type,
                                        this_request.remote_response.text[:15], _content[:15])
        # decide which charset to use when decoding the remote body
        if force_decode_remote_using_encode is not None:
            this_request.remote_response.encoding = force_decode_remote_using_encode
        elif possible_charsets:
            # try each candidate charset until one decodes without error
            for charset in possible_charsets:
                try:
                    this_request.remote_response.content.decode(charset)
                except:
                    pass
                else:
                    this_request.remote_response.encoding = charset
                    break
        elif cchardet_available:  # detect the encoding using cchardet (if we have)
            this_request.remote_response.encoding = c_chardet(_content)
        # simply copy the raw text, for custom rewriter function first.
        resp_text = this_request.remote_response.text
        if developer_string_trace is not None and developer_string_trace in resp_text:
            infoprint('StringTrace: appears in the RAW remote response text, code line no. ', current_line_number())
        # try to apply custom rewrite function
        try:
            if custom_text_rewriter_enable:
                resp_text2 = custom_response_text_rewriter(resp_text, this_request.mime, this_request.remote_url)
                if isinstance(resp_text2, str):
                    resp_text = resp_text2
                elif isinstance(resp_text2, tuple) or isinstance(resp_text2, list):
                    resp_text, is_skip_builtin_rewrite = resp_text2
                    if is_skip_builtin_rewrite:
                        infoprint('Skip_builtin_rewrite', request.url)
                        # fix: this early return previously returned bare bytes,
                        # but every other exit returns (bytes, float) and the
                        # caller tuple-unpacks two values -- it must match
                        return resp_text.encode(encoding='utf-8'), req_time_body
        except Exception as _e:  # just print err and fallback to normal rewrite
            errprint('Custom Rewrite Function "custom_response_text_rewriter(text)" in custom_func.py ERROR', _e)
            traceback.print_exc()
        else:
            if developer_string_trace is not None and developer_string_trace in resp_text:
                infoprint('StringTrace: appears after custom text rewrite, code line no. ', current_line_number())
        # then do the normal rewrites
        try:
            resp_text = response_text_rewrite(resp_text)
        except:
            traceback.print_exc()
        else:
            if developer_string_trace is not None and developer_string_trace in resp_text:
                infoprint('StringTrace: appears after builtin rewrite, code line no. ', current_line_number())
        return resp_text.encode(encoding='utf-8'), req_time_body  # return bytes
    else:
        # simply don't touch binary response content
        dbgprint('Binary', this_request.content_type)
        return _content, req_time_body
def response_text_basic_rewrite(resp_text, domain, domain_id=None):
    """Plain text-replace rewrite of one remote domain to its mirror form.

    Replaces every known textual representation of *domain* -- plain,
    url-encoded, json-escaped, double-escaped, hex-escaped, quoted -- inside
    *resp_text* with the corresponding mirror-site form, using the
    pre-generated prefixes in prefix_buff. The replace order matters: the
    most-escaped/longest forms must be substituted first.
    :param resp_text: text to rewrite
    :param domain: the remote domain being rewritten
    :param domain_id: optional index used to pick a deterministic CDN domain
    :return: rewritten text
    """
    if domain not in domains_alias_to_target_domain:
        # external domain: its mirrored form lives under /extdomains/
        domain_prefix = '/extdomains/' + get_ext_domain_inurl_scheme_prefix(domain) + domain
        domain_prefix_https = '/extdomains/https-' + domain
        domain_prefix_https_esc = r'\/extdomains\/https-' + domain
    else:
        domain_prefix = ''
        domain_prefix_https = ''
        domain_prefix_https_esc = ''
    # Static resources domains hard rewrite
    if enable_static_resource_CDN and domain in target_static_domains:
        # dbgprint(domain, 'is static domains')
        # deterministic CDN pick -> stable urls -> better CDN cache hit rate
        cdn_id = domain_id if domain_id is not None else zlib.adler32(domain.encode())
        _my_host_name = CDN_domains[cdn_id % cdn_domains_number]
        _myurl_prefix = my_host_scheme + _my_host_name
        _myurl_prefix_escaped = _myurl_prefix.replace('/', r'\/')
    else:
        _my_host_name = my_host_name
        _myurl_prefix = myurl_prefix
        _myurl_prefix_escaped = myurl_prefix_escaped
    # load pre-generated replace prefix
    prefix = prefix_buff[domain]
    # Explicit HTTPS scheme must be kept
    resp_text = resp_text.replace(prefix['https_double_esc'], (_myurl_prefix + domain_prefix).replace('/', r'\\\/'))
    resp_text = resp_text.replace(prefix['https_esc'], _myurl_prefix_escaped + domain_prefix_https_esc)
    resp_text = resp_text.replace(prefix['https'], _myurl_prefix + domain_prefix_https)
    resp_text = resp_text.replace(prefix['https_esc_ue'], quote_plus(_myurl_prefix_escaped + domain_prefix_https_esc))
    resp_text = resp_text.replace(prefix['https_ue'], quote_plus(_myurl_prefix + domain_prefix_https))
    # Implicit schemes replace, will be replaced to the same as `my_host_scheme`, unless forced
    # _buff: my-domain.com/extdomains/https-remote.com or my-domain.com
    if domain not in domains_alias_to_target_domain:
        _buff = _my_host_name + domain_prefix
    else:
        _buff = _my_host_name
    _buff_esc = _buff.replace('/', r'\/')
    _buff_double_esc = _buff.replace('/', r'\\\/')
    resp_text = resp_text.replace(prefix['http_double_esc'], my_host_scheme_escaped + _buff_double_esc)
    resp_text = resp_text.replace(prefix['http_esc'], my_host_scheme_escaped + _buff_esc)
    resp_text = resp_text.replace(prefix['http'], my_host_scheme + _buff)
    resp_text = resp_text.replace(prefix['slash_double_esc'], r'\\\/\\\/' + _buff_double_esc)
    resp_text = resp_text.replace(prefix['slash_esc'], r'\/\/' + _buff_esc)
    resp_text = resp_text.replace(prefix['slash'], '//' + _buff)
    resp_text = resp_text.replace(prefix['http_esc_ue'], quote_plus(my_host_scheme_escaped + _buff_esc))
    resp_text = resp_text.replace(prefix['http_ue'], quote_plus(my_host_scheme + _buff))
    resp_text = resp_text.replace(prefix['slash_esc_ue'], quote_plus(r'\/\/' + _buff_esc))
    resp_text = resp_text.replace(prefix['slash_ue'], quote_plus('//' + _buff))
    resp_text = resp_text.replace(prefix['hex_lower'], ('//' + _my_host_name).replace('/', r'\x2f'))
    resp_text = resp_text.replace(prefix['hex_upper'], ('//' + _my_host_name).replace('/', r'\x2F'))
    # rewrite "foo.domain.tld" and 'foo.domain.tld'
    resp_text = resp_text.replace(prefix['double_quoted'], '"%s"' % _buff)
    resp_text = resp_text.replace(prefix['single_quoted'], "'%s'" % _buff)
    resp_text = resp_text.replace(prefix['double_quoted_esc'], '\\"%s\\"' % _buff)
    resp_text = resp_text.replace(prefix['single_quoted_esc'], "\\'%s\\'" % _buff)
    resp_text = resp_text.replace(prefix['double_quoted_ue'], quote_plus('"%s"' % _buff))
    resp_text = resp_text.replace(prefix['single_quoted_ue'], quote_plus("'%s'" % _buff))
    resp_text = resp_text.replace('"' + domain + '"', '"' + _buff_esc + '"')
    return resp_text
def response_text_rewrite(resp_text):
    """
    rewrite urls in text-like content (html,css,js)

    Applies, in order: plain domain-alias replace, the advanced regex url
    rewriter, basic rewrite for the main site, basic rewrite for every
    external domain, and finally js cookie-domain string replaces.
    Order matters: later passes assume earlier ones already ran.

    :type resp_text: str
    :rtype: str
    """
    # v0.20.6+ plain replace domain alias, support json/urlencoded/json-urlencoded/plain
    if url_custom_redirect_enable:
        # temporary_domain_alias is per-request, appended to the static config list
        for before_replace, after_replace in (plain_replace_domain_alias + this_request.temporary_domain_alias):
            dbgprint('plain_replace_domain_alias', before_replace, after_replace, v=4)
            resp_text = resp_text.replace(before_replace, after_replace)
    # v0.9.2+: advanced url rewrite engine
    resp_text = regex_adv_url_rewriter.sub(regex_url_reassemble, resp_text)
    # developer_string_trace lets a developer find WHICH pass mangled a string
    if developer_string_trace is not None and developer_string_trace in resp_text:
        infoprint('StringTrace: appears after advanced rewrite, code line no. ', current_line_number())
    # basic url rewrite, rewrite the main site's url
    # http(s)://target.com/foo/bar --> http(s)://your-domain.com/foo/bar
    for _target_domain in domains_alias_to_target_domain:
        resp_text = response_text_basic_rewrite(resp_text, _target_domain)
    if developer_string_trace is not None and developer_string_trace in resp_text:
        infoprint('StringTrace: appears after basic rewrite(main site), code line no. ', current_line_number())
    # External Domains Rewrite
    # http://external.com/foo1/bar2 --> http(s)://your-domain.com/extdomains/external.com/foo1/bar2
    # https://external.com/foo1/bar2 --> http(s)://your-domain.com/extdomains/https-external.com/foo1/bar2
    for domain_id, domain in enumerate(external_domains):
        resp_text = response_text_basic_rewrite(resp_text, domain, domain_id)
        if developer_string_trace is not None and developer_string_trace in resp_text:
            infoprint('StringTrace: appears after basic ext domain rewrite:', domain, ', code line no. ', current_line_number())
    # for cookies set string (in js) replace
    # eg: ".twitter.com" --> "foo.com"
    resp_text = resp_text.replace('\".' + target_domain_root + '\"', '\"' + my_host_name_no_port + '\"')
    resp_text = resp_text.replace("\'." + target_domain_root + "\'", "\'" + my_host_name_no_port + "\'")
    resp_text = resp_text.replace("domain=." + target_domain_root, "domain=" + my_host_name_no_port)
    resp_text = resp_text.replace('\"' + target_domain_root + '\"', '\"' + my_host_name_no_port + '\"')
    resp_text = resp_text.replace("\'" + target_domain_root + "\'", "\'" + my_host_name_no_port + "\'")
    if developer_string_trace is not None and developer_string_trace in resp_text:
        infoprint('StringTrace: appears after js cookies string rewrite, code line no. ', current_line_number())
    # resp_text = resp_text.replace('lang="zh-Hans"', '', 1)
    return resp_text
def response_cookie_rewrite(cookie_string):
    """Rewrite the domain attribute of a response Set-Cookie string.

    The remote site's cookie domain is replaced with our mirror host
    (`my_host_name_no_port`) so the browser sends the cookie back to us.

    :type cookie_string: str
    :rtype: str
    """
    our_domain_attr = 'domain=' + my_host_name_no_port
    return regex_cookie_rewriter.sub(our_domain_attr, cookie_string)
# ################# End Server Response Handler #################
# ################# Begin Client Request Handler #################
def extract_client_header():
    """Extract necessary client request headers, filtering some out.

    'host' and 'content-length' are always dropped, an EMPTY 'content-type'
    is dropped, and 'accept-encoding' is rebuilt without 'br' (this program
    cannot decode brotli). All other header values are passed through
    client_requests_text_rewrite().

    :return: dict of outgoing request headers (lowercase names)
    """
    outgoing = {}
    if verbose_level >= 3:
        dbgprint('ClientRequestHeaders:', request.headers)
    for raw_name, value in request.headers:
        name = raw_name.lower()
        # drop host/content-length always, and content-type when empty
        if name in ('host', 'content-length') or (name == 'content-type' and value == ''):
            continue
        if name == 'accept-encoding' and 'br' in value:
            # For Firefox, they may send 'Accept-Encoding: gzip, deflate, br'
            # however, this program cannot decode the br encode, so we have
            # to remove it from the request header.
            kept = ''
            if 'gzip' in value:
                kept += 'gzip, '
            if 'deflate' in value:
                kept += 'deflate'
            if kept:
                outgoing[name] = kept
        else:
            outgoing[name] = client_requests_text_rewrite(value)
    if verbose_level >= 3:
        dbgprint('FilteredRequestHeaders:', outgoing)
    return outgoing
# noinspection SpellCheckingInspection
def client_requests_text_rewrite(raw_text):
    """
    Rewrite proxy domain to origin domain, extdomains supported.
    Also Support urlencoded url.
    This usually used in rewriting request params

    eg. http://foo.bar/extdomains/https-accounts.google.com to http://accounts.google.com
    eg2. foo.bar/foobar to www.google.com/foobar
    eg3. http%3a%2f%2fg.zju.tools%2fextdomains%2Fhttps-accounts.google.com%2f233
        to http%3a%2f%2faccounts.google.com%2f233

    :type raw_text: str
    :rtype: str
    """
    # bugfix: the replacement template must be a raw string; '\g' in a normal
    # string is an invalid escape sequence (DeprecationWarning today, a
    # SyntaxError in future Python versions).
    replaced = regex_request_rewriter.sub(r'\g<origin_domain>', raw_text)
    # delegate the remaining prefix stripping to the binary rewriter
    # 32MB == 33554432
    replaced = client_requests_bin_rewrite(replaced.encode(), max_len=33554432).decode()
    if verbose_level >= 3 and raw_text != replaced:
        dbgprint('ClientRequestedUrl: ', raw_text, '<- Has Been Rewrited To ->', replaced)
    return replaced
def client_requests_bin_rewrite(raw_bin, max_len=2097152):  # 2097152=2MB
    """Strip our mirror-site url prefixes from binary request data.

    Removes `my_host_name/extdomains[/https-]` prefixes (plain, escaped and
    urlencoded forms, upper and lower case) and maps our host name back to
    the target domain, so the remote server receives original urls.

    :type raw_bin: bytes
    :param max_len: bodies larger than this (or None) are returned untouched
    :type max_len: int
    :rtype: bytes
    """
    if raw_bin is None or len(raw_bin) > max_len:
        return raw_bin
    _str_buff = my_host_name + '/extdomains'
    # longest prefix first, so '/https-' variants are removed before '/'
    for _str_buff2 in (_str_buff + '/https-', _str_buff + '/', _str_buff):
        raw_bin = raw_bin.replace(quote_plus(_str_buff2.replace('/', r'\/')).encode(), b'')
        raw_bin = raw_bin.replace(quote_plus(_str_buff2.replace('/', r'\/')).lower().encode(), b'')
        raw_bin = raw_bin.replace(quote_plus(_str_buff2).encode(), b'')
        raw_bin = raw_bin.replace(quote_plus(_str_buff2).lower().encode(), b'')
        raw_bin = raw_bin.replace(_str_buff2.replace('/', r'\/').encode(), b'')
        raw_bin = raw_bin.replace(_str_buff2.replace('/', r'\/').lower().encode(), b'')
        raw_bin = raw_bin.replace(_str_buff2.encode(), b'')
    # map our host (urlencoded / with port / without port) back to the target domain
    raw_bin = raw_bin.replace(quote_plus(my_host_name).encode(), quote_plus(target_domain).encode())
    raw_bin = raw_bin.replace(my_host_name.encode(), target_domain.encode())
    raw_bin = raw_bin.replace(my_host_name_no_port.encode(), target_domain.encode())
    # host-less leftovers: '/extdomains/[https-]' in escaped/urlencoded forms
    raw_bin = raw_bin.replace(b'%5C%2Fextdomains%5C%2Fhttps-', b'')
    raw_bin = raw_bin.replace(b'%5c%2fextdomains%5c%2fhttps-', b'')
    raw_bin = raw_bin.replace(b'%2Fextdomains%2Fhttps-', b'')
    raw_bin = raw_bin.replace(b'%2fextdomains%2fhttps-', b'')
    raw_bin = raw_bin.replace(b'\\/extdomains\\/https-', b'')
    raw_bin = raw_bin.replace(b'/extdomains/https-', b'')
    raw_bin = raw_bin.replace(b'%2Fextdomains%2F', b'')
    raw_bin = raw_bin.replace(b'%2fextdomains%2f', b'')
    raw_bin = raw_bin.replace(b'%5C%2Fextdomains%5C%2F', b'')
    # bugfix: was b'%5c%2cextdomains%5c%2c' -- %2c encodes ',' but the
    # lowercase urlencoded form of '\/extdomains\/' must use %2f for '/'.
    raw_bin = raw_bin.replace(b'%5c%2fextdomains%5c%2f', b'')
    raw_bin = raw_bin.replace(b'\\/extdomains\\/', b'')
    raw_bin = raw_bin.replace(b'/extdomains/', b'')
    return raw_bin
def extract_url_path_and_query(full_url=None, no_query=False):
    """Return the path (and optionally the query) component of an url.

    Convert http://foo.bar.com/aaa/p.html?x=y to /aaa/p.html?x=y

    :param full_url: full url; defaults to the current flask request's url
    :type full_url: str
    :param no_query: when True, the query string is stripped
    :return: str
    """
    if full_url is None:
        full_url = request.url
    parts = urlsplit(full_url)
    if no_query or not parts.query:
        return parts.path
    return parts.path + '?' + parts.query
# ################# End Client Request Handler #################
# ################# Begin Middle Functions #################
def send_request(url, method='GET', headers=None, param_get=None, data=None):
    """Actually send the request to the target server.

    Redirects (301/302) from the remote are NOT followed; they are returned
    as-is to the caller.  Called by request_remote_site_and_parse().

    :param url: final remote url to fetch
    :param method: HTTP verb ('GET', 'POST', ...)
    :param headers: already-filtered/rewritten outgoing headers
    :param param_get: query-string parameters
    :param data: request body; falsy bodies are normalized to None
    :return: tuple (requests response object, request time in seconds)
    :raises ConnectionAbortedError: when the hostname is outside our zone
    """
    final_hostname = urlsplit(url).netloc
    dbgprint('FinalRequestUrl', url, 'FinalHostname', final_hostname)
    # Only external in-zone domains are allowed (SSRF check layer 2)
    if final_hostname not in allowed_domains_set and not developer_temporary_disable_ssrf_prevention:
        raise ConnectionAbortedError('Trying to access an OUT-OF-ZONE domain(SSRF Layer 2):', final_hostname)
    # set zero data to None instead of b''
    if not data:
        data = None
    if enable_keep_alive_per_domain:
        # reuse one requests.Session per remote domain, for http keep-alive
        if final_hostname not in connection_pool_per_domain:
            connection_pool_per_domain[final_hostname] = {'session': requests.Session()}
        _requester = connection_pool_per_domain[final_hostname]['session']
        # sessions are shared between clients, so never let cookies persist
        _requester.cookies.clear()
    else:
        _requester = requests
    # Send real requests
    req_start_time = time()
    r = _requester.request(
        method, url,
        params=param_get, headers=headers, data=data,
        proxies=requests_proxies, allow_redirects=False,
        stream=enable_stream_content_transfer,
    )
    # remote request time
    req_time = time() - req_start_time
    dbgprint('RequestTime:', req_time, v=4)
    # Some debug output
    # print(r.request.headers, r.headers)
    if verbose_level >= 3:
        dbgprint(r.request.method, "FinalSentToRemoteRequestUrl:", r.url, "\nRem Resp Stat: ", r.status_code)
        dbgprint("RemoteRequestHeaders: ", r.request.headers)
        if data:
            dbgprint('RemoteRequestRawData: ', r.request.body)
        dbgprint("RemoteResponseHeaders: ", r.headers)
    return r, req_time
def request_remote_site_and_parse():
    """Fetch the current request's remote url and build our response.

    Pipeline, in order: CDN redirect shortcut, local cache lookup, the real
    remote request, MIME bookkeeping for CDN, response copy/rewrite, local
    cache store, timing headers and optional traffic dump.

    :return: a flask response object
    """
    if mime_based_static_resource_CDN:
        # scheme-less url ('example.com/233?a=x') is the CDN bookkeeping key
        url_no_scheme = this_request.remote_url[this_request.remote_url.find('//') + 2:]
        if (cdn_redirect_code_if_cannot_hard_rewrite
                and url_no_scheme in url_to_use_cdn and url_to_use_cdn[url_no_scheme][0] and request.method == 'GET'
                and not is_ua_in_whitelist(str(request.user_agent))
            ):
            # known static resource requested by a normal browser:
            # redirect it to one of the CDN domains instead of serving it
            _path_for_client = extract_url_path_and_query(request.url)
            # adler32 gives a stable choice of CDN domain for the same url
            redirect_to_url = urljoin(
                my_host_scheme + CDN_domains[zlib.adler32(url_no_scheme.encode()) % cdn_domains_number],
                _path_for_client
            )
            if cdn_redirect_encode_query_str_into_url:
                redirect_to_url = embed_real_url_to_embedded_url(redirect_to_url, url_mime=url_to_use_cdn[url_no_scheme][1])
            return redirect(redirect_to_url, code=cdn_redirect_code_if_cannot_hard_rewrite)
    client_header = extract_client_header()
    if local_cache_enable:
        resp = try_get_cached_response(this_request.remote_url, client_header)
        if resp is not None:
            dbgprint('CacheHit,Return')
            if this_request.start_time is not None:
                resp.headers.set('X-Compute-Time', "%.4f" % (time() - this_request.start_time))
            # resp.headers.set('X-Req-Time', "0.0000")
            return resp  # If cache hit, just skip the next steps
    try:  # send request to remote server
        data = client_requests_bin_rewrite(request.get_data())
        # server's request won't follow 301 or 302 redirection
        this_request.remote_response, req_time_headers = send_request(
            this_request.remote_url,
            method=request.method,
            headers=client_header,
            data=data,  # client_requests_bin_rewrite(request.get_data()),
        )
        if this_request.remote_response.url != this_request.remote_url:
            warnprint('requests\'s remote url' + this_request.remote_response.url
                      + 'does no equals our rewrited url' + this_request.remote_url)
    except Exception as _e:
        errprint(_e)  # ERROR :( so sad
        traceback.print_exc()
        return generate_simple_resp_page()
    # extract response's mime to thread local var
    this_request.content_type = this_request.remote_response.headers.get('Content-Type', '') \
                                or this_request.remote_response.headers.get('content-type', '')
    this_request.mime = extract_mime_from_content_type(this_request.content_type)
    # only_serve_static_resources
    if only_serve_static_resources and not is_content_type_using_cdn(this_request.content_type):
        return generate_simple_resp_page(b'This site is just for static resources.', error_code=403)
    # is streamed
    is_streamed = enable_stream_content_transfer and is_content_type_streamed(this_request.content_type)
    # extract cache control header, if not cache, we should disable local cache
    this_request.cache_control = this_request.remote_response.headers.get('Cache-Control', '') \
                                 or this_request.remote_response.headers.get('cache-control', '')
    _response_no_cache = 'no-store' in this_request.cache_control or 'must-revalidate' in this_request.cache_control
    if verbose_level >= 4:
        dbgprint('Response Content-Type:', this_request.content_type,
                 'IsStreamed:', is_streamed,
                 'is_no_cache:', _response_no_cache,
                 'Line', current_line_number(), v=4)
    # add url's MIME info to record, for MIME-based CDN rewrite,
    # next time we access this url, we would know it's mime
    # Notice: mime_based_static_resource_CDN will be auto disabled above when global CDN option are False
    if mime_based_static_resource_CDN and not _response_no_cache \
            and this_request.remote_response.request.method == 'GET' and this_request.remote_response.status_code == 200:
        # we should only cache GET method, and response code is 200
        # url_no_scheme is guaranteed bound here: this branch is guarded by
        # the same mime_based_static_resource_CDN flag that defined it above
        # noinspection PyUnboundLocalVariable
        if url_no_scheme not in url_to_use_cdn:
            if is_content_type_using_cdn(this_request.mime):
                # mark it to use cdn, and record it's url without scheme.
                # eg: If SERVER's request url is http://example.com/2333?a=x, we record example.com/2333?a=x
                # because the same url for http and https SHOULD be the same, drop the scheme would increase performance
                url_to_use_cdn[url_no_scheme] = [True, this_request.mime]
                if verbose_level >= 3: dbgprint('CDN enabled for:', url_no_scheme)
            else:
                if verbose_level >= 3: dbgprint('CDN disabled for:', url_no_scheme)
                url_to_use_cdn[url_no_scheme] = [False, '']
    # copy and parse remote response
    resp, req_time_body = copy_response(is_streamed=is_streamed)
    # storge entire our server's response (headers included)
    if local_cache_enable and not _response_no_cache:
        put_response_to_local_cache(this_request.remote_url, resp, without_content=is_streamed)
    if this_request.start_time is not None and not is_streamed:
        # remote request time should be excluded when calculating total time
        resp.headers.add('X-Header-Req-Time', "%.4f" % req_time_headers)
        resp.headers.add('X-Body-Req-Time', "%.4f" % req_time_body)
        resp.headers.add('X-Compute-Time', "%.4f" % (time() - this_request.start_time - req_time_headers - req_time_body))
    resp.headers.add('X-Powered-By', 'zmirror %s' % __VERSION__)
    if developer_dump_all_traffics and not is_streamed:
        # dump the whole transaction to ./traffic/ for offline debugging
        if not os.path.exists('traffic'):
            os.mkdir('traffic')
        _time_str = datetime.now().strftime('traffic_%Y-%m-%d_%H-%M-%S')
        try:
            with open(os.path.join('traffic', _time_str + '.dump'), 'wb') as fp:
                pickle.dump(
                    (_time_str,
                     (repr(request.url), repr(request.headers), repr(request.get_data())),
                     this_request.remote_response, resp
                     ),
                    fp)
        except:
            traceback.print_exc()
    return resp
def filter_client_request():
    """Filter the client's request; deny or intercept access when needed.

    :return: a flask response (crossdomain.xml, 403 page, or a redirect to
        the ip verification page) when the request must be intercepted,
        otherwise None to let the request continue.
    """
    if verbose_level >= 3: dbgprint('Client Request Url: ', request.url)
    # crossdomain.xml
    if os.path.basename(request.path) == 'crossdomain.xml':
        dbgprint('crossdomain.xml hit from', request.url)
        return crossdomain_xml()
    # Global whitelist ua
    if check_global_ua_pass(str(request.user_agent)):
        return None
    if is_deny_spiders_by_403 and is_denied_because_of_spider(str(request.user_agent)):
        return generate_simple_resp_page(b'Spiders Are Not Allowed To This Site', 403)
    # human verification: triggered by cookie-based verify mode, or when the
    # client's ip is not yet in the allow range
    if human_ip_verification_enabled and (
            ((human_ip_verification_whitelist_from_cookies or enable_custom_access_cookie_generate_and_verify)
             and must_verify_cookies)
            or is_ip_not_in_allow_range(request.remote_addr)
    ):
        if verbose_level >= 3: dbgprint('ip', request.remote_addr, 'is verifying cookies')
        if 'zmirror_verify' in request.cookies and \
                ((human_ip_verification_whitelist_from_cookies and verify_ip_hash_cookie(request.cookies.get('zmirror_verify')))
                 or (enable_custom_access_cookie_generate_and_verify and custom_verify_access_cookie(
                            request.cookies.get('zmirror_verify'), request))):
            # valid verification cookie -> whitelist this ip and continue
            ip_whitelist_add(request.remote_addr, info_record_dict=request.cookies.get('zmirror_verify'))
            if verbose_level >= 3: dbgprint('add to ip_whitelist because cookies:', request.remote_addr)
        else:
            # no/invalid cookie -> send the user to the verification page,
            # carrying the originally requested url base64-encoded
            return redirect(
                "/ip_ban_verify_page?origin=" + base64.urlsafe_b64encode(str(request.url).encode(encoding='utf-8')).decode(),
                code=302)
    return None
def is_client_request_need_redirect():
    """Externally redirect the client's request when needed.

    Unlike rewrite_client_request(), this uses real 301/307 http redirects
    and does not change any internal server state.  The function returns on
    the FIRST matching rule.

    :return: a flask redirect response, or None when no redirect is needed
    """
    _temp = decode_mirror_url()
    hostname, extpath_query = _temp['domain'], _temp['path_query']
    # the main domain requested through /extdomains/ -> canonical plain path
    if hostname in domain_alias_to_target_set and '/extdomains/' == request.path[:12]:
        dbgprint('Requesting main domain in extdomains, redirect back.')
        return redirect(extpath_query, code=307)
    # resources referred from an isolated domain must stay under that
    # domain's /extdomains/ prefix
    if enable_individual_sites_isolation and '/extdomains/' != request.path[:12] and request.headers.get('referer'):
        reference_domain = decode_mirror_url(request.headers.get('referer'))['domain']
        if reference_domain in isolated_domains:
            return redirect(encode_mirror_url(extract_url_path_and_query(), reference_domain), code=307)
    if url_custom_redirect_enable:
        if request.path in url_custom_redirect_list:
            # bugfix: str.replace()'s count argument is positional-only
            # (until Python 3.13); the old `count=1` keyword raised TypeError.
            redirect_to = request.url.replace(request.path, url_custom_redirect_list[request.path], 1)
            if verbose_level >= 3: dbgprint('Redirect from', request.url, 'to', redirect_to)
            return redirect(redirect_to, code=307)
        for regex_match, regex_replace in url_custom_redirect_regex:
            if re.match(regex_match, extract_url_path_and_query(), flags=re.IGNORECASE) is not None:
                redirect_to = re.sub(regex_match, regex_replace, extract_url_path_and_query(), flags=re.IGNORECASE)
                if verbose_level >= 3: dbgprint('Redirect from', request.url, 'to', redirect_to)
                return redirect(redirect_to, code=307)
def rewrite_client_request():
    """Rewrite the client's request internally (invisible to the client).

    Unlike the external 301/307 redirects in is_client_request_need_redirect(),
    this redirects by mutating the global flask request object.

    After one rewrite the function does NOT return early; it keeps applying
    the following rules in order, so the rewrite order matters.

    :return: True if any rewrite happened (caller must then reload some
        per-request settings), False otherwise.
    """
    has_been_rewrited = False
    if cdn_redirect_encode_query_str_into_url:
        # crawler/whitelisted UA hitting an embedded-url CDN link:
        # unpack the real url back into the request
        if is_ua_in_whitelist(str(request.user_agent)):
            try:
                real_url = extract_real_url_from_embedded_url(request.url)
                if real_url is not None:
                    request.url = real_url
                    request.path = urlsplit(real_url).path
            except:
                traceback.print_exc()
            else:
                # NOTE(review): the flag is set even when real_url is None
                # (no rewrite actually happened) -- confirm this is intended
                has_been_rewrited = True
    if url_custom_redirect_enable and shadow_url_redirect_regex:
        _path_query = extract_url_path_and_query()
        _path_query_raw = _path_query
        # first regex that changes the path wins
        for before, after in shadow_url_redirect_regex:
            _path_query = re.sub(before, after, _path_query)
            if _path_query != _path_query_raw:
                dbgprint('ShadowUrlRedirect:', _path_query_raw, 'to', _path_query)
                request.url = myurl_prefix + _path_query
                request.path = urlsplit(_path_query).path
                has_been_rewrited = True
                break
    return has_been_rewrited
# ################# End Middle Functions #################
# ################# Begin Flask #################
@app.route('/zmirror_stat')
def zmirror_status():
    """Return some status information about this server.

    Dumps the lru_cache statistics of the hot helper functions plus a few
    runtime counters, wrapped in <pre>.  Only reachable from localhost.
    """
    if request.remote_addr != '127.0.0.1':
        return generate_simple_resp_page(b'Only 127.0.0.1 are allowed', 403)
    output = ""
    output += strx('extract_real_url_from_embedded_url', extract_real_url_from_embedded_url.cache_info())
    output += strx('\nis_content_type_streamed', is_content_type_streamed.cache_info())
    output += strx('\nembed_real_url_to_embedded_url', embed_real_url_to_embedded_url.cache_info())
    output += strx('\ncheck_global_ua_pass', check_global_ua_pass.cache_info())
    output += strx('\nextract_mime_from_content_type', extract_mime_from_content_type.cache_info())
    output += strx('\nis_content_type_using_cdn', is_content_type_using_cdn.cache_info())
    # bugfix: this row previously reported is_content_type_using_cdn's stats
    # under the 'is_ua_in_whitelist' label (copy-paste error)
    output += strx('\nis_ua_in_whitelist', is_ua_in_whitelist.cache_info())
    output += strx('\nis_mime_represents_text', is_mime_represents_text.cache_info())
    output += strx('\nis_domain_match_glob_whitelist', is_domain_match_glob_whitelist.cache_info())
    output += strx('\nverify_ip_hash_cookie', verify_ip_hash_cookie.cache_info())
    output += strx('\nis_denied_because_of_spider', is_denied_because_of_spider.cache_info())
    output += strx('\nis_ip_not_in_allow_range', is_ip_not_in_allow_range.cache_info())
    output += strx('\n\ncurrent_threads_number', threading.active_count())
    output += strx('\n\nurl_rewriter_cache len: ', len(url_rewrite_cache),
                   'Hits:', url_rewrite_cache_hit_count, 'Misses:', url_rewrite_cache_miss_count)
    output += strx('\n----------------\n')
    output += strx('\ndomain_alias_to_target_set', domain_alias_to_target_set)
    return "<pre>" + output + "</pre>\n"
@app.route('/ip_ban_verify_page', methods=['GET', 'POST'])
def ip_ban_verify_page():
    """Render (GET) and process (POST) the human ip-verification page.

    GET:  build an html form from the configured questions and identity
          record fields; the originally requested url travels along as a
          base64 'origin' parameter.
    POST: check the answers; on success whitelist the client ip and/or set
          a `zmirror_verify` cookie, then redirect the user to the
          (validated) origin url.
    """
    if request.method == 'GET':
        dbgprint('Verifying IP:', request.remote_addr)
        form_body = ''
        for q_id, _question in enumerate(human_ip_verification_questions):
            form_body += r"""%s <input type="text" name="%d" placeholder="%s" style="width: 190px;" /><br/>""" \
                         % (_question[0], q_id, (html_escape(_question[2]) if len(_question) >= 3 else ""))
        for rec_explain_string, rec_name, input_type in human_ip_verification_identity_record:
            form_body += r"""%s %s<input type="%s" name="%s" /><br/>""" % (
                rec_explain_string,
                ('<span style="color: red;">(必填)<span> ' if human_ip_verification_answer_any_one_questions_is_ok else ""),
                html_escape(input_type), html_escape(rec_name))
        if 'origin' in request.args:
            # carry the base64ed original url through the form round-trip
            form_body += r"""<input type="hidden" name="origin" value="%s" style="width: 190px;" />""" % html_escape(
                request.args.get('origin'))
        return r"""<!doctype html>
<html lang="zh-CN">
<head>
<meta charset="UTF-8">
<title>%s</title>
</head>
<body>
<h1>%s</h1>
<p>这样的验证只会出现一次,通过后您会被加入白名单,之后相同IP的访问不会再需要验证。<br/>
提示: 由于手机和宽带IP经常会发生改变,您可能会多次看到这一页面。</p>
%s <br>
<pre style="border: 1px dashed;">%s</pre>
<form method='post'>%s<button type='submit'>递交</button>
</form>
</body>
</html>""" % (
            html_escape(human_ip_verification_title), html_escape(human_ip_verification_title),
            ("只需要回答出以下<b>任意一个</b>问题即可" if human_ip_verification_answer_any_one_questions_is_ok
             else "你需要回答出以下<b>所有问题</b>"),
            human_ip_verification_description, form_body)
    elif request.method == 'POST':
        dbgprint('Verify Request Form', request.form)
        # check the question answers; in all-questions mode one wrong answer
        # fails immediately, in any-one mode a single right answer suffices
        for q_id, _question in enumerate(human_ip_verification_questions):
            if request.form.get(str(q_id)) != _question[1]:
                if not human_ip_verification_answer_any_one_questions_is_ok:
                    return generate_simple_resp_page(b'You got an error in ' + _question[0].encode(), 200)
            elif human_ip_verification_answer_any_one_questions_is_ok:
                break  # one correct answer is enough
        else:
            # for-else: the loop finished without a single correct answer
            if human_ip_verification_answer_any_one_questions_is_ok:
                # bugfix: message typo 'questsion' -> 'question'
                return generate_simple_resp_page(b'Please answer at least ONE question', 200)
        # collect the identity record fields; all of them are mandatory
        record_dict = {}
        for rec_explain_string, rec_name, form_type in human_ip_verification_identity_record:
            if rec_name not in request.form:
                return generate_simple_resp_page(b'Param Missing: ' + rec_explain_string.encode(), 200)
            else:
                record_dict[rec_name] = request.form.get(rec_name)
        origin = '/'
        if 'origin' in request.form:
            try:
                origin = base64.urlsafe_b64decode(request.form.get('origin')).decode(encoding='utf-8')
            except:
                pass
            else:
                # bugfix (open redirect): reject any ABSOLUTE origin whose
                # host is not ours.  The old check `if not netloc and ...`
                # only reset relative origins and let foreign absolute urls
                # pass straight through to the redirect below.
                netloc = urlsplit(origin).netloc
                if netloc and netloc != my_host_name:
                    origin = '/'
        if identity_verify_required:
            if not custom_identity_verify(record_dict):
                return generate_simple_resp_page(b'Verification Failed, please check', 200)
        resp = generate_html_redirect_page(origin, msg=human_ip_verification_success_msg)
        if human_ip_verification_whitelist_from_cookies:
            _hash = generate_ip_verify_hash(record_dict)
            resp.set_cookie(
                'zmirror_verify',
                _hash,
                expires=datetime.now() + timedelta(days=human_ip_verification_whitelist_cookies_expires_days),
                max_age=human_ip_verification_whitelist_cookies_expires_days * 24 * 3600
                # httponly=True,
                # domain=my_host_name
            )
            record_dict['__zmirror_verify'] = _hash
        elif enable_custom_access_cookie_generate_and_verify:
            try:
                _hash = custom_generate_access_cookie(record_dict, request)
                dbgprint('SelfGeneratedCookie:', _hash)
                if _hash is None:
                    return generate_simple_resp_page(b'Verification Failed, please check', 200)
                resp.set_cookie(
                    'zmirror_verify',
                    _hash,
                    expires=datetime.now() + timedelta(days=human_ip_verification_whitelist_cookies_expires_days),
                    max_age=human_ip_verification_whitelist_cookies_expires_days * 24 * 3600
                    # httponly=True,
                    # domain=my_host_name
                )
                record_dict['__zmirror_verify'] = _hash
            except:
                traceback.print_exc()
                return generate_simple_resp_page(b'Server Error, please check', 200)
        ip_whitelist_add(request.remote_addr, info_record_dict=record_dict)
        return resp
# noinspection PyUnusedLocal
@app.route('/', methods=['GET', 'POST', 'OPTIONS', 'PUT', 'DELETE', 'HEAD', 'PATCH'])
@app.route('/<path:input_path>', methods=['GET', 'POST', 'OPTIONS', 'PUT', 'DELETE', 'HEAD', 'PATCH'])
def main_function(input_path='/'):
    """The actual entry point of this program: every mirrored request lands here.

    Flow: decode the mirror url -> filter / externally redirect -> internally
    rewrite -> SSRF check -> resolve the final remote url -> fetch & parse.

    :param input_path: unused; routing only (flask injects it)
    """
    dbgprint('-----BeginRequest-----')
    this_request.start_time = time()  # to display compute time
    this_request.temporary_domain_alias = ()  # init temporary_domain_alias
    infoprint('From', request.remote_addr, request.method, request.url, request.user_agent)
    # decode the mirror url into remote domain / scheme / path
    _temp = decode_mirror_url()
    this_request.remote_domain = _temp['domain']
    this_request.is_https = _temp['is_https']
    this_request.remote_path = _temp['path']
    this_request.remote_path_query = _temp['path_query']
    # pre-filter client's request
    filter_or_rewrite_result = filter_client_request() or is_client_request_need_redirect()
    if filter_or_rewrite_result is not None:
        dbgprint('-----EndRequest(redirect)-----')
        return filter_or_rewrite_result  # Ban or redirect if need
    has_been_rewrited = rewrite_client_request()  # this process may change the global flask request object
    if has_been_rewrited:
        # the request was rewritten in place, so re-decode it
        _temp = decode_mirror_url()
        this_request.remote_domain = _temp['domain']
        this_request.is_https = _temp['is_https']
        this_request.remote_path = _temp['path']
        this_request.remote_path_query = _temp['path_query']
    dbgprint('ResolveRequestUrl hostname:', this_request.remote_domain,
             'is_https:', this_request.is_https, 'exturi:', this_request.remote_path_query)
    # Only external in-zone domains are allowed (SSRF check layer 1)
    if this_request.remote_domain not in allowed_domains_set:
        if not try_match_and_add_domain_to_rewrite_white_list(this_request.remote_domain):
            if developer_temporary_disable_ssrf_prevention:
                add_ssrf_allowed_domain(this_request.remote_domain)
            else:
                return generate_simple_resp_page(b'SSRF Prevention! Your Domain Are NOT ALLOWED.', 403)
    if verbose_level >= 3: dbgprint('after extract, url:', request.url, ' path:', request.path)
    # build the final remote url: external domains keep their own scheme,
    # the main site always goes to target_scheme + target_domain
    if this_request.remote_domain not in domain_alias_to_target_set:
        scheme = 'https://' if this_request.is_https else 'http://'
        this_request.remote_url = urljoin(scheme + this_request.remote_domain, this_request.remote_path_query)
        dbgprint('remote_url(ext):', this_request.remote_url)
    else:
        this_request.remote_url = urljoin(target_scheme + target_domain, this_request.remote_path_query)
        dbgprint('remote_url(main):', this_request.remote_url)
    try:
        resp = request_remote_site_and_parse()
    except:
        traceback.print_exc()
        resp = generate_simple_resp_page()
    dbgprint('-----EndRequest-----')
    return resp
@app.route('/crossdomain.xml')
def crossdomain_xml():
    """Serve a fully-permissive Flash cross-domain policy file."""
    policy = """<?xml version="1.0"?>
<!DOCTYPE cross-domain-policy SYSTEM "http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd">
<cross-domain-policy>
<allow-access-from domain="*"/>
<site-control permitted-cross-domain-policies="all"/>
<allow-http-request-headers-from domain="*" headers="*" secure="false"/>
</cross-domain-policy>"""
    return Response(policy, content_type='text/x-cross-domain-policy')
# ################# End Flask #################
# ################# Begin Post (auto)Exec Section #################
# Runs once at import time, after all config and function definitions.

# ########### domain replacer prefix string buff ###############
# precompute every domain's replace-prefix table once; the response
# rewriters read these on every request
prefix_buff = {}
for _domain in allowed_domains_set:
    prefix_buff[_domain] = calc_domain_replace_prefix(_domain)
if human_ip_verification_enabled:
    single_ip_allowed_set = load_ip_whitelist_file()
if custom_text_rewriter_enable:
    try:
        from custom_func import custom_response_text_rewriter
    except:
        # NOTE(review): this clobbers identity_verify_required, while the
        # warning talks about `custom_text_rewriter` -- it looks like it was
        # meant to disable custom_text_rewriter_enable instead; confirm.
        # Moot in practice because we re-raise immediately afterwards.
        identity_verify_required = False
        warnprint('Cannot import custom_response_text_rewriter custom_func.py,'
                  ' `custom_text_rewriter` is now disabled(if it was enabled)')
        traceback.print_exc()
        raise
if identity_verify_required:
    try:
        from custom_func import custom_identity_verify
    except:
        identity_verify_required = False
        warnprint('Cannot import custom_identity_verify from custom_func.py,'
                  ' `identity_verify` is now disabled (if it was enabled)')
        traceback.print_exc()
        raise
if enable_custom_access_cookie_generate_and_verify:
    try:
        from custom_func import custom_generate_access_cookie, custom_verify_access_cookie
    except:
        enable_custom_access_cookie_generate_and_verify = False
        errprint('Cannot import custom_generate_access_cookie and custom_generate_access_cookie from custom_func.py,'
                 ' `enable_custom_access_cookie_generate_and_verify` is now disabled (if it was enabled)')
        traceback.print_exc()
        raise
# best-effort: pull in any remaining user hooks, ignore failures
try:
    from custom_func import *
except:
    pass
if enable_cron_tasks:
    for _task_dict in cron_tasks_list:
        try:
            # resolve the configured target function NAME to the callable
            _task_dict['target'] = globals()[_task_dict['target']]
            cron_task_container(_task_dict, add_task_only=True)
        except Exception as e:
            errprint('UnableToInitCronTask', e)
            traceback.print_exc()
            raise
    # background scheduler thread; daemon so it never blocks interpreter exit
    th = threading.Thread(target=cron_task_host, daemon=True)
    th.start()
# ################# End Post (auto)Exec Section #################
if __name__ == '__main__':
    errprint('After version 0.21.5, please use `python3 wsgi.py` to run')
    exit()
|
rdoclient.py | """
RANDOM.ORG JSON-RPC API (Release 1) implementation.
This is a Python implementation of the RANDOM.ORG JSON-RPC API (R1).
It provides either serialized or unserialized access to both the signed
and unsigned methods of the API through the RandomOrgClient class. It
also provides a convenience class through the RandomOrgClient class,
the RandomOrgCache, for precaching requests.
Classes:
RandomOrgClient -- main class through which API functions are accessed.
RandomOrgCache -- for precaching API responses.
RandomOrgSendTimeoutError -- when request can't be sent in a set time.
RandomOrgKeyNotRunningError -- key stopped exception.
RandomOrgInsufficientRequestsError -- requests allowance exceeded.
RandomOrgInsufficientBitsError -- bits allowance exceeded.
"""
import json
import logging
import threading
import time
import uuid
from datetime import datetime
# Queue was changed to queue in Python 3
from queue import Queue, Empty
import requests
# Basic RANDOM.ORG API functions https://api.random.org/json-rpc/1/
_INTEGER_METHOD = 'generateIntegers'
_DECIMAL_FRACTION_METHOD = 'generateDecimalFractions'
_GAUSSIAN_METHOD = 'generateGaussians'
_STRING_METHOD = 'generateStrings'
_UUID_METHOD = 'generateUUIDs'
_BLOB_METHOD = 'generateBlobs'
_GET_USAGE_METHOD = 'getUsage'

# Signed RANDOM.ORG API functions https://api.random.org/json-rpc/1/signing
_SIGNED_INTEGER_METHOD = 'generateSignedIntegers'
_SIGNED_DECIMAL_FRACTION_METHOD = 'generateSignedDecimalFractions'
_SIGNED_GAUSSIAN_METHOD = 'generateSignedGaussians'
_SIGNED_STRING_METHOD = 'generateSignedStrings'
_SIGNED_UUID_METHOD = 'generateSignedUUIDs'
_SIGNED_BLOB_METHOD = 'generateSignedBlobs'
_VERIFY_SIGNATURE_METHOD = 'verifySignature'

# Blob format literals (accepted values for the blob 'format' parameter)
_BLOB_FORMAT_BASE64 = 'base64'
_BLOB_FORMAT_HEX = 'hex'

# Default backoff (seconds) to use if no advisoryDelay backoff supplied by server
_DEFAULT_DELAY = 1.0

# On request fetch fresh allowance state if current state
# data is older than this value (seconds)
_ALLOWANCE_STATE_REFRESH_SECONDS = 3600.0
class RandomOrgSendTimeoutError(Exception):
    """Blocking-timeout exceeded before the request could be sent.

    Raised by RandomOrgClient when blocking_timeout elapses while the
    request is still waiting to be dispatched.
    """
class RandomOrgKeyNotRunningError(Exception):
    """API key is in the stopped state.

    Raised by RandomOrgClient when its API key has been stopped; no
    request can complete until the key is running again.
    """
class RandomOrgInsufficientRequestsError(Exception):
    """Daily server requests allowance exceeded.

    Raised by RandomOrgClient when the API key's requests allowance is
    used up: a back-off until midnight UTC is in effect and the client
    will not send further requests, since the server could not return
    meaningful responses anyway.
    """
class RandomOrgInsufficientBitsError(Exception):
    """Request exceeds the key's remaining server bits allowance.

    Raised by RandomOrgClient when a request asks for more random bits
    than the API key has left. Smaller requests may still succeed; the
    client's getBitsLeft() call helps pick an appropriate size.
    """
class RandomOrgCache(object):
    """
    RandomOrgCache for precaching request responses.
    Precache for frequently used requests. Instances should only be
    obtained using RandomOrgClient's create_x_cache methods, never
    created separately.
    This class strives to keep a Queue of response results populated
    for instant access via its public get method. Work is done by a
    background Thread, which issues the appropriate request at suitable
    intervals.
    Public methods:
    stop -- instruct cache to stop repopulating itself.
    resume -- if cache is stopped, restart repopulation.
    get -- return a response for the request this RandomOrgCache
        represents or raise a queue Empty exception.
    """
    def __init__(self, request_function, process_function, request,
                 cache_size, bulk_request_number=0, request_number=0):
        """
        Constructor.
        Initialize class and start Queue population Thread running as a
        daemon. Should only be called by RandomOrgClient's
        create_x_cache methods.
        Keyword arguments:
        request_function -- function to send supplied request to server.
        process_function -- function to process result of
            request_function into expected output.
        request -- request to send to server via request_function.
        cache_size -- number of request responses to try maintain.
        bulk_request_number -- if request is set to be issued in bulk,
            number of result sets in a bulk request (default 0).
        request_number -- if request is set to be issued in bulk,
            number of results in a single request (default 0).
        """
        self._request_function = request_function
        self._process_function = process_function
        self._request = request
        self._queue = Queue(cache_size)
        self._bulk_request_number = bulk_request_number
        self._request_number = request_number
        # Condition lock; used both for pause/resume signalling and to
        # notify the worker when an item has been consumed from the
        # queue.
        self._lock = threading.Condition()
        self._paused = False
        # Daemon thread that keeps this RandomOrgCache populated.
        self._thread = threading.Thread(target=self._populate_queue)
        self._thread.daemon = True
        self._thread.start()
    def _issue_and_process(self):
        # Issue one (blocking) request and return its processed result.
        response = self._request_function(self._request)
        return self._process_function(response)
    def _wait_for_consumer(self):
        # Block until get() (or stop()/resume()) notifies on the
        # Condition, signalling that queue space may be available.
        with self._lock:
            self._lock.wait()
    def _populate_queue(self):
        # Keep issuing requests to server until Queue is full. When
        # Queue is full, if requests are being issued in bulk, wait
        # until Queue has enough space to accommodate all of a bulk
        # request before issuing a new request, otherwise issue a new
        # request every time an item in the Queue has been consumed.
        #
        # Note that requests to the server are blocking, i.e., only one
        # request will be issued by the cache at any given time.
        while True:
            # Check the pause flag while holding the Condition lock so
            # a concurrent stop()/resume() notification cannot be lost
            # between the check and the wait (lost-wakeup race in the
            # original unlocked check).
            with self._lock:
                while self._paused:
                    self._lock.wait()
            # If we're issuing bulk requests...
            if self._bulk_request_number > 0:
                # Only issue a request when the whole bulk response
                # fits in the queue. `<=` because a queue with exactly
                # bulk_request_number free slots can hold one more bulk
                # response (the previous `<` left one bulk request's
                # worth of capacity permanently unused).
                if (self._queue.qsize() <=
                        self._queue.maxsize - self._bulk_request_number):
                    # Issue and process request and response.
                    try:
                        result = self._issue_and_process()
                        # Split bulk response into result sets.
                        for i in range(0, len(result),
                                       self._request_number):
                            self._queue.put(
                                result[i:i + self._request_number])
                    # TODO: propose appropriate exception
                    except Exception as e:
                        # Don't handle failures from _request_function()
                        # Just try again later.
                        logging.info(
                            "RandomOrgCache populate exception: {0}".format(
                                str(e)
                            )
                        )
                # No space, sleep and wait for consumed notification.
                else:
                    self._wait_for_consumer()
            # Not in bulk mode, repopulate queue as it empties.
            elif not self._queue.full():
                try:
                    self._queue.put(self._issue_and_process())
                except Exception as e:
                    # Don't handle failures from _request_function()
                    # Just try again later.
                    logging.info(
                        "RandomOrgCache populate exception: {0}".format(str(e))
                    )
            # No space, sleep and wait for consumed notification.
            else:
                self._wait_for_consumer()
    def stop(self):
        """
        Stop cache.
        Cache will not continue to populate itself.
        """
        # Set the flag and notify under the lock so the worker cannot
        # miss the state change.
        with self._lock:
            self._paused = True
            self._lock.notify()
    def resume(self):
        """
        Resume cache.
        Cache will resume populating itself if stopped.
        """
        with self._lock:
            self._paused = False
            self._lock.notify()
    def get(self):
        """
        Get next response.
        Get next appropriate response for the request this
        RandomOrgCache represents or, if the Queue is empty, raise a
        queue Empty exception (the get is non-blocking).
        """
        result = self._queue.get(False)
        # Tell the worker an item was consumed so it can top up.
        with self._lock:
            self._lock.notify()
        return result
class RandomOrgClient(object):
"""
RandomOrgClient main class through which API functions are accessed.
This class provides either serialized or unserialized (determined
on class creation) access to both the signed and unsigned methods
of the RANDOM.ORG API. These are threadsafe and implemented as
blocking remote procedure calls.
If requests are to be issued serially a background Thread will
maintain a Queue of requests to process in sequence.
The class also provides access to creation of a convenience class,
RandomOrgCache, for precaching API responses when the request is
known in advance.
This class will only allow the creation of one instance per API
key. If an instance of this class already exists for a given key,
that instance will be returned on init instead of a new instance.
This class obeys most of the guidelines set forth in
https://api.random.org/guidelines
All requests respect the server's advisoryDelay returned in any
responses, or use _DEFAULT_DELAY if no advisoryDelay is returned. If
    the supplied API key has exceeded its daily request allowance,
this implementation will back off until midnight UTC.
Public methods:
Basic methods for generating randomness, see:
https://api.random.org/json-rpc/1/basic
generate_integers -- get a list of random integers.
generate_decimal_fractions -- get a list of random doubles.
generate_gaussians -- get a list of random numbers.
generate_strings -- get a list of random strings.
generate_UUIDs -- get a list of random UUIDs.
generate_blobs -- get a list of random blobs.
Signed methods for generating randomness, see:
https://api.random.org/json-rpc/1/signing
generate_signed_integers -- get a signed response containing a list
of random integers and a signature.
generate_signed_decimal_fractions -- get a signed response
containing a list of random doubles and a signature.
generate_signed_gaussians -- get a signed response containing a
list of random numbers and a signature.
generate_signed_strings -- get a signed response containing a list
of random strings and a signature.
generate_signed_UUIDs -- get a signed response containing a list of
random UUIDs and a signature.
generate_signed_blobs -- get a signed response containing a list of
random blobs and a signature.
Signature verification for signed methods, see:
https://api.random.org/json-rpc/1/signing
verify_signature -- verify a response against its signature.
# Methods used to create a cache for any given randomness request.
create_integer_cache -- get a RandomOrgCache from which to obtain a
list of random integers.
create_decimal_fraction_cache -- get a RandomOrgCache from which to
obtain a list of random doubles.
create_gaussian_cache -- get a RandomOrgCache from which to obtain
a list of random numbers.
create_string_cache -- get a RandomOrgCache from which to obtain a
list of random strings.
create_UUID_cache -- get a RandomOrgCache from which to obtain a
list of random UUIDs.
create_blob_cache -- get a RandomOrgCache from which to obtain a
list of random blobs.
# Methods for accessing server usage statistics
get_requests_left -- get estimated number of remaining API requests.
get_bits_left -- get estimated number of bits left.
"""
# Maintain a dictionary of API keys and their instances.
__key_indexed_instances = {}
def __new__(cls, *args, **kwds):
"""
Instance creation.
Ensure only one instace of RandomOrgClient exists per API key.
Create a new instance if the supplied key isn't already known,
otherwise return the previously instantiated one.
"""
instance = RandomOrgClient.__key_indexed_instances.get(args[0], None)
if instance is None:
instance = object.__new__(cls)
RandomOrgClient.__key_indexed_instances[args[0]] = instance
return instance
def __init__(self, api_key,
blocking_timeout=24.0*60.0*60.0, http_timeout=120.0,
serialized=True):
"""
Constructor.
Initialize class and start serialized request sending Thread
running as a daemon if applicable.
Keyword arguments:
api_key -- API key obtained from the RANDOM.ORG website, see:
https://api.random.org/api-keys
blocking_timeout -- maximum time in seconds and fractions of
seconds to wait before being allowed to send a request.
Note this is a hint not a guarantee. Be advised advisory
delay from server must always be obeyed. Supply a value
of -1 to allow blocking forever. (default 24.0*60.0*60.0,
i.e., 1 day)
http_timeout -- maximum time in seconds and fractions of
seconds to wait for the server response to a request.
(default 120.0).
serialized -- determines whether or not requests from this
instance will be added to a Queue and issued serially or
sent when received, obeying any advisory delay (default
True).
"""
# __init__ will always be called after __new__, but if an
# instance already exists for the API key we want to bail
# before actually doing anything in init.
if not hasattr(self, '_api_key'):
if serialized:
# set send function
self._send_request = self._send_serialized_request
# set up the serialized request Queue and Thread
self._serialized_queue = Queue()
self._serialized_thread = threading.Thread(
target=self._threaded_request_sending
)
self._serialized_thread.daemon = True
self._serialized_thread.start()
else:
# set send function
self._send_request = self._send_unserialized_request
self._api_key = api_key
self._blocking_timeout = blocking_timeout
self._http_timeout = http_timeout
# maintain info to obey server advisory delay
self._advisory_delay_lock = threading.Lock()
self._advisory_delay = 0
self._last_response_received_time = 0
# maintain usage statistics from server
self._requests_left = None
self._bits_left = None
# backoff info for when API key is detected as not running -
# probably because key has exceeded its daily usage limit.
# Backoff runs until midnight UTC.
self._backoff = None
self._backoff_error = None
else:
logging.info(
"Using RDO instance already created for key: {0}*".format(
api_key[:8]
)
)
# Basic methods for generating randomness, see:
# https://api.random.org/json-rpc/1/basic
def generate_integers(self, n, min, max, replacement=True):
"""
Generate random integers.
Request and return a list (size n) of true random integers
within a user-defined range from the server. See:
https://api.random.org/json-rpc/1/basic#generateIntegers
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random integers you need. Must be within the
[1,1e4] range.
min -- The lower boundary for the range from which the random
numbers will be picked. Must be within the [-1e9,1e9] range.
max -- The upper boundary for the range from which the random
numbers will be picked. Must be within the [-1e9,1e9] range.
replacement -- Specifies whether the random numbers should be
picked with replacement. If True the resulting numbers may
contain duplicate values, otherwise the numbers will all be
unique (default True).
"""
params = {
'apiKey': self._api_key,
'n': n, 'min': min, 'max': max,
'replacement': replacement
}
request = self._generate_request(_INTEGER_METHOD, params)
response = self._send_request(request)
return self._extract_ints(response)
def generate_decimal_fractions(self, n, decimal_places, replacement=True):
"""
Generate random decimal fractions.
Request and return a list (size n) of true random decimal
fractions, from a uniform distribution across the [0,1]
interval with a user-defined number of decimal places from the
server. See:
https://api.random.org/json-rpc/1/basic#generateDecimalFractions
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random decimal fractions you need. Must be within
the [1,1e4] range.
decimal_places -- The number of decimal places to use. Must be
within the [1,20] range.
replacement -- Specifies whether the random numbers should be
picked with replacement. If True the resulting numbers may
contain duplicate values, otherwise the numbers will all be
unique (default True).
"""
params = { 'apiKey':self._api_key, 'n':n,
'decimalPlaces':decimal_places, 'replacement':replacement }
request = self._generate_request(_DECIMAL_FRACTION_METHOD, params)
response = self._send_request(request)
return self._extract_doubles(response)
def generate_gaussians(self, n, mean, standard_deviation,
significant_digits):
"""
Generate random numbers.
Request and return a list (size n) of true random numbers from
a Gaussian distribution (also known as a normal distribution).
The form uses a Box-Muller Transform to generate the Gaussian
distribution from uniformly distributed numbers. See:
https://api.random.org/json-rpc/1/basic#generateGaussians
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random numbers you need. Must be within the
[1,1e4] range.
mean -- The distribution's mean. Must be within the [-1e6,1e6]
range.
standard_deviation -- The distribution's standard deviation.
Must be within the [-1e6,1e6] range.
significant_digits -- The number of significant digits to use.
Must be within the [2,20] range.
"""
params = {
'apiKey': self._api_key,
'n': n, 'mean': mean,
'standardDeviation': standard_deviation,
'significantDigits': significant_digits
}
request = self._generate_request(_GAUSSIAN_METHOD, params)
response = self._send_request(request)
return self._extract_doubles(response)
def generate_strings(self, n, length, characters, replacement=True):
"""
Generate random strings.
Request and return a list (size n) of true random unicode
strings from the server. See:
https://api.random.org/json-rpc/1/basic#generateStrings
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random strings you need. Must be within the
[1,1e4] range.
length -- The length of each string. Must be within the [1,20]
range. All strings will be of the same length.
characters -- A string that contains the set of characters that
are allowed to occur in the random strings. The maximum
number of characters is 80.
replacement -- Specifies whether the random strings should be
picked with replacement. If True the resulting list of
strings may contain duplicates, otherwise the strings will
all be unique (default True).
"""
params = {
'apiKey': self._api_key, 'n': n,
'length': length,
'characters': characters,
'replacement': replacement
}
request = self._generate_request(_STRING_METHOD, params)
response = self._send_request(request)
return self._extract_strings(response)
def generate_UUIDs(self, n):
"""
Generate random UUIDs.
Request and return a list (size n) of version 4 true random
Universally Unique IDentifiers (UUIDs) in accordance with
section 4.4 of RFC 4122, from the server. See:
https://api.random.org/json-rpc/1/basic#generateUUIDs
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random UUIDs you need. Must be within the [1,1e3]
range.
"""
params = {
'apiKey': self._api_key,
'n': n
}
request = self._generate_request(_UUID_METHOD, params)
response = self._send_request(request)
return self._extract_UUIDs(response)
def generate_blobs(self, n, size, format=_BLOB_FORMAT_BASE64):
"""
Generate random BLOBs.
Request and return a list (size n) of Binary Large OBjects
(BLOBs) as unicode strings containing true random data from the
server. See:
https://api.random.org/json-rpc/1/basic#generateBlobs
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random blobs you need. Must be within the [1,100]
range.
size -- The size of each blob, measured in bits. Must be within
the [1,1048576] range and must be divisible by 8.
format -- Specifies the format in which the blobs will be
returned. Values allowed are _BLOB_FORMAT_BASE64 and
_BLOB_FORMAT_HEX (default _BLOB_FORMAT_BASE64).
"""
params = {
'apiKey': self._api_key,
'n': n, 'size': size,
'format': format
}
request = self._generate_request(_BLOB_METHOD, params)
response = self._send_request(request)
return self._extract_blobs(response)
# Signed methods for generating randomness, see:
# https://api.random.org/json-rpc/1/signing
def generate_signed_integers(self, n, min, max, replacement=True):
"""
Generate digitally signed random integers.
Request a list (size n) of true random integers within a
user-defined range from the server. Returns a dictionary object
with the parsed integer list mapped to 'data', the original
response mapped to 'random', and the response's signature
mapped to 'signature'. See:
https://api.random.org/json-rpc/1/signing#generateSignedIntegers
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random integers you need. Must be within the
[1,1e4] range.
min -- The lower boundary for the range from which the random
numbers will be picked. Must be within the [-1e9,1e9] range.
max -- The upper boundary for the range from which the random
numbers will be picked. Must be within the [-1e9,1e9] range.
replacement -- Specifies whether the random numbers should be
picked with replacement. If True the resulting numbers may
contain duplicate values, otherwise the numbers will all be
unique (default True).
"""
params = {
'apiKey': self._api_key,
'n': n, 'min': min, 'max': max,
'replacement': replacement
}
request = self._generate_request(_SIGNED_INTEGER_METHOD, params)
response = self._send_request(request)
return self._extract_signed_response(response, self._extract_ints)
def generate_signed_decimal_fractions(self, n, decimal_places,
replacement=True):
"""
Generate digitally signed random decimal fractions.
Request a list (size n) of true random decimal fractions, from
a uniform distribution across the [0,1] interval with a
user-defined number of decimal places from the server. Returns
a dictionary object with the parsed decimal fraction list
mapped to 'data', the original response mapped to 'random', and
the response's signature mapped to 'signature'. See:
https://api.random.org/json-rpc/1/signing#generateSignedDecimalFractions
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random decimal fractions you need. Must be within
the [1,1e4] range.
decimal_places -- The number of decimal places to use. Must be
within the [1,20] range.
replacement -- Specifies whether the random numbers should be
picked with replacement. If True the resulting numbers may
contain duplicate values, otherwise the numbers will all be
unique (default True).
"""
params = {
'apiKey': self._api_key, 'n': n,
'decimalPlaces': decimal_places,
'replacement': replacement
}
request = self._generate_request(_SIGNED_DECIMAL_FRACTION_METHOD,
params)
response = self._send_request(request)
return self._extract_signed_response(response, self._extract_doubles)
def generate_signed_gaussians(self, n, mean, standard_deviation,
significant_digits):
"""
Generate digitally signed random numbers.
Request a list (size n) of true random numbers from a Gaussian
distribution (also known as a normal distribution). The form
uses a Box-Muller Transform to generate the Gaussian
distribution from uniformly distributed numbers. Returns a
dictionary object with the parsed random number list mapped to
'data', the original response mapped to 'random', and the
response's signature mapped to 'signature'. See:
https://api.random.org/json-rpc/1/signing#generateSignedGaussians
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random numbers you need. Must be within the
[1,1e4] range.
mean -- The distribution's mean. Must be within the [-1e6,1e6]
range.
standard_deviation -- The distribution's standard deviation.
Must be within the [-1e6,1e6] range.
significant_digits -- The number of significant digits to use.
Must be within the [2,20] range.
"""
params = {
'apiKey': self._api_key, 'n': n, 'mean': mean,
'standardDeviation': standard_deviation,
'significantDigits': significant_digits
}
request = self._generate_request(_SIGNED_GAUSSIAN_METHOD, params)
response = self._send_request(request)
return self._extract_signed_response(response, self._extract_doubles)
def generate_signed_strings(self, n, length, characters, replacement=True):
"""
Generate digitally signed random strings.
Request a list (size n) of true random strings from the server.
Returns a dictionary object with the parsed random string list
mapped to 'data', the original response mapped to 'random', and
the response's signature mapped to 'signature'. See:
https://api.random.org/json-rpc/1/signing#generateSignedStrings
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random strings you need. Must be within the
[1,1e4] range.
length -- The length of each string. Must be within the [1,20]
range. All strings will be of the same length.
characters -- A string that contains the set of characters that
are allowed to occur in the random strings. The maximum
number of characters is 80.
replacement -- Specifies whether the random strings should be
picked with replacement. If True the resulting list of
strings may contain duplicates, otherwise the strings will
all be unique (default True).
"""
params = {
'apiKey': self._api_key, 'n': n,
'length': length,
'characters': characters,
'replacement': replacement
}
request = self._generate_request(_SIGNED_STRING_METHOD, params)
response = self._send_request(request)
return self._extract_signed_response(response, self._extract_strings)
def generate_signed_UUIDs(self, n):
"""
Generate digitally signed random UUIDs.
Request a list (size n) of version 4 true random Universally
Unique IDentifiers (UUIDs) in accordance with section 4.4 of
RFC 4122, from the server. Returns a dictionary object with the
parsed random UUID list mapped to 'data', the original response
mapped to 'random', and the response's signature mapped to
'signature'. See:
https://api.random.org/json-rpc/1/signing#generateSignedUUIDs
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random UUIDs you need. Must be within the [1,1e3]
range.
"""
params = {'apiKey': self._api_key, 'n': n}
request = self._generate_request(_SIGNED_UUID_METHOD, params)
response = self._send_request(request)
return self._extract_signed_response(response, self._extract_UUIDs)
def generate_signed_blobs(self, n, size, format=_BLOB_FORMAT_BASE64):
"""
Generate digitally signed random BLOBs.
Request a list (size n) of Binary Large OBjects (BLOBs)
containing true random data from the server. Returns a
dictionary object with the parsed random BLOB list mapped to
'data', the original response mapped to 'random', and the
response's signature mapped to 'signature'. See:
https://api.random.org/json-rpc/1/signing#generateSignedBlobs
Raises a RandomOrgSendTimeoutError if time spent waiting before
request is sent exceeds this instance's blocking_timeout.
Raises a RandomOrgKeyNotRunningError if this API key is stopped.
Raises a RandomOrgInsufficientRequestsError if this API key's
server requests allowance has been exceeded and the instance is
backing off until midnight UTC.
Raises a RandomOrgInsufficientBitsError if this API key's
server bits allowance has been exceeded.
Raises a ValueError on RANDOM.ORG Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Raises a RuntimeError on JSON-RPC Errors, error descriptions:
https://api.random.org/json-rpc/1/error-codes
Can also raise connection errors as described here:
http://docs.python-requests.org/en/v2.0-0/user/quickstart/#errors-and-exceptions
Keyword arguments:
n -- How many random blobs you need. Must be within the [1,100]
range.
size -- The size of each blob, measured in bits. Must be within
the [1,1048576] range and must be divisible by 8.
format -- Specifies the format in which the blobs will be
returned. Values allowed are _BLOB_FORMAT_BASE64 and
_BLOB_FORMAT_HEX (default _BLOB_FORMAT_BASE64).
"""
params = {
'apiKey': self._api_key, 'n': n,
'size': size, 'format': format
}
request = self._generate_request(_SIGNED_BLOB_METHOD, params)
response = self._send_request(request)
return self._extract_signed_response(response, self._extract_blobs)
# Signature verification for signed methods, see:
# https://api.random.org/json-rpc/1/signing
def verify_signature(self, random, signature):
    """Verify the signature of a previously received signed response.

    Asks the server whether the given random object really originated
    from RANDOM.ORG with the given signature, i.e. checks the
    authenticity of the numbers.  Returns True on verification
    success.  See:
    https://api.random.org/json-rpc/1/signing#verifySignature

    Keyword arguments:
    random -- the 'random' field from a response returned by one of
        the Signed API methods.
    signature -- the 'signature' field from the same response.

    Raises RandomOrgSendTimeoutError if the blocking_timeout elapses
    before sending, RandomOrgKeyNotRunningError if the API key is
    stopped, RandomOrgInsufficientRequestsError /
    RandomOrgInsufficientBitsError when allowances are exhausted,
    ValueError on RANDOM.ORG errors and RuntimeError on JSON-RPC
    errors (https://api.random.org/json-rpc/1/error-codes), and may
    raise requests connection errors.
    """
    payload = {'random': random, 'signature': signature}
    request = self._generate_request(_VERIFY_SIGNATURE_METHOD, payload)
    response = self._send_request(request)
    return self._extract_verification_response(response)
# Methods used to create a cache for any given randomness request.
def create_integer_cache(self, n, min, max, replacement=True,
                         cache_size=20):
    """Get a RandomOrgCache to obtain random integers.

    The RandomOrgCache can be polled for new results conforming to
    the output format of the input request.  See the output of
    generate_integers() for the return value of a poll.

    Keyword arguments:
    n -- how many random integers per result set, within [1,1e4].
    min -- lower boundary of the range to pick from, within
        [-1e9,1e9].
    max -- upper boundary of the range to pick from, within
        [-1e9,1e9].
    replacement -- if True the numbers may contain duplicates,
        otherwise they are all unique (default True).
    cache_size -- number of result-sets for the cache to try to
        maintain at any given time (default 20, minimum 2).
    """
    if cache_size < 2:
        cache_size = 2
    # If possible, make requests more efficient by bulk-ordering
    # from the server: either 5 sets of items at a time, or
    # cache_size // 2 if 5 >= cache_size.  Floor division is
    # required: '/' would make bulk_n a float in Python 3, and the
    # API rejects a non-integer 'n'.
    if replacement:
        bulk_n = cache_size // 2 if 5 >= cache_size else 5
        params = {
            'apiKey': self._api_key, 'n': bulk_n * n,
            'min': min, 'max': max, 'replacement': replacement
        }
    # Without replacement every request must deliver exactly n items,
    # so bulk-ordering is not possible.
    else:
        bulk_n = 0
        params = {
            'apiKey': self._api_key, 'n': n,
            'min': min, 'max': max,
            'replacement': replacement
        }
    # Get the request object for use in all requests from this cache.
    request = self._generate_request(_INTEGER_METHOD, params)
    return RandomOrgCache(self._send_request, self._extract_ints,
                          request, cache_size, bulk_n, n)
def create_decimal_fraction_cache(self, n, decimal_places,
                                  replacement=True, cache_size=20):
    """Get a RandomOrgCache to obtain random decimal fractions.

    The RandomOrgCache can be polled for new results conforming to
    the output format of the input request.  See the output of
    generate_decimal_fractions() for the return value of a poll.

    Keyword arguments:
    n -- how many random decimal fractions per result set, within
        [1,1e4].
    decimal_places -- number of decimal places, within [1,20].
    replacement -- if True the numbers may contain duplicates,
        otherwise they are all unique (default True).
    cache_size -- number of result-sets for the cache to try to
        maintain at any given time (default 20, minimum 2).
    """
    if cache_size < 2:
        cache_size = 2
    # If possible, make requests more efficient by bulk-ordering
    # from the server: 5 sets at a time, or cache_size // 2 if
    # 5 >= cache_size.  Floor division keeps 'n' an int -- '/'
    # would produce a float the API rejects.
    if replacement:
        bulk_n = cache_size // 2 if 5 >= cache_size else 5
        params = {
            'apiKey': self._api_key, 'n': bulk_n * n,
            'decimalPlaces': decimal_places,
            'replacement': replacement
        }
    # Not possible to make the request more efficient without
    # replacement.
    else:
        bulk_n = 0
        params = {
            'apiKey': self._api_key, 'n': n,
            'decimalPlaces': decimal_places,
            'replacement': replacement
        }
    # Get the request object for use in all requests from this cache.
    request = self._generate_request(_DECIMAL_FRACTION_METHOD, params)
    return RandomOrgCache(self._send_request, self._extract_doubles,
                          request, cache_size, bulk_n, n)
def create_gaussian_cache(self, n, mean, standard_deviation,
                          significant_digits, cache_size=20):
    """Get a RandomOrgCache to obtain Gaussian random numbers.

    The RandomOrgCache can be polled for new results conforming to
    the output format of the input request.  See the output of
    generate_gaussians() for the return value of a poll.

    Keyword arguments:
    n -- how many random numbers per result set, within [1,1e4].
    mean -- the distribution's mean, within [-1e6,1e6].
    standard_deviation -- the distribution's standard deviation,
        within [-1e6,1e6].
    significant_digits -- number of significant digits, within
        [2,20].
    cache_size -- number of result-sets for the cache to try to
        maintain at any given time (default 20, minimum 2).
    """
    if cache_size < 2:
        cache_size = 2
    # Gaussians are always drawn with replacement, so bulk-order
    # from the server: 5 sets at a time, or cache_size // 2 if
    # 5 >= cache_size.  Floor division keeps 'n' an int -- '/'
    # would produce a float the API rejects.
    bulk_n = cache_size // 2 if 5 >= cache_size else 5
    params = {
        'apiKey': self._api_key, 'n': bulk_n * n, 'mean': mean,
        'standardDeviation': standard_deviation,
        'significantDigits': significant_digits
    }
    # Get the request object for use in all requests from this cache.
    request = self._generate_request(_GAUSSIAN_METHOD, params)
    return RandomOrgCache(self._send_request, self._extract_doubles,
                          request, cache_size, bulk_n, n)
def create_string_cache(self, n, length, characters,
                        replacement=True, cache_size=20):
    """Get a RandomOrgCache to obtain random strings.

    The RandomOrgCache can be polled for new results conforming to
    the output format of the input request.  See the output of
    generate_strings() for the return value of a poll.

    Keyword arguments:
    n -- how many random strings per result set, within [1,1e4].
    length -- length of each string, within [1,20]; all strings
        share the same length.
    characters -- string containing the set of allowed characters
        (at most 80).
    replacement -- if True the list may contain duplicate strings,
        otherwise they are all unique (default True).
    cache_size -- number of result-sets for the cache to try to
        maintain at any given time (default 20, minimum 2).
    """
    if cache_size < 2:
        cache_size = 2
    # If possible, make requests more efficient by bulk-ordering
    # from the server: 5 sets at a time, or cache_size // 2 if
    # 5 >= cache_size.  Floor division keeps 'n' an int -- '/'
    # would produce a float the API rejects.
    if replacement:
        bulk_n = cache_size // 2 if 5 >= cache_size else 5
        params = {
            'apiKey': self._api_key,
            'n': bulk_n * n, 'length': length,
            'characters': characters,
            'replacement': replacement
        }
    # Not possible to make the request more efficient without
    # replacement.
    else:
        bulk_n = 0
        params = {
            'apiKey': self._api_key,
            'n': n, 'length': length,
            'characters': characters,
            'replacement': replacement
        }
    # Get the request object for use in all requests from this cache.
    request = self._generate_request(_STRING_METHOD, params)
    return RandomOrgCache(self._send_request, self._extract_strings,
                          request, cache_size, bulk_n, n)
def create_UUID_cache(self, n, cache_size=10):
    """Get a RandomOrgCache to obtain random UUIDs.

    The RandomOrgCache can be polled for new results conforming to
    the output format of the input request.  See the output of
    generate_UUIDs() for the return value of a poll.

    Keyword arguments:
    n -- how many random UUIDs per result set, within [1,1e3].
    cache_size -- number of result-sets for the cache to try to
        maintain at any given time (default 10, minimum 2).
    """
    if cache_size < 2:
        cache_size = 2
    # Make requests more efficient by bulk-ordering from the
    # server: 5 sets at a time, or cache_size // 2 if
    # 5 >= cache_size.  Floor division keeps 'n' an int -- '/'
    # would produce a float the API rejects.
    bulk_n = cache_size // 2 if 5 >= cache_size else 5
    params = {'apiKey': self._api_key, 'n': bulk_n * n}
    # Get the request object for use in all requests from this cache.
    request = self._generate_request(_UUID_METHOD, params)
    return RandomOrgCache(self._send_request, self._extract_UUIDs,
                          request, cache_size, bulk_n, n)
def create_blob_cache(self, n, size, format=_BLOB_FORMAT_BASE64,
                      cache_size=10):
    """Get a RandomOrgCache to obtain random blobs.

    The RandomOrgCache can be polled for new results conforming to
    the output format of the input request.  See the output of
    generate_blobs() for the return value of a poll.

    Keyword arguments:
    n -- how many random blobs per result set, within [1,100].
    size -- size of each blob in bits, within [1,1048576] and
        divisible by 8.
    format -- format of the returned blobs: _BLOB_FORMAT_BASE64 or
        _BLOB_FORMAT_HEX (default _BLOB_FORMAT_BASE64).
    cache_size -- number of result-sets for the cache to try to
        maintain at any given time (default 10, minimum 2).
    """
    if cache_size < 2:
        cache_size = 2
    # Make requests more efficient by bulk-ordering from the
    # server: 5 sets at a time, or cache_size // 2 if
    # 5 >= cache_size.  Floor division keeps 'n' an int -- '/'
    # would produce a float the API rejects.
    bulk_n = cache_size // 2 if 5 >= cache_size else 5
    params = {
        'apiKey': self._api_key, 'n': bulk_n * n,
        'size': size, 'format': format
    }
    # Get the request object for use in all requests from this cache.
    request = self._generate_request(_BLOB_METHOD, params)
    return RandomOrgCache(self._send_request, self._extract_blobs,
                          request, cache_size, bulk_n, n)
# Methods for accessing server usage statistics
def get_requests_left(self):
    """Get the (estimated) number of remaining API requests.

    If the cached usage info is older than
    _ALLOWANCE_STATE_REFRESH_SECONDS, fresh info is obtained from
    the server first; in that case the usual request exceptions
    (RandomOrgSendTimeoutError, RandomOrgKeyNotRunningError,
    RandomOrgInsufficientRequestsError,
    RandomOrgInsufficientBitsError, ValueError / RuntimeError on API
    errors, requests connection errors) can be raised.
    """
    # time.monotonic() replaces time.clock(), which was removed in
    # Python 3.8; _send_request_core records
    # _last_response_received_time with the same clock.
    if self._requests_left is None or \
            time.monotonic() > self._last_response_received_time + \
            _ALLOWANCE_STATE_REFRESH_SECONDS:
        self._get_usage()
    return self._requests_left
def get_bits_left(self):
    """Get the (estimated) number of remaining true random bits.

    If the cached usage info is older than
    _ALLOWANCE_STATE_REFRESH_SECONDS, fresh info is obtained from
    the server first; in that case the usual request exceptions
    (RandomOrgSendTimeoutError, RandomOrgKeyNotRunningError,
    RandomOrgInsufficientRequestsError,
    RandomOrgInsufficientBitsError, ValueError / RuntimeError on API
    errors, requests connection errors) can be raised.
    """
    # time.monotonic() replaces time.clock(), which was removed in
    # Python 3.8; _send_request_core records
    # _last_response_received_time with the same clock.
    if self._bits_left is None or \
            time.monotonic() > self._last_response_received_time + \
            _ALLOWANCE_STATE_REFRESH_SECONDS:
        self._get_usage()
    return self._bits_left
# Private methods for class operation.
def _send_unserialized_request(self, request):
    """Send a request immediately, bypassing the serialized queue."""
    result = self._send_request_core(request)
    # _send_request_core hands errors back instead of raising them;
    # re-raise here so callers see a normal exception.
    error = result.get('exception')
    if error is not None:
        raise error
    return result['response']
def _send_serialized_request(self, request):
    # Queue the request with its own Condition lock and block the
    # calling thread until the sender thread completes it, or until
    # this instance's blocking_timeout elapses.
    lock = threading.Condition()
    lock.acquire()
    data = {
        'lock': lock, 'request': request,
        'response': None, 'exception': None
    }
    self._serialized_queue.put(data)
    # Wait on the Condition for the specified blocking timeout
    # (-1 means block indefinitely).
    lock.wait(timeout=None if self._blocking_timeout == -1 else
              self._blocking_timeout)
    # Lock has now either been notified or timed out.
    # Examine data to determine which and react accordingly.
    # Request wasn't sent in time: cancel it (the sender thread skips
    # entries whose 'request' is None) and raise.
    if data['response'] is None and data['exception'] is None:
        data['request'] = None
        lock.release()
        raise RandomOrgSendTimeoutError(
            'The defined maximum allowed blocking time of '
            '{0}s has been exceeded while waiting for a synchronous '
            'request to send.'.format(
                str(self._blocking_timeout))
        )
    # Exception on sending request: re-raise it in the caller.
    if data['exception'] is not None:
        lock.release()
        raise data['exception']
    # Request was successful.
    lock.release()
    return data['response']
def _threaded_request_sending(self):
    # Consumer loop run by the queue-processing thread: pulls queued
    # requests and fulfils them one at a time, handing results (or
    # exceptions) back through the shared dict and its Condition.
    while True:
        # Block and wait for a request.
        request = self._serialized_queue.get(block=True)
        # Get the request's lock to indicate request in progress.
        lock = request['lock']
        lock.acquire()
        # If request still exists it hasn't been cancelled by a
        # timed-out waiter in _send_serialized_request.
        if request['request'] is not None:
            # Send request.
            data = self._send_request_core(request['request'])
            # Set result.
            if 'exception' in data:
                request['exception'] = data['exception']
            else:
                request['response'] = data['response']
        # Notify completion and return
        lock.notify()
        lock.release()
def _send_request_core(self, request):
    # Post a JSON-RPC request to RANDOM.ORG and translate API error
    # codes into exceptions.  Returns {'response': data} on success
    # or {'exception': exc} on failure -- exceptions are returned,
    # not raised, so the serialized sender thread can hand them back
    # to the waiting caller.
    from datetime import timedelta

    # If a backoff is set, no more requests can be issued until the
    # required backoff time is up.
    if self._backoff is not None:
        # Time not yet up, return the stored error.
        if datetime.utcnow() < self._backoff:
            return {
                'exception':
                    RandomOrgInsufficientRequestsError(self._backoff_error)
            }
        # Time is up, clear backoff.
        else:
            self._backoff = None
            self._backoff_error = None

    # Check server advisory delay.  time.monotonic() replaces
    # time.clock(), which was removed in Python 3.8;
    # _last_response_received_time is recorded with the same clock
    # below.
    self._advisory_delay_lock.acquire()
    wait = self._advisory_delay - (
        time.monotonic() - self._last_response_received_time
    )
    self._advisory_delay_lock.release()

    # Wait the specified delay if necessary and if wait time is not
    # longer than the set blocking_timeout.
    if wait > 0:
        if self._blocking_timeout != -1 and wait > self._blocking_timeout:
            return {
                'exception':
                    RandomOrgSendTimeoutError(
                        'The server advisory delay of {0}s is greater '
                        'than the defined maximum allowed '
                        'blocking time of {1}s.'.format(
                            str(wait), str(self._blocking_timeout))
                    )
            }
        time.sleep(wait)

    # Send the request & parse the response.
    response = requests.post('https://api.random.org/json-rpc/2/invoke',
                             data=json.dumps(request),
                             headers={'content-type': 'application/json'},
                             timeout=self._http_timeout)
    data = response.json()

    if 'error' in data:
        code = int(data['error']['code'])
        message = data['error']['message']
        # RuntimeError, error codes listed under JSON-RPC Errors:
        # https://api.random.org/json-rpc/1/error-codes
        if code in [-32700] + list(
                range(-32603, -32600)) + list(range(-32099, -32000)):
            return {
                'exception':
                    RuntimeError(
                        'Error {0}: {1}'.format(
                            str(code), message)
                    )
            }
        # RandomOrgKeyNotRunningError, API key not running, from
        # RANDOM.ORG Errors: https://api.random.org/json-rpc/1/error-codes
        elif code == 401:
            return {
                'exception': RandomOrgKeyNotRunningError(
                    'Error {0}: {1}'.format(
                        str(code), message)
                )
            }
        # RandomOrgInsufficientRequestsError, requests allowance
        # exceeded; back off until midnight UTC.  Adding a timedelta
        # (instead of replace(day=day+1)) is safe on the last day of
        # a month, where day+1 would raise ValueError.
        elif code == 402:
            self._backoff = (datetime.utcnow() + timedelta(days=1)).replace(
                hour=0, minute=0, second=0, microsecond=0
            )
            self._backoff_error = 'Error {0}: {1}'.format(str(code),
                                                          message)
            return {
                'exception':
                    RandomOrgInsufficientRequestsError(self._backoff_error)
            }
        # RandomOrgInsufficientBitsError, bits allowance exceeded,
        # from RANDOM.ORG Errors:
        # https://api.random.org/json-rpc/1/error-codes
        elif code == 403:
            return {
                'exception': RandomOrgInsufficientBitsError(
                    'Error {0}: {1}'.format(
                        str(code), message)
                )
            }
        # ValueError, error codes listed under RANDOM.ORG Errors:
        # https://api.random.org/json-rpc/1/error-codes
        else:
            return {
                'exception': ValueError('Error {0}: {1}'.format(
                    str(code), message)
                )
            }

    # Update usage stats.
    if 'requestsLeft' in data['result']:
        self._requests_left = int(data['result']['requestsLeft'])
        self._bits_left = int(data['result']['bitsLeft'])

    # Set new server advisory delay.
    self._advisory_delay_lock.acquire()
    if 'advisoryDelay' in data['result']:
        # Convert millis to decimal seconds.
        self._advisory_delay = int(data['result']['advisoryDelay']) / 1000.0
    else:
        # Use default if none from server.
        self._advisory_delay = _DEFAULT_DELAY
    self._last_response_received_time = time.monotonic()
    self._advisory_delay_lock.release()

    return {'response': data}
def _get_usage(self):
    # Issue a getUsage request; _send_request_core updates
    # _requests_left and _bits_left as a side effect, so the
    # response itself is not needed (the unused local was removed).
    params = {'apiKey': self._api_key}
    request = self._generate_request(_GET_USAGE_METHOD, params)
    self._send_request(request)
def _generate_request(self, method, params):
    # Build the base JSON-RPC 2.0 envelope with a unique request id.
    return {
        'jsonrpc': '2.0',
        'method': method,
        'params': params,
        'id': uuid.uuid4().hex,
    }
def _extract_response(self, response):
    # Pull the raw random data list out of a server response.
    result = response['result']
    return result['random']['data']
def _extract_signed_response(self, response, extract_function):
    # Bundle the parsed data with the server's random object and its
    # signature for later verification.
    result = response['result']
    return {
        'data': extract_function(response),
        'random': result['random'],
        'signature': result['signature'],
    }
def _extract_verification_response(self, response):
    # True if the server vouches for the signed response.
    authentic = response['result']['authenticity']
    return bool(authentic)
def _extract_ints(self, response):
    # Parse the response data into a list of ints.
    return [int(value) for value in self._extract_response(response)]
def _extract_doubles(self, response):
    # Parse the response data into a list of floats.
    return [float(value) for value in self._extract_response(response)]
def _extract_strings(self, response):
    # The response data is already a list of strings; pass through.
    return self._extract_response(response)
def _extract_UUIDs(self, response):
    # Parse the response data into uuid.UUID objects.
    return [uuid.UUID(value) for value in self._extract_response(response)]
def _extract_blobs(self, response):
    # The response data is already a list of blob strings; pass
    # through unchanged.
    return self._extract_response(response)
|
app.py | #!/app/env/bin/python
import http.server
import logging
import socket
from os import environ
from threading import Thread
from kubernetes import client
from kubernetes import config
logging.basicConfig(
format='%(asctime)s %(levelname)s: %(message)s',
level=logging.DEBUG,
)
class AppHTTPHandler(http.server.BaseHTTPRequestHandler):
    """Customized HTTP handler exposing a few diagnostic endpoints."""

    def _set_response(self, content_type: str = 'text/html'):
        """Send a 200 status line and the Content-type header."""
        self.send_response(200)
        self.send_header('Content-type', content_type)
        self.end_headers()

    def do_GET(self):
        """Route GET requests to the matching endpoint."""
        content_type = 'text/plain'
        path = self.path
        if path == '/pods':
            body = get_all_pods()
        elif path == '/me':
            # This pod's own IP address.
            body = socket.gethostbyname(socket.gethostname())
        elif path == '/version':
            body = environ.get('APP_VERSION', '')
        elif path == '/health':
            env = environ.get('ENV', False)
            body = f'OK {env}' if env else 'OK'
        else:
            body = ''
        self._set_response(content_type)
        payload = body.encode('utf-8') if isinstance(body, str) else body
        self.wfile.write(payload)
def run_http_server(server_class=http.server.HTTPServer, handler_class=AppHTTPHandler):
    """Start the HTTP server on SERVER_PORT (default 8080) in a
    non-daemon thread, so the process stays alive serving requests."""
    port = int(environ.get('SERVER_PORT', 8080))
    logging.debug('going to start http server')
    server = server_class(('', port), handler_class)
    server_thread = Thread(target=server.serve_forever, daemon=False)
    server_thread.start()
    logging.info(
        'http server started on %s:%s',
        socket.gethostbyname(socket.gethostname()), port,
    )
def get_all_pods():
    """Return one line per cluster pod: ip<TAB>namespace<TAB>name."""
    config.load_incluster_config()
    v1 = client.CoreV1Api()
    logging.info('getting all pods')
    pod_list = v1.list_pod_for_all_namespaces(watch=False)
    lines = [
        f'{pod.status.pod_ip}\t{pod.metadata.namespace}\t{pod.metadata.name}'
        for pod in pod_list.items
    ]
    return '\n'.join(lines)
# Script entry point: start the HTTP server when run directly.
if __name__ == '__main__':
    run_http_server()
|
app_opencv.py | """
通过实时截屏对比左侧8个指纹切片和右侧指纹图匹配度的方法
"""
from winsound import Beep
from PIL import ImageGrab
import keyboard
import threading
import time
import os
import configparser
import cv2
import numpy as np
class FingerprinterHack:
    """Match the 8 fingerprint slices on the left of the screen against
    the large fingerprint image on the right via real-time screenshots,
    then send the key presses that select the matching slices.
    """

    def __init__(self, mode="1"):
        # Shrink every slice rectangle by a few pixels per side so the
        # cell borders are not part of the matching template.
        effest = 3
        self.__xOy = (
            (481 + effest, 277 + effest, 587 - effest, 383 - effest),
            (625 + effest, 277 + effest, 731 - effest, 383 - effest),
            (481 + effest, 421 + effest, 587 - effest, 527 - effest),
            (625 + effest, 421 + effest, 731 - effest, 527 - effest),
            (481 + effest, 565 + effest, 587 - effest, 671 - effest),
            (625 + effest, 565 + effest, 731 - effest, 671 - effest),
            (481 + effest, 709 + effest, 587 - effest, 815 - effest),
            (625 + effest, 709 + effest, 731 - effest, 815 - effest),
        )
        self.__mode = mode  # "1" manual trigger / "2" continuous scan
        self.__onoff = False  # scanning enabled flag
        self.__confirmationImg = "confirmation.png"
        self.__key_press_delay = 0.05  # seconds
        self.__key_release_delay = 0.05  # seconds
        self.__save_screenshot = False
        self.__currentGroupIndex = 99  # cursor cell index; 99 = unknown
        self.__pos = list()  # matched slice indices of the last scan
        self.__threadPool = list()  # worker threads of the current scan

    @property
    def key_press_delay(self):
        return self.__key_press_delay

    @key_press_delay.setter
    def key_press_delay(self, value: float):
        # The config value is given in milliseconds.
        self.__key_press_delay = value / 1000

    @property
    def key_release_delay(self):
        return self.__key_release_delay

    @key_release_delay.setter
    def key_release_delay(self, value: float):
        # The config value is given in milliseconds.
        self.__key_release_delay = value / 1000

    @property
    def confirmationImg(self):
        return self.__confirmationImg

    @confirmationImg.setter
    def confirmationImg(self, imgPath: str):
        self.__confirmationImg = imgPath

    @property
    def save_screenshot(self):
        return self.__save_screenshot

    @save_screenshot.setter
    def save_screenshot(self, status: bool):
        self.__save_screenshot = status

    def onoff(self, status: bool):
        """Set the scanning on/off state.

        status bool: True to enable, False to disable.  A high beep
        confirms enabling, a low beep disabling.
        """
        self.__onoff = status
        if status is True:
            Beep(1600, 600)
        else:
            Beep(600, 600)

    def main_thread_cv2(self):
        """Main worker loop using the cv2 template-matching method."""
        while True:
            if self.__onoff is False:
                time.sleep(0.1)
                continue
            elif self.mathing_confirmation() < 0.1:
                # Confirmation banner detected: scan from the
                # top-left cell.
                self.__currentGroupIndex = 0
                im = ImageGrab.grab()
                if self.__save_screenshot is True:
                    im.save(f"screenshot{self.__currentGroupIndex}.png")
                self.cv2screen(cv2.cvtColor(np.asanyarray(im), cv2.COLOR_RGB2GRAY))
                for pos in self.__pos:
                    # Walk the 2-column grid: 's' steps down a row,
                    # 'd' steps across, then confirm and advance.
                    dis = pos - self.__currentGroupIndex
                    self.send_key("s", dis // 2)
                    self.send_key("d", dis % 2)
                    self.send_key("enter", 1)
                    self.send_key("tab", 1)
                    self.__currentGroupIndex = pos
                if self.__mode == "1":
                    self.onoff(False)
                else:
                    time.sleep(4.15)

    def send_key(self, key, count: int = 1):
        """Send a keyboard press/release `count` times.

        key str: key name understood by the keyboard module.
        count int: number of repetitions.
        """
        for _ in range(count):
            keyboard.press(key)
            time.sleep(self.__key_press_delay)
            keyboard.release(key)
            time.sleep(self.__key_release_delay)

    def mathing_fingerprint(self, im_src, fingerprint, index: int):
        """Match one slice cell against the fingerprint image.

        im_src: grayscale screenshot array.
        fingerprint: grayscale fingerprint-area array.
        index int: slice cell index (0-7); appended to the match list
        when the normalized squared difference is below 0.3.
        """
        template_src = im_src[self.__xOy[index][1] : self.__xOy[index][3], self.__xOy[index][0] : self.__xOy[index][2]]
        template = cv2.resize(template_src, [129, 129])
        minV = cv2.minMaxLoc(cv2.matchTemplate(fingerprint, template, cv2.TM_SQDIFF_NORMED))[0]
        if minV < 0.3:
            self.__pos.append(index)

    def mathing_confirmation(self):
        """Return the match score of the confirmation banner in the
        current screenshot (lower means a better match)."""
        templ = cv2.imread(self.__confirmationImg, 0)
        im = cv2.cvtColor(np.asanyarray(ImageGrab.grab()), cv2.COLOR_RGB2GRAY)[133:163, 470:515]
        res = cv2.matchTemplate(im, templ, cv2.TM_SQDIFF_NORMED)
        return cv2.minMaxLoc(res)[0]

    def cv2screen(self, screenshot):
        """Scan all 8 slice cells concurrently with cv2.

        screenshot numpy.ndarray: grayscale screen capture.
        """
        fingerprint = screenshot[130:690, 950:1340]
        self.__pos = list()
        # Bug fix: start every scan with a fresh pool.  Previously the
        # pool was never cleared, so it grew without bound across
        # scans and re-joined already-finished threads (memory leak).
        self.__threadPool = list()
        for i in range(8):
            th = threading.Thread(
                target=self.mathing_fingerprint,
                args=(
                    screenshot,
                    fingerprint,
                    i,
                ),
            )
            self.__threadPool.append(th)
            th.start()
        for th in self.__threadPool:
            th.join()
        self.__pos.sort()

    def status_thread(self):
        """Register the on/off hotkeys (F = start, Q = stop)."""
        keyboard.add_hotkey("f", self.onoff, args=(True,))  # enable
        keyboard.add_hotkey("q", self.onoff, args=(False,))  # disable

    def run(self):
        """Start the hotkey listener and the main scanning thread."""
        threading.Thread(target=self.status_thread).start()
        threading.Thread(target=self.main_thread_cv2).start()
def get_input():
    """Prompt the operator to pick mode 1 (manual) or 2 (auto) and
    return the stripped answer."""
    answer = input("\r选择模式:[1]手动触发执行扫描 [2]自动实时扫描 (输入1或2回车)\n输入:")
    return answer.strip()
def main():
    """Entry point: choose a mode, load config.ini and start the
    scanner threads."""
    while True:
        mode = get_input()
        if mode in ("1", "2"):
            os.system("cls")
            break
        else:
            print(f"输入有误,[{mode}]")
    fgh = FingerprinterHack(mode)
    # Detect the config file's encoding by actually decoding it.
    # Merely opening the file (as the original code did) never raises
    # a decoding error, so the gbk fallback was unreachable.
    try:
        with open("config.ini", "r", encoding="utf-8") as f:
            f.read()
        configEncoding = "utf-8"
    except UnicodeDecodeError:
        configEncoding = "gbk"
    try:
        config = configparser.ConfigParser()
        config.read("config.ini", encoding=configEncoding)
        fgh.key_press_delay = config.getint("setting", "key_press_delay")
        fgh.key_release_delay = config.getint("setting", "key_release_delay")
        fgh.confirmationImg = config.get("setting", "confirmation_image")
        fgh.save_screenshot = config.getboolean("setting", "save_screenshot")
    except Exception as e:
        # Missing or partial config: keep the defaults and report.
        print(e)
    fgh.run()
    print(
        """需要全屏1920*1080分辨率模式\n
\r按 F 执行扫描屏幕
\r按 Q 暂停扫描
"""
    )
    if mode == "1":
        print("当前模式:手动触发扫描")
    else:
        print("当前模式:自动实时扫描")
# Script entry point.
if __name__ == "__main__":
    main()
|
CameraDaemon.py | #!/usr/bin/python
# To start the Camera Daemon:
# python3 CameraDaemon.py start
#
# PiCamera documentation: http://picamera.readthedocs.io/en/release-1.10/api_camera.html
import logging
import time
import socket
import threading
import os
import datetime
import io
import atexit
import argparse
from picamera import PiCamera
import daemon
from daemon import pidfile
from DaemonBase import DaemonBase
class RpiCameraDaemon(DaemonBase):
    """Daemon that owns the Raspberry Pi camera and serves still
    images and video commands over a localhost TCP socket."""

    def __init__(self, port, *args, extra2=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.port = port          # TCP port to listen on (localhost only)
        self.recording = False    # True while a video capture is active
        self.camera = None        # PiCamera instance, created in run()

    def setup_logging(self):
        """Log INFO and above to this daemon's log file."""
        self.logger = logging.getLogger(self.name)
        self.logger.setLevel(logging.INFO)
        fh = logging.FileHandler(self.log_file)
        fh.setLevel(logging.INFO)
        formatstr = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        formatter = logging.Formatter(formatstr)
        fh.setFormatter(formatter)
        self.logger.addHandler(fh)

    def run(self):
        """Daemon main loop: open the camera, then accept clients,
        handling each connection on its own daemon thread."""
        self.setup_logging()
        try:
            self.camera = PiCamera()
        except Exception as ex:
            self.logger.error( f"Failed to start Raspberry Pi Camera:\n{ex}" )
            # Bug fix: without a camera every client request would
            # crash with an AttributeError; bail out instead of
            # accepting connections.
            return
        server = socket.socket()
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server.bind(("127.0.0.1",self.port))
        server.listen(5)
        self.logger.info("Starting up")
        self.logger.info(f"Listenting to port {self.port}")
        # Make sure the camera is released however the process exits.
        atexit.register(self.stopCamera)
        while True:
            conn, address = server.accept()
            thread = threading.Thread(target=self.handle_client, args=[conn])
            thread.daemon = True
            thread.start()

    def handle_client(self, sock):
        """Process line-oriented commands from one client; any
        unrecognized line triggers a JPEG snapshot reply."""
        for line in sock.makefile('r'):
            self.logger.info(line)
            if line.startswith( 'video_start', 0, len('video_start') ):
                self.startVideo( line[len('video_start '):] )
            elif line.startswith( 'video_stop', 0, len('video_stop') ):
                self.endVideo( line[len('video_stop '):] )
            elif line.startswith( 'set ', 0, len('set ') ):
                self.adjust( line[len('set '):] )
            else:
                jpeg_byte_string = self.getImage2()
                sock.sendall( jpeg_byte_string )
        sock.close()

    def stopCamera(self):
        """Stop any recording and release the camera (atexit hook)."""
        if self.camera:
            if self.camera.recording:
                self.camera.stop_recording()
            self.camera.close()
            self.logger.info("Shutting down")

    def getImage2(self):
        """Capture one frame and return it as a JPEG byte string."""
        my_stream = io.BytesIO()
        self.camera.capture(my_stream, 'jpeg')
        my_stream.seek(0)
        image_string = my_stream.read(-1)
        return image_string

    def adjust(self, args):
        """Apply a 'set <attr> <value>' command to the camera.
        Supported attrs: brightness, shutter_speed, iso, framerate."""
        try:
            attr, value = args.split(' ')
            if attr == 'brightness':
                value = int(value)
                if value > 0 and value <= 100:
                    print( "setting brightness: " + str(value) )
                    self.camera.brightness = value
            elif attr == 'shutter_speed':
                value = int(value)
                self.camera.shutter_speed = value
                print( "setting shutter speed: " + str(value) )
            elif attr == 'iso':
                value = int(value)
                self.camera.iso = value
                print( "setting iso: " + str(value) )
            elif attr == 'framerate':
                value = int(value)
                self.camera.framerate = value
                print( "setting framerate: " + str(value) )
        except Exception as ex:
            print( "Exception: {}".format( ex ) )

    def startVideo(self, filename):
        """Start recording h264 video to /var/ramdrive/<filename>."""
        if self.camera.recording:
            self.camera.stop_recording()
        # Sanitize the client-supplied name: strip any path component
        # and force the .h264 extension.
        filename = os.path.basename(filename)
        if not filename:
            filename = "malpi.h264"
        if not filename.endswith(".h264"):
            filename += ".h264"
        filename = os.path.join("/var/ramdrive", filename)
        #Other possible options
        #camera.annotate_text = "Hello world!"
        #camera.brightness = 50 #0-100
        #camera.contrast = 50 #0-100
        self.camera.resolution = (640, 480)
        self.camera.framerate = 15
        self.camera.start_recording(filename)
        self.recording = True

    def endVideo(self, filename):
        """Stop a running recording (filename is ignored)."""
        if self.camera.recording:
            self.camera.stop_recording()
        # Bug fix: keep the flag in sync -- startVideo sets it True
        # but it was never cleared here.
        self.recording = False
# Script entry point: build the daemon on port 12346 and let
# DaemonBase parse the command line (start/stop/...) via handleArgs.
if __name__ == "__main__":
    test = RpiCameraDaemon( 12346, "RpiCameraDaemon", verbose=False)
    parser = test.createArgsParser(description="RPi Camera Daemon")
    # add any extra command line arguments here
    # the results can be accessed via self.args after handleArgs is called
    test.handleArgs()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.