| source | python |
|---|---|
data_loader.py
|
import numpy as np
import time
import random
import torch.nn as nn
import torch
import math
import os
import sys
sys.path.insert(0, 'data_util')
import pickle
from torch.multiprocessing import Process, Queue
from torch.autograd import Variable
from formula import NodeType, Node
from utils import split_list
COMM_OP = {"/\\:c", "\\/:c", "+:c", "*:c", "==:c", "|-:c"} # Special treatment for |-
# Input data format
# [onehot, index1, index2, mat]
# onehot: Tensor (num_nodes x input_dim), one-hot encoding looked up from the dictionary
# index1: LongTensor (num_pairs), source node positions
# index2: LongTensor (num_pairs), target node positions
# mat: Tensor (num_nodes x num_pairs) used for index_add-style aggregation
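# Illustrative example (not part of the original pipeline): for a two-node
# sentence a -> b stored at positions 0 and 1, generate_one_sentence produces
#   index1 = [1, 0]  (the neighbour contributing to each edge entry)
#   index2 = [0, 1]  (the node receiving each edge entry)
#   mat    = the 2x2 identity, since each node has exactly one incident edge
# so gathering features along index1 and multiplying by mat averages neighbour
# messages per node.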
class DataLoader(object):
def __init__(self,
formula_path,
dict_path,
separate_conj_stmt=False,
binary=False,
part_no=-1,
part_total=0,
file_list=None,
deepmath=False,
norename=False,
filter_abelian=False,
compatible=False): # part_no, part_total: will not shuffle.
self.formula_path = formula_path
self.dict_path = dict_path
self.maxsize = 500 # maxsize for async queue
self.iter_ = 0 # epoch counter (name kept for legacy reasons)
self.total_in_epoch = -1 # (conj, stmt) pairs supplied in the current epoch
self.total_iter = -1 # total iterations
self.rename = not norename
if not os.path.exists(dict_path):
self.dict = self.build_dictionary()
else:
self.dict = torch.load(dict_path)
self.queue = Queue(self.maxsize)
self.reader = Process(target=self.read)
self.dict_size = len(self.dict.keys())
self.separate_conj_stmt = separate_conj_stmt
self.binary = binary
self.part_no = part_no
self.part_total = part_total
if file_list is None:
file_list = os.listdir(self.formula_path)
if part_total != 0:
file_list.sort()
file_list = split_list(file_list, part_total, part_no)
else:
if part_total != 0:
file_list = split_list(file_list, part_total, part_no)
self.file_list = file_list
self.deepmath = deepmath
self.filter_abelian = filter_abelian
self.compatible = compatible
def start_reader(self):
self.reader.daemon = True
self.reader.start()
def next_batch(self):
# [conjecture, statement, label, conj_binary, stmt_binary]
data = self.queue.get()
if data is None:
self.iter_ += 1
self.total_in_epoch = 0
else:
self.total_in_epoch += 1
self.total_iter += 1
return data
def build_dictionary(self):
def _deter_name(node):
node_name = node.name
if node.type == NodeType.VAR:
node_name = 'VAR'
elif node.type == NodeType.VARFUNC:
node_name = 'VARFUNC'
return node_name
files = os.listdir(self.formula_path)
tokens = set({})
dicts = {}
for i, a_file in enumerate(files):
with open(os.path.join(self.formula_path, a_file), 'rb') as f:
print('Loading file {}/{}'.format(i + 1, len(files)))
dataset = pickle.load(f)
for j, pair in enumerate(dataset):
print('Processing pair {}/{}'.format(j + 1, len(dataset)))
if self.rename:
tokens.update([_deter_name(x) for x in pair[1]])
tokens.update([_deter_name(x) for x in pair[2]])
else:
tokens.update([x.name for x in pair[1]])
tokens.update([x.name for x in pair[2]])
for i, x in enumerate(tokens):
dicts[x] = i
dicts['UNKNOWN'] = len(dicts)
if 'VAR' not in dicts:
dicts['VAR'] = len(dicts)
if 'VARFUNC' not in dicts:
dicts['VARFUNC'] = len(dicts)
torch.save(dicts, self.dict_path)
return dicts
def _decide_name(self, node):
node_name = node.name
if self.rename:
if node.type == NodeType.VAR:
node_name = 'VAR'
elif node.type == NodeType.VARFUNC:
node_name = 'VARFUNC'
if node_name not in self.dict:
node_name = 'UNKNOWN'
return node_name
def generate_one_sentence(self, sentence):
# Undirected graph
# index1 starts, index2 ends
index1 = []
index2 = []
onehot_collect = []
id2pos = {node.id: i for i, node in enumerate(sentence)}
for i, node in enumerate(sentence):
for x in node.incoming:
index1.append(id2pos[x.id])
index2.append(id2pos[node.id])
for x in node.outgoing:
index1.append(id2pos[x.id])
index2.append(id2pos[node.id])
node_name = self._decide_name(node)
onehot_collect.append(self.dict[node_name])
index1 = np.array(index1)
index2 = np.array(index2)
mat = np.zeros((len(sentence), len(index2)), dtype=np.float32)
for x in sentence:
mat[id2pos[x.id], index2 == id2pos[x.id]] = 1.0 / np.sum(
index2 == id2pos[x.id])
if self.compatible:
onehot = np.zeros((len(sentence), self.dict_size), dtype=np.float32)
onehot[range(len(sentence)), onehot_collect] = 1
index1 = torch.from_numpy(index1)
index2 = torch.from_numpy(index2)
if self.compatible:
onehot = torch.from_numpy(onehot)
else:
onehot = torch.LongTensor(onehot_collect)
mat = torch.from_numpy(mat)
return (onehot, index1, index2, mat)
def directed_generate_one_sentence(self, sentence):
# Distinguish in-edges and out-edges
# index1 starts, index2 ends
iindex1 = []
iindex2 = []
oindex1 = []
oindex2 = []
id2pos = {node.id: i for i, node in enumerate(sentence)}
onehot_collect = []
for node in sentence:
for x in node.incoming:
iindex1.append(id2pos[x.id])
iindex2.append(id2pos[node.id])
for x in node.outgoing:
oindex1.append(id2pos[node.id])
oindex2.append(id2pos[x.id])
node_name = self._decide_name(node)
onehot_collect.append(self.dict[node_name])
# Incoming
iindex1 = np.array(iindex1)
iindex2 = np.array(iindex2)
oindex1 = np.array(oindex1)
oindex2 = np.array(oindex2)
imat = np.zeros((len(sentence), len(iindex2)), dtype=np.float32)
omat = np.zeros((len(sentence), len(oindex1)), dtype=np.float32)
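# Each nonzero entry below is 1 / (in-degree + out-degree) of the receiving node,
# so incoming and outgoing messages are averaged over the node's total degree.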
for x in sentence:
imat[id2pos[x.id], iindex2 == id2pos[x.id]] = 1.0 / (
np.sum(oindex1 == id2pos[x.id]) + np.sum(iindex2 == id2pos[x.id]))
# Outgoing
for x in sentence:
omat[id2pos[x.id], oindex1 == id2pos[x.id]] = 1.0 / (
np.sum(oindex1 == id2pos[x.id]) + np.sum(iindex2 == id2pos[x.id]))
if self.compatible:
onehot = np.zeros((len(sentence), self.dict_size), dtype=np.float32)
onehot[range(len(sentence)), onehot_collect] = 1
iindex1 = torch.from_numpy(iindex1)
iindex2 = torch.from_numpy(iindex2)
oindex1 = torch.from_numpy(oindex1)
oindex2 = torch.from_numpy(oindex2)
if self.compatible:
onehot = torch.from_numpy(onehot)
else:
onehot = torch.LongTensor(onehot_collect)
imat = torch.from_numpy(imat)
omat = torch.from_numpy(omat)
return (onehot, iindex1, iindex2, imat, oindex1, oindex2, omat)
def generate_one_sentence_binary(self, sentence):
# directed graph
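# index collects flat (parent, child_i, child_j) triples over ordered sibling
# pairs and is reshaped to a 3 x num_triples tensor before being returned.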
index = []
id2pos = {node.id: i for i, node in enumerate(sentence)}
for node in sentence:
if len(node.outgoing) > 1 and not (self.filter_abelian and node.name in COMM_OP):
for i, n1 in enumerate(node.outgoing):
for n2 in node.outgoing[i + 1:]:
index.append(id2pos[node.id])
index.append(id2pos[n1.id])
index.append(id2pos[n2.id])
if len(node.outgoing) > 1 and (self.filter_abelian and node.name == '|-:c'):
for n1 in node.outgoing[1:]:
index.append(id2pos[node.id])
index.append(id2pos[node.outgoing[0].id])
index.append(id2pos[n1.id])
index = np.array(index)
mat = np.zeros((len(sentence), len(index)), dtype=np.float32)
for x in sentence:
f = index == id2pos[x.id]
if np.sum(f) > 0:
mat[id2pos[x.id], f] = 1.0 / np.sum(f)
#print (index.shape, mat.shape)
if index.shape[0] > 0:
return (torch.from_numpy(index.reshape(-1, 3).T), torch.from_numpy(mat))
else:
#print (index.shape, mat.shape)
return (torch.Tensor(1), torch.Tensor(1))
def read(self):
files = self.file_list
while True:
random.shuffle(files)
for a_file in files:
with open(os.path.join(self.formula_path, a_file), 'rb') as f:
content = pickle.load(f)
random.shuffle(content)
for x in content:
flag, conj, stmt = x
if self.separate_conj_stmt:
self.queue.put(
(self.directed_generate_one_sentence(conj),
self.directed_generate_one_sentence(stmt), flag))
elif self.binary:
self.queue.put(
(self.directed_generate_one_sentence(conj),
self.directed_generate_one_sentence(stmt), flag,
self.generate_one_sentence_binary(conj),
self.generate_one_sentence_binary(stmt)))
else:
self.queue.put((self.generate_one_sentence(conj),
self.generate_one_sentence(stmt), flag))
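# After one full pass over the shuffled files, push None as an epoch marker
# (next_batch() uses it to advance the epoch counter).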
self.queue.put(None)
def destruct(self):
self.reader.terminate()
|
start_subscribers.py
|
from project.model.subscriber.baby_monitor_subscriber import BabyMonitorSubscriber
from project.model.subscriber.smartphone_subscriber import SmartphoneSubscriber
from project.model.subscriber.smart_tv_subscriber import SmartTvSubscriber
from multiprocessing import Process
from time import sleep
subscriber_list = []
subscriber_list.append(BabyMonitorSubscriber())
subscriber_list.append(SmartphoneSubscriber('babymonitor'))
subscriber_list.append(SmartphoneSubscriber('smart_tv'))
subscriber_list.append(SmartTvSubscriber())
# execute
process_list = []
for sub in subscriber_list:
process = Process(target=sub.run)
process.start()
process_list.append(process)
sleep(1)
|
collect.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import sys
import threading
import time
from os_performance_tools.collectors import _delta
from os_performance_tools.collectors import mysql
from os_performance_tools.collectors import queues
from subunit import v2 as subunit_v2
mysql_data = {}
queues_data = {}
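# These module-level globals are filled in by the collector threads started in main().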
def get_mysql():
global mysql_data
mysql_data = mysql.collect()
def get_queues():
global queues_data
queues_data = queues.collect()
def main(argv=None, stdout=None):
if stdout is None:
stdout = sys.stdout
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser(argv[0])
parser.add_argument('--loglevel', default=logging.INFO)
parser.add_argument('--delta', help="Path to json file to read previous "
"values from")
parser.add_argument('--subunit', nargs='?', default=None,
const='counters.json',
help="Wrap the json output in a subunit stream. If an "
"argument is passed used that as the filename, "
"otherwise 'counters.json' will be used")
parser.add_argument('--output', help="Write JSON here. Does not disable "
"stdout.")
parser.add_argument('--meta-prefix', help="Set a prefix in __meta__")
args = parser.parse_args(argv[1:])
logging.basicConfig(
format='%(asctime)-15s %(levelname)s %(threadName)s: %(message)s')
log = logging.getLogger()
log.setLevel(args.loglevel)
getmysql = threading.Thread(name='mysql', target=get_mysql)
getqueues = threading.Thread(name='queues', target=get_queues)
getmysql.start()
getqueues.start()
log.debug('waiting for threads')
getmysql.join()
getqueues.join()
log.debug('threads all returned')
meta = {'unixtime': time.time()}
if args.meta_prefix:
meta['prefix'] = args.meta_prefix
collected = {
'__meta__': meta,
'mysql': mysql_data,
'queues': queues_data,
}
if args.delta:
collected = _delta.delta_with_file(args.delta, collected)
content = json.dumps(collected, indent=1, sort_keys=True).encode('utf-8')
if args.subunit is not None:
file_name = args.subunit or 'counters.json'
stream = subunit_v2.StreamResultToBytes(stdout)
stream.startTestRun()
stream.status(file_name=file_name, file_bytes=content,
mime_type='application/json')
stream.stopTestRun()
else:
stdout.write(content)
stdout.write(b"\n")
if args.output:
with open(args.output, 'wb') as output:
output.write(content)
output.write(b"\n")
if __name__ == '__main__':
main()
|
agent.py
|
from __future__ import annotations
import asyncio
import io
import json
import time
from threading import Thread
from typing import Dict, IO, Iterator, List, Optional, Tuple
import aiohttp
import requests
from botovod.agents import Agent, Attachment, Chat, Keyboard, Location, Message
from .types import (TelegramAttachment, TelegramCallback, TelegramChat, TelegramInlineKeyboard,
TelegramKeyboard, TelegramMessage, TelegramUser)
class Requester:
BASE_URL = "https://api.telegram.org/bot{token}/{method}"
FILE_URL = "https://api.telegram.org/file/bot{token}/{path}"
def __init__(self, logger):
self.logger = logger
def do_method(self, token: str, method: str, payload: Optional[dict] = None,
files: Optional[Dict[str, IO]] = None):
url = self.BASE_URL.format(token=token, method=method)
response = requests.post(url, data=payload, files=files)
data = response.json()
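# The Bot API wraps responses as {"ok": bool, "result": ...}; if "ok" is false
# this method falls through and returns None.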
if data["ok"]:
return data["result"]
async def a_do_method(self, token: str, method: str, payload: Optional[dict] = None,
files: Optional[Dict[str, IO]] = None):
url = self.BASE_URL.format(token=token, method=method)
if payload is not None and files is not None:
payload.update(files)
async with aiohttp.ClientSession() as session:
async with session.post(url, data=payload) as response:
data = await response.json()
if data["ok"]:
return data["result"]
def get_file(self, token: str, path: str):
url = self.FILE_URL.format(token=token, path=path)
response = requests.get(url)
response.raise_for_status()
return response.content
async def a_get_file(self, token: str, path: str):
url = self.FILE_URL.format(token=token, path=path)
async with aiohttp.ClientSession(raise_for_status=True) as session:
async with session.get(url) as response:
return await response.read()
class TelegramAgent(Agent):
WEBHOOK = "webhook"
POLLING = "polling"
def __init__(self, token: str, method: str = POLLING, delay: int = 5,
webhook_url: Optional[str] = None, certificate_path: Optional[str] = None):
super().__init__()
self.requester = Requester(logger=self.logger)
self.token = token
self.method = method
if method == self.POLLING:
self.delay = delay
self.thread = None
elif webhook_url is None:
raise ValueError("Need set webhook_url")
else:
self.webhook_url = webhook_url
self.certificate_path = certificate_path
self.last_update = 0
def start(self):
self.set_webhook()
self.running = True
if self.method == self.POLLING:
self.thread = Thread(target=self.polling)
self.thread.start()
self.logger.info("Started %s by %s.", self.name, self.method)
self.thread.join()
async def a_start(self):
await self.a_set_webhook()
self.running = True
if self.method == self.POLLING:
asyncio.get_running_loop().create_task(self.a_polling())
self.logger.info("Started %s by %s.", self.name, self.method)
def stop(self):
self.running = False
if self.method == self.POLLING:
self.thread.join()
self.thread = None
self.logger.info("Agent %s stopped.", self.name)
async def a_stop(self):
self.running = False
self.logger.info("Agent %s stopped.", self.name)
def parser(self, headers: Dict[str, str],
body: str) -> List[Tuple[Chat, Message]]:
update = json.loads(body)
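# body is a single Telegram Update object, e.g. {"update_id": ..., "message": {...}}
# or {"update_id": ..., "callback_query": {...}}.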
messages = []
if update["update_id"] <= self.last_update:
return messages
self.last_update = update["update_id"]
if "message" in update:
chat = TelegramChat.parse(agent=self, data=update["message"]["chat"])
message = TelegramMessage.parse(data=update["message"], agent=self)
messages.append((chat, message))
if "callback_query" in update:
data = update["callback_query"]
if "message" in data:
chat = TelegramChat.parse(agent=self, data=data["message"]["chat"])
else:
chat = TelegramUser.parse(agent=self, data=data["from"])
message = TelegramCallback.parse(data=update["callback_query"])
messages.append((chat, message))
return messages
async def a_parser(self, headers: Dict[str, str],
body: str) -> List[Tuple[Chat, Message]]:
update = json.loads(body)
messages = []
if update["update_id"] <= self.last_update:
return messages
self.last_update = update["update_id"]
if "message" in update:
chat = TelegramChat.parse(agent=self, data=update["message"]["chat"])
message = await TelegramMessage.a_parse(data=update["message"], agent=self)
messages.append((chat, message))
if "callback_query" in update:
data = update["callback_query"]
if "message" in data:
chat = TelegramChat.parse(agent=self, data=data["message"]["chat"])
else:
chat = TelegramUser.parse(agent=self, data=data["from"])
message = TelegramCallback.parse(data=update["callback_query"])
messages.append((chat, message))
return messages
def responser(self, headers: Dict[str, str], body: str) -> Tuple[int, Dict[str, str], str]:
return 200, {}, ""
async def a_responser(self, headers: Dict[str, str],
body: str) -> Tuple[int, Dict[str, str], str]:
return self.responser(headers=headers, body=body)
def polling(self):
while self.running:
try:
payload = {"offset": self.last_update + 1} if self.last_update > 0 else {}
updates = self.requester.do_method(token=self.token, method="getUpdates",
payload=payload)
for update in updates:
self.listen(headers={}, body=json.dumps(update), **self.botovod._items)
except Exception:
self.logger.exception("Got exception")
finally:
time.sleep(self.delay)
async def a_polling(self):
while self.running:
try:
payload = {"offset": self.last_update + 1} if self.last_update > 0 else {}
updates = await self.requester.a_do_method(token=self.token, method="getUpdates",
payload=payload)
for update in updates:
await self.a_listen(headers={}, body=json.dumps(update), **self.botovod._items)
except Exception:
self.logger.exception("Got exception")
finally:
await asyncio.sleep(self.delay)
def set_webhook(self):
payload = {}
files = {}
if self.method == self.WEBHOOK:
payload["url"] = self.webhook_url
if self.certificate_path is not None:
files["certificate"] = open(self.certificate_path)
try:
self.requester.do_method(token=self.token, method="setWebhook", payload=payload,
files=files)
finally:
if files:
files["certificate"].close()
self.logger.info("Set %s webhook.", self.name)
async def a_set_webhook(self):
payload = {}
files = {}
if self.method == self.WEBHOOK:
payload["url"] = self.webhook_url
if self.certificate_path is not None:
files["certificate"] = open(self.certificate_path)
try:
await self.requester.a_do_method(token=self.token, method="setWebhook",
payload=payload, files=files)
finally:
if files:
files["certificate"].close()
self.logger.info("Set %s webhook.", self.name)
def get_webhook_info(self):
return self.requester.do_method(token=self.token, method="getWebhookInfo")
async def a_get_webhook_info(self):
return await self.requester.a_do_method(token=self.token, method="getWebhookInfo")
def get_me(self):
data = self.requester.do_method(token=self.token, method="getMe")
if data is not None:
return TelegramUser.parse(agent=self, data=data)
async def a_get_me(self):
data = await self.requester.a_do_method(token=self.token, method="getMe")
if data is not None:
return TelegramUser.parse(agent=self, data=data)
def send_message(self, chat: Chat, text: Optional[str] = None,
images: Iterator[Attachment] = (), audios: Iterator[Attachment] = (),
documents: Iterator[Attachment] = (), videos: Iterator[Attachment] = (),
locations: Iterator[Location] = (), keyboard: Optional[Keyboard] = None,
html: bool = False, markdown: bool = False, web_preview: bool = True,
notification: bool = True, reply: Optional[Message] = None,
remove_keyboard: bool = False):
messages = []
if text is not None:
payload = {
"chat_id": chat.id,
"text": text,
"disable_web_page_preview": not web_preview,
"disable_notification": not notification,
}
if keyboard is not None:
if hasattr(keyboard, "render"):
payload["reply_markup"] = keyboard.render()
else:
payload["reply_markup"] = TelegramKeyboard.default_render(keyboard)
elif remove_keyboard:
payload["reply_markup"] = '{"remove_keyboard": true}'
if html:
payload["parse_mode"] = "HTML"
elif markdown:
payload["parse_mode"] = "Markdown"
if reply is not None:
payload["reply_to_message_id"] = reply.raw["id"]
data = self.requester.do_method(token=self.token, method="sendMessage", payload=payload)
if data is not None:
messages.append(TelegramMessage.parse(data))
for image in images:
message = self.send_photo(chat, image, keyboard=keyboard,
remove_keyboard=remove_keyboard)
if message is not None:
messages.append(message)
for audio in audios:
message = self.send_audio(chat, audio, keyboard=keyboard,
remove_keyboard=remove_keyboard)
if message is not None:
messages.append(message)
for document in documents:
message = self.send_document(chat, document, keyboard=keyboard,
remove_keyboard=remove_keyboard)
if message is not None:
messages.append(message)
for video in videos:
message = self.send_video(chat, video, keyboard=keyboard,
remove_keyboard=remove_keyboard)
if message is not None:
messages.append(message)
for location in locations:
message = self.send_location(chat, location, keyboard=keyboard,
remove_keyboard=remove_keyboard)
if message is not None:
messages.append(message)
return messages
async def a_send_message(self, chat: Chat, text: Optional[str] = None,
images: Iterator[Attachment] = (), audios: Iterator[Attachment] = (),
documents: Iterator[Attachment] = (),
videos: Iterator[Attachment] = (), locations: Iterator[Location] = (),
keyboard: Optional[Keyboard] = None, html: bool = False,
markdown: bool = False, web_preview: bool = True,
notification: bool = True, reply: Optional[Message] = None,
remove_keyboard: bool = False):
messages = []
if text is not None:
payload = {
"chat_id": chat.id,
"text": text,
"disable_web_page_preview": not web_preview,
"disable_notification": not notification,
}
if keyboard is not None:
if hasattr(keyboard, "render"):
payload["reply_markup"] = keyboard.render()
else:
payload["reply_markup"] = TelegramKeyboard.default_render(keyboard)
elif remove_keyboard:
payload["reply_markup"] = '{"remove_keyboard": true}'
if html:
payload["parse_mode"] = "HTML"
elif markdown:
payload["parse_mode"] = "Markdown"
if reply is not None:
payload["reply_to_message_id"] = reply.raw["id"]
data = await self.requester.a_do_method(token=self.token, method="sendMessage",
payload=payload)
if data is not None:
messages.append(TelegramMessage.parse(data))
for image in images:
message = await self.a_send_photo(chat, image, keyboard=keyboard,
remove_keyboard=remove_keyboard)
if message is not None:
messages.append(message)
for audio in audios:
message = await self.a_send_audio(chat, audio, keyboard=keyboard,
remove_keyboard=remove_keyboard)
if message is not None:
messages.append(message)
for document in documents:
message = await self.a_send_document(chat, document, keyboard=keyboard,
remove_keyboard=remove_keyboard)
if message is not None:
messages.append(message)
for video in videos:
message = await self.a_send_video(chat, video, keyboard=keyboard,
remove_keyboard=remove_keyboard)
if message is not None:
messages.append(message)
for location in locations:
message = await self.a_send_location(chat, location, keyboard=keyboard,
remove_keyboard=remove_keyboard)
if message is not None:
messages.append(message)
return messages
def send_attachment(self, type: str, chat: Chat, attachment: Attachment,
keyboard: Optional[Keyboard] = None, remove_keyboard: bool = False):
attachment_data = TelegramAttachment.render(attachment)
payload = {"chat_id": chat.id}
files = {}
if isinstance(attachment_data, io.IOBase):
files = {type: attachment_data}
else:
payload[type] = attachment_data
if keyboard is not None:
if hasattr(keyboard, "render"):
payload["reply_markup"] = keyboard.render()
else:
payload["reply_markup"] = TelegramKeyboard.default_render(keyboard)
elif remove_keyboard:
payload["reply_markup"] = '{"remove_keyboard": true}'
data = self.requester.do_method(token=self.token, method="send"+type.capitalize(),
payload=payload, files=files)
if data is not None:
return TelegramMessage.parse(data)
async def a_send_attachment(self, type: str, chat: Chat, attachment: Attachment,
keyboard: Optional[Keyboard] = None, remove_keyboard: bool = False):
attachment_data = await TelegramAttachment.a_render(attachment)
payload = {"chat_id": chat.id}
files = {}
if isinstance(attachment_data, io.IOBase):
files = {type: attachment_data}
else:
payload[type] = attachment_data
if keyboard is not None:
if hasattr(keyboard, "render"):
payload["reply_markup"] = keyboard.render()
else:
payload["reply_markup"] = TelegramKeyboard.default_render(keyboard)
elif remove_keyboard:
payload["reply_markup"] = '{"remove_keyboard": true}'
data = await self.requester.a_do_method(token=self.token, method="send"+type.capitalize(),
payload=payload, files=files)
if data is not None:
return await TelegramMessage.a_parse(data)
def send_photo(self, chat: Chat, image: Attachment, keyboard: Optional[Keyboard] = None,
remove_keyboard: bool = False):
return self.send_attachment(
type="photo",
chat=chat,
attachment=image,
keyboard=keyboard,
remove_keyboard=remove_keyboard,
)
async def a_send_photo(self, chat: Chat, image: Attachment, keyboard: Optional[Keyboard] = None,
remove_keyboard: bool = False):
return await self.a_send_attachment(
type="photo",
chat=chat,
attachment=image,
keyboard=keyboard,
remove_keyboard=remove_keyboard,
)
def send_audio(self, chat: Chat, audio: Attachment, keyboard: Optional[Keyboard] = None,
remove_keyboard: bool = False):
return self.send_attachment(
type="audio",
chat=chat,
attachment=audio,
keyboard=keyboard,
remove_keyboard=remove_keyboard,
)
async def a_send_audio(self, chat: Chat, audio: Attachment, keyboard: Optional[Keyboard] = None,
remove_keyboard: bool = False):
return await self.a_send_attachment(
type="audio",
chat=chat,
attachment=audio,
keyboard=keyboard,
remove_keyboard=remove_keyboard,
)
def send_document(self, chat: Chat, document: Attachment, keyboard: Optional[Keyboard] = None,
remove_keyboard: bool = False):
return self.send_attachment(
type="document",
chat=chat,
attachment=document,
keyboard=keyboard,
remove_keyboard=remove_keyboard,
)
async def a_send_document(self, chat: Chat, document: Attachment,
keyboard: Optional[Keyboard] = None, remove_keyboard: bool = False):
return await self.a_send_attachment(
type="document",
chat=chat,
attachment=document,
keyboard=keyboard,
remove_keyboard=remove_keyboard,
)
def send_video(self, chat: Chat, video: Attachment, keyboard: Optional[Keyboard] = None,
remove_keyboard: bool = False):
return self.send_attachment(
type="video",
chat=chat,
attachment=video,
keyboard=keyboard,
remove_keyboard=remove_keyboard,
)
async def a_send_video(self, chat: Chat, video: Attachment, keyboard: Optional[Keyboard] = None,
remove_keyboard: bool = False):
return await self.a_send_attachment(
type="video",
chat=chat,
attachment=video,
keyboard=keyboard,
remove_keyboard=remove_keyboard,
)
def send_location(self, chat: Chat, location: Location, keyboard: Optional[Keyboard] = None,
remove_keyboard: bool = False):
payload = {
"chat_id": chat.id,
"longitude": location.longitude,
"latitude": location.latitude,
}
if keyboard is not None:
if hasattr(keyboard, "render"):
payload["reply_markup"] = keyboard.render()
else:
payload["reply_markup"] = TelegramKeyboard.default_render(keyboard)
elif remove_keyboard:
payload["reply_markup"] = '{"remove_keyboard": true}'
data = self.requester.do_method(token=self.token, method="sendLocation", payload=payload)
if data is not None:
return TelegramMessage.parse(data)
async def a_send_location(self, chat: Chat, location: Location,
keyboard: Optional[Keyboard] = None, remove_keyboard: bool = False):
payload = {
"chat_id": chat.id,
"longitude": location.longitude,
"latitude": location.latitude,
}
if keyboard is not None:
if hasattr(keyboard, "render"):
payload["reply_markup"] = keyboard.render()
else:
payload["reply_markup"] = TelegramKeyboard.default_render(keyboard)
elif remove_keyboard:
payload["reply_markup"] = '{"remove_keyboard": true}'
data = await self.requester.a_do_method(token=self.token, method="sendLocation",
payload=payload)
if data is not None:
return await TelegramMessage.a_parse(data)
def get_file(self, file_id: int):
data = self.requester.do_method(
token=self.token,
method="getFile",
payload={"file_id": file_id},
)
return TelegramAttachment.parse(data, agent=self)
async def a_get_file(self, file_id: int):
data = await self.requester.a_do_method(
token=self.token,
method="getFile",
payload={"file_id": file_id},
)
return await TelegramAttachment.a_parse(data, agent=self)
def edit_message_text(self, chat: Chat, message: TelegramMessage, text: str,
keyboard: Optional[TelegramInlineKeyboard] = None, html: bool = False,
markdown: bool = False, web_preview: bool = True):
payload = {
"chat_id": chat.id,
"message_id": message.raw["id"],
"text": text,
"disable_web_page_preview": not web_preview,
}
if keyboard is not None:
payload["reply_markup"] = keyboard.render()
if html:
payload["parse_mode"] = "HTML"
elif markdown:
payload["parse_mode"] = "Markdown"
self.requester.do_method(token=self.token, method="editMessageText", payload=payload)
async def a_edit_message_text(self, chat: Chat, message: TelegramMessage, text: str,
keyboard: Optional[TelegramInlineKeyboard] = None,
html: bool = False, markdown: bool = False,
web_preview: bool = True):
payload = {
"chat_id": chat.id,
"message_id": message.raw["id"],
"text": text,
"disable_web_page_preview": not web_preview,
}
if keyboard is not None:
payload["reply_markup"] = keyboard.render()
if html:
payload["parse_mode"] = "HTML"
elif markdown:
payload["parse_mode"] = "Markdown"
await self.requester.a_do_method(
token=self.token,
method="editMessageText",
payload=payload,
)
def edit_message_caption(self, chat: Chat, message: TelegramMessage, caption: str,
keyboard: Optional[TelegramInlineKeyboard] = None, html: bool = False,
markdown: bool = False):
payload = {"chat_id": chat.id, "message_id": message.raw["id"], "caption": caption}
if keyboard is not None:
payload["reply_markup"] = keyboard.render()
if html:
payload["parse_mode"] = "HTML"
elif markdown:
payload["parse_mode"] = "Markdown"
self.requester.do_method(token=self.token, method="editMessageCaption", payload=payload)
async def a_edit_message_caption(self, chat: Chat, message: TelegramMessage, caption: str,
keyboard: Optional[TelegramInlineKeyboard] = None,
html: bool = False, markdown: bool = False):
payload = {"chat_id": chat.id, "message_id": message.raw["id"], "caption": caption}
if keyboard is not None:
payload["reply_markup"] = keyboard.render()
if html:
payload["parse_mode"] = "HTML"
elif markdown:
payload["parse_mode"] = "Markdown"
await self.requester.a_do_method(token=self.token, method="editMessageCaption",
payload=payload)
def edit_message_media(self, chat: Chat, message: TelegramMessage, media: Attachment, type: str,
thumb: Optional[Attachment] = None, caption: Optional[str] = None,
markdown: bool = False, html: bool = False,
keyboard: Optional[TelegramInlineKeyboard] = None, **raw):
attachment_data = TelegramAttachment.render(media)
thumb_data = TelegramAttachment.render(thumb) if thumb else None
payload = {"chat_id": chat.id, "message_id": message.id}
media_payload = {"type": type}
files = {}
if isinstance(attachment_data, io.IOBase):
files["media"] = attachment_data
else:
media_payload["media"] = attachment_data
if isinstance(thumb_data, io.IOBase):
files["thumb"] = thumb_data
elif thumb_data:
media_payload["thumb"] = thumb_data
if caption is not None:
media_payload["caption"] = caption
if markdown:
media_payload["parse_mode"] = "Markdown"
elif html:
media_payload["parse_mode"] = "HTML"
media_payload.update(raw)
if keyboard is not None:
payload["reply_markup"] = keyboard.render()
payload["media"] = json.dumps(media_payload)
self.requester.do_method(token=self.token, method="editMessageMedia", payload=payload,
files=files)
async def a_edit_message_media(self, chat: Chat, message: TelegramMessage, media: Attachment,
type: str, thumb: Optional[Attachment] = None,
caption: Optional[str] = None, markdown: bool = False,
html: bool = False,
keyboard: Optional[TelegramInlineKeyboard] = None, **raw):
attachment_data = await TelegramAttachment.a_render(media)
thumb_data = await TelegramAttachment.a_render(thumb) if thumb else None
payload = {"chat_id": chat.id, "message_id": message.id}
media_payload = {"type": type, "media": attachment_data}
files = {}
if isinstance(attachment_data, io.IOBase):
files["media"] = attachment_data
else:
media_payload["media"] = attachment_data
if isinstance(thumb_data, io.IOBase):
files["thumb"] = thumb_data
elif thumb_data:
media_payload["thumb"] = thumb_data
if caption:
media_payload["caption"] = caption
if markdown:
media_payload["parse_mode"] = "Markdown"
elif html:
media_payload["parse_mode"] = "HTML"
media_payload.update(raw)
if keyboard:
payload["reply_markup"] = keyboard.render()
payload["media"] = json.dumps(media_payload)
await self.requester.a_do_method(token=self.token, method="edtMessageMedia",
payload=payload, files=files)
def edit_message_image(self, chat: Chat, message: TelegramMessage, image: Attachment,
caption: Optional[str] = None, markdown: bool = False,
html: bool = False, keyboard: Optional[TelegramInlineKeyboard] = None):
return self.edit_message_media(chat=chat, message=message, media=image, type="photo",
caption=caption, markdown=markdown, html=html,
keyboard=keyboard)
async def a_edit_message_image(self, chat: Chat, message: TelegramMessage, image: Attachment,
caption: Optional[str] = None, markdown: bool = False,
html: bool = False,
keyboard: Optional[TelegramInlineKeyboard] = None):
return await self.a_edit_message_media(chat=chat, message=message, media=image,
type="photo", caption=caption, markdown=markdown,
html=html, keyboard=keyboard)
def edit_message_video(self, chat: Chat, message: TelegramMessage, video: Attachment,
thumb: Optional[Attachment] = None, caption: Optional[str] = None,
markdown: bool = False, html: bool = False, width: Optional[int] = None,
height: Optional[int] = None, duration: Optional[int] = None,
supports_streaming: Optional[bool] = None,
keyboard: Optional[TelegramInlineKeyboard] = None):
data = {}
if width:
data["width"] = width
if height:
data["height"] = height
if duration:
data["duration"] = duration
if supports_streaming:
data["supports_streaming"] = supports_streaming
return self.edit_message_media(chat=chat, message=message, media=video, type="video",
thumb=thumb, caption=caption, markdown=markdown, html=html,
keyboard=keyboard, **data)
async def a_edit_message_video(self, chat: Chat, message: TelegramMessage, video: Attachment,
thumb: Optional[Attachment] = None,
caption: Optional[str] = None, markdown: bool = False,
html: bool = False, width: Optional[int] = None,
height: Optional[int] = None, duration: Optional[int] = None,
supports_streaming: Optional[bool] = None,
keyboard: Optional[TelegramInlineKeyboard] = None):
data = {}
if width:
data["width"] = width
if height:
data["height"] = height
if duration:
data["duration"] = duration
if supports_streaming:
data["supports_streaming"] = supports_streaming
return await self.a_edit_message_media(chat=chat, message=message, media=video,
type="video", thumb=thumb, caption=caption,
markdown=markdown, html=html, keyboard=keyboard,
**data)
def edit_message_animation(self, chat: Chat, message: TelegramMessage, animation: Attachment,
thumb: Optional[Attachment] = None, caption: Optional[str] = None,
markdown: bool = False, html: bool = False,
width: Optional[int] = None, height: Optional[int] = None,
duration: Optional[int] = None,
keyboard: Optional[TelegramInlineKeyboard] = None):
data = {}
if width is not None:
data["width"] = width
if height is not None:
data["height"] = height
if duration is not None:
data["duration"] = duration
return self.edit_message_media(chat=chat, message=message, media=animation,
type="animation", thumb=thumb, caption=caption,
markdown=markdown, html=html, keyboard=keyboard, **data)
async def a_edit_message_animation(self, chat: Chat, message: TelegramMessage,
animation: Attachment, thumb: Optional[Attachment] = None,
caption: Optional[str] = None, markdown: bool = False,
html: bool = False, width: Optional[int] = None,
height: Optional[int] = None, duration: Optional[int] = None,
keyboard: Optional[TelegramInlineKeyboard] = None):
data = {}
if width is not None:
data["width"] = width
if height is not None:
data["height"] = height
if duration is not None:
data["duration"] = duration
return await self.a_edit_message_media(chat=chat, message=message, media=animation,
type="animation", thumb=thumb, caption=caption,
markdown=markdown, html=html, keyboard=keyboard,
**data)
def edit_message_audio(self, chat: Chat, message: TelegramMessage, audio: Attachment,
thumb: Optional[Attachment] = None, caption: Optional[str] = None,
markdown: bool = False, html: bool = False,
duration: Optional[int] = None, performer: Optional[str] = None,
title: Optional[str] = None,
keyboard: Optional[TelegramInlineKeyboard] = None):
data = {}
if duration is not None:
data["duration"] = duration
if performer is not None:
data["performer"] = performer
if title is not None:
data["title"] = title
return self.edit_message_media(chat=chat, message=message, media=audio, type="audio",
thumb=thumb, caption=caption, markdown=markdown, html=html,
keyboard=keyboard, **data)
async def a_edit_message_audio(self, chat: Chat, message: TelegramMessage,
audio: Attachment, thumb: Optional[Attachment] = None,
caption: Optional[str] = None, markdown: bool = False,
html: bool = False, duration: Optional[int] = None,
performer: Optional[str] = None, title: Optional[str] = None,
keyboard: Optional[TelegramInlineKeyboard] = None):
data = {}
if duration is not None:
data["duration"] = duration
if performer is not None:
data["performer"] = performer
if title is not None:
data["title"] = title
return await self.a_edit_message_media(chat=chat, message=message, media=audio,
type="audio", thumb=thumb, caption=caption,
markdown=markdown, html=html, keyboard=keyboard,
**data)
def edit_message_document(self, chat: Chat, message: TelegramMessage, document: Attachment,
thumb: Optional[Attachment] = None, caption: Optional[str] = None,
markdown: bool = False, html: bool = False,
keyboard: Optional[TelegramInlineKeyboard] = None):
return self.edit_message_media(chat=chat, message=message, media=document, type="document",
thumb=thumb, caption=caption, markdown=markdown, html=html,
keyboard=keyboard)
async def a_edit_message_document(self, chat: Chat, message: TelegramMessage,
document: Attachment, thumb: Optional[Attachment] = None,
caption: Optional[str] = None, markdown: bool = False,
html: bool = False,
keyboard: Optional[TelegramInlineKeyboard] = None):
return await self.a_edit_message_media(chat=chat, message=message, media=document,
type="document", thumb=thumb, caption=caption,
markdown=markdown, html=html, keyboard=keyboard)
def edit_message_keyboard(self, chat: Chat, message: TelegramMessage,
keyboard: TelegramInlineKeyboard):
payload = {
"chat_id": chat.id,
"message_id": message.raw["id"],
"reply_markup": keyboard.render(),
}
self.requester.do_method(token=self.token, method="editMessageReplyMarkup", payload=payload)
async def a_edit_message_keyboard(self, chat: Chat, message: TelegramMessage,
keyboard: TelegramInlineKeyboard):
payload = {
"chat_id": chat.id,
"message_id": message.raw["id"],
"reply_markup": keyboard.render(),
}
await self.requester.a_do_method(token=self.token, method="editMessageReplyMarkup",
payload=payload)
def delete_message(self, chat: Chat, message: TelegramMessage):
payload = {"chat_id": chat.id, "message_id": message.raw["id"]}
self.requester.do_method(token=self.token, method="deleteMessage", payload=payload)
async def a_delete_message(self, chat: Chat, message: TelegramMessage):
payload = {"chat_id": chat.id, "message_id": message.raw["id"]}
await self.requester.a_do_method(token=self.token, method="deleteMessage", payload=payload)
def send_chat_action(self, chat: Chat, action: str):
payload = {"chat_id": chat.id, "action": action}
self.requester.do_method(token=self.token, method="sendChatAction", payload=payload)
async def a_send_chat_action(self, chat: Chat, action: str):
payload = {"chat_id": chat.id, "action": action}
await self.requester.a_do_method(token=self.token, method="sendChatAction", payload=payload)
def send_sticker(self, chat: Chat, sticker: Attachment, keyboard: Optional[Keyboard] = None,
remove_keyboard: bool = False):
return self.send_attachment(
type="sticker",
chat=chat,
attachment=sticker,
keyboard=keyboard,
remove_keyboard=remove_keyboard,
)
async def a_send_sticker(self, chat: Chat, sticker: Attachment,
keyboard: Optional[Keyboard] = None, remove_keyboard: bool = False):
return await self.a_send_attachment(
type="sticker",
chat=chat,
attachment=sticker,
keyboard=keyboard,
remove_keyboard=remove_keyboard,
)
"""
def get_me(self):
pass
def forward_message(self, to_chat, from_chat, message):
pass
def send_voice(self, chat, audio, **args):
url = self.url % (self.token, "sendVoice")
data = {"chat_id": chat.id}
data.extend(**args)
if hasattr(audio, "id") and not audio.id is None:
data["voice"] = audio.id
response = requests.post(url, data=data)
elif not audio.url is None:
data["voice"] = audio.url
response = requests.post(url, data=data)
elif not audio.file_path is None:
with open(audio.file_path) as f:
response = requests.post(url, data=data, files={"voice": f})
def send_venue(self, chat, location, title, address, **args):
url = self.url % (self.token, "sendLocation")
data = {"chat_id": chat.id, "longitude": location.longitude, "latitude": location.latitude,
"title": title, "address": address}
data.extend(**args)
response = requests.post(url, data=data)
def send_contact(self, chat, attachment):
pass
def send_chat_action(self, chat, action):
pass
def get_user_profile_photo(self, user_id):
pass
def kick_chat_member(self, chat, user_id):
pass
def unban_chat_member(self, chat, user_id):
pass
def answer_callback_query(self, callback_query):
pass
def edit_message_text(self, message, text):
pass
def edit_message_caption(self, message, text):
pass
def edit_message_reply_markup(self):
pass
"""
|
accumulators.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> a = sc.accumulator(1)
>>> a.value
1
>>> a.value = 2
>>> a.value
2
>>> a += 5
>>> a.value
7
>>> sc.accumulator(1.0).value
1.0
>>> sc.accumulator(1j).value
1j
>>> rdd = sc.parallelize([1,2,3])
>>> def f(x):
... global a
... a += x
>>> rdd.foreach(f)
>>> a.value
13
>>> b = sc.accumulator(0)
>>> def g(x):
... b.add(x)
>>> rdd.foreach(g)
>>> b.value
6
>>> from pyspark.accumulators import AccumulatorParam
>>> class VectorAccumulatorParam(AccumulatorParam):
... def zero(self, value):
... return [0.0] * len(value)
... def addInPlace(self, val1, val2):
... for i in range(len(val1)):
... val1[i] += val2[i]
... return val1
>>> va = sc.accumulator([1.0, 2.0, 3.0], VectorAccumulatorParam())
>>> va.value
[1.0, 2.0, 3.0]
>>> def g(x):
... global va
... va += [x] * 3
>>> rdd.foreach(g)
>>> va.value
[7.0, 8.0, 9.0]
>>> rdd.map(lambda x: a.value).collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError:...
>>> def h(x):
... global a
... a.value = 7
>>> rdd.foreach(h) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError:...
>>> sc.accumulator([1.0, 2.0, 3.0]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError:...
"""
import sys
import select
import struct
if sys.version < '3':
import SocketServer
else:
import socketserver as SocketServer
import threading
from pyspark.serializers import read_int, PickleSerializer
__all__ = ['Accumulator', 'AccumulatorParam']
pickleSer = PickleSerializer()
# Holds accumulators registered on the current machine, keyed by ID. This is then used to send
# the local accumulator updates back to the driver program at the end of a task.
_accumulatorRegistry = {}
def _deserialize_accumulator(aid, zero_value, accum_param):
from pyspark.accumulators import _accumulatorRegistry
# If this certain accumulator was deserialized, don't overwrite it.
if aid in _accumulatorRegistry:
return _accumulatorRegistry[aid]
else:
accum = Accumulator(aid, zero_value, accum_param)
accum._deserialized = True
_accumulatorRegistry[aid] = accum
return accum
class Accumulator(object):
"""
A shared variable that can be accumulated, i.e., has a commutative and associative "add"
operation. Worker tasks on a Spark cluster can add values to an Accumulator with the C{+=}
operator, but only the driver program is allowed to access its value, using C{value}.
Updates from the workers get propagated automatically to the driver program.
While C{SparkContext} supports accumulators for primitive data types like C{int} and
C{float}, users can also define accumulators for custom types by providing a custom
L{AccumulatorParam} object. Refer to the doctest of this module for an example.
"""
def __init__(self, aid, value, accum_param):
"""Create a new Accumulator with a given initial value and AccumulatorParam object"""
from pyspark.accumulators import _accumulatorRegistry
self.aid = aid
self.accum_param = accum_param
self._value = value
self._deserialized = False
_accumulatorRegistry[aid] = self
def __reduce__(self):
"""Custom serialization; saves the zero value from our AccumulatorParam"""
param = self.accum_param
return (_deserialize_accumulator, (self.aid, param.zero(self._value), param))
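# Shipping the zero value (rather than the current value) means each worker task
# starts from zero and only its local delta is merged back on the driver.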
@property
def value(self):
"""Get the accumulator's value; only usable in driver program"""
if self._deserialized:
raise Exception("Accumulator.value cannot be accessed inside tasks")
return self._value
@value.setter
def value(self, value):
"""Sets the accumulator's value; only usable in driver program"""
if self._deserialized:
raise Exception("Accumulator.value cannot be accessed inside tasks")
self._value = value
def add(self, term):
"""Adds a term to this accumulator's value"""
self._value = self.accum_param.addInPlace(self._value, term)
def __iadd__(self, term):
"""The += operator; adds a term to this accumulator's value"""
self.add(term)
return self
def __str__(self):
return str(self._value)
def __repr__(self):
return "Accumulator<id=%i, value=%s>" % (self.aid, self._value)
class AccumulatorParam(object):
"""
Helper object that defines how to accumulate values of a given type.
"""
def zero(self, value):
"""
Provide a "zero value" for the type, compatible in dimensions with the
provided C{value} (e.g., a zero vector)
"""
raise NotImplementedError
def addInPlace(self, value1, value2):
"""
Add two values of the accumulator's data type, returning a new value;
for efficiency, can also update C{value1} in place and return it.
"""
raise NotImplementedError
class AddingAccumulatorParam(AccumulatorParam):
"""
An AccumulatorParam that uses the + operators to add values. Designed for simple types
such as integers, floats, and lists. Requires the zero value for the underlying type
as a parameter.
"""
def __init__(self, zero_value):
self.zero_value = zero_value
def zero(self, value):
return self.zero_value
def addInPlace(self, value1, value2):
value1 += value2
return value1
# Singleton accumulator params for some standard types
INT_ACCUMULATOR_PARAM = AddingAccumulatorParam(0)
FLOAT_ACCUMULATOR_PARAM = AddingAccumulatorParam(0.0)
COMPLEX_ACCUMULATOR_PARAM = AddingAccumulatorParam(0.0j)
class _UpdateRequestHandler(SocketServer.StreamRequestHandler):
"""
This handler will keep polling updates from the same socket until the
server is shutdown.
"""
def handle(self):
from pyspark.accumulators import _accumulatorRegistry
auth_token = self.server.auth_token
def poll(func):
while not self.server.server_shutdown:
# Poll every 1 second for new data -- don't block in case of shutdown.
r, _, _ = select.select([self.rfile], [], [], 1)
if self.rfile in r:
if func():
break
def accum_updates():
num_updates = read_int(self.rfile)
for _ in range(num_updates):
(aid, update) = pickleSer._read_with_length(self.rfile)
_accumulatorRegistry[aid] += update
# Write a byte in acknowledgement
self.wfile.write(struct.pack("!b", 1))
return False
def authenticate_and_accum_updates():
received_token = self.rfile.read(len(auth_token))
if isinstance(received_token, bytes):
received_token = received_token.decode("utf-8")
if (received_token == auth_token):
accum_updates()
# we've authenticated, we can break out of the first loop now
return True
else:
raise Exception(
"The value of the provided token to the AccumulatorServer is not correct.")
if auth_token is not None:
# first we keep polling till we've received the authentication token
poll(authenticate_and_accum_updates)
# now we've authenticated if needed, don't need to check for the token anymore
poll(accum_updates)
class AccumulatorServer(SocketServer.TCPServer):
"""
A simple TCP server that intercepts shutdown() in order to interrupt
our continuous polling on the handler.
"""
server_shutdown = False
def __init__(self, server_address, RequestHandlerClass, auth_token):
SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass)
self.auth_token = auth_token
def shutdown(self):
self.server_shutdown = True
SocketServer.TCPServer.shutdown(self)
self.server_close()
def _start_update_server(auth_token):
"""Start a TCP server to receive accumulator updates in a daemon thread, and returns it"""
server = AccumulatorServer(("localhost", 0), _UpdateRequestHandler, auth_token)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
return server
if __name__ == "__main__":
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
sys.exit(-1)
|
simpleimp.py
|
#*****************************************************************************#
#* Copyright (c) 2004-2008, SRI International. *#
#* All rights reserved. *#
#* *#
#* Redistribution and use in source and binary forms, with or without *#
#* modification, are permitted provided that the following conditions are *#
#* met: *#
#* * Redistributions of source code must retain the above copyright *#
#* notice, this list of conditions and the following disclaimer. *#
#* * Redistributions in binary form must reproduce the above copyright *#
#* notice, this list of conditions and the following disclaimer in the *#
#* documentation and/or other materials provided with the distribution. *#
#* * Neither the name of SRI International nor the names of its *#
#* contributors may be used to endorse or promote products derived from *#
#* this software without specific prior written permission. *#
#* *#
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS *#
#* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT *#
#* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR *#
#* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT *#
#* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *#
#* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *#
#* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *#
#* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY *#
#* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT *#
#* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE *#
#* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *#
#*****************************************************************************#
#* "$Revision:: 201 $" *#
#* "$HeadURL:: https://svn.ai.sri.com/projects/spark/trunk/spark/src/spar#$" *#
#*****************************************************************************#
from __future__ import generators
import inspect
import operator
from spark.internal.version import *
from spark.internal.exception import LowError, Failure, ExceptionFailure, LocatedError, MessageFailure, CapturedError
from spark.internal.common import NEWPM, DEBUG
from spark.internal.parse.basicvalues import ConstructibleValue, installConstructor
import threading
from spark.pylang.implementation import Imp, FunImpInt, PredImpInt, PersistablePredImpInt, ActImpInt
from spark.internal.repr.taskexpr import SUCCESS, TFrame
from spark.internal.common import NO_SOLUTIONS, ONE_SOLUTION, SOLVED, NOT_SOLVED
from spark.internal.repr.procedure import ProcedureInt
from spark.internal.parse.usagefuns import *
debug = DEBUG(__name__)#.on()
#from spark.mode import RequiredMode, ALL_EVALUABLE_CALL_MODE
#from spark.set import IBITS, bitmap_indices
def make_callable(x):
if callable(x):
return x
elif inspect.isclass(x): # Java classes mistakenly fail callable
return x
else:
raise LowError("Cannot coerce %r to something that is callable", x)
class _Basic(ConstructibleValue):
__slots__ = (
"_function",
"_modeString",
"_sym",
"_both_indices",
"_input_indices",
"_rest_mode",
"_nrequired",
"_output_indices",
"_output_range",
"_input_agent_p",
#"_optargs_mode",
)
def __init__(self, mode, fun):
ConstructibleValue.__init__(self)
# Mode string is of the form [A]?[+?-]+[*]?
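# Illustrative examples: "++" declares two inputs, "+-" one input and one output,
# "?" a parameter that acts as both input and output, a leading "A" prepends the
# agent to the argument list, and a trailing "+*"/"-*"/"?*" gives the mode of any
# repeated trailing arguments.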
#print "New Basic:", mode, fun
self._modeString = mode
if mode.startswith("A"):
# fun takes the agent as an argument
self._input_agent_p = True
io = mode[1:]
else:
self._input_agent_p = False
io = mode
self._input_indices = []
self._both_indices = []
self._output_indices = []
if io.endswith("*"):
if not (io[-2:] in ("+*","-*","?*")): raise AssertionError, \
"Repeated mode must be one of +, ?, or -: %s" % mode
self._rest_mode = io[-2]
io = io[:-2]
elif len(io) > 1 and io[-2] == "*": # allow *+ stlye rest mode declaration
if not (io[-2:] in ("*+", "*-", "*?")): raise AssertionError, \
"Repeated mode must be one of +, ?, or -: %s" % mode
self._rest_mode = io[-1]
io = io[:-2]
else:
self._rest_mode = None
self._nrequired = len(io)
for i in range(len(io)):
char = io[i]
if char == "+":
# This is an input parameter
self._input_indices.append(i)
elif char == "-":
# This is an output parameter
self._output_indices.append(i)
elif char == "?":
# Acts as both an input and an output
self._input_indices.append(i)
self._output_indices.append(i)
self._both_indices.append(i)
else:
raise LowError("The mode string %r is not valid", mode)
self._function = make_callable(fun)
self._output_range = range(len(self._output_indices))
self._sym = None
def valid_mode_p(self, agent, bindings, zexpr):
# # DNM - THIS IS A DREADFUL HACK NEEDED UNTIL WE DELAY DECL EVALUATION
# from spark.internal.repr.varbindings import NULL_BINDINGS
# if bindings == NULL_BINDINGS:
# return True
for i in self._input_indices:
if not (i in self._both_indices or termEvalP(agent, bindings, zexpr[i])):
return False
if self._rest_mode == "-":
i = self._nrequired
limit = len(zexpr)
while i < limit:
if not termEvalP(agent, bindings, zexpr[i]):
return False
i = i + 1
return True
def generate_args(self, agent, bindings, zexpr):
"Returns a list of the input argument values"
# should check arity as well
if not self.valid_mode_p(agent, bindings, zexpr):
raise LowError("Required argument is not bound")
args = [termEvalOpt(agent, bindings, zexpr[i]) \
for i in self._input_indices]
if self._rest_mode in ("+", "?"): # accumulate rest arguments
i = self._nrequired
limit = len(zexpr)
while i < limit:
args.append(termEvalOpt(agent, bindings, zexpr[i]))
i = i + 1
if self._input_agent_p:
args.insert(0, agent) # prepend agent
return args
def bind_result(self, agent, bindings, zexpr, result):
if result is None: # None does not matching anything
return False
output_indices = self._output_indices
has_rest_output = self._rest_mode in ("?", "-")
l = len(output_indices)
if l > 1 or has_rest_output:
# Treat result as a tuple of values
if not (operator.isSequenceType(result)): raise AssertionError, \
"Python function should return a sequence but returned %r instead"%(result,)
minOut = len(self._output_range)
actualOut = len(result)
if has_rest_output:
if actualOut < minOut:
raise LocatedError(zexpr, "Expecting python function to return at least %s elements, not %r"%(minOut, result))
else:
if actualOut != minOut:
raise LocatedError(zexpr, "Expecting python function to return exactly %s elements, not %r"%(minOut, result))
for i in self._output_range:
if not termMatch(agent, bindings, zexpr[output_indices[i]], result[i]):
return False
if has_rest_output:
limit = len(zexpr)
i = self._nrequired
offset = i - len(self._output_indices)
while i < limit:
if termEvalP(agent, bindings, zexpr[i]):
pass # No need to match the argument
elif not termMatch(agent, bindings, zexpr[i], result[i-offset]):
return False
i = i+1
return True
elif l == 1: # Exactly one output parameter
# Treat result as a value
return termMatch(agent, bindings, zexpr[output_indices[0]], result)
elif result: # l == 0, no output parameters
return True
else:
return False
def setDecl(self, decl):
symbol = decl and decl.asSymbol()
if not (self._sym is None or self._sym == symbol): raise AssertionError, \
"Attempting to rename a named object %r"%self
self._sym = symbol
def __call__(self, decl):
self.setDecl(decl)
return self
def constructor_args(self):
return [self._modeString, self._function]
cvcategory = "B"
def cvname(self):
if self._sym:
return self._sym.id
else:
return ""
constructor_mode = "VI"
def function_name(self):
f = self._function
from spark.lang.builtin import PythonProxyRaw, Method
if isinstance(f, PythonProxyRaw):
return "function "+f._PythonProxyModName + "." + f._PythonProxyVarName
elif isinstance(f, Method):
return "method ."+f._methodname
else:
return "function ?"
################################################################
# Functions
class BasicFun(_Basic, FunImpInt):
__slots__ = ()
def __init__(self, mode, fun):
_Basic.__init__(self, mode, fun)
if self._output_indices: #len(self._output_indices) > 0:
raise LowError("Mode string %r is not valid for a function", mode)
def call(self, agent, bindings, zexpr):
args = self.generate_args(agent, bindings, zexpr)
return self._function(*args)
def match_inverse(self, agent, bindings, zexpr, obj):
raise LowError("Trying to match non-invertible function")
installConstructor(BasicFun)
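# Illustrative sketch (not part of SPARK, never referenced above): the kind of
# plain Python callable a BasicFun can wrap. With a mode such as "+*" there are
# no fixed parameters and a "+" rest mode, so every call argument is evaluated
# and passed straight through; the name below is hypothetical.
def _sum_args_sketch(*values):
    # All arguments arrive as already-evaluated inputs.
    return sum(values)
# e.g. BasicFun("+*", _sum_args_sketch) would expose it as a SPARK function.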
class ReversibleFun(BasicFun):
__slots__ = (
"_inverse",
)
def __init__(self, mode, fun, inverse):
BasicFun.__init__(self, mode, fun)
self._inverse = make_callable(inverse)
def match_inverse(self, agent, bindings, zexpr, obj):
inverse = self._inverse
# If the function requires the agent, so will the inverse
if self._input_agent_p:
result = self._inverse(agent, obj)
else:
result = self._inverse(obj)
# A None result indicates no match
        if result is None:
return False
        # Identify the number of input parameters to the function
num_fixed_input_args = len(self._input_indices)
if self._rest_mode is not None \
or num_fixed_input_args > 1: # may be multiple
# Result should be a tuple
length = len(result)
if length != len(zexpr):
return False
i = 0
while i < length:
if not termMatch(agent, bindings, zexpr[i], result[i]):
return False
i += 1
return True
elif num_fixed_input_args == 1: # exactly one
# Result should be a value
return termMatch(agent, bindings, zexpr[0], result)
else: # exactly 0
# result should be a Boolean
return result
def constructor_args(self):
return BasicFun.constructor_args(self) + [self._inverse]
constructor_mode = "VII"
installConstructor(ReversibleFun)
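# Illustrative sketch (not part of SPARK, never referenced above): a
# ReversibleFun pairs a function with an inverse so a term can also be matched
# "backwards". With a single fixed input ("+") match_inverse treats the
# inverse's return value as the one input to match, and None means no match;
# the names below are hypothetical.
def _double_sketch(x):
    return 2 * x
def _undouble_sketch(y):
    # Inverse of _double_sketch; None signals that y has no pre-image.
    if y % 2 != 0:
        return None
    return y // 2
# e.g. ReversibleFun("+", _double_sketch, _undouble_sketch)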
################################################################
# Predicates
class BasicPred(_Basic, PredImpInt):
"""Implements a predicate via a function that returns None or a solution"""
__slots__ = ()
def solution(self, agent, bindings, zexpr):
result = self._function(*self.generate_args(agent, bindings, zexpr))
debug("BasicPred function returned %r", result)
if result is not None:
if self.bind_result(agent, bindings, zexpr, result):
debug("BasicPred solution %r", result)
return SOLVED
return NOT_SOLVED
# def find_solutions(self, agent, bindings, zexpr):
# result = self._function(*self.generate_args(agent, bindings, zexpr))
# debug("BasicPred function returned %r", result)
# if result is not None:
# if self.bind_result(agent, bindings, zexpr, result):
# debug("BasicPred solution %r", result)
# bindings.bfound_solution(agent, zexpr)
installConstructor(BasicPred)
class BasicPredSequence(_Basic, PredImpInt):
"""Implements a predicate via a function that returns a sequence of solutions"""
__slots__ = ()
def solution(self, agent, bindings, zexpr):
result = self._function(*self.generate_args(agent, bindings, zexpr))
debug("BasicPredSequence function returned %r", result)
# Should check for None
for res in result:
if self.bind_result(agent, bindings, zexpr, res):
debug("BasicPredSequence solution %r", res)
return SOLVED
return NOT_SOLVED
def solutions(self, agent, bindings, zexpr):
result = self._function(*self.generate_args(agent, bindings, zexpr))
debug("BasicPredSequence function returned %r", result)
# Should check for None
for res in result:
if self.bind_result(agent, bindings, zexpr, res):
debug("BasicPredSequence solution %r", res)
yield SOLVED
# def find_solutions(self, agent, bindings, zexpr):
# result = self._function(*self.generate_args(agent, bindings, zexpr))
# debug("BasicPredSequence function returned %r", result)
# # Should check for None
# for res in result:
# if self.bind_result(agent, bindings, zexpr, res):
# debug("BasicPredSequence solution %r", res)
# bindings.bfound_solution(agent, zexpr)
installConstructor(BasicPredSequence)
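# Illustrative sketch (not part of SPARK, never referenced above): the shapes
# of callable that BasicPred and BasicPredSequence expect for a "+-" mode
# (one bound input, one output to bind); the names below are hypothetical.
def _half_of_sketch(n):
    # BasicPred style: return the single output value, or None for no solution.
    if n % 2 != 0:
        return None
    return n // 2
def _divisors_of_sketch(n):
    # BasicPredSequence style: return one output value per solution.
    return [d for d in range(1, n + 1) if n % d == 0]
# e.g. BasicPred("+-", _half_of_sketch) or BasicPredSequence("+-", _divisors_of_sketch)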
################################################################
class MultiModalPred(PredImpInt):
"""A predicate implementation that is based on other implementations"""
__slots__ = (
"_imps",
"_sym",
)
def __init__(self, *imps):
self._imps = imps
self._sym = None
def valid_mode_p(self, agent, bindings, zexpr):
for imp in self._imps:
if imp.valid_mode_p(agent, bindings, zexpr):
return True
return False
def solution(self, agent, bindings, zexpr):
for imp in self._imps:
if imp.valid_mode_p(agent, bindings, zexpr):
return imp.solution(agent, bindings, zexpr)
raise LowError("Calling predicate with invalid mode")
def solutions(self, agent, bindings, zexpr):
for imp in self._imps:
if imp.valid_mode_p(agent, bindings, zexpr):
return imp.solutions(agent, bindings, zexpr)
raise LowError("Calling predicate with invalid mode")
# def find_solutions(self, agent, bindings, zexpr):
# for imp in self._imps:
# if imp.valid_mode_p(agent, bindings, zexpr):
# imp.find_solutions(agent, bindings, zexpr)
# return
# raise LowError("Calling predicate with invalid mode")
def setDecl(self, decl):
symbol = decl and decl.asSymbol()
if not (self._sym is None or self._sym == symbol): raise AssertionError, \
"Attempting to rename a named object %r"%self
self._sym = symbol
for imp in self._imps:
imp(decl)
def __call__(self, decl):
self.setDecl(decl)
return self
################################################################
# Actions
#@-timing# from spark.internal.timer import TIMER
#@-# T_tfcont = TIMER.newRecord("tfcont")
#@-# T_genargs = TIMER.newRecord("genargs")
#@-# T_callfn = TIMER.newRecord("callfn")
#@-# T_processresult = TIMER.newRecord("processresult")
class BasicImpTFrame(TFrame):
__slots__ = (
"_basicact",
"_bindings",
"_zexpr",
)
def cvname(self):
return self._basicact.cvname()
def __init__(self, name, event, basicact, bindings, zexpr):
TFrame.__init__(self, name, event)
self._basicact = basicact
self._bindings = bindings
self._zexpr = zexpr
def constructor_args(self):
return TFrame.constructor_args(self) + [self._basicact, self._bindings, self._zexpr]
constructor_mode = "VIIIV"
def tfcont(self, agent):
#@-timing# T_tfcont.start()
try:
#debug("Creating args for %r", self)
#@-timing# T_genargs.start()
args = self._basicact.generate_args(agent, self._bindings, self._zexpr)
#@-timing# T_genargs.stop()
#debug("Calling %r on %r", self._basicact._function, args)
#@-timing# T_callfn.start()
val = self._basicact._function(*args)
#@-timing# T_callfn.stop()
except LocatedError, e:
errid = NEWPM.displayError()
#@-timing# T_callfn.stop()
#@-timing# T_processresult.start()
self.process_exception(agent, e, errid)
#@-timing# T_processresult.stop()
#@-timing# T_tfcont.stop()
return
except AnyException, e:
errid = NEWPM.displayError()
#@-timing# T_callfn.stop()
#@-timing# T_processresult.start()
# TODO: work out if this is the right way to handle this case
new_err = CapturedError(self._zexpr, errid, "executing")
self.process_exception(agent, new_err, errid)
#@-timing# T_processresult.stop()
#@-timing# T_tfcont.stop()
return
#@-timing# T_processresult.start()
self.process_result(agent, val)
#@-timing# T_processresult.stop()
#@-timing# T_tfcont.stop()
def process_exception(self, agent, e, errid):
result = e
if not isinstance(e, Failure):
result = ExceptionFailure(self, e, errid)
#print "Setting result=", result
self.tf_set_completed(agent, result)
def process_result(self, agent, val):
if not self._basicact._output_indices: # i.e., length == 0:
# Note that a function returning None is treated as SUCCESS
if val is False:
result = MessageFailure(self, "%s implementing action returned False"%self._basicact.function_name())
elif isinstance(val, Failure):
result = val
else:
result = SUCCESS
elif self._basicact.bind_result(agent, self._bindings, self._zexpr, val):
result = SUCCESS
        elif val is None:
result = MessageFailure(self, "%s implementing action returned None"
%self._basicact.function_name())
else:
try:
raise Exception("%s implementing action returned a value %r that does not match the output params %r"%(self._basicact.function_name(), val, self._basicact._output_indices))
except AnyException, e:
errid = NEWPM.displayError()
#@-timing# T_callfn.stop()
#@-timing# T_processresult.start()
# TODO: work out if this is the right way to handle this case
new_err = CapturedError(self._zexpr, errid, "executing")
self.process_exception(agent, new_err, errid)
return
#print "Setting result=", result
self.tf_set_completed(agent, result)
installConstructor(BasicImpTFrame)
class BasicAct(_Basic, ActImpInt):
__slots__ = ()
tframe_class = BasicImpTFrame
def tframes(self, agent, event, bindings, zexpr):
name = "%s.%d"%(self._sym, agent.nextid())
return [self.tframe_class(name, event, self, bindings, zexpr)]
installConstructor(BasicAct)
#@-timing# T_ThreadImpTFrameInit = TIMER.newRecord("ThreadImpTFrameInit")
class ThreadImpTFrame(BasicImpTFrame):
__slots__ = (
"_thread_event",
"_thread_result",
"_thread_exception",
"_agent_to_do_flag",
"_errid",
)
def __init__(self, name, event, threadact, bindings, zexpr):
BasicImpTFrame.__init__(self, name, event, threadact, bindings, zexpr)
self._thread_event = None
def state_args(self):
return (0,)
def set_state(self, agent, ignore):
self.tf_set_completed(agent, MessageFailure(self, "ThreadImpTFrame cannot be resumed"))
def tfcont(self, agent):
#debug("ThreadImpTFrame tfcont")
#@-timing# T_tfcont.start()
te = self._thread_event
if te is None:
#print "Starting thread"
#@-timing# T_genargs.start()
args = self._basicact.generate_args(agent, self._bindings, self._zexpr)
#@-timing# T_genargs.stopstart(T_callfn)
self._thread_event = threading.Event()
self._thread_result = None
self._thread_exception = None
self._agent_to_do_flag = agent._something_to_do_event
#debug("ThreadImpTFrame starting thread")
threading.Thread(target=self.run, args=args).start()
#THREAD_POOL.startDaemonThread(target=self.run, args=args)
#@-timing# T_callfn.stop()
elif te.isSet():
#@-timing# T_processresult.start()
#print "Noticed thread finished"
if self._thread_exception:
#debug("ThreadImpTFrame processing exception %s", self._thread_exception)
self.process_exception(agent, self._thread_exception, self._errid)
else:
#debug("ThreadImpTFrame processing result %s", self._thread_result)
self.process_result(agent, self._thread_result)
#@-timing# T_processresult.stop()
#debug("ThreadImpTFrame tfcont return not waiting")
else:
#print "Waiting for thread"
#debug("ThreadImpTFrame tfcont return waiting")
#return None
pass
#@-timing# T_tfcont.stop()
def run(self, *args):
try:
#print " Starting fun"
self._thread_result = self._basicact._function(*args)
#print " Finished fun"
except AnyException, e:
#print " Exception in fun"
errid = NEWPM.displayError()
self._thread_exception = e
self._errid = errid
self._thread_event.set()
self._agent_to_do_flag.set()
installConstructor(ThreadImpTFrame)
class ThreadAct(BasicAct):
__slots__ = ()
tframe_class = ThreadImpTFrame
installConstructor(ThreadAct)
################################################################
# What follows should be removed once things that depend upon it are fixed
################################################################
class SimpleProcedureInstance(TFrame):
__slots__ = (
"_procedure",
"_bindings", # should be able to get from event
"_zexpr", # should be able to get from event
"_features",
)
def __init__(self, name, procedure, event, bindings, zexpr):
TFrame.__init__(self, name, event)
self._procedure = procedure
self._bindings = bindings
self._zexpr = zexpr
self._features = ()
def constructor_args(self):
[name, event] = TFrame.constructor_args(self)
return [name, self._procedure, event, self._bindings, self._zexpr]
constructor_mode = "VIIIV"
def tfcont(self, agent):
result = self._procedure.execute(agent,self._event,self._bindings,self._zexpr)
if not (result is SUCCESS or isinstance(result, Failure)):
raise LowError(\
"""execute method of procedure returned something that was
neither SUCCESS nor a Failure instance
procedure: %s
return value: %s""", self._procedure, result)
self.tf_set_completed(agent, result)
def features(self, agent):
return self._features
installConstructor(SimpleProcedureInstance)
class SimpleProcedure(ProcedureInt):
__slots__ = (
"_name",
)
def __init__(self, name):
self._name = name
def name(self):
return self._name
def append_tframes(self, agent, event, bindings, zexpr, list):
if self.applicable(agent, event, bindings, zexpr):
name = "%s.%d"%(self._name, agent.nextid())
list.append(SimpleProcedureInstance(name, self, event, bindings, zexpr))
def applicable(self, agent, event, bindings, zexpr):
return True # default precondition
def execute(self, agent, event, bindings, zexpr):
raise LowError("execute method not defined for %r", self)
def __str__(self):
return self.__class__.__name__ + "." + self._name
def __repr__(self):
return self.__class__.__name__ + "." + self._name
################################################################
# class ThreadProcedureInstance(TFrame):
# __slots__ = (
# "_procedure",
# "_bindings", # should be able to get from event
# "_zexpr", # should be able to get from event
# "_thread_event",
# "_thread_result",
# "_thread_exception",
# )
# def __init__(self, name, procedure, event, bindings, zexpr):
# TFrame.__init__(self, name, event)
# self._procedure = procedure
# self._bindings = bindings
# self._zexpr = zexpr
# self._thread_event = None
# self._thread_result = None
# self._thread_exception = None
# def tfcont(self, agent):
# te = self._thread_event
# try:
# if te is None: # thread not started
# data = self._procedure.prologue(agent,self._event,self._bindings,self._zexpr)
# te = threading.Event()
# self._thread_event = te
# #print "STARTING te.isSet() =", te.isSet()
# thread = threading.Thread(target=self._threadrun, \
# args=(agent, data), name="Thread")
# thread.start()
# elif te.isSet(): # thread complete
# #print "FINISHED te.isSet() =", te.isSet()
# if self._thread_exception:
# raise self._thread_exception # DNM - maybe re-raise
# result = self._procedure.epilogue(agent, self._event, self._bindings, self._zexpr, self._thread_result)
# if not (result is SUCCESS or isinstance(result, Failure)): raise AssertionError
# self.tf_set_completed(agent, result)
# else:
# #print "WAITING te.isSet() =", te.isSet(), self._procedure
# return NOCHANGE
# except Failure, failure:
# self.tf_set_completed(agent, failure)
# except AnyException, e:
# pm.setprint()
# self.tf_set_completed(agent, ExceptionFailure(self, e))
# return None
# def _threadrun(self, agent, data):
# try:
# self._thread_result = self._procedure.threadrun(data)
# except AnyException, e:
# self._thread_exception = ExceptionFailure(self, e)
# pm.setprint()
# #print "THREAD COMPLETED"
# self._thread_event.set()
# agent._something_to_do_event.set()
# class ThreadProcedure(object, ProcedureInt):
# __slots__ = (
# "_name",
# )
# def __init__(self, name):
# self._name = name
# def name(self):
# return self._name
# def append_tframes(self, agent, event, bindings, zexpr, list):
# if self.applicable(agent, event, bindings, zexpr):
# name = "%s.%d"%(self._name, agent.nextid())
# list.append(ThreadProcedureInstance(name, self, event, bindings, zexpr))
# def applicable(self, agent, event, bindings, zexpr):
# return True # default precondition
# def prologue(self, agent, event, bindings, zexpr):
# return None
# def threadrun(self, prologue_data):
# raise LowError("threadrun method not implemented for %r", self)
# def epilogue(self, agent, event, bindings, zexpr, threadrun_result):
# return threadrun_result
class ProcedureActImp(ActImpInt):
"""An action that is implemented by executing a fixed procedure"""
def __init__(self, procedure):
if not (isinstance(procedure, ProcedureInt)): raise AssertionError
self._procedure = procedure
def __call__(self, symbol):
self._symbol = symbol
return self
def tframes(self, agent, event, bindings, zexpr):
tframes = []
self._procedure.append_tframes(agent, event, bindings, zexpr, tframes)
return tframes
################################################################
class CounterImp(Imp, PersistablePredImpInt):
#NOTE: single agent implementation (SINGLEAGENT). easy to make multiagent
    __slots__ = ('counter',)
def __init__(self, decl):
Imp.__init__(self, decl)
self.counter = 0
#PERSIST
def persist_arity_or_facts(self, ignore_agent):
return [[self.counter]]
def resume_conclude(self, agent, bindings, zexpr):
self.counter = termEvalErr(agent, bindings, zexpr[0])
def solution(self, agent, bindings, zexpr):
if len(zexpr) != 1:
raise zexpr.error("Invalid arity -- counters have arity of 1")
if self.counter is not None and \
termMatch(agent, bindings, zexpr[0], self.counter):
return SOLVED
else:
return NOT_SOLVED
# def find_solutions(self, agent, bindings, zexpr):
# if len(zexpr) != 1:
# raise zexpr.error("Invalid arity -- counters have arity of 1")
# if self.counter is not None and \
# termMatch(agent, bindings, zexpr[0], self.counter):
# bindings.bfound_solution(agent, zexpr)
def conclude(self, agent, bindings, zexpr):
"""Conclude this predicate and return whether the database has
been changed."""
if len(zexpr) != 1:
raise zexpr.error("Invalid arity -- counters have arity of 1")
self.counter = termEvalErr(agent, bindings, zexpr[0])
def retractall(self, agent, bindings, zexpr):
"""Retract anything matching this predicate and return whether
        the database has been changed."""
raise zexpr.error("Cannot retractall on a counter implementation")
# ################################################################
# # ThreadPool
# #
# # source of threads that can be reused
# #
# import Queue
# class ThreadPool(object):
# __slots__ = (
# "idleWorkerThreadsQueue",
# )
# def __init__(self, maxsize=0):
# self.idleWorkerThreadsQueue = Queue.Queue(maxsize)
# def startDaemonThread(self, target=None, name=None, args={}, kwargs={}):
# "Take an idle WorkerThread from the pool and run target function on the given args. Create a new _WorkerThread if necessary."
# queue = self.idleWorkerThreadsQueue
# try:
# workerThread = queue.get(False)
# #print "Taking", workerThread, "from", queue
# except Queue.Empty:
# workerThread = _WorkerThread()
# #print "Creating new", workerThread
# workerThread.setDaemon(True)
# workerThread.start()
# workerThread.invokeRun(queue, target, name, args, kwargs)
# def killIdleWorkerThreads(self, leave=0):
# queue = self.idleWorkerThreadsQueue
# while queue.qsize() > leave:
# try:
# workerThread = queue.get(False)
# except Queue.Empty:
# return
# workerThread.killIdleWorkerThread()
# THREAD_POOL = ThreadPool(0) # Allow an unlimited number of idle threads
# class _WorkerThread(threading.Thread):
# __slots__ = (
# "workLock", # Lock to say work is available
# "oldname", # original name of thread
# "idleWorkerThreadsQueue", # queue to put self on when idle
# "target", # runnable object
# "name", # name to use
# "args", # args to use
# "kwargs", # keyword args to use
# )
# def __init__(self):
# threading.Thread.__init__(self)
# self.workLock = threading.Lock()
# self.workLock.acquire() # invokeRun will release this Lock
# self.oldname = self.getName()
# self._reset()
# def _reset(self):
# self.setName(self.oldname)
# self.idleWorkerThreadsQueue = None
# self.target = None
# self.name = None
# self.args = None
# self.kwargs = None
# def run(self):
# terminate = False
# while not terminate:
# terminate = self.performRun()
# def performRun(self):
# "Acquire lock, and perform the target function. Return Ture if thread should exit."
# self.workLock.acquire() # wait until something to do
# # Check if we should die
# idleWorkerThreadsQueue = self.idleWorkerThreadsQueue
# if idleWorkerThreadsQueue is None:
# #print "Killing", self
# return True
# # call the target function
# #print "Using", self, "to run", self.target
# if self.name is not None:
# self.setName(self.name)
# try:
# self.target(*self.args, **self.kwargs)
# except:
# print "Exception in WorkerThread", self.getName()
# errid = NEWPM.displayError()
# # reset self and go back on the idle queue
# self._reset()
# #print "Putting", self, "back on", idleWorkerThreadsQueue
# try:
# idleWorkerThreadsQueue.put_nowait(self)
# except Queue.Full:
# #print "Idle _WorkerThread Queue is full, kill _WorkerThread instead"
# return True
# return False
# def invokeRun(self, queue, target, name, args, kwargs):
# if self.workLock.acquire(False): raise AssertionError, \
# "Trying to call _WorkerThread.invokeRun with workLock unlocked"
# if queue is None: raise AssertionError, \
# "Must supply a Queue to return _WorkerThread to when done"
# if target is None: raise AssertionError, \
# "Must supply a target function to execute"
# self.idleWorkerThreadsQueue = queue
# self.target = target
# self.name = name
# self.args = args
# self.kwargs = kwargs
# self.workLock.release()
# def killIdleWorkerThread(self):
# "Kill off this _WorkerThread"
# if self.target is not None: raise AssertionError, \
# "Cannot kill a _WorkerThread that is current executing something"
# self.idleWorkerThreadsQueue = None
# self.workLock.release()
# ONE_WORKER_THREAD = _WorkerThread()
|
chrome_test_server_spawner.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A "Test Server Spawner" that handles killing/stopping per-test test servers.
It's used to accept requests from the device to spawn and kill instances of the
chrome test server on the host.
"""
import BaseHTTPServer
import json
import logging
import os
import select
import struct
import subprocess
import sys
import threading
import time
import urlparse
import constants
import ports
# Paths that are needed to import necessary modules when launching a testserver.
os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '') + (':%s:%s:%s:%s:%s'
% (os.path.join(constants.DIR_SOURCE_ROOT, 'third_party'),
os.path.join(constants.DIR_SOURCE_ROOT, 'third_party', 'tlslite'),
os.path.join(constants.DIR_SOURCE_ROOT, 'third_party', 'pyftpdlib',
'src'),
os.path.join(constants.DIR_SOURCE_ROOT, 'net', 'tools', 'testserver'),
os.path.join(constants.DIR_SOURCE_ROOT, 'sync', 'tools', 'testserver')))
SERVER_TYPES = {
'http': '',
'ftp': '-f',
'sync': '', # Sync uses its own script, and doesn't take a server type arg.
'tcpecho': '--tcp-echo',
'udpecho': '--udp-echo',
}
# The timeout (in seconds) of starting up the Python test server.
TEST_SERVER_STARTUP_TIMEOUT = 10
def _WaitUntil(predicate, max_attempts=5):
"""Blocks until the provided predicate (function) is true.
Returns:
Whether the provided predicate was satisfied once (before the timeout).
"""
sleep_time_sec = 0.025
  for _ in xrange(max_attempts):
if predicate():
return True
time.sleep(sleep_time_sec)
sleep_time_sec = min(1, sleep_time_sec * 2) # Don't wait more than 1 sec.
return False
def _CheckPortStatus(port, expected_status):
"""Returns True if port has expected_status.
Args:
port: the port number.
expected_status: boolean of expected status.
Returns:
Returns True if the status is expected. Otherwise returns False.
"""
return _WaitUntil(lambda: ports.IsHostPortUsed(port) == expected_status)
def _CheckDevicePortStatus(adb, port):
"""Returns whether the provided port is used."""
return _WaitUntil(lambda: ports.IsDevicePortUsed(adb, port))
def _GetServerTypeCommandLine(server_type):
"""Returns the command-line by the given server type.
Args:
server_type: the server type to be used (e.g. 'http').
Returns:
A string containing the command-line argument.
"""
if server_type not in SERVER_TYPES:
raise NotImplementedError('Unknown server type: %s' % server_type)
if server_type == 'udpecho':
raise Exception('Please do not run UDP echo tests because we do not have '
'a UDP forwarder tool.')
return SERVER_TYPES[server_type]
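# For example, _GetServerTypeCommandLine('ftp') yields '-f', while 'http' and
# 'sync' yield an empty string ('sync' is dispatched to its own script in
# TestServerThread.run below) and 'udpecho' is rejected outright.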
class TestServerThread(threading.Thread):
"""A thread to run the test server in a separate process."""
def __init__(self, ready_event, arguments, adb, tool, forwarder, build_type):
"""Initialize TestServerThread with the following argument.
Args:
ready_event: event which will be set when the test server is ready.
arguments: dictionary of arguments to run the test server.
adb: instance of AndroidCommands.
tool: instance of runtime error detection tool.
forwarder: instance of Forwarder.
build_type: 'Release' or 'Debug'.
"""
threading.Thread.__init__(self)
self.wait_event = threading.Event()
self.stop_flag = False
self.ready_event = ready_event
self.ready_event.clear()
self.arguments = arguments
self.adb = adb
self.tool = tool
    self.process = None
self.is_ready = False
self.host_port = self.arguments['port']
assert isinstance(self.host_port, int)
self._test_server_forwarder = forwarder
# The forwarder device port now is dynamically allocated.
self.forwarder_device_port = 0
# Anonymous pipe in order to get port info from test server.
self.pipe_in = None
self.pipe_out = None
self.command_line = []
self.build_type = build_type
def _WaitToStartAndGetPortFromTestServer(self):
"""Waits for the Python test server to start and gets the port it is using.
The port information is passed by the Python test server with a pipe given
by self.pipe_out. It is written as a result to |self.host_port|.
Returns:
Whether the port used by the test server was successfully fetched.
"""
assert self.host_port == 0 and self.pipe_out and self.pipe_in
(in_fds, _, _) = select.select([self.pipe_in, ], [], [],
TEST_SERVER_STARTUP_TIMEOUT)
if len(in_fds) == 0:
      logging.error('Timed out waiting for the Python test server to start.')
return False
# First read the data length as an unsigned 4-byte value. This
# is _not_ using network byte ordering since the Python test server packs
# size as native byte order and all Chromium platforms so far are
# configured to use little-endian.
# TODO(jnd): Change the Python test server and local_test_server_*.cc to
# use a unified byte order (either big-endian or little-endian).
data_length = os.read(self.pipe_in, struct.calcsize('=L'))
if data_length:
(data_length,) = struct.unpack('=L', data_length)
assert data_length
if not data_length:
logging.error('Failed to get length of server data.')
return False
port_json = os.read(self.pipe_in, data_length)
if not port_json:
logging.error('Failed to get server data.')
return False
logging.info('Got port json data: %s', port_json)
port_json = json.loads(port_json)
if port_json.has_key('port') and isinstance(port_json['port'], int):
self.host_port = port_json['port']
return _CheckPortStatus(self.host_port, True)
logging.error('Failed to get port information from the server data.')
return False
def _GenerateCommandLineArguments(self):
"""Generates the command line to run the test server.
Note that all options are processed by following the definitions in
testserver.py.
"""
if self.command_line:
return
# The following arguments must exist.
type_cmd = _GetServerTypeCommandLine(self.arguments['server-type'])
if type_cmd:
self.command_line.append(type_cmd)
self.command_line.append('--port=%d' % self.host_port)
# Use a pipe to get the port given by the instance of Python test server
# if the test does not specify the port.
if self.host_port == 0:
(self.pipe_in, self.pipe_out) = os.pipe()
self.command_line.append('--startup-pipe=%d' % self.pipe_out)
self.command_line.append('--host=%s' % self.arguments['host'])
data_dir = self.arguments['data-dir'] or 'chrome/test/data'
if not os.path.isabs(data_dir):
data_dir = os.path.join(constants.DIR_SOURCE_ROOT, data_dir)
self.command_line.append('--data-dir=%s' % data_dir)
# The following arguments are optional depending on the individual test.
if self.arguments.has_key('log-to-console'):
self.command_line.append('--log-to-console')
if self.arguments.has_key('auth-token'):
self.command_line.append('--auth-token=%s' % self.arguments['auth-token'])
if self.arguments.has_key('https'):
self.command_line.append('--https')
if self.arguments.has_key('cert-and-key-file'):
self.command_line.append('--cert-and-key-file=%s' % os.path.join(
constants.DIR_SOURCE_ROOT, self.arguments['cert-and-key-file']))
if self.arguments.has_key('ocsp'):
self.command_line.append('--ocsp=%s' % self.arguments['ocsp'])
if self.arguments.has_key('https-record-resume'):
self.command_line.append('--https-record-resume')
if self.arguments.has_key('ssl-client-auth'):
self.command_line.append('--ssl-client-auth')
if self.arguments.has_key('tls-intolerant'):
self.command_line.append('--tls-intolerant=%s' %
self.arguments['tls-intolerant'])
if self.arguments.has_key('ssl-client-ca'):
for ca in self.arguments['ssl-client-ca']:
self.command_line.append('--ssl-client-ca=%s' %
os.path.join(constants.DIR_SOURCE_ROOT, ca))
if self.arguments.has_key('ssl-bulk-cipher'):
for bulk_cipher in self.arguments['ssl-bulk-cipher']:
self.command_line.append('--ssl-bulk-cipher=%s' % bulk_cipher)
def run(self):
logging.info('Start running the thread!')
self.wait_event.clear()
self._GenerateCommandLineArguments()
command = constants.DIR_SOURCE_ROOT
if self.arguments['server-type'] == 'sync':
command = [os.path.join(command, 'sync', 'tools', 'testserver',
'sync_testserver.py')] + self.command_line
else:
command = [os.path.join(command, 'net', 'tools', 'testserver',
'testserver.py')] + self.command_line
logging.info('Running: %s', command)
self.process = subprocess.Popen(command)
if self.process:
if self.pipe_out:
self.is_ready = self._WaitToStartAndGetPortFromTestServer()
else:
self.is_ready = _CheckPortStatus(self.host_port, True)
if self.is_ready:
self._test_server_forwarder.Run(
[(0, self.host_port)], self.tool, '127.0.0.1')
# Check whether the forwarder is ready on the device.
self.is_ready = False
device_port = self._test_server_forwarder.DevicePortForHostPort(
self.host_port)
if device_port and _CheckDevicePortStatus(self.adb, device_port):
self.is_ready = True
self.forwarder_device_port = device_port
# Wake up the request handler thread.
self.ready_event.set()
# Keep thread running until Stop() gets called.
_WaitUntil(lambda: self.stop_flag, max_attempts=sys.maxint)
if self.process.poll() is None:
self.process.kill()
self._test_server_forwarder.UnmapDevicePort(self.forwarder_device_port)
self.process = None
self.is_ready = False
if self.pipe_out:
os.close(self.pipe_in)
os.close(self.pipe_out)
self.pipe_in = None
self.pipe_out = None
logging.info('Test-server has died.')
self.wait_event.set()
def Stop(self):
"""Blocks until the loop has finished.
Note that this must be called in another thread.
"""
if not self.process:
return
self.stop_flag = True
self.wait_event.wait()
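# Illustrative sketch only (not called anywhere in this file): the wire format
# that _WaitToStartAndGetPortFromTestServer above expects on the startup pipe
# is a native-byte-order 4-byte length followed by a JSON payload with the
# chosen port. This helper mirrors what the Python test server is assumed to
# write; the name is hypothetical.
def _ExampleWriteStartupInfo(pipe_out_fd, port):
  payload = json.dumps({'port': port})
  os.write(pipe_out_fd, struct.pack('=L', len(payload)))
  os.write(pipe_out_fd, payload)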
class SpawningServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler used to process http GET/POST request."""
def _SendResponse(self, response_code, response_reason, additional_headers,
contents):
"""Generates a response sent to the client from the provided parameters.
Args:
response_code: number of the response status.
response_reason: string of reason description of the response.
additional_headers: dict of additional headers. Each key is the name of
the header, each value is the content of the header.
contents: string of the contents we want to send to client.
"""
self.send_response(response_code, response_reason)
self.send_header('Content-Type', 'text/html')
# Specify the content-length as without it the http(s) response will not
# be completed properly (and the browser keeps expecting data).
self.send_header('Content-Length', len(contents))
for header_name in additional_headers:
self.send_header(header_name, additional_headers[header_name])
self.end_headers()
self.wfile.write(contents)
self.wfile.flush()
def _StartTestServer(self):
"""Starts the test server thread."""
logging.info('Handling request to spawn a test server.')
content_type = self.headers.getheader('content-type')
if content_type != 'application/json':
raise Exception('Bad content-type for start request.')
content_length = self.headers.getheader('content-length')
if not content_length:
content_length = 0
try:
content_length = int(content_length)
except:
raise Exception('Bad content-length for start request.')
logging.info(content_length)
test_server_argument_json = self.rfile.read(content_length)
logging.info(test_server_argument_json)
assert not self.server.test_server_instance
ready_event = threading.Event()
self.server.test_server_instance = TestServerThread(
ready_event,
json.loads(test_server_argument_json),
self.server.adb,
self.server.tool,
self.server.forwarder,
self.server.build_type)
self.server.test_server_instance.setDaemon(True)
self.server.test_server_instance.start()
ready_event.wait()
if self.server.test_server_instance.is_ready:
self._SendResponse(200, 'OK', {}, json.dumps(
{'port': self.server.test_server_instance.forwarder_device_port,
'message': 'started'}))
logging.info('Test server is running on port: %d.',
self.server.test_server_instance.host_port)
else:
self.server.test_server_instance.Stop()
self.server.test_server_instance = None
self._SendResponse(500, 'Test Server Error.', {}, '')
      logging.info('Encountered a problem while starting the test server.')
def _KillTestServer(self):
"""Stops the test server instance."""
# There should only ever be one test server at a time. This may do the
# wrong thing if we try and start multiple test servers.
if not self.server.test_server_instance:
return
port = self.server.test_server_instance.host_port
logging.info('Handling request to kill a test server on port: %d.', port)
self.server.test_server_instance.Stop()
# Make sure the status of test server is correct before sending response.
if _CheckPortStatus(port, False):
self._SendResponse(200, 'OK', {}, 'killed')
logging.info('Test server on port %d is killed', port)
else:
self._SendResponse(500, 'Test Server Error.', {}, '')
      logging.info('Encountered a problem while killing the test server.')
self.server.test_server_instance = None
def do_POST(self):
parsed_path = urlparse.urlparse(self.path)
action = parsed_path.path
logging.info('Action for POST method is: %s.', action)
if action == '/start':
self._StartTestServer()
else:
self._SendResponse(400, 'Unknown request.', {}, '')
      logging.info('Encountered unknown request: %s.', action)
def do_GET(self):
parsed_path = urlparse.urlparse(self.path)
action = parsed_path.path
params = urlparse.parse_qs(parsed_path.query, keep_blank_values=1)
logging.info('Action for GET method is: %s.', action)
for param in params:
logging.info('%s=%s', param, params[param][0])
if action == '/kill':
self._KillTestServer()
elif action == '/ping':
# The ping handler is used to check whether the spawner server is ready
# to serve the requests. We don't need to test the status of the test
# server when handling ping request.
self._SendResponse(200, 'OK', {}, 'ready')
logging.info('Handled ping request and sent response.')
else:
self._SendResponse(400, 'Unknown request', {}, '')
      logging.info('Encountered unknown request: %s.', action)
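# Illustrative sketch only (not called anywhere in this file): what a client of
# this spawner sends. The handler above expects a JSON body with 'server-type',
# 'port', 'host' and 'data-dir' keys on POST /start, and plain GETs for /kill
# and /ping. The helper name and argument values are hypothetical.
def _ExampleSpawnRequest(spawner_host, spawner_port):
  import httplib
  body = json.dumps({
      'server-type': 'http',
      'port': 0,  # 0 lets the test server pick a free port via the startup pipe.
      'host': '127.0.0.1',
      'data-dir': '',  # Empty falls back to chrome/test/data.
  })
  connection = httplib.HTTPConnection(spawner_host, spawner_port)
  connection.request('POST', '/start', body,
                     {'Content-Type': 'application/json'})
  return json.loads(connection.getresponse().read())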
class SpawningServer(object):
"""The class used to start/stop a http server."""
def __init__(self, test_server_spawner_port, adb, tool, forwarder,
build_type):
logging.info('Creating new spawner on port: %d.', test_server_spawner_port)
self.server = BaseHTTPServer.HTTPServer(('', test_server_spawner_port),
SpawningServerRequestHandler)
self.server.adb = adb
self.server.tool = tool
self.server.forwarder = forwarder
self.server.test_server_instance = None
self.server.build_type = build_type
def _Listen(self):
logging.info('Starting test server spawner')
self.server.serve_forever()
def Start(self):
"""Starts the test server spawner."""
listener_thread = threading.Thread(target=self._Listen)
listener_thread.setDaemon(True)
listener_thread.start()
def Stop(self):
"""Stops the test server spawner.
Also cleans the server state.
"""
self.CleanupState()
self.server.shutdown()
def CleanupState(self):
"""Cleans up the spawning server state.
This should be called if the test server spawner is reused,
to avoid sharing the test server instance.
"""
if self.server.test_server_instance:
self.server.test_server_instance.Stop()
self.server.test_server_instance = None
|
sim.py
|
#!/usr/bin/env python3
import csv
import numpy
import multiprocessing
def sigmoid(x):
return 1.0/(1.0+numpy.exp(-(x-5.0)))
def sim_func(q,alpha1,alpha2,Nalpha1,Nalpha2):
#parameters
time_pitch=1.0 #ms
save_pitch=10
save_pitch_weight=1000
simlen_sec=900.0
simlen=int(simlen_sec*1000.0/time_pitch)
tauL=10.0 #ms
phi=80.0/1000.0
phi_input=80.0/1000.0
alpha_som=alpha1
alpha_dnd=alpha2
beta_som=0.0
beta_dnd=0.0
gamma=1.0
c0=70.0
eta_som=0.2
eta_dnd=0.2
taudeltaW=1.0*1000.0 #ms
tau_mean=60.0*1000.0
eta_Wdecay=1e-7
Wnoise_amp=5e-3/numpy.sqrt(time_pitch)
som_input_num=50
dnd_input_num=som_input_num+0
group1_num=10
input_src_num=4
tau_input=10.0 #ms
input_amp=0.1/numpy.sqrt(time_pitch)
noise_amp=0.1/numpy.sqrt(time_pitch)
Winit=5.0
Wmin=0.0
E0=0.05
#variables
x=0.0
y=0.0
Ex=E0
Ey=E0
input_src=numpy.zeros(input_src_num)
som_input_current=numpy.zeros(som_input_num)
dnd_input_current=numpy.zeros(dnd_input_num)
som_inputPSC=numpy.zeros(som_input_num)
dnd_inputPSC=numpy.zeros(dnd_input_num)
deltaWsom=numpy.zeros(som_input_num)
deltaWdnd=numpy.zeros(dnd_input_num)
Wsom=Winit*(numpy.random.rand(som_input_num))
Wdnd=Winit*(numpy.random.rand(dnd_input_num))
som_src=numpy.zeros([som_input_num, input_src_num])
som_src[:group1_num, 0]=1.0
som_src[group1_num:, 2]=1.0
dnd_src=numpy.zeros([dnd_input_num, input_src_num])
dnd_src[:group1_num,0]=1.0
dnd_src[group1_num:,3]=1.0
#simulation
for t in range(simlen):
time_sec=float(t)*time_pitch/1000.0
#if time_sec==int(time_sec):
# print(time_sec,"sec")
#source signal
input_src=input_src+time_pitch*(-input_src/tau_input+input_amp*numpy.random.randn(input_src_num))
#inputs
som_input_current+=time_pitch*(-som_input_current/tauL+som_src@input_src+noise_amp*numpy.random.randn(som_input_num))
dnd_input_current+=time_pitch*(-dnd_input_current/tauL+dnd_src@input_src+noise_amp*numpy.random.randn(dnd_input_num))
som_input=phi_input*sigmoid(som_input_current)
dnd_input=phi_input*sigmoid(dnd_input_current)
som_inputPSC+=time_pitch*(-som_inputPSC/tauL+som_input)
dnd_inputPSC+=time_pitch*(-dnd_inputPSC/tauL+dnd_input)
#dynamics
xprev=x+0.0
yprev=y+0.0
Isom=Wsom@som_inputPSC
Idnd=Wdnd@dnd_inputPSC
x=sigmoid(Isom+beta_som*yprev)
y=sigmoid(Idnd+beta_dnd*xprev)
z=(1.0+gamma*y)*phi*x
#plasticity
#som
Wsom+=time_pitch*(eta_som*deltaWsom+Wnoise_amp*numpy.random.randn(som_input_num)-eta_Wdecay*Wsom)
Wsom[Wsom<Wmin]=Wmin
xamp=(1.0-alpha_som)*x+alpha_som*x*y
theta_som=c0*Ex*Ex
deltaWsom+=time_pitch*(-deltaWsom+(xamp*(xamp-theta_som))*(1.0-x)*som_inputPSC)/taudeltaW
#dnd
Wdnd+=time_pitch*(eta_dnd*deltaWdnd+Wnoise_amp*numpy.random.randn(dnd_input_num)-eta_Wdecay*Wdnd)
Wdnd[Wdnd<Wmin]=Wmin
yamp=(1.0-alpha_dnd)*y+alpha_dnd*x*y
theta_dnd=c0*Ey*Ey
deltaWdnd+=time_pitch*(-deltaWdnd+(yamp*(yamp-theta_dnd))*(1.0-y)*dnd_inputPSC)/taudeltaW
Ex+=time_pitch*(-Ex+xamp)/tau_mean
Ey+=time_pitch*(-Ey+yamp)/tau_mean
wdif_som=numpy.sum(Wsom[:group1_num])-numpy.sum(Wsom[group1_num:])
wdif_dnd=numpy.sum(Wdnd[:group1_num])-numpy.sum(Wdnd[group1_num:])
q.put((Nalpha1,Nalpha2,wdif_som,wdif_dnd))
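# Illustrative helper (not called by the sweep below): run a single
# (alpha_som, alpha_dnd) cell of the parameter grid in its own process and
# return the two weight-difference read-outs produced by sim_func.
def run_single_cell(alpha_som, alpha_dnd):
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=sim_func, args=(q, alpha_som, alpha_dnd, 0, 0))
    p.start()
    _, _, wdif_som, wdif_dnd = q.get()  # fetch the result before join()
    p.join()
    return wdif_som, wdif_dnd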
if __name__=="__main__":
alpha1=numpy.arange(0.0,1.0,0.1)
alpha2=numpy.arange(0.0,1.0,0.1)
max_process=40
que=multiprocessing.Queue()
process_arr=[]
results=[]
process_num=0
for i in range(len(alpha1)):
for j in range(len(alpha2)):
print(alpha1[i],alpha2[j])
process_arr.append(multiprocessing.Process(target=sim_func, args=(que,alpha1[i],alpha2[j],i,j)))
process_arr[-1].start()
process_num+=1
if process_num>=max_process:
for k in range(process_num):
process_arr[k].join()
for k in range(process_num):
tmp=que.get()
results.append(tmp)
process_arr.clear()
process_num=0
for i in range(process_num):
process_arr[i].join()
for k in range(process_num):
tmp=que.get()
results.append(tmp)
numpy.savetxt("alpha_som.csv", alpha1, delimiter=",")
numpy.savetxt("alpha_dnd.csv", alpha2, delimiter=",")
numpy.savetxt("wdif.csv", results, delimiter=",")
|
cronjobs.py
|
#!/usr/bin/env python
"""Cron management classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
import sys
import threading
import time
from future.utils import iterkeys
from grr_response_core import config
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import registry
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import random
from grr_response_core.stats import stats_collector_instance
from grr_response_server import access_control
from grr_response_server import aff4
from grr_response_server import cronjobs
from grr_response_server import data_store
from grr_response_server import flow
from grr_response_server import master
from grr_response_server import queue_manager
from grr_response_server.rdfvalues import cronjobs as rdf_cronjobs
from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner
from grr_response_server.rdfvalues import hunts as rdf_hunts
class Error(Exception):
pass
class CronManager(object):
"""CronManager is used to schedule/terminate cron jobs."""
CRON_JOBS_PATH = rdfvalue.RDFURN("aff4:/cron")
def CreateJob(self, cron_args=None, job_id=None, token=None, enabled=True):
"""Creates a cron job that runs given flow with a given frequency.
Args:
cron_args: A protobuf of type rdf_cronjobs.CreateCronJobArgs.
job_id: Use this job_id instead of an autogenerated unique name (used for
        system cron jobs - we want them to have a well-defined, persistent name).
token: Security token used for data store access.
enabled: If False, the job object will be created, but will be disabled.
Returns:
Name of the cron job created.
"""
if not job_id:
uid = random.UInt16()
job_id = "%s_%s" % (cron_args.flow_name, uid)
flow_runner_args = rdf_flow_runner.FlowRunnerArgs(
flow_name="CreateAndRunGenericHuntFlow")
flow_args = rdf_hunts.CreateGenericHuntFlowArgs()
flow_args.hunt_args.flow_args = cron_args.flow_args
flow_args.hunt_args.flow_runner_args.flow_name = cron_args.flow_name
flow_args.hunt_runner_args = cron_args.hunt_runner_args
flow_args.hunt_runner_args.hunt_name = "GenericHunt"
create_cron_args = rdf_cronjobs.CreateCronJobFlowArgs(
description=cron_args.description,
periodicity=cron_args.frequency,
flow_runner_args=flow_runner_args,
flow_args=flow_args,
allow_overruns=cron_args.allow_overruns,
lifetime=cron_args.lifetime)
cron_job_urn = self.CRON_JOBS_PATH.Add(job_id)
with aff4.FACTORY.Create(
cron_job_urn,
aff4_type=CronJob,
mode="rw",
token=token,
force_new_version=False) as cron_job:
# If the cronjob was already present we don't want to overwrite the
# original start_time.
existing_cron_args = cron_job.Get(cron_job.Schema.CRON_ARGS)
if existing_cron_args and existing_cron_args.start_time:
create_cron_args.start_time = existing_cron_args.start_time
if create_cron_args != existing_cron_args:
cron_job.Set(cron_job.Schema.CRON_ARGS(create_cron_args))
cron_job.Set(cron_job.Schema.DISABLED(not enabled))
return job_id
def ListJobs(self, token=None):
"""Returns a list of all currently running cron jobs."""
job_root = aff4.FACTORY.Open(self.CRON_JOBS_PATH, token=token)
return [urn.Basename() for urn in job_root.ListChildren()]
def ReadJob(self, job_id, token=None):
job_urn = self.CRON_JOBS_PATH.Add(job_id)
return aff4.FACTORY.Open(
job_urn, aff4_type=CronJob, token=token, age=aff4.ALL_TIMES)
def ReadJobs(self, token=None):
job_urns = [self.CRON_JOBS_PATH.Add(job_id) for job_id in self.ListJobs()]
return aff4.FACTORY.MultiOpen(
job_urns, aff4_type=CronJob, token=token, age=aff4.ALL_TIMES)
def ReadJobRuns(self, job_id, token=None):
job_urn = self.CRON_JOBS_PATH.Add(job_id)
fd = aff4.FACTORY.Open(job_urn, token=token)
return list(fd.OpenChildren())
def EnableJob(self, job_id, token=None):
"""Enable cron job with the given URN."""
job_urn = self.CRON_JOBS_PATH.Add(job_id)
cron_job = aff4.FACTORY.Open(
job_urn, mode="rw", aff4_type=CronJob, token=token)
cron_job.Set(cron_job.Schema.DISABLED(0))
cron_job.Close()
def DisableJob(self, job_id, token=None):
"""Disable cron job with the given URN."""
job_urn = self.CRON_JOBS_PATH.Add(job_id)
cron_job = aff4.FACTORY.Open(
job_urn, mode="rw", aff4_type=CronJob, token=token)
cron_job.Set(cron_job.Schema.DISABLED(1))
cron_job.Close()
def DeleteJob(self, job_id, token=None):
"""Deletes cron job with the given URN."""
job_urn = self.CRON_JOBS_PATH.Add(job_id)
aff4.FACTORY.Delete(job_urn, token=token)
def RunOnce(self, token=None, force=False, names=None):
"""Tries to lock and run cron jobs.
Args:
token: security token
force: If True, force a run
names: List of job names to run. If unset, run them all
"""
names = names or self.ListJobs(token=token)
urns = [self.CRON_JOBS_PATH.Add(name) for name in names]
for cron_job_urn in urns:
try:
with aff4.FACTORY.OpenWithLock(
cron_job_urn, blocking=False, token=token,
lease_time=600) as cron_job:
try:
logging.info("Running cron job: %s", cron_job.urn)
cron_job.Run(force=force)
except Exception as e: # pylint: disable=broad-except
logging.exception("Error processing cron job %s: %s", cron_job.urn,
e)
stats_collector_instance.Get().IncrementCounter(
"cron_internal_error")
except aff4.LockError:
pass
def DeleteOldRuns(self, job, cutoff_timestamp=None, token=None):
"""Deletes flows initiated by the job that are older than specified."""
if cutoff_timestamp is None:
raise ValueError("cutoff_timestamp can't be None")
child_flows = list(job.ListChildren(age=cutoff_timestamp))
with queue_manager.QueueManager(token=token) as queuemanager:
queuemanager.MultiDestroyFlowStates(child_flows)
aff4.FACTORY.MultiDelete(child_flows, token=token)
return len(child_flows)
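# Illustrative sketch only (not part of GRR, never called here): how the legacy
# AFF4 CronManager defined above is typically driven; the token is supplied by
# the caller and the helper name is hypothetical.
def _ExampleCronSweep(token=None):
  manager = CronManager()
  for job_id in manager.ListJobs(token=token):
    logging.info("Known cron job: %s", job_id)
  manager.RunOnce(token=token)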
def GetCronManager():
if data_store.RelationalDBReadEnabled(category="cronjobs"):
return cronjobs.CronManager()
return CronManager()
class SystemCronFlow(flow.GRRFlow):
"""SystemCronFlows are scheduled automatically on workers startup."""
frequency = rdfvalue.Duration("1d")
lifetime = rdfvalue.Duration("20h")
allow_overruns = False
# Jobs that are broken, or are under development can be disabled using
# the "enabled" attribute. These jobs won't get scheduled automatically,
# and will get paused if they were scheduled before.
enabled = True
__abstract = True # pylint: disable=g-bad-name
def _ValidateState(self):
# For normal flows it's a bug to write an empty state, here it's ok.
pass
@property
def disabled(self):
raise ValueError("Disabled flag is deprecated, use enabled instead.")
@disabled.setter
def disabled(self, _):
raise ValueError("Disabled flag is deprecated, use enabled instead.")
class StateReadError(Error):
pass
class StateWriteError(Error):
pass
class StatefulSystemCronFlow(SystemCronFlow):
"""SystemCronFlow that keeps a permanent state between iterations."""
__abstract = True
@property
def cron_job_urn(self):
return CronManager.CRON_JOBS_PATH.Add(self.__class__.__name__)
def ReadCronState(self):
# TODO(amoser): This is pretty bad, there is no locking for state.
try:
cron_job = aff4.FACTORY.Open(
self.cron_job_urn, aff4_type=CronJob, token=self.token)
res = cron_job.Get(cron_job.Schema.STATE_DICT)
if res:
return flow.AttributedDict(res.ToDict())
return flow.AttributedDict()
except aff4.InstantiationError as e:
raise StateReadError(e)
def WriteCronState(self, state):
if not state:
return
try:
with aff4.FACTORY.OpenWithLock(
self.cron_job_urn, aff4_type=CronJob, token=self.token) as cron_job:
cron_job.Set(cron_job.Schema.STATE_DICT(state))
except aff4.InstantiationError as e:
raise StateWriteError(e)
def ScheduleSystemCronFlows(names=None, token=None):
"""Schedule all the SystemCronFlows found."""
if data_store.RelationalDBReadEnabled(category="cronjobs"):
return cronjobs.ScheduleSystemCronJobs(names=names)
errors = []
for name in config.CONFIG["Cron.disabled_system_jobs"]:
try:
cls = registry.AFF4FlowRegistry.FlowClassByName(name)
except ValueError:
errors.append("No such flow: %s." % name)
continue
if not issubclass(cls, SystemCronFlow):
errors.append("Disabled system cron job name doesn't correspond to "
"a flow inherited from SystemCronFlow: %s" % name)
if names is None:
names = iterkeys(registry.AFF4FlowRegistry.FLOW_REGISTRY)
for name in names:
cls = registry.AFF4FlowRegistry.FlowClassByName(name)
if not issubclass(cls, SystemCronFlow):
continue
cron_args = rdf_cronjobs.CreateCronJobFlowArgs(
periodicity=cls.frequency,
lifetime=cls.lifetime,
allow_overruns=cls.allow_overruns)
cron_args.flow_runner_args.flow_name = name
if cls.enabled:
enabled = name not in config.CONFIG["Cron.disabled_system_jobs"]
else:
enabled = False
job_urn = CronManager.CRON_JOBS_PATH.Add(name)
with aff4.FACTORY.Create(
job_urn,
aff4_type=CronJob,
mode="rw",
token=token,
force_new_version=False) as cron_job:
# If the cronjob was already present we don't want to overwrite the
# original start_time.
existing_cron_args = cron_job.Get(cron_job.Schema.CRON_ARGS)
if cron_args != existing_cron_args:
cron_job.Set(cron_job.Schema.CRON_ARGS(cron_args))
cron_job.Set(cron_job.Schema.DISABLED(not enabled))
if errors:
raise ValueError(
"Error(s) while parsing Cron.disabled_system_jobs: %s" % errors)
class CronWorker(object):
"""CronWorker runs a thread that periodically executes cron jobs."""
def __init__(self, thread_name="grr_cron", sleep=60 * 5):
self.thread_name = thread_name
self.sleep = sleep
# SetUID is required to write cronjobs under aff4:/cron/
self.token = access_control.ACLToken(
username="GRRCron", reason="Implied.").SetUID()
def _RunLoop(self):
ScheduleSystemCronFlows(token=self.token)
while True:
if not master.MASTER_WATCHER.IsMaster():
time.sleep(self.sleep)
continue
try:
GetCronManager().RunOnce(token=self.token)
except Exception as e: # pylint: disable=broad-except
logging.error("CronWorker uncaught exception: %s", e)
time.sleep(self.sleep)
def Run(self):
"""Runs a working thread and waits for it to finish."""
self.RunAsync().join()
def RunAsync(self):
"""Runs a working thread and returns immediately."""
self.running_thread = threading.Thread(
name=self.thread_name, target=self._RunLoop)
self.running_thread.daemon = True
self.running_thread.start()
return self.running_thread
class CronJob(aff4.AFF4Volume):
"""AFF4 object corresponding to cron jobs."""
class SchemaCls(aff4.AFF4Volume.SchemaCls):
"""Schema for CronJob AFF4 object."""
CRON_ARGS = aff4.Attribute("aff4:cron/args",
rdf_cronjobs.CreateCronJobFlowArgs,
"This cron jobs' arguments.")
DISABLED = aff4.Attribute(
"aff4:cron/disabled",
rdfvalue.RDFBool,
"If True, don't run this job.",
versioned=False)
CURRENT_FLOW_URN = aff4.Attribute(
"aff4:cron/current_flow_urn",
rdfvalue.RDFURN,
"URN of the currently running flow corresponding to this cron job.",
versioned=False,
lock_protected=True)
LAST_RUN_TIME = aff4.Attribute(
"aff4:cron/last_run",
rdfvalue.RDFDatetime,
"The last time this cron job ran.",
"last_run",
versioned=False,
lock_protected=True)
LAST_RUN_STATUS = aff4.Attribute(
"aff4:cron/last_run_status",
rdf_cronjobs.CronJobRunStatus,
"Result of the last flow",
lock_protected=True,
creates_new_object_version=False)
STATE_DICT = aff4.Attribute(
"aff4:cron/state_dict",
rdf_protodict.AttributedDict,
"Cron flow state that is kept between iterations",
lock_protected=True,
versioned=False)
def IsRunning(self):
"""Returns True if there's a currently running iteration of this job."""
current_urn = self.Get(self.Schema.CURRENT_FLOW_URN)
if not current_urn:
return False
try:
current_flow = aff4.FACTORY.Open(
urn=current_urn, aff4_type=flow.GRRFlow, token=self.token, mode="r")
except aff4.InstantiationError:
# This isn't a flow, something went really wrong, clear it out.
logging.error("Unable to open cron job run: %s", current_urn)
self.DeleteAttribute(self.Schema.CURRENT_FLOW_URN)
self.Flush()
return False
return current_flow.GetRunner().IsRunning()
def DueToRun(self):
"""Called periodically by the cron daemon, if True Run() will be called.
Returns:
True if it is time to run based on the specified frequency.
"""
if self.Get(self.Schema.DISABLED):
return False
cron_args = self.Get(self.Schema.CRON_ARGS)
last_run_time = self.Get(self.Schema.LAST_RUN_TIME)
now = rdfvalue.RDFDatetime.Now()
    # It's time to run.
if (last_run_time is None or
now > cron_args.periodicity.Expiry(last_run_time)):
# Not due to start yet.
if now < cron_args.start_time:
return False
# Do we allow overruns?
if cron_args.allow_overruns:
return True
      # No currently executing job - let's go.
if self.Get(self.Schema.CURRENT_FLOW_URN) is None:
return True
return False
def StopCurrentRun(self, reason="Cron lifetime exceeded."):
current_flow_urn = self.Get(self.Schema.CURRENT_FLOW_URN)
if current_flow_urn:
flow.GRRFlow.TerminateAFF4Flow(
current_flow_urn, reason=reason, token=self.token)
self.Set(
self.Schema.LAST_RUN_STATUS,
rdf_cronjobs.CronJobRunStatus(
status=rdf_cronjobs.CronJobRunStatus.Status.TIMEOUT))
self.DeleteAttribute(self.Schema.CURRENT_FLOW_URN)
self.Flush()
def KillOldFlows(self):
"""Disable cron flow if it has exceeded CRON_ARGS.lifetime.
Returns:
      bool: True if the flow was killed.
"""
if not self.IsRunning():
return False
start_time = self.Get(self.Schema.LAST_RUN_TIME)
lifetime = self.Get(self.Schema.CRON_ARGS).lifetime
elapsed = rdfvalue.RDFDatetime.Now() - start_time
if lifetime and elapsed > lifetime:
self.StopCurrentRun()
stats_collector_instance.Get().IncrementCounter(
"cron_job_timeout", fields=[self.urn.Basename()])
stats_collector_instance.Get().RecordEvent(
"cron_job_latency", elapsed.seconds, fields=[self.urn.Basename()])
return True
return False
def Run(self, force=False):
"""Do the actual work of the Cron.
Will first check if DueToRun is True.
CronJob object must be locked (i.e. opened via OpenWithLock) for Run() to be
called.
Args:
force: If True, the job will run no matter what (i.e. even if DueToRun()
returns False).
Raises:
LockError: if the object is not locked.
"""
if not self.locked:
raise aff4.LockError("CronJob must be locked for Run() to be called.")
self.KillOldFlows()
# If currently running flow has finished, update our state.
current_flow_urn = self.Get(self.Schema.CURRENT_FLOW_URN)
if current_flow_urn:
current_flow = aff4.FACTORY.Open(current_flow_urn, token=self.token)
runner = current_flow.GetRunner()
if not runner.IsRunning():
if runner.context.state == rdf_flow_runner.FlowContext.State.ERROR:
self.Set(
self.Schema.LAST_RUN_STATUS,
rdf_cronjobs.CronJobRunStatus(
status=rdf_cronjobs.CronJobRunStatus.Status.ERROR))
stats_collector_instance.Get().IncrementCounter(
"cron_job_failure", fields=[self.urn.Basename()])
else:
self.Set(
self.Schema.LAST_RUN_STATUS,
rdf_cronjobs.CronJobRunStatus(
status=rdf_cronjobs.CronJobRunStatus.Status.OK))
start_time = self.Get(self.Schema.LAST_RUN_TIME)
elapsed = time.time() - start_time.AsSecondsSinceEpoch()
stats_collector_instance.Get().RecordEvent(
"cron_job_latency", elapsed, fields=[self.urn.Basename()])
self.DeleteAttribute(self.Schema.CURRENT_FLOW_URN)
self.Flush()
if not force and not self.DueToRun():
return
# Make sure the flow is created with cron job as a parent folder.
cron_args = self.Get(self.Schema.CRON_ARGS)
cron_args.flow_runner_args.base_session_id = self.urn
flow_urn = flow.StartAFF4Flow(
runner_args=cron_args.flow_runner_args,
args=cron_args.flow_args,
token=self.token,
sync=False)
self.Set(self.Schema.CURRENT_FLOW_URN, flow_urn)
self.Set(self.Schema.LAST_RUN_TIME, rdfvalue.RDFDatetime.Now())
self.Flush()
class CronHook(registry.InitHook):
"""Init hook for cron job metrics."""
pre = [aff4.AFF4InitHook, master.MasterInit]
def RunOnce(self):
"""Main CronHook method."""
# Start the cron thread if configured to.
if config.CONFIG["Cron.active"]:
self.cron_worker = CronWorker()
self.cron_worker.RunAsync()
class LegacyCronJobAdapterMixin(object):
"""Mixin used by DualDBSystemCronJob decorator to generate legacy classes."""
def Start(self):
self.Run()
def DualDBSystemCronJob(legacy_name=None, stateful=False):
"""Decorator that creates AFF4 and RELDB cronjobs from a given mixin."""
def Decorator(cls):
"""Decorator producing 2 classes: legacy style one and a new style one."""
if not legacy_name:
raise ValueError("legacy_name has to be provided")
# Legacy cron jobs have different base classes depending on whether they're
# stateful or not.
if stateful:
aff4_base_cls = StatefulSystemCronFlow
else:
aff4_base_cls = SystemCronFlow
# Make sure that we're dealing with a true mixin to avoid subtle errors.
if issubclass(cls, cronjobs.SystemCronJobBase):
raise ValueError("Mixin class shouldn't inherit from SystemCronJobBase")
if issubclass(cls, aff4_base_cls):
raise ValueError(
"Mixin class shouldn't inherit from %s" % aff4_base_cls.__name__)
# Generate legacy class. Register it within the module as it's not going
# to be returned from the decorator.
aff4_cls = compatibility.MakeType(
legacy_name, (cls, LegacyCronJobAdapterMixin, aff4_base_cls), {})
module = sys.modules[cls.__module__]
setattr(module, legacy_name, aff4_cls)
# Generate new class. No need to register it in the module (like the legacy
# one) since it will replace the original decorated class.
reldb_cls = compatibility.MakeType(
compatibility.GetName(cls), (cls, cronjobs.SystemCronJobBase), {})
return reldb_cls
return Decorator
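# Added illustration (not part of GRR): a minimal sketch of how DualDBSystemCronJob
# is meant to be applied, based only on the checks performed by the decorator above.
# The class body is wrapped in a helper function so that merely importing this module
# does not register the example job; the mixin contents are assumptions.
def _example_dual_db_cron_job_usage():
  """Returns an example RELDB cron job class produced by the decorator."""

  @DualDBSystemCronJob(legacy_name="ExampleLegacyCronJob")
  class ExampleCronJobMixin(object):
    """Plain mixin: no SystemCronJobBase/SystemCronFlow bases, defines Run()."""

    def Run(self):
      # Real mixins would do their periodic work here; both the generated
      # AFF4 class (via LegacyCronJobAdapterMixin.Start) and the RELDB class
      # call into this method.
      pass

  return ExampleCronJobMixin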
|
test_mcrouter_basic.py
|
# Copyright (c) 2015, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from threading import Thread
import time
from mcrouter.test.MCProcess import McrouterClient, Memcached
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestMcrouterBasic(McrouterTestCase):
config = './mcrouter/test/mcrouter_test_basic_1_1_1.json'
extra_args = []
def setUp(self):
# The order here corresponds to the order of hosts in the .json
self.mc = self.add_server(Memcached())
def get_mcrouter(self, additional_args=[]):
return self.add_mcrouter(
self.config, extra_args=self.extra_args + additional_args)
def test_basic_lease(self):
mcr = self.get_mcrouter()
result = mcr.leaseGet("testkey")
real_token = result["token"]
self.assertNotEqual(real_token, None)
result["value"] = "newvalue"
result["token"] = 42000
self.assertFalse(mcr.leaseSet("testkey", result))
result["token"] = real_token
self.assertTrue(mcr.leaseSet("testkey", result))
result2 = mcr.leaseGet("testkey")
self.assertEqual(result2["token"], None)
self.assertEqual(result2["value"], "newvalue")
# Test stale stored: lease-get followed by a delete
result = mcr.leaseGet("newtestkey")
self.assertFalse(mcr.delete("newtestkey"))
self.assertTrue(mcr.leaseSet("newtestkey", result, is_stalestored=True))
def test_invalid_key(self):
"""
Tests behavior when mcrouter routes keys which have prefixes that are
not in the config.
"""
mcr = self.get_mcrouter()
invalid_key = '/blah/bloh/key'
self.assertFalse(mcr.set(invalid_key, 'value'))
self.assertEqual(mcr.get(invalid_key), "SERVER_ERROR local error")
def test_stats_deadlock(self):
mcr = self.get_mcrouter(['--proxy-threads=8'])
def run_client(fail, port):
mc = McrouterClient(port)
mc.connect()
for i in range(1000):
s = mc.stats()
if not s:
fail[0] = True
return
f = [False]
ts = [Thread(target=run_client, args=(f, mcr.port)) for i in range(8)]
[t.start() for t in ts]
[t.join() for t in ts]
self.assertFalse(f[0])
def test_basic_cas(self):
mcr = self.get_mcrouter()
self.assertIsNone(mcr.cas('key', 'value', 1))
self.assertIsNone(mcr.gets('key'))
self.assertTrue(mcr.add('key', 'value'))
ret = mcr.gets('key')
self.assertIsNotNone(ret)
old_cas = ret['cas']
self.assertEqual(ret['value'], 'value')
self.assertTrue(mcr.cas('key', 'value2', ret["cas"]))
ret = mcr.gets('key')
self.assertEqual(ret['value'], 'value2')
self.assertNotEqual(old_cas, ret['cas'])
self.assertTrue(mcr.set('key', 'value2'))
self.assertFalse(mcr.cas('key', 'value3', ret['cas']))
self.assertEqual(mcr.gets('key')['value'], 'value2')
def test_shutdown(self):
mcr = self.get_mcrouter()
mcr.shutdown()
time.sleep(2)
self.assertFalse(mcr.is_alive())
def test_set_exptime(self):
mcr = self.get_mcrouter()
# positive
self.assertTrue(mcr.set('key', 'value', exptime=10))
self.assertEqual(mcr.get('key'), 'value')
# negative
self.assertTrue(mcr.set('key', 'value', exptime=-10))
self.assertIsNone(mcr.get('key'))
# future: year 2033
self.assertTrue(mcr.set('key', 'value', exptime=2000000000))
self.assertEqual(mcr.get('key'), 'value')
# past
self.assertTrue(mcr.set('key', 'value', exptime=1432250000))
self.assertIsNone(mcr.get('key'))
class TestMcrouterInvalidRoute(McrouterTestCase):
config = './mcrouter/test/mcrouter_test_basic_1_1_1.json'
extra_args = ['--send-invalid-route-to-default']
def setUp(self):
# The order here corresponds to the order of hosts in the .json
self.mc = self.add_server(Memcached())
def get_mcrouter(self, additional_args=[]):
return self.add_mcrouter(
self.config, extra_args=self.extra_args + additional_args)
def test_basic_invalid_route(self):
mcr = self.get_mcrouter()
self.assertTrue(mcr.set("key", "value"))
self.assertEqual(mcr.get("key"), "value")
self.assertTrue(mcr.set("/././key", "value2"))
self.assertEqual(mcr.get("/././key"), "value2")
self.assertEqual(mcr.get("/f/f/key"), "value2")
self.assertEqual(mcr.get("/test/test/key"), "value2")
self.assertEqual(mcr.get("key"), "value2")
self.assertTrue(mcr.set("/a/a/key", "value3"))
self.assertEqual(mcr.get("/a/a/key"), "value3")
self.assertEqual(mcr.get("key"), "value3")
self.assertTrue(mcr.set("/*/a/key", "value4"))
self.assertEqual(mcr.get("/a/a/key"), "value4")
self.assertEqual(mcr.get("key"), "value4")
self.assertTrue(mcr.set("/*/*/key", "value4"))
self.assertEqual(mcr.get("/a/a/key"), "value4")
self.assertEqual(mcr.get("key"), "value4")
class TestMcrouterBasic2(McrouterTestCase):
config = './mcrouter/test/mcrouter_test_basic_2_1_1.json'
extra_args = []
def setUp(self):
# The order here corresponds to the order of hosts in the .json
self.mc1 = self.add_server(Memcached())
self.mc2 = self.add_server(Memcached())
def get_mcrouter(self, additional_args=[]):
return self.add_mcrouter(
self.config, '/a/a/', extra_args=self.extra_args + additional_args)
def test_prefix_routing(self):
mcr = self.get_mcrouter()
# first test default routing prefix
self.mc1.set("cluster1_key", "cluster1")
self.assertEqual(mcr.get("cluster1_key"), "cluster1")
# next set to a remote cluster
mcr.set("/b/b/cluster2_key_router", "cluster2_router")
self.assertEqual(self.mc2.get("cluster2_key_router"), "cluster2_router")
# try fetching a value from a remote cluster
self.mc2.set("cluster2_key", "cluster2")
self.assertEqual(self.mc2.get("cluster2_key"), "cluster2")
self.assertEqual(mcr.get("/b/b/cluster2_key"), "cluster2")
def test_delete(self):
mcr = self.get_mcrouter()
mcr.set('foobarbizbang', 'some_value')
self.assertTrue(mcr.delete('foobarbizbang'))
self.assertFalse(mcr.delete('foobarbizbang2'))
self.assertTrue(mcr.set('hello', 'world'))
self.assertEqual(mcr.get('hello'), 'world')
def test_malformed_umbrella_length(self):
mcr = self.get_mcrouter()
# Send an umbrella request with a malformed length, and check that we
# get something back from the server (i.e. that it doesn't crash)
mcr.socket.settimeout(10)
mcr.socket.send('}}\x00\x01\x00\x00\x00\x00')
data = mcr.socket.recv(1024)
self.assertTrue(data)
# else hang
def test_use_big_value(self):
mcr = self.get_mcrouter(['--big-value-split-threshold=100'])
reply = mcr.get('__mcrouter__.route_handles(get,test)')
self.assertEqual(reply.count('big-value'), 1)
def test_no_big_value(self):
mcr = self.get_mcrouter()
reply = mcr.get('__mcrouter__.route_handles(get,test)')
self.assertNotIn('big-value', reply)
def test_enable_logging_route(self):
mcr = self.get_mcrouter(['--enable-logging-route'])
reply = mcr.get('__mcrouter__.route_handles(get,test)')
self.assertEqual(reply.count('logging'), 1)
def test_no_logging_route(self):
mcr = self.get_mcrouter()
reply = mcr.get('__mcrouter__.route_handles(get,test)')
self.assertNotIn('logging', reply)
class TestBasicAllSync(McrouterTestCase):
config = './mcrouter/test/test_basic_all_sync.json'
extra_args = []
def setUp(self):
# The order here corresponds to the order of hosts in the .json
self.mc1 = self.add_server(Memcached())
self.mc2 = self.add_server(Memcached())
self.mc3 = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_basic_all_sync(self):
"""
Tests that the responses are being aggregated and the most awful
(based on the awfulness map) is being returned
"""
mcr = self.get_mcrouter()
# set key in three cluster
self.mc1.set("key", "value")
self.mc2.set("key", "value")
self.mc3.set("key", "value")
self.assertEqual(self.mc1.get("key"), "value")
self.assertEqual(self.mc2.get("key"), "value")
self.assertEqual(self.mc3.get("key"), "value")
self.assertEqual(mcr.get("key"), "value")
# delete will return True on DELETED
# will return False on NOT_FOUND
# perform a delete and check the response
# the aggregated response should be DELETED
self.assertTrue(mcr.delete("key"))
# set key in only one cluster
self.mc1.set("key", "value")
self.assertEqual(self.mc1.get("key"), "value")
# the aggregated response should be NOT_FOUND
self.assertFalse(mcr.delete("key"))
class TestBasicAllFirst(McrouterTestCase):
config = './mcrouter/test/test_basic_all_first.json'
extra_args = []
def setUp(self):
# The order here corresponds to the order of hosts in the .json
self.mc1 = self.add_server(Memcached())
self.mc2 = self.add_server(Memcached())
self.mc3 = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_basic_all_first(self):
"""
Tests that the first non-tko response is returned
"""
mcr = self.get_mcrouter()
self.mc1.terminate()
self.assertTrue(mcr.set("key", "value"))
self.assertEqual(mcr.get("key"), "value")
class TestBasicAllMajority(McrouterTestCase):
config = './mcrouter/test/test_basic_all_majority.json'
extra_args = []
def setUp(self):
# The order here corresponds to the order of hosts in the .json
self.mc1 = self.add_server(Memcached())
self.mc2 = self.add_server(Memcached())
self.mc3 = self.add_server(Memcached())
self.mc4 = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_basic_all_majority(self):
"""
Tests that the majority response (ties broken by awfulness) is being
returned
"""
mcr = self.get_mcrouter()
# set key in four cluster
self.mc1.set("key", "value")
self.mc2.set("key", "value")
self.mc3.set("key", "value")
self.mc4.set("key", "value")
self.assertEqual(self.mc1.get("key"), "value")
self.assertEqual(self.mc2.get("key"), "value")
self.assertEqual(self.mc3.get("key"), "value")
self.assertEqual(self.mc4.get("key"), "value")
self.assertEqual(mcr.get("key"), "value")
# perform a delete and check the response
# the majority response should be DELETED
self.assertTrue(mcr.delete("key"))
# make sure all deletes complete (otherwise they can race
# with the sets below)
time.sleep(1)
# set key in three clusters
self.assertTrue(self.mc1.set("key", "value"))
self.assertTrue(self.mc2.set("key", "value"))
self.assertTrue(self.mc3.set("key", "value"))
self.assertEqual(self.mc1.get("key"), "value")
self.assertEqual(self.mc2.get("key"), "value")
self.assertEqual(self.mc3.get("key"), "value")
# the majority response should be DELETED
self.assertTrue(mcr.delete("key"))
# make sure all deletes complete (otherwise they can race
# with the sets below)
time.sleep(1)
# set key in only one clusters
self.mc1.set("key", "value")
self.assertEqual(self.mc1.get("key"), "value")
# the majority response should be NOT_FOUND
self.assertFalse(mcr.delete("key"))
# make sure all deletes complete (otherwise they can race
# with the sets below)
time.sleep(1)
# set key in two out of four clusters
self.mc1.set("key", "value")
self.mc2.set("key", "value")
self.assertEqual(self.mc1.get("key"), "value")
self.assertEqual(self.mc2.get("key"), "value")
# the majority response should be NOT_FOUND
# since it is sorted by awfulness map
self.assertFalse(mcr.delete("key"))
class TestBasicFailover(McrouterTestCase):
config = './mcrouter/test/test_basic_failover.json'
extra_args = []
def setUp(self):
# The order here corresponds to the order of hosts in the .json
self.mc1 = self.add_server(Memcached())
self.mc2 = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_failover(self):
"""
Tests that the failover path works.
"""
# default path is mctestc01
mcr = self.get_mcrouter()
# Go through the default route and verify a get.
self.assertTrue(self.mc1.set("key", "value"))
self.assertEqual(mcr.get("key"), "value")
self.mc1.terminate()
# Go through the failover now.
# We assert twice since in the first call mcrouter will discover
# a tko host and it short circuits the second time.
self.assertEqual(mcr.get("key"), None)
self.assertEqual(mcr.get("key"), None)
# Set in the failover and check.
self.assertTrue(self.mc2.set("key", "value"))
self.assertEqual(mcr.get("key"), "value")
self.assertEqual(mcr.get("key"), "value")
def test_failover_negative_exptime(self):
mcr = self.get_mcrouter()
# Go through the default route and verify a get.
self.assertTrue(mcr.set("key", "value", exptime=0))
self.assertEqual(mcr.get("key"), "value")
# Exptime using negative value: past
self.assertTrue(mcr.set("key", "value", exptime=-10))
self.assertIsNone(mcr.get("key"))
self.mc1.terminate()
# Go through the failover now.
# We assert twice since in the first call mcrouter will discover
# a tko host and it short circuits the second time.
self.assertEqual(mcr.get("key"), None)
self.assertEqual(mcr.get("key"), None)
# Check get failover still works
self.assertTrue(self.mc2.set("key", "value"))
self.assertEqual(mcr.get("key"), "value")
# Exptime using negative value: past
self.assertTrue(mcr.set("key", "value", exptime=-10))
self.assertIsNone(mcr.get("key"))
class TestBasicFailoverOverride(McrouterTestCase):
config = './mcrouter/test/test_basic_failover_override.json'
extra_args = []
def setUp(self):
# The order here corresponds to the order of hosts in the .json
self.mc1 = self.add_server(Memcached())
self.mc2 = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_failover_override(self):
"""
Tests that the failover overrides work.
"""
mcr = self.get_mcrouter()
# See that failovers are disabled for cluster1
self.mc1.terminate()
self.assertEqual(mcr.set("key1", "value1"), None)
self.assertEqual(mcr.get("key1"), None)
self.assertEqual(mcr.get("key1"), None)
# Check get failover still works
self.assertTrue(self.mc2.set("key2", "value2"))
self.assertEqual(mcr.get("key2"), "value2")
self.assertEqual(mcr.get("key2"), "value2")
class TestMcrouterBasicL1L2(McrouterTestCase):
config = './mcrouter/test/test_basic_l1_l2.json'
config_ncache = './mcrouter/test/test_basic_l1_l2_ncache.json'
extra_args = []
def setUp(self):
# The order here corresponds to the order of hosts in the .json
self.l1 = self.add_server(Memcached())
self.l2 = self.add_server(Memcached())
def get_mcrouter(self, config):
return self.add_mcrouter(config, extra_args=self.extra_args)
def test_l1_l2_get(self):
"""
Tests that gets using l1/l2 caching and result upgrading is working
"""
mcr = self.get_mcrouter(self.config)
# get a non-existent key
self.assertFalse(mcr.get("key1"))
# set keys in only l1 pool
self.l1.set("key1", "value1")
self.assertEqual(self.l1.get("key1"), "value1")
# perform a get and check the response
self.assertTrue(mcr.get("key1"), "value1")
# set key only in l2 pool
self.l2.set("key2", "value2")
self.assertEqual(self.l2.get("key2"), "value2")
self.assertEqual(self.l1.get("key2"), None)
# perform a get and check the response
self.assertEqual(mcr.get("key2"), "value2")
# perform the same get until it gets upgraded to l1
# if the test gets stuck in an infinite loop here upgrading results is
# not working
while self.l1.get("key2") != "value2":
self.assertEqual(mcr.get("key2"), "value2")
def test_l1_l2_get_l1_down(self):
"""
Tests that gets using l1/l2 caching is working when l1 is down
"""
mcr = self.get_mcrouter(self.config)
# set key in l1 and l2 pools
self.l1.set("key1", "value1")
self.l2.set("key1", "value1")
self.assertEqual(self.l1.get("key1"), "value1")
self.assertEqual(self.l2.get("key1"), "value1")
# terminate the l1 pool
self.l1.terminate()
# we should still be able to get from l2
self.assertEqual(mcr.get("key1"), "value1")
def test_l1_l2_get_l2_down(self):
"""
Tests that gets using l1/l2 caching is working when l2 is down
"""
mcr = self.get_mcrouter(self.config)
# set key in l1 and l2 pools
self.l1.set("key1", "value1")
self.l2.set("key1", "value1")
self.assertEqual(self.l1.get("key1"), "value1")
self.assertEqual(self.l2.get("key1"), "value1")
# terminate the l2 regional pool
self.l2.terminate()
# we should still be able to get from l1
self.assertTrue(mcr.get("key1"), "value1")
# terminate l1 pool as well
self.l1.terminate()
# we should get nothing back
self.assertFalse(mcr.get("key1"))
def test_l1_l2_get_ncache(self):
mcr = self.get_mcrouter(self.config_ncache)
# get a non-existent key
self.assertFalse(mcr.get("key1"))
time.sleep(1)
self.assertEqual(self.l1.get("key1"), "ncache")
self.assertTrue(self.l2.set("key1", "value1"))
self.assertFalse(mcr.get("key1"))
self.assertFalse(mcr.get("key1"))
self.assertFalse(mcr.get("key1"))
self.assertFalse(mcr.get("key1"))
self.assertFalse(mcr.get("key1"))
time.sleep(1)
self.assertEqual(mcr.get("key1"), "value1")
self.assertEqual(self.l1.get("key1"), "value1")
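# Added sketch (not part of the upstream suite): the minimal shape of a new test
# that follows the add_server()/add_mcrouter() pattern used above. It reuses an
# existing config file; the key/value names are arbitrary.
class TestMcrouterBasicExample(McrouterTestCase):
    config = './mcrouter/test/mcrouter_test_basic_1_1_1.json'

    def setUp(self):
        # The order here corresponds to the order of hosts in the .json
        self.mc = self.add_server(Memcached())

    def test_set_get_roundtrip(self):
        mcr = self.add_mcrouter(self.config)
        self.assertTrue(mcr.set('example_key', 'example_value'))
        self.assertEqual(mcr.get('example_key'), 'example_value')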
|
tx_recv_interactive.py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
import sys
import threading
from proton.reactor import ApplicationEvent, Container, EventInjector
from proton.handlers import MessagingHandler, TransactionHandler
class TxRecv(MessagingHandler, TransactionHandler):
def __init__(self):
super(TxRecv, self).__init__(prefetch=0, auto_accept=False)
def on_start(self, event):
self.container = event.container
self.conn = self.container.connect("localhost:5672")
self.receiver = self.container.create_receiver(self.conn, "examples")
self.container.declare_transaction(self.conn, handler=self, settle_before_discharge=True)
self.transaction = None
def on_message(self, event):
print(event.message.body)
self.transaction.accept(event.delivery)
def on_transaction_declared(self, event):
self.transaction = event.transaction
print("transaction declared")
def on_transaction_committed(self, event):
print("transaction committed")
self.container.declare_transaction(self.conn, handler=self)
def on_transaction_aborted(self, event):
print("transaction aborted")
self.container.declare_transaction(self.conn, handler=self)
def on_commit(self, event):
self.transaction.commit()
def on_abort(self, event):
self.transaction.abort()
def on_fetch(self, event):
self.receiver.flow(1)
def on_quit(self, event):
c = self.receiver.connection
self.receiver.close()
c.close()
try:
reactor = Container(TxRecv())
events = EventInjector()
reactor.selectable(events)
thread = threading.Thread(target=reactor.run)
thread.daemon=True
thread.start()
print("Enter 'fetch', 'commit' or 'abort'")
while True:
line = sys.stdin.readline()
if line:
events.trigger(ApplicationEvent(line.strip()))
else:
break
except KeyboardInterrupt: pass
|
p_executer.py
|
# -*- encoding: utf-8 -*-
'''
Current module: pyrunner.p_executer
Rough version history:
v1.0 Original version
v1.1 Added the 'launch_mobile' function
v2.1 Rebuilt this module on unittest and added support for multiple runners
********************************************************************
@AUTHOR: Administrator-Bruce Luo(罗科峰)
MAIL: lkf20031988@163.com
RCS: rtsf.p_executer,v 2.1 September 2, 2018
FROM: May 11, 2015
********************************************************************
======================================================================
UI and Web HTTP automation framework for Python.
'''
import unittest,sys,os
import multiprocessing,threading
from functools import partial
from rtsf.p_applog import logger
from rtsf.p_tracer import Tracer
from rtsf.p_testcase import YamlCaseLoader,parse_project_data
from rtsf import p_testcase, p_compat,p_exception
class TestCase(unittest.TestCase):
""" create a testcase.
"""
def __init__(self, test_runner, testcase_dict, variables):
super(TestCase, self).__init__()
self.test_runner = test_runner
self.testcase_dict = testcase_dict.copy()
self.variables = variables
def runTest(self):
""" run testcase and check result.
"""
self.test_runner._run_test(self.testcase_dict, self.variables)
class TestSuite(unittest.TestSuite):
""" create test suite with a testset, it may include one or several testcases.
each suite should initialize a separate Runner() with testset config.
@param
(dict) testset
{
"name": "testset description",
"project": {
"name": "project name",
"module": "testset description"
"data":[
{'csv': 'username_password.csv', 'by': 'Sequential'},
{'csv': 'devices.csv', 'by': 'Sequential'}
]
},
"cases": [
{
"name": "testcase description",
"tester": "", # optional
"responsible": "", # optional
"pre_command": [], # optional
"steps": [],
"post_command": {}, # optional
"verify": [] # optional
},
testcase12
]
}
"""
def __init__(self, testset, runner_cls):
super(TestSuite, self).__init__()
file_path = testset.get("file_path")
project = testset.get("project")
testcases = testset.get("cases", [])
project_data = project.pop("data",[])
test_runner = self.test_runner = runner_cls()
if not isinstance(test_runner._default_devices, (list, tuple)):
raise TypeError("_default_devices not a list or tuple.")
test_runner.init_runner(parser = p_testcase.TestCaseParser(file_path = file_path),
tracers = {device:Tracer(device_id = device, dir_name = os.path.dirname(os.path.abspath(file_path))) for device in test_runner._default_devices},
projinfo = project
)
for data_variables_dict in parse_project_data(project_data, file_path) or [{}]:
for testcase_dict in testcases:
self._add_test_to_suite(testcase_dict["name"], test_runner, testcase_dict, data_variables_dict)
def _add_test_to_suite(self, testcase_name, test_runner, testcase_dict, variables):
if p_compat.is_py3:
TestCase.runTest.__doc__ = testcase_name
else:
TestCase.runTest.__func__.__doc__ = testcase_name
test = TestCase(test_runner, testcase_dict, variables)
[self.addTest(test) for _ in range(int(testcase_dict.get("times", 1)))]
@property
def tests(self):
return self._tests
class TaskSuite(unittest.TestSuite):
""" create task suite with specified testcase path.
each task suite may include one or several test suite.
"""
def __init__(self, testsets, runner_cls):
"""
@params
testsets (dict/list): testset or list of testset
testset_dict
or
[
testset_dict_1,
testset_dict_2,
{
"name": "desc1",
"config": {},
"api": {},
"testcases": [testcase11, testcase12]
}
]
mapping (dict):
passed in variables mapping, it will override variables in config block
"""
super(TaskSuite, self).__init__()
if not testsets:
raise p_exception.TestcaseNotFound
if isinstance(testsets, dict):
testsets = [testsets]
self.suite_list = []
for testset in testsets:
suite = TestSuite(testset, runner_cls)
self.addTest(suite)
self.suite_list.append(suite)
@property
def tasks(self):
return self.suite_list
def init_test_suite(path_or_testsets, runner_cls):
if not p_testcase.is_testsets(path_or_testsets):
YamlCaseLoader.load_dependencies(path_or_testsets)
testsets = YamlCaseLoader.load_files(path_or_testsets)
else:
testsets = path_or_testsets
return TaskSuite(testsets, runner_cls)
class TestRunner(object):
def __init__(self, **kwargs):
""" initialize test runner
@param (dict) kwargs: key-value arguments used to initialize TextTestRunner
"""
runner_cls = kwargs.pop("runner", Runner)
if not callable(runner_cls) and not isinstance(runner_cls(), Runner):
raise p_exception.InstanceTypeError("Invalid runner, must be instance of Runner.")
self._runner_cls = runner_cls
self.runner = unittest.TextTestRunner(**kwargs)
def run(self, path_or_testsets):
""" start to run test with varaibles mapping
@param path_or_testsets: YAML/JSON testset file path or testset list
path: path could be in several type
- absolute/relative file path
- absolute/relative folder path
- list/set container with file(s) and/or folder(s)
testsets: testset or list of testset
- (dict) testset_dict
- (list) list of testset_dict
[
testset_dict_1,
testset_dict_2
]
"""
try:
self._task_suite = init_test_suite(path_or_testsets, self._runner_cls)
except p_exception.TestcaseNotFound:
logger.log_error("Testcases not found in {}".format(path_or_testsets))
sys.exit(1)
self.text_test_result = self.runner.run(self._task_suite)
return self
def gen_html_report(self):
html_report = []
for suite in self._task_suite.tasks:
proj_name = suite.test_runner.proj_info["name"]
reporters = suite.test_runner.tracers.values()
for reporter in reporters:
html_report.extend(reporter.generate_html_report(proj_name, proj_module=None))
return html_report
class Runner(object):
def __init__(self):
'''
@note: variables that may be overridden
_default_devices -> list type; used to generate the tracer map, format is `{device_id: tracer_obj}`
e.g.
default {"":tracer_obj} is used to generate a report for the local host;
{"192.168.0.1:5555":tracer_obj1, "192.168.0.2:5555":tracer_obj2} is used to generate reports for remote hosts when running with multiple processes
_default_drivers -> list type; defines the driver map, format is `(device_id, driver)`
e.g.
default ("", None) is used to run a case with a single driver;
[("192.168.0.1:5555", selenium_driver), ("192.168.0.2:5555", appium_driver), ...] is used by multiple processes to run cases with the specified drivers
'''
self._default_devices = [""]
self._default_drivers = [("",None)]
self._local_driver = True
def init_runner(self, parser, tracers, projinfo):
''' Initialize the instances needed before running test cases
@note: should not override
@param parser: instance of TestCaseParser
@param tracers: dict type for the instance of Tracer. Such as {"":tracer_obj} or {"192.168.0.1:5555":tracer_obj1, "192.168.0.2:5555":tracer_obj2}
@param projinfo: dict with project info; used like: self.proj_info["module"], self.proj_info["name"]
yaml case like:
- project:
name: xxx
module: xxxx
dict case like:
{"project": {"name": xxx, "module": xxxx}}
'''
self.parser = parser
self.tracers = tracers
self.proj_info = projinfo
def run_test(self, testcase_dict, variables, driver_map):
''' Define how to run a case. Override this method.
@param testcase_dict: yaml case
@param variables: dict of variables bound for data-driven tests
@param driver_map: tuple mapping a device id to a driver
'''
fn, _ = driver_map
reporter = self.tracers[fn]
parser = self.parser
parser.update_binded_variables(variables)
case_name = parser.eval_content_with_bind_actions(testcase_dict.get("name",u'rtsf'))
reporter.start(self.proj_info["module"], case_name, testcase_dict.get("responsible",u"rock feng"), testcase_dict.get("tester",u"rock feng"))
reporter.log_debug(u"===== run_test\n\t{}".format(testcase_dict))
reporter.section(u"------------section ok")
reporter.step(u"step ok")
reporter.normal(u"normal ok")
reporter.stop()
return reporter
def _run_test(self, testcase_dict, variables={}):
''' Dispatch the case for running, either locally or across the grid.
@param testcase_dict: yaml case
@param variables: dict type; defines the variables for data-driven tests
e.g.
default {} runs the case without data-driven variables
{"username":"test1","password":"123456"}
'''
if self._local_driver:
self.run_test(testcase_dict, variables, self._default_drivers[0])
else:
self._drivers = []
self._run_grid_multithread(partial(self.run_test, testcase_dict, variables), self._default_drivers)
def _run_grid_multiprocess(self, func, iterables):
''' Run cases with multiple processes to support selenium grid mode (multiple web drivers) and appium grid mode (multiple devices).
@param func: function object
@param iterables: iterable objects
'''
multiprocessing.freeze_support()
pool = multiprocessing.Pool()
pool_tracers = pool.map(func, iterables)
pool.close()
pool.join()
# The instances passed to pool.map are copied into the worker processes (their memory addresses change), so self.tracers is re-assigned from the pool results after the run completes.
self.tracers = dict(zip(self._default_devices, pool_tracers))
def _run_grid_multithread(self, func, iterables):
''' Run cases with multiple threads to support selenium grid mode (multiple web drivers) and appium grid mode (multiple devices).
@param func: function object
@param iterables: iterable objects
'''
threads = [threading.Thread(target=func, args=(x,)) for x in iterables]
for thread in threads:
    thread.setDaemon(True)
    thread.start()
# join only after all threads have been started, so the cases actually run concurrently
for thread in threads:
    thread.join()
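# Added sketch (illustration only, not part of rtsf): a custom Runner override and
# how it would be driven with TestRunner, following the docstrings above. The YAML
# path below is an assumed placeholder.
class _ExampleRunner(Runner):
    """Overrides run_test() as described in the Runner docstrings above."""

    def run_test(self, testcase_dict, variables, driver_map):
        device_id, _driver = driver_map
        reporter = self.tracers[device_id]
        parser = self.parser
        parser.update_binded_variables(variables)
        case_name = parser.eval_content_with_bind_actions(testcase_dict.get("name", u"example"))
        reporter.start(self.proj_info["module"], case_name,
                       testcase_dict.get("responsible", u""),
                       testcase_dict.get("tester", u""))
        reporter.step(u"example step")
        reporter.stop()
        return reporter


def _example_run(yaml_path="example_testset.yaml"):
    """Runs the (assumed) YAML testset with the example runner and returns the HTML report."""
    runner = TestRunner(runner=_ExampleRunner, verbosity=2)
    runner.run(yaml_path)
    return runner.gen_html_report()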
|
__init__.py
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import argparse
import os
import roslib.network
import roslib.packages
import rospy
import socket
import subprocess
import sys
import threading
from master_discovery_fkie.common import get_hostname
from node_manager_fkie.common import get_ros_home, masteruri_from_ros
from node_manager_fkie.file_watcher import FileWatcher
from node_manager_fkie.history import History
from node_manager_fkie.master_view_proxy import LaunchArgsSelectionRequest
from node_manager_fkie.name_resolution import NameResolution
from node_manager_fkie.progress_queue import InteractionNeededError
from node_manager_fkie.screen_handler import ScreenHandler, ScreenSelectionRequest, NoScreenOpenLogRequest
from node_manager_fkie.settings import Settings
from node_manager_fkie.ssh_handler import SSHhandler, AuthenticationRequest
from node_manager_fkie.start_handler import StartException, AdvRunCfg
from node_manager_fkie.start_handler import StartHandler, BinarySelectionRequest
PKG_NAME = 'node_manager_fkie'
__author__ = "Alexander Tiderko (Alexander.Tiderko@fkie.fraunhofer.de)"
__copyright__ = "Copyright (c) 2012 Alexander Tiderko, Fraunhofer FKIE/US"
__license__ = "BSD"
__version__ = "unknown" # git describe --tags --dirty --always
__date__ = "unknown" # git log -1 --date=iso
# PYTHONVER = (2, 7, 1)
# if sys.version_info < PYTHONVER:
# print 'For full scope of operation this application requires python version > %s, current: %s' % (str(PYTHONVER), sys.version_info)
HOSTS_CACHE = dict()
'''
the cache dictionary to store the results of tests for local hosts.
@see: L{is_local()}
'''
_LOCK = threading.RLock()
_MAIN_FORM = None
_SETTINGS = None
_SSH_HANDLER = None
_SCREEN_HANDLER = None
_START_HANDLER = None
_NAME_RESOLUTION = None
_HISTORY = None
_FILE_WATCHER = None
_FILE_WATCHER_PARAM = None
_QAPP = None
def settings():
'''
@return: The global settings
@rtype: L{Settings}
'''
return _SETTINGS
def ssh():
'''
@return: The SSH handler to handle the SSH connections
@rtype: L{SSHhandler}
'''
return _SSH_HANDLER
def screen():
'''
@return: The screen handler to the screens.
@rtype: L{ScreenHandler}
@see: U{http://linuxwiki.de/screen}
'''
return _SCREEN_HANDLER
def starter():
'''
@return: The start handler to handle the start of new ROS nodes on local or
remote machines.
@rtype: L{StartHandler}
'''
return _START_HANDLER
def nameres():
'''
@return: The name resolution object that translates a name to the host or
ROS master URI.
@rtype: L{NameResolution}
'''
return _NAME_RESOLUTION
def history():
'''
@return: The history of entered parameters.
@rtype: L{History}
'''
return _HISTORY
def filewatcher():
'''
@return: The file watcher object with all loaded configuration files.
@rtype: L{FileWatcher}
'''
return _FILE_WATCHER
def file_watcher_param():
'''
@return: The file watcher object with all configuration files referenced by
parameter value.
@rtype: L{FileWatcher}
'''
return _FILE_WATCHER_PARAM
def get_ros_hostname(url):
'''
Returns the host name used in a url, if it is a name. If it is an IP an
empty string will be returned.
@return: host or '' if url is an IP or invalid
@rtype: C{str}
'''
return NameResolution.get_ros_hostname(url)
def is_local(hostname, wait=False):
'''
Test whether the given host name is the name of the local host or not.
@param hostname: the name or IP of the host
@type hostname: C{str}
@return: C{True} if the hostname is local or None
@rtype: C{bool}
@raise Exception: on errors while resolving host
'''
if hostname is None:
return True
with _LOCK:
if hostname in HOSTS_CACHE:
if isinstance(HOSTS_CACHE[hostname], threading.Thread):
return False
return HOSTS_CACHE[hostname]
try:
socket.inet_aton(hostname)
local_addresses = ['localhost'] + roslib.network.get_local_addresses()
# check 127/8 and local addresses
result = hostname.startswith('127.') or hostname in local_addresses
with _LOCK:
HOSTS_CACHE[hostname] = result
return result
except socket.error:
# the hostname must be resolved => do it in a thread
if wait:
result = __is_local(hostname)
return result
else:
thread = threading.Thread(target=__is_local, args=(hostname,))
thread.daemon = True
with _LOCK:
HOSTS_CACHE[hostname] = thread
thread.start()
return False
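# Added note (illustration only): is_local() caches results in HOSTS_CACHE.
# With wait=True an unknown host is resolved synchronously; without it a
# resolver thread is started and a provisional False is returned until the
# cache is filled.
def _example_is_local_usage(hostname='localhost'):
    definite_answer = is_local(hostname, wait=True)  # resolve synchronously and cache
    cached_answer = is_local(hostname)               # now served from HOSTS_CACHE
    return definite_answer, cached_answer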
def __is_local(hostname):
'''
Test the hostname whether it is local or not. Uses socket.gethostbyname().
'''
try:
machine_addr = socket.gethostbyname(hostname)
except socket.gaierror:
with _LOCK:
HOSTS_CACHE[hostname] = False
return False
local_addresses = ['localhost'] + roslib.network.get_local_addresses()
# check 127/8 and local addresses
result = machine_addr.startswith('127.') or machine_addr in local_addresses
with _LOCK:
HOSTS_CACHE[hostname] = result
return result
def detect_version():
'''
Try to detect the current version from git, installed VERSION/DATE files or package.xml
'''
try:
global __version__
global __date__
pkg_path = roslib.packages.get_pkg_dir(PKG_NAME)
if pkg_path is not None and os.path.isfile("%s/VERSION" % pkg_path):
try:
with open("%s/VERSION" % pkg_path) as f:
version = f.read()
__version__ = version.strip()
with open("%s/DATE" % pkg_path) as f:
datetag = f.read().split()
if datetag:
__date__ = datetag[0]
except Exception as err:
print >> sys.stderr, "version detection error: %s" % err
elif os.path.isdir("%s/../.git" % settings().PACKAGE_DIR):
try:
os.chdir(settings().PACKAGE_DIR)
ps = subprocess.Popen(args=['git', 'describe', '--tags', '--dirty', '--always'], stdin=None, stdout=subprocess.PIPE, stderr=None)
output = ps.stdout.read()
ps.wait()
__version__ = output.strip()
ps = subprocess.Popen(args=['git', 'show', '-s', '--format=%ci'], stdin=None, stdout=subprocess.PIPE, stderr=None)
output = ps.stdout.read().split()
if output:
__date__ = output[0]
ps.wait()
except Exception as err:
print >> sys.stderr, "version detection error: %s" % err
else:
import xml.dom
import xml.dom.minidom as dom
ppath = roslib.packages.find_resource(PKG_NAME, 'package.xml')
if ppath:
doc = dom.parse(ppath[0])
version_tags = doc.getElementsByTagName("version")
if version_tags:
version = version_tags[0].firstChild.data
__version__ = version
else:
print >> sys.stderr, "version detection: no version tag in package.xml found!"
else:
print >> sys.stderr, "version detection: package.xml not found!"
except Exception as err:
print >> sys.stderr, "version detection error: %s" % err
def finish(*arg):
'''
Callback called on exit of the ros node.
'''
# close all ssh sessions
if _SSH_HANDLER is not None:
_SSH_HANDLER.close()
# save the launch history
if _HISTORY is not None:
try:
_HISTORY.storeAll()
except Exception as err:
print >> sys.stderr, "Error while store history: %s" % err
from node_manager_fkie.main_window import MainWindow
# stop all threads in the main window
if isinstance(_MAIN_FORM, MainWindow):
_MAIN_FORM.finish()
if _QAPP is not None:
_QAPP.exit()
def set_terminal_name(name):
'''
Change the terminal name.
@param name: New name of the terminal
@type name: C{str}
'''
sys.stdout.write("\x1b]2;%s\x07" % name)
def set_process_name(name):
'''
Change the process name.
@param name: New process name
@type name: C{str}
'''
try:
from ctypes import cdll, byref, create_string_buffer
libc = cdll.LoadLibrary('libc.so.6')
buff = create_string_buffer(len(name) + 1)
buff.value = name
libc.prctl(15, byref(buff), 0, 0, 0)
except:
pass
def init_settings():
global _SETTINGS
_SETTINGS = Settings()
def init_globals(masteruri):
'''
:return: True if the masteruri referred to localhost
:rtype: bool
'''
# initialize the global handler
global _SSH_HANDLER
global _SCREEN_HANDLER
global _START_HANDLER
global _NAME_RESOLUTION
global _HISTORY
global _FILE_WATCHER
global _FILE_WATCHER_PARAM
_SSH_HANDLER = SSHhandler()
_SCREEN_HANDLER = ScreenHandler()
_START_HANDLER = StartHandler()
_NAME_RESOLUTION = NameResolution()
_HISTORY = History()
_FILE_WATCHER = FileWatcher()
_FILE_WATCHER_PARAM = FileWatcher()
# test where the roscore is running (local or remote)
__is_local('localhost') # fill cache
return __is_local(get_hostname(masteruri)) # fill cache
def init_arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--version", action="version", version="%s %s" % ("%(prog)s", __version__))
parser.add_argument("-f", "--file", nargs=1, help="loads the given file as default on start")
parser.add_argument("-m", "--muri", nargs=1, default='', help="starts ROS master with given URI, usefull on hosts "
"with multiple interfaces. ROS_HOSTNAME will be set "
"to the host of this URI, but only if it is not an IP.")
group = parser.add_argument_group('echo')
group.add_argument("--echo", nargs=2, help="starts an echo dialog instead of node manager", metavar=('name', 'type'))
group.add_argument("--hz", action="store_true", help="shows only the Hz value instead of topic content in echo dialog")
group.add_argument("--ssh", action="store_true", help="connects via SSH")
return parser
def init_echo_dialog(prog_name, masteruri, topic_name, topic_type, hz=False, use_ssh=False):
'''
Initialize the environment to start an echo window.
'''
# start ROS-Master, if not currently running
# StartHandler._prepareROSMaster(masteruri)
name = '%s_echo' % prog_name
rospy.init_node(name, anonymous=True, log_level=rospy.INFO)
set_terminal_name(name)
set_process_name(name)
from node_manager_fkie.echo_dialog import EchoDialog
global _SSH_HANDLER
_SSH_HANDLER = SSHhandler()
return EchoDialog(topic_name, topic_type, hz, masteruri, use_ssh=use_ssh)
def init_main_window(prog_name, masteruri, launch_files=[]):
'''
Initialize the environment to start Node Manager.
'''
# start ROS-Master, if not currently running
StartHandler._prepareROSMaster(masteruri)
# setup the loglevel
try:
log_level = getattr(rospy, rospy.get_param('/%s/log_level' % prog_name, "INFO"))
except Exception as err:
print("Error while set the log level: %s\n->INFO level will be used!" % err)
log_level = rospy.INFO
rospy.init_node(prog_name, anonymous=False, log_level=log_level)
set_terminal_name(prog_name)
set_process_name(prog_name)
from node_manager_fkie.main_window import MainWindow
local_master = init_globals(masteruri)
return MainWindow(launch_files, not local_master, launch_files)
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%% MAIN %%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def main(name):
'''
Start the NodeManager or EchoDialog.
:param name: the name propagated to the rospy.init_node()
:type name: str
'''
try:
from python_qt_binding.QtGui import QApplication
except:
try:
from python_qt_binding.QtWidgets import QApplication
except:
print >> sys.stderr, "please install 'python_qt_binding' package!!"
sys.exit(-1)
init_settings()
detect_version()
parser = init_arg_parser()
args = rospy.myargv(argv=sys.argv)
parsed_args = parser.parse_args(args[1:])
if parsed_args.muri:
masteruri = parsed_args.muri[0]
hostname = NameResolution.get_ros_hostname(masteruri)
os.environ['ROS_MASTER_URI'] = masteruri
if hostname:
os.environ['ROS_HOSTNAME'] = hostname
masteruri = settings().masteruri()
# Initialize Qt
global _QAPP
_QAPP = QApplication(sys.argv)
# decide to show main or echo dialog
global _MAIN_FORM
try:
if parsed_args.echo:
_MAIN_FORM = init_echo_dialog(name, masteruri, parsed_args.echo[0],
parsed_args.echo[1], parsed_args.hz,
parsed_args.ssh)
else:
_MAIN_FORM = init_main_window(name, masteruri, parsed_args.file)
except Exception as err:
sys.exit("%s" % err)
exit_code = 0
# resize and show the qt window
if not rospy.is_shutdown():
# change path for access to the images of descriptions
os.chdir(settings().PACKAGE_DIR)
# _MAIN_FORM.resize(1024, 720)
screen_size = QApplication.desktop().availableGeometry()
if (_MAIN_FORM.size().width() >= screen_size.width() or
_MAIN_FORM.size().height() >= screen_size.height() - 24):
_MAIN_FORM.showMaximized()
else:
_MAIN_FORM.show()
exit_code = -1
rospy.on_shutdown(finish)
exit_code = _QAPP.exec_()
return exit_code
|
manager.py
|
#!/usr/bin/env python3
import os
import time
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import datetime
import textwrap
from typing import Dict, List
from selfdrive.swaglog import cloudlog, add_logentries_handler
from common.basedir import BASEDIR, PARAMS
from common.android import ANDROID
WEBCAM = os.getenv("WEBCAM") is not None
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
TOTAL_SCONS_NODES = 1140
prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
if ANDROID:
os.chmod("/dev/shm", 0o777)
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
# whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
if __name__ == "__main__" and ANDROID:
from common.spinner import Spinner
from common.text_window import TextWindow
else:
from common.spinner import FakeSpinner as Spinner
from common.text_window import FakeTextWindow as TextWindow
import importlib
import traceback
from multiprocessing import Process
# Run scons
spinner = Spinner()
spinner.update("0")
if not prebuilt:
for retry in [True, False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else "-j%d" % (nproc - 1)
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline() # type: ignore
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
if spinner is not None:
spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n') # type: ignore
compile_output += r
if retry:
print("scons build failed, cleaning in")
for i in range(3, -1, -1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache")
else:
# Build failed, log errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("Openpilot failed to build\n \n" + error_s) as t:
t.wait_for_exit()
exit(1)
else:
break
import cereal
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from common import android
from common.apk import update_apks, pm_apply_packages, start_offroad
from common.manager_helpers import print_cpu_usage
ThermalStatus = cereal.log.ThermalData.ThermalStatus
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
"uploader": "selfdrive.loggerd.uploader", # delete able
"deleter": "selfdrive.loggerd.deleter", # delete able
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.controls.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]), # delete able
"logmessaged": "selfdrive.logmessaged", # delete able
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]), # delete able
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": ("selfdrive/locationd", ["./paramsd"]),
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
"updated": "selfdrive.updated", # delete able
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"driverview": "selfdrive.controls.lib.driverview", # delete able
"appd": "selfdrive.kyd.appd.appd",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes: List[str] = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord', 'paramsd']
# processes to end if thermal conditions exceed Green parameters
green_temp_processes = ['uploader']
persistent_processes = [
'thermald',
'logmessaged',
'ui',
'uploader',
]
if ANDROID:
persistent_processes += [
'logcatd',
'tombstoned',
'updated',
'deleter',
'appd',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'dmonitoringd',
'calibrationd',
'paramsd',
'camerad',
'modeld',
'proclogd',
'ubloxd',
'locationd',
]
if WEBCAM:
car_started_processes += [
'dmonitoringmodeld',
]
if ANDROID:
car_started_processes += [
'sensord',
'clocksd',
'gpsd',
'dmonitoringmodeld',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
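# Added sketch (not part of openpilot): how a fork could register an extra managed
# process. The module path is a made-up placeholder; the call is wrapped in a
# function so nothing is registered unless it is invoked explicitly.
def _example_register_custom_process():
  register_managed_process("exampled", "selfdrive.example.exampled", car_started=True)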
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc],
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")):
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
join_process(running[name], 5)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if ANDROID:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if ANDROID:
os.chmod(BASEDIR, 0o755)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock('thermal')
if os.getenv("GET_CPU_USAGE"):
proc_sock = messaging.sub_sock('procLog', conflate=True)
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
params = Params()
EnableDriverMonitoring = int(params.get('OpkrEnableDriverMonitoring'))
EnableLogger = int(params.get('OpkrEnableLogger'))
if not EnableDriverMonitoring:
car_started_processes.remove( 'dmonitoringd' )
car_started_processes.remove( 'dmonitoringmodeld' )
if not EnableLogger:
car_started_processes.remove( 'loggerd' )
persistent_processes.remove( 'logmessaged' )
persistent_processes.remove( 'uploader' )
persistent_processes.remove( 'logcatd' )
persistent_processes.remove( 'updated' )
persistent_processes.remove( 'deleter' )
else:
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if ANDROID:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
logger_dead = False
start_t = time.time()
first_proc = None
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
# heavyweight batch processes are gated on favorable thermal conditions
if msg.thermal.thermalStatus >= ThermalStatus.yellow:
for p in green_temp_processes:
if p in persistent_processes:
kill_managed_process(p)
else:
for p in green_temp_processes:
if p in persistent_processes:
start_managed_process(p)
if msg.thermal.freeSpace < 0.1:
logger_dead = True
if msg.thermal.started and "driverview" not in running:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
for p in reversed(car_started_processes):
kill_managed_process(p)
# this is ugly
if "driverview" not in running and params.get("IsDriverViewEnabled") == b"1":
start_managed_process("driverview")
elif "driverview" in running and params.get("IsDriverViewEnabled") == b"0":
kill_managed_process("driverview")
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
if os.getenv("GET_CPU_USAGE"):
dt = time.time() - start_t
# Get first sample
if dt > 30 and first_proc is None:
first_proc = messaging.recv_sock(proc_sock)
# Get last sample and exit
if dt > 90:
last_proc = messaging.recv_sock(proc_sock, wait=True)
cleanup_all_processes(None, None)
sys.exit(print_cpu_usage(first_proc, last_proc))
def manager_prepare(spinner=None):
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Spinner has to start from 70 here
total = 100.0 if prebuilt else 30.0
for i, p in enumerate(managed_processes):
if spinner is not None:
spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),))
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
android.reboot(reason="recovery")
def main():
os.environ['PARAMS_PATH'] = PARAMS
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
# disable bluetooth
os.system('service call bluetooth_manager 8')
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "0"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "0"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("IsGeofenceEnabled", "-1"),
("SpeedLimitOffset", "0"),
("LongitudinalControl", "0"),
("LimitSetSpeed", "0"),
("LimitSetSpeedNeural", "0"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("LaneChangeEnabled", "1"),
("IsDriverViewEnabled", "0"),
("IsOpenpilotViewEnabled", "0"),
("OpkrAutoShutdown", "0"),
("OpkrAutoScreenOff", "0"),
("OpkrUIBrightness", "0"),
("OpkrEnableDriverMonitoring", "1"),
("OpkrEnableLogger", "0"),
("OpkrEnableGetoffAlert", "1"),
("OpkrEnableLearner", "1"),
("OpkrAutoResume", "1"),
("OpkrTraceSet", "1"),
("OpkrWhoisDriver", "0"),
("OpkrTuneStartAt", "0"),
("OpkrAccelProfile", "0"), #악셀프로파일 0:미사용, 1:브드럽게,2:보통,3:빠르게
("OpkrAutoLanechangedelay", "0"),
("OpkrRunMixplorer", "0"),
("OpkrRunQuickedit", "0"),
("OpkrRunSoftkey", "0"),
("OpkrRunNavigation", "0"),
("OpkrBootNavigation", "0"),
("FingerprintIssuedFix", "0"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if ANDROID:
update_apks()
manager_init()
manager_prepare(spinner)
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except SystemExit:
raise
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(3)
error = "Manager failed to start\n \n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
Device.py
|
# -*- coding: utf-8 -*-
"""
This file contains functions that can access internal device
information and features.
-SSHFS information
-Username
-Shared directory
-File access
-Capability (macOS, Linux and Windows)
-Bluetooth
-Camera
-Battery
-Camera access (WebRTC)
"""
from selenium import webdriver
from geopy.geocoders import Nominatim
from selenium.common.exceptions import NoSuchWindowException
import getpass
import os
import json
import requests
import sys
import facebook
import urllib
import threading, time
import threading
utf8_convert = {'ğ':'g', 'Ğ':'G', 'ç':'c', 'Ç':'C', 'ş':'s', 'Ş':'S', 'ı':'i', 'İ':'I', 'ö':'o', 'Ö':'O', 'Ü':'U', 'ü':'u'}
unidict = {k.decode('utf8'): v.decode('utf8') for k, v in utf8_convert.items()}
def convert(let):
if let in unidict.keys():
return unidict[let]
else:
return let
def string_normalizer(strr):
return ''.join([convert(i) for i in strr])
def get_location():
send_url = "http://ip-api.com/json"
response = requests.get(send_url)
data = json.loads(response.text)
geolocator = Nominatim()
location = geolocator.reverse(str(data["lat"]) + " , " + str(data["lon"]))
return json.dumps({'location': location.address})
def get_sshfs_info(rootdir='/../shared'):
"""
Returns ssh-username and shared directory in json
"""
user_name = getpass.getuser()
path = os.getcwd() + rootdir
direc = {'username': user_name, 'path': path}
return json.dumps(direc)
def get_directory_structure(dict=None, rootdir='../shared'):
"""
Return directory structure in format
{
"Dirname": directory_name,
"SubDirs": [list_of_subfolder],
"File":
[
{
"filename": file_name
"path: absolute_path
},
....
]
"""
if not os.path.isfile(rootdir):
try:
dict["DirName"] = rootdir.split('/')[-1]
n_dic = {"DirName": '', "SubDirs": [], "Files": []}
for f in os.listdir(rootdir):
if not os.path.isfile(os.path.join(rootdir, f)):
subfolder = os.path.join(rootdir, f)
n_dic = get_directory_structure(dict=n_dic, rootdir=subfolder)
if os.path.isdir(subfolder):
dict["SubDirs"].append(n_dic)
else:
# f_name = rootdir.split('/')[-1]
f_name = f
dict["Files"].append({"filename": f_name, "path": os.path.abspath(rootdir)})
except Exception as e:
print 'Error occurred: ', str(e)
return dict
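# Illustrative usage sketch (not part of the original module): walk a folder and
# inspect the captured structure. The '../shared' path is only an assumption; any
# readable directory works. The seed dict must contain the three expected keys.
#
# tree = get_directory_structure(dict={"DirName": '', "SubDirs": [], "Files": []},
#                                rootdir='../shared')
# print json.dumps(tree, indent=2)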
def create_virtual_filesystem(dict=None, path='./deneme'):
"""
Creating a meta directory structure from dictionary
"""
path = os.path.join(os.getcwd(), path)
try:
dir_name = dict['DirName']
dir_path = os.path.join(path, dir_name)
os.mkdir(dir_path)
for f in dict['Files']:
f_name = f['filename']
f_path = os.path.join(dir_path, f_name)
fdesc = open(f_path, 'wb')
fdesc.close()
for d in dict['SubDirs']:
create_virtual_filesystem(dict=d, path=dir_path)
except Exception as e:
print 'Error occurred: ', str(e)
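# Illustrative sketch (assumption: a './deneme' directory already exists and is
# writable): mirror the structure captured by get_directory_structure as empty
# placeholder files and folders.
#
# tree = get_directory_structure(dict={"DirName": '', "SubDirs": [], "Files": []},
#                                rootdir='../shared')
# create_virtual_filesystem(dict=tree, path='./deneme')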
def get_file(path):
"""
Returns requested file descriptor
"""
try:
requested_file = (open(path, "rb")).read()
return requested_file
except Exception as e:
print e
def get_capability(platform='Darwin'):
"""
Returns platform specific capabilities in json
"""
def formatter(cap):
if cap != '':
return (cap[:-1]).split(': ')[-1]
else:
return 'NOT FOUND'
try:
if platform == 'Darwin':
command_list = {
'model_name': 'system_profiler SPHardwareDataType | grep "Model Identifier"',
'camera': 'system_profiler SPCameraDataType | grep "Model ID"',
'charge_remaining': 'system_profiler SPPowerDataType | grep "Charge Remaining"',
'charge_full': 'system_profiler SPPowerDataType | grep "Full Charge Capacity"',
'bluetooth_addr': 'system_profiler SPBluetoothDataType | grep "Address"',
'bluetooth_le': 'system_profiler SPBluetoothDataType | grep "Bluetooth Low Energy Supported"',
}
model_name = formatter(os.popen(command_list['model_name']).read())
camera = formatter(os.popen(command_list['camera']).read())
charge_remaining = formatter(os.popen(command_list['charge_remaining']).read())
charge_full = formatter(os.popen(command_list['charge_full']).read())
bluetooth_addr = formatter(os.popen(command_list['bluetooth_addr']).read())
bluetooth_le = formatter(os.popen(command_list['bluetooth_le']).read())
charge = (int((int(charge_remaining)/float(charge_full))*100))
elif platform == 'Linux':
model_name_command = 'cat /sys/devices/virtual/dmi/id/product_name'
model_name = os.popen(model_name_command).read()[:-1]
battery_dir = os.popen('upower -e | grep BAT0').read()[:-1]
charge_command = 'upower -i ' + battery_dir + ' | grep percentage'
charge = os.popen(charge_command).read()[:-2].split(': ')[-1]
bluetooth_addr_command = 'hciconfig | grep BD'
bluetooth_addr = os.popen(bluetooth_addr_command).read()[13:30]
# Works with 1 camera. Command returns list of all cameras.
camera_command = 'for I in /sys/class/video4linux/*; do cat $I/name; done'
camera = os.popen(camera_command).read()[:-1]
bluetooth_le = "NOT FOUND"
elif platform == 'Windows':
txt = open("WindowsDependencies/capabilities.txt")
capabilitiesJson = txt.read()
dictionary = json.loads(capabilitiesJson)
model_name = dictionary["model_name"]
charge = dictionary["charge"]
bluetooth_addr = dictionary["bluetooth_addr"]
camera = dictionary["camera"]
bluetooth_le = dictionary["bluetooth_le"]
except Exception as e:
print e
dict = {
"model_name": model_name, "camera": camera, "charge": charge,
"bluetooth_addr": bluetooth_addr, "bluetooth_le": bluetooth_le,
"sensors": [], "type": "desktop"
}
return str(dict)
def get_camera_webrtc(browser='FIREFOX', room_id=''):
"""
Returns WebRTC roomd id for online webcam session
For client, it only connects to room
For server, it generates a room and connect to it
"""
if browser == 'FIREFOX':
address = "http://apprtc.appspot.com/r/" + room_id
# address = "http://app.rtc/r/" + room_id
profile = webdriver.FirefoxProfile()
profile.set_preference('media.navigator.permission.disabled', True)
profile.update_preferences()
driver = webdriver.Firefox(profile)
driver.get(address)
driver.find_element_by_id('confirm-join-button').click()
def facebook_login(app_key='OBFUSCATED', scope='user_friends'):
auth_url = 'https://www.facebook.com/dialog/oauth?client_id=%s&redirect_uri=%s&response_type=token&scope=%s' \
% (urllib.quote(app_key),
'https://www.facebook.com/connect/login_success.html', urllib.quote(scope))
try:
profile = webdriver.FirefoxProfile()
profile.set_preference('webdriver.load.strategy', 'unstable')
driver = webdriver.Firefox(profile)
driver.set_page_load_timeout(5)
driver.get(auth_url)
st = time.time()
is_login = True
if 'dialog' not in driver.current_url:
is_login = False
while 'dialog' in driver.current_url and is_login:
if time.time() - st > 60:
is_login = False
break
if is_login:
raw_token = driver.current_url
token = raw_token[raw_token.index('=') + 1:raw_token.index('&')]
driver.close()
graph = facebook.GraphAPI(access_token=token, version='2.2')
return graph
driver.close()
return 'NOT SUCCESSFUL'
except NoSuchWindowException:
print 'Error occurred during Facebook login: Browser is closed.'
except Exception as e:
print 'Error occurred during Facebook login: ', str(e)
"""
TODO: Need thread otherwise it blocks main app
def facebook_login(scope='user_friends'):
t = threading.Thread(target=facebook_login_body)
t.start()
"""
|
testprogram.py
|
#! /usr/bin/python3
# The MIT License (MIT)
#
# Copyright(c) 2021, Damien Feneyrou <dfeneyrou@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import math
import threading
import asyncio
# If palanteer module is not found, it imports the stub (in ./tools) which defines all APIs as no-op
# This way, a program can be distributed without any Palanteer installation requirement
try:
from palanteer import *
except ModuleNotFoundError:
from palanteer_stub import *
# This file is a test program with multiple purposes:
# - show an example of Python instrumentation
# - have a way to measure speed performance in a specific case
# - be a part of Palanteer internal tests, by using all instrumentation APIs and features
# =======================
# Random number generator
# =======================
class RandomLCM:
def __init__(self):
self.lastValue = 14695981039346656037
self.mx = threading.Lock()
@plFunction
def get(self, minValue, maxValue):
plLockWait("Random generator")
self.mx.acquire()
plLockState("Random generator", True)
generatedNumber = int(
minValue + (self._next() * (maxValue - minValue) / (1 << 32))
)
plLockState("Random generator", False)
self.mx.release()
plData("Number", generatedNumber)
return generatedNumber
def _next(self):
# It is a really dummy random generator
x = self.lastValue
x ^= x << 13
x ^= x >> 17
x ^= x << 5
self.lastValue = ((self.lastValue ^ x) * 1099511628211) & 0x7FFFFFFFFFFFFFFF
return x & 0xFFFFFFFF
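# Illustrative sketch (not part of the test program): the generator is shared by all
# threads and returns an integer in [minValue, maxValue).
#
# gen = RandomLCM()
# print([gen.get(0, 10) for _ in range(5)])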
# ==============================
# Global context
# ==============================
class GroupSynchro:
def __init__(self, name):
self.name = "%ssynchro" % name.replace("/", " ")
self.mx = threading.Lock()
self.cv = threading.Condition(self.mx)
self.message = None
globalSharedMx = threading.Lock()
globalRandomGenerator = RandomLCM()
# =============
# Crash helpers
# =============
def crashSubContractor(crashKind):
if crashKind == 0:
print("%d" % (1 / 0)) # deliberate division by zero
elif crashKind == 1:
a = range(5)[6]
elif crashKind == 2:
assert 0, "This is an assertion-based crash (crashKind=%d)" % crashKind
elif crashKind == 3:
sys.exit(1)
def doCrash_Please(crashKind):
crashSubContractor(crashKind)
# ===================================
# Functions of the "associated" task
# ===================================
def busyWait(kRoundQty):
cosSum = 14.0
for i in range(100 * kRoundQty):
cosSum += math.cos(0.1 * i)
return cosSum
# Thread entry point
def associatedTask(synchro, crashKind):
plDeclareThread(threading.current_thread().name)
dummyValue = 0.0
iterationNbr = 0
while 1:
# Wait for an order to do something
plLockWait(synchro.name)
synchro.mx.acquire()
synchro.cv.wait_for(lambda: synchro.message is not None)
plLockState(synchro.name, True)
# Get the command from the "control" thread of the group
command, synchro.message = synchro.message, None
plLockState(synchro.name, False)
synchro.mx.release()
if command == "stop":
break # End of thread
# Marker of a great event
if iterationNbr == 4:
plMarker("important", "5th iteration reached!")
# Do something
plBegin("SomeWork")
dummyValue += busyWait(globalRandomGenerator.get(1500, 4000))
# Crash if required
if crashKind >= 0 and iterationNbr == 3:
doCrash_Please(crashKind) # Crash at 3rd iteration if crash required
iterationNbr += 1
plEnd("SomeWork")
plBegin("Final result")
plData("Dummy value", dummyValue)
plEnd("Final result")
# ==============================
# Functions of the "control" task
# ==============================
@plFunction
def otherSubTask(taskNbr, iterNbr):
plData("taskNbr", taskNbr)
plData("iterNbr", iterNbr)
# Allocate something
dummyAlloc = [1] * globalRandomGenerator.get(1000, 5000)
# Compute something
dummyValue = busyWait(globalRandomGenerator.get(500, 1000))
# Deallocate (no real effect in Python as objects go back to internal object pools)
dummyAlloc = None
plBegin("doSomethingUseful")
dummyValue += busyWait(globalRandomGenerator.get(100, 500))
for i in range((7 * taskNbr * iterNbr) % 3):
plBegin("Partial work")
dummyValue += busyWait(globalRandomGenerator.get(100, 500))
plEnd("Partial work")
plEnd("doSomethingUseful")
# Log something visual
x = 0.2 * (0.25 * taskNbr + iterNbr) + 0.1
plData("exp(x)/x", math.exp(x) / x)
return dummyValue
@plFunction
def subTaskUsingSharedResource(taskNbr, iterNbr):
fruits = ["apple", "orange", "durian", "banana", "grenada"]
vegetables = ["carrot", "onion", "bean", "patato"]
plData(
"input value##hexa", taskNbr
) # This "hexa" unit is special, this integer value will be displayed in hexadecimal on viewer
# Compute something
dummyValue = busyWait(150)
# Allocate something
dummyAlloc = [1] * globalRandomGenerator.get(100, 500)
superList = []
for i in range(5000):
plBegin("Add fruit")
superList.append(fruits[(taskNbr + i * 7) % 5])
plEnd()
plData("Last one", superList[-1])
plData(
"Ingredient for the soup##ingredient", vegetables[(taskNbr + iterNbr * 7) % 4]
) # The unit is declared as "ingredient"
# Log something visual
plData(
"Computation output##parsec", math.cos(1.5 * (0.25 * taskNbr + iterNbr))
) # The unit is declared as "parsec"
return dummyValue
# Thread entry point
@plFunction
def controlTask(synchro, durationMultiplier):
someStrings = ["Even", "Odd"]
plDeclareThread(threading.current_thread().name)
iterationQty = 10 * durationMultiplier
dummyValue = 0
allocationList = []
plFreezePoint()
for iterNbr in range(iterationQty):
if globalRandomGenerator.get(0, 100) >= 45:
# Allocate a new list
allocationList.append([1] * globalRandomGenerator.get(2000, 10000))
else:
# Deallocate
if allocationList:
del allocationList[0]
# Wait a bit
time.sleep(0.001 * globalRandomGenerator.get(20, 60))
# Prepare the work
plBegin("Iteration")
plData("iterNbr", iterNbr)
plData("iterationQty", iterationQty)
# Dynamic but still external string compatible markers
plMarker("Count", someStrings[iterNbr % 2])
taskQty = globalRandomGenerator.get(1, 4)
dummyValue += busyWait(globalRandomGenerator.get(500, 2500))
for taskNbr in range(taskQty):
plBegin("Task")
plData("Task number", taskNbr)
# Work with some shared resource
dummyValue += busyWait(globalRandomGenerator.get(300, 1000))
plLockWait("Shared resource")
globalSharedMx.acquire()
plLockState("Shared resource", True)
dummyValue += subTaskUsingSharedResource(taskNbr, iterNbr)
plLockState("Shared resource", False)
globalSharedMx.release()
dummyValue += busyWait(globalRandomGenerator.get(10, 200))
dummyValue += otherSubTask(taskNbr, iterNbr)
plEnd("Task")
# Send a signal to the associated task
synchro.mx.acquire()
synchro.message = (
"stop" if (iterNbr == iterationQty - 1) else "work, lazy thread!"
)
plLockNotify(synchro.name)
synchro.cv.notify()
synchro.mx.release()
plEnd("Iteration")
plBegin("Final result")
plData("Dummy value", dummyValue)
plEnd("Final result")
# End of the thread
# ==============================
# AsyncIO worker task
# ==============================
async def baseFunc():
busyWait(globalRandomGenerator.get(10, 25))
await asyncio.sleep(0.01 * globalRandomGenerator.get(1, 3))
busyWait(globalRandomGenerator.get(10, 25))
await asyncio.sleep(0.01 * globalRandomGenerator.get(1, 3))
async def loadTexture():
await baseFunc()
async def updateParticules():
await baseFunc()
async def animateChainsaw():
await baseFunc()
async def skeletonInterpolation():
await baseFunc()
async def fogOfWarGeneration():
await baseFunc()
async def freeArenaMemoryPools():
await baseFunc()
async def asyncRunner():
jobKinds = (
loadTexture,
updateParticules,
animateChainsaw,
skeletonInterpolation,
fogOfWarGeneration,
freeArenaMemoryPools,
)
for i in range(30):
await jobKinds[globalRandomGenerator.get(0, len(jobKinds))]()
time.sleep(0.01)
async def asyncWaitAllTasks():
await asyncio.gather(*(asyncRunner() for i in range(3)))
def asyncWorkerTask():
asyncio.run(asyncWaitAllTasks())
# ==============================
# CLI handlers
# ==============================
def delayedAssertThread(condValue):
plDeclareThread("Crash thread")
time.sleep(1.0)
assert condValue, "Assertion called by CLI"
return 0
def cliHandlerAsyncAssert(condValue):
threading.Thread(target=lambda x=condValue: delayedAssertThread(x)).start()
return 0
def cliHandlerCreateMarker(msg):
plMarker("test_marker", msg)
return 0
def cliHandlerPoetryGetter():
return 0, "To bug, or not to bug, that is the question"
def cliHandlerWithParameters(param1, param2, param3):
# "Complex" handling in order to stimulate important parts of the API
if param1 <= -1000:
return (
1,
"This text will not be erased\nError: Very negative first parameter. Not great.",
)
elif param1 <= -100:
return 1, "Error: Mildly negative first parameter. Not great."
elif param1 <= 0:
return 1, "Error: First parameter shall be strictly positive (%d seen)" % param1
# Build the response
response = "Strictly positive integer value is: %d\n" % param1
response += "Float value is: %f\n" % param2
response += "String value is: %s\n" % param3
return 0, response
def cliHandlerQuit():
sys.exit(0)
# ==============================
# Event collection program
# ==============================
def collectInterestingData(
mode,
buildName,
durationMultiplier,
serverPort,
with_c_calls,
threadGroupQty,
crashKind,
):
# Start the logging
startSec = time.time()
if mode != "inactive":
plInitAndStart(
"Python example",
record_filename="example_record.pltraw" if mode == "file storage" else None,
build_name=buildName,
server_port=serverPort,
with_c_calls=with_c_calls,
)
# Give a name to this thread (after the library initialization)
plDeclareThread("Main")
# CLI registration
# On purpose *after* the call to plInitAndStart in order to better test the freeze point.
# Reminder: it is recommended to register them *before* the Palanteer initialization in order to remove any race condition
# in remote script about calling a not yet registered CLI after connection
plRegisterCli(
cliHandlerWithParameters,
"test::parameters",
"first=int second_param=float third=string",
"Uses the 3 types of parameters",
)
plRegisterCli(
cliHandlerWithParameters,
"test::parametersDft",
"first=int[[31415926]] second_param=float[[-3.14159265359]] third=string[[no string provided]] fourth=int[[0]]",
"Uses the 3 types of parameters with default values and a 4th one",
)
plRegisterCli(
cliHandlerAsyncAssert,
"async_assert",
"condvalue=int",
"Call asynchronously an assertion with the provided value after a 1s timeout",
)
plRegisterCli(
cliHandlerCreateMarker,
"test::marker",
"msg=string",
"Create a marker with the provided string",
)
plRegisterCli(cliHandlerPoetryGetter, "get_poetry", "", "Returns some poetry.")
plRegisterCli(cliHandlerQuit, "quit", "", "Exit the program")
# Freeze points just before starting, and in particular after declaring all CLIs (so that they can be used at this point)
# These steps are used by Palanteer testing
plFreezePoint()
plBegin("Freeze control test")
plData("Freeze", "Before first freeze")
plFreezePoint()
plData("Freeze", "After first freeze")
plFreezePoint()
plData("Freeze", "After second freeze")
plEnd("Freeze control test")
# Launch some active threads
threadGroupNames = [
"",
"Workers/",
"Real time/",
"Database Cluster/",
"Helpers/",
"Engine/",
"Compute Grid/",
"Hub/",
"Idlers/",
]
crashThreadGroupNbr = (
None if crashKind is None else int(time.time()) % threadGroupQty
) # Random selection of the thread group which shall crash
controlThreadList = []
for threadGroupNbr in range(threadGroupQty):
groupName = threadGroupNames[threadGroupNbr]
groupSynchro = GroupSynchro(groupName)
t1 = threading.Thread(
name="%sControl" % groupName,
target=lambda grp=groupSynchro, durMult=durationMultiplier: controlTask(
grp, durMult
),
)
t2 = threading.Thread(
name="%sAssociate" % groupName,
target=lambda grp=groupSynchro, ck=crashKind if crashThreadGroupNbr == threadGroupNbr else -1: associatedTask(
grp, ck
),
)
t1.start()
t2.start()
controlThreadList.append(t1)
controlThreadList.append(t2)
# Add some asynchronous jobs (virtual threads / green threads)
tAsync = threading.Thread(target=asyncWorkerTask)
tAsync.start()
controlThreadList.append(tAsync)
# Wait for threads completion
for t in controlThreadList:
t.join() # Join order does not matter
plMarker("Threading", "All tasks are completed! Joy!")
# Stop the recording
plStopAndUninit()
# Display the statistics
durationSec = time.time() - startSec
print("Statistics:")
print(" Execution time: %d ms" % (1000.0 * durationSec))
# =========================
# Performance evaluation
# =========================
def evaluatePerformance(mode, buildName, durationMultiplier, serverPort):
# Start the logging
if mode != "inactive":
plInitAndStart(
"Python perf example",
record_filename="example_record.pltraw" if mode == "file storage" else None,
build_name=buildName,
server_port=serverPort,
)
# Give a name to this thread (after the library initialization)
plDeclareThread("Main")
iterationQty = 250000 # 4 events per loop
loopQty = iterationQty * durationMultiplier
startCollectSec = time.time()
# Logging in loop, 4 events per cycle
for i in range(loopQty):
plBegin("TestLoop")
plData("Iteration", i)
plData("Still to go", loopQty - i - 1)
plEnd("TestLoop")
endCollectSec = time.time()
plStopAndUninit()
endSendingSec = time.time()
# Console display
print(
"Collection duration : %.2f ms for %d events"
% (1000.0 * (endCollectSec - startCollectSec), loopQty * 4.0)
)
print(
"Collection unit cost: %.0f ns"
% (1e9 * (endCollectSec - startCollectSec) / (loopQty * 4.0))
)
print(
"Processing duration : %.2f ms (w/ %s)"
% (
1000.0 * (endSendingSec - startCollectSec),
"disk file writing"
if mode == "file storage"
else "transmission and server processing",
)
)
print(
"Processing rate : %.3f million event/s"
% (4e-6 * loopQty / (endSendingSec - startCollectSec))
)
# =========================
# Main
# =========================
def displayUsage():
print("\nUsage: %s <parameter> [options]" % sys.argv[0])
print(" Palanteer Python instrumentation test program")
print("")
print(" Parameter:")
print(" 'collect' : Data collection")
print(" 'crash-assert' : Data collection with a planned failed assertion")
print(" 'crash-zerodiv': Data collection with a planned zero division")
print(" 'crash-segv' : Data collection with a planned seg fault")
print(" 'crash-abort' : Data collection with a planned abort call")
print(" 'perf' : Estimation of the logging performances in a loop")
print("")
print(" Options to selection the collection mode (exclusive):")
print(" <Default>: Use remote Palanteer connection")
print(" '-f' : Save the record in a file 'example_record.plt'")
print(" '-n' : No data collection (event recording not enabled at run time)")
print(" '-c' : Do profile the C functions")
print("")
print(" Options to configure the program behavior:")
print(
" '-t <1-9> : Defines the quantity of groups of threads (2 threads per group)"
)
print(" '-l' : Run time length multiplier (default is 1)")
print(
" '-b <name>' : Provide a build name for the current program (default is none)"
)
print(" '--port <port>': Use the provided socket port (default is 59059)")
print("")
print("To start, you can try this (and look at the testProgram.cpp code too):")
print(
" %s perf -f (no need for palanteer, events are stored in the file example_record.plt) "
% sys.argv[0]
)
print(
" %s collect -c (no need for palanteer, events are displayed on console) "
% sys.argv[0]
)
print(
" %s collect (requires the prior launch of 'palanteer' viewer) "
% sys.argv[0]
)
def main():
# Command line parsing and program behavior selection
doDisplayUsage, doEstimateCost, crashKind = False, False, None
# Get the main type of execution
if len(sys.argv) > 1:
if sys.argv[1] == "collect":
pass
elif sys.argv[1] == "perf":
doEstimateCost = True
elif sys.argv[1] == "crash-zerodiv":
crashKind = 0
elif sys.argv[1] == "crash-segv":
crashKind = 1
elif sys.argv[1] == "crash-assert":
crashKind = 2
elif sys.argv[1] == "crash-abort":
crashKind = 3
else:
doDisplayUsage = True
else:
doDisplayUsage = True
# Get the options
mode, buildName, serverPort, with_c_calls, threadGroupQty, durationMultiplier = (
"connected",
None,
59059,
False,
1,
1,
)
argCount = 2
while not doDisplayUsage and argCount < len(sys.argv):
w = sys.argv[argCount]
if w in ["-n", "--n"]:
mode = "inactive"
elif w in ["-f", "--f"]:
mode = "file storage"
elif w in ["-c", "--c"]:
with_c_calls = True
elif w in ["-b", "--b"] and argCount + 1 < len(sys.argv):
buildName = sys.argv[argCount + 1]
argCount += 1
print("Build name is: %s" % buildName)
elif w == "--port" and argCount + 1 < len(sys.argv):
serverPort = int(sys.argv[argCount + 1])
argCount += 1
print("Socket port: %d" % serverPort)
elif w in ["-t", "--t"] and argCount + 1 < len(sys.argv):
threadGroupQty = int(sys.argv[argCount + 1])
argCount += 1
print("Thread group qty: %d" % threadGroupQty)
elif w in ["-l", "--l"] and argCount + 1 < len(sys.argv):
durationMultiplier = int(sys.argv[argCount + 1])
argCount += 1
print("Duration multiplier: %d" % durationMultiplier)
else:
print("Error: unknown argument '%s'" % sys.argv[argCount])
doDisplayUsage = True
argCount += 1
# Sanity checks
if threadGroupQty <= 0 or durationMultiplier <= 0:
doDisplayUsage = True
# Display usage and quit
if doDisplayUsage:
displayUsage()
sys.exit(1)
print("Mode '%s'" % mode)
if doEstimateCost:
# Estimate the cost of the logging
evaluatePerformance(mode, buildName, durationMultiplier, serverPort)
else:
# Collect events for a multi-threaded test program
# The purposes are:
# - to show an example of instrumentation
# - to test all instrumentation APIs
collectInterestingData(
mode,
buildName,
durationMultiplier,
serverPort,
with_c_calls,
threadGroupQty,
crashKind,
)
sys.exit(0)
# Bootstrap
if __name__ == "__main__":
main()
|
conftest.py
|
import sys
from http.server import HTTPServer
from threading import Event
from threading import Thread
import pytest
from library.python.envoy_requests.common.engine import Engine
from test.python.echo_server import EchoServerHandler
@pytest.fixture(scope="session")
def http_server_url():
Engine.build()
ip = "127.0.0.1"
port = 9876
kill_server = Event()
def _run_http_server():
server = HTTPServer((ip, port), EchoServerHandler)
server.timeout = 0.25
while True:
if kill_server.is_set():
break
try:
server.handle_request()
except Exception as e:
print(f"Encountered exception: {str(e)}", file=sys.stderr)
server_thread = Thread(target=_run_http_server)
server_thread.start()
yield f"http://{ip}:{port}/"
kill_server.set()
server_thread.join()
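# Hedged usage sketch (not part of this conftest): a test consuming the fixture with
# only the standard library; it assumes EchoServerHandler answers plain GET requests.
#
# import urllib.request
#
# def test_echo_server_responds(http_server_url):
#     with urllib.request.urlopen(http_server_url) as resp:
#         assert resp.status == 200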
|
test_util.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 theloop, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""util functions for unittest"""
import leveldb
import logging
import multiprocessing
import os
import random
import time
from sys import platform
import grpc
import loopchain
import loopchain.utils as util
from loopchain import configure as conf
from loopchain.baseservice import ObjectManager, StubManager
from loopchain.components import SingletonMetaClass
from loopchain.blockchain import Transaction
from loopchain.container import ScoreService
from loopchain.peer import PeerService, PeerAuthorization
from loopchain.protos import loopchain_pb2, loopchain_pb2_grpc
from loopchain.radiostation import RadioStationService
util.set_log_level_debug()
def run_peer_server(port, radiostation_port=conf.PORT_RADIOSTATION, group_id=None, score=None):
ObjectManager().peer_service = PeerService(group_id, conf.IP_RADIOSTATION, radiostation_port)
if score is not None:
ObjectManager().peer_service.set_chain_code(score)
conf.DEFAULT_SCORE_REPOSITORY_PATH = \
os.path.join(os.path.dirname(__file__), '..', '..', 'resources', 'test_score_repository')
try:
ObjectManager().peer_service.serve(port, conf.DEFAULT_SCORE_PACKAGE)
except FileNotFoundError:
logging.debug("Score Load Fail")
# Depending on where the test code is run (IDE or console), path problems can occur.
# ObjectManager().peer_service.serve(port, score_path)
def run_black_peer_server(port, radiostation_port=conf.PORT_RADIOSTATION, group_id=None):
ObjectManager().peer_service = BlackService(group_id, conf.IP_RADIOSTATION, radiostation_port)
conf.DEFAULT_SCORE_REPOSITORY_PATH = \
os.path.join(os.path.dirname(__file__), '..', '..', 'resources', 'test_score_repository')
try:
ObjectManager().peer_service.serve(port)
except FileNotFoundError:
# Depending on where the test code is run (IDE or console), path problems can occur.
ObjectManager().peer_service.serve(port, "loopchain/default")
def run_radio_station(port):
RadioStationService().serve(port)
def run_score_server(port):
ScoreService(port)
def run_peer_server_as_process(port, radiostation_port=conf.PORT_RADIOSTATION, group_id=None, score=None):
process = multiprocessing.Process(target=run_peer_server, args=(port, radiostation_port, group_id, score,))
process.start()
time.sleep(1)
return process
def run_peer_server_as_process_and_stub(port, radiostation_port=conf.PORT_RADIOSTATION, group_id=None, score=None):
process = run_peer_server_as_process(port, radiostation_port, group_id, score)
channel = grpc.insecure_channel('localhost:' + str(port))
stub = loopchain_pb2_grpc.PeerServiceStub(channel)
util.request_server_in_time(stub.GetStatus, loopchain_pb2.StatusRequest(request=""))
return process, stub
def run_peer_server_as_process_and_stub_manager(
port, radiostation_port=conf.PORT_RADIOSTATION, group_id=None, score=None):
process = run_peer_server_as_process(port, radiostation_port, group_id, score)
stub_manager = StubManager.get_stub_manager_to_server(
'localhost:' + str(port), loopchain_pb2_grpc.PeerServiceStub)
return process, stub_manager
def run_black_peer_server_as_process(port, radiostation_port=conf.PORT_RADIOSTATION, group_id=None):
process = multiprocessing.Process(target=run_black_peer_server, args=(port, radiostation_port, group_id,))
process.start()
time.sleep(1)
return process
def run_black_peer_server_as_process_and_stub(port, radiostation_port=conf.PORT_RADIOSTATION, group_id=None):
process = run_black_peer_server_as_process(port, radiostation_port, group_id)
channel = grpc.insecure_channel('localhost:' + str(port))
stub = loopchain_pb2_grpc.PeerServiceStub(channel)
return process, stub
def run_radio_station_as_process(port):
process = multiprocessing.Process(target=run_radio_station, args=(port,))
process.start()
time.sleep(1)
return process
def run_radio_station_as_process_and_stub_manager(port):
process = run_radio_station_as_process(port)
stub_manager = StubManager.get_stub_manager_to_server(
'localhost:' + str(port), loopchain_pb2_grpc.RadioStationStub)
util.request_server_in_time(stub_manager.stub.GetStatus, loopchain_pb2.StatusRequest(request=""))
return process, stub_manager
def run_radio_station_as_process_and_stub(port):
process = run_radio_station_as_process(port)
channel = grpc.insecure_channel('localhost:' + str(port))
stub = loopchain_pb2_grpc.RadioStationStub(channel)
util.request_server_in_time(stub.GetStatus, loopchain_pb2.StatusRequest(request=""))
return process, stub
def run_score_server_as_process(port):
process = multiprocessing.Process(target=run_score_server, args=(port,))
process.start()
time.sleep(1)
return process
def print_testname(testname):
print("\n======================================================================")
print("Test %s Start" % testname)
print("======================================================================")
def make_level_db(db_name=""):
db_default_path = './' + (db_name if db_name else "db_test")
db_path = db_default_path
blockchain_db = None
retry_count = 0
while blockchain_db is None and retry_count < conf.MAX_RETRY_CREATE_DB:
try:
blockchain_db = leveldb.LevelDB(db_path, create_if_missing=True)
logging.debug("make level db path: " + db_path)
except leveldb.LevelDBError:
db_path = db_default_path + str(retry_count)
retry_count += 1
return blockchain_db
def close_open_python_process():
# macOS: kill every python process; elsewhere: kill all but the first python process (the test runner)
if platform == "darwin":
os.system("pkill -f python")
os.system("pkill -f Python")
else:
os.system("pgrep -f python | tail -$((`pgrep -f python | wc -l` - 1)) | xargs kill -9")
def clean_up_temp_db_files(kill_process=True):
module_root_path = os.path.dirname(loopchain.__file__) + "/.."
if kill_process:
close_open_python_process()
os.system(f'rm -rf $(find {module_root_path} -name db_*)')
os.system(f'rm -rf $(find {module_root_path} -name *test_db*)')
os.system(f'rm -rf $(find {module_root_path} -name *_block)')
os.system("rm -rf ./testcase/db_*")
os.system("rm -rf ./storage/db_*")
os.system("rm -rf chaindb_*")
os.system("rm -rf ./blockchain_db*")
os.system("rm -rf ./testcase/chaindb_*")
os.system("rm -rf sample_score")
os.system("rm -rf ./testcase/sample_score")
os.system("rm -rf certificate_db")
os.system("rm -rf ./resources/test_score_deploy")
os.system("rm -rf ./resources/test_score_repository/loopchain")
time.sleep(1)
def create_basic_tx(peer_id: str, peer_auth: PeerAuthorization) -> Transaction:
""" create basic tx data is "{args:[]}"
:param peer_id: peer_id
:param peer_auth:
:return: transaction
"""
tx = Transaction()
tx.put_meta('peer_id', peer_id)
tx.put_data("{args:[]}")
tx.sign_hash(peer_auth)
return tx
def create_peer_auth() -> PeerAuthorization:
peer_auth = PeerAuthorization(public_file=conf.PUBLIC_PATH,
pri_file=conf.PRIVATE_PATH,
cert_pass=conf.DEFAULT_PW)
return peer_auth
class TestServerManager(metaclass=SingletonMetaClass):
"""
"""
def __init__(self):
self.__test_port_diff = random.randrange(1, 30) * -50
self.__radiostation_port = conf.PORT_RADIOSTATION + self.__test_port_diff
# rs and peer info is tuple (process, stub_manager, port)
self.__rs_info = ()
self.__peer_info = {} # {num:peer_info}
self.__score = None
def start_servers(self, peer_count, score=None):
"""Start BlockChain network rs and peer
:param peer_count: num of peers but 0 means start only RS.
:return:
"""
logging.debug("TestServerManager start servers")
self.__score = score
# run radio station
process, stub_manager = run_radio_station_as_process_and_stub_manager(self.__radiostation_port)
self.__rs_info = (process, stub_manager, self.__radiostation_port)
time.sleep(2)
for i in range(peer_count):
peer_port = conf.PORT_PEER + (i * 7) + self.__test_port_diff
process, stub_manager = run_peer_server_as_process_and_stub_manager(
peer_port, self.__radiostation_port, score=score)
self.__peer_info[i] = (process, stub_manager, peer_port)
time.sleep(2)
def stop_all_server(self):
for i in self.__peer_info:
self.__peer_info[i][1].call_in_times(
"Stop",
loopchain_pb2.StopRequest(reason="TestServerManager"), conf.GRPC_TIMEOUT)
self.__rs_info[1].call_in_times(
"Stop",
loopchain_pb2.StopRequest(reason="TestServerManager"), conf.GRPC_TIMEOUT)
time.sleep(2)
for i in self.__peer_info:
self.__peer_info[i][0].join()
self.__rs_info[0].join()
def stop_peer(self, num):
self.__peer_info[num][1].call_in_times(
"Stop",
loopchain_pb2.StopRequest(reason="TestServerManager"), conf.GRPC_TIMEOUT)
time.sleep(2)
self.__peer_info[num][0].join()
def start_peer(self, num):
peer_port = conf.PORT_PEER + (num * 7) + self.__test_port_diff
process, stub_manager = run_peer_server_as_process_and_stub_manager(
peer_port, self.__radiostation_port, score=self.__score)
self.__peer_info[num] = (process, stub_manager, peer_port)
time.sleep(1)
def start_black_peers(self, peer_count):
pass
def add_peer(self):
num = 0
return num
def add_black_peer(self):
num = 0
return num
def get_stub_rs(self):
return self.__rs_info[1].stub
def get_stub_peer(self, num=0):
return self.__peer_info[num][1].stub
def get_port_rs(self):
return self.__radiostation_port
def get_port_peer(self, num):
return self.__peer_info[num][2]
def status(self):
"""
:return: json object for ServerManager status
"""
pass
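# Hedged usage sketch (not part of the utilities above): bring up one radio station
# and two peers, exercise a peer stub, then shut everything down.
#
# manager = TestServerManager()
# manager.start_servers(peer_count=2)
# stub = manager.get_stub_peer(0)
# manager.stop_all_server()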
|
script_animations.py
|
import random, time, sys, string, threading, os, queue
from alive_progress import alive_bar
def repeated_guesses(target):
chars = string.ascii_letters + string.digits + ' '
current_guess = len(target) * ['*']
for count, char in enumerate(target):
while current_guess[count] != char:
current_guess[count] = random.choice(chars)
sys.stdout.write(str("\rCracking : "+''.join(current_guess)))
# slight pause to make readable
time.sleep(0.005)
sys.stdout.flush()
print ("\n")
def bar_thing():
for x in progress_list:
with alive_bar(x) as bar:
for i in range(progress_to_be_done):
time.sleep(.005)
bar()
if __name__ == '__main__':
print("Enter Password")
password = input("> ")
password_length = len(password)
factorial = 13
if int(password_length) >= 1:
for i in range (1,int(password_length)+1):
factorial = factorial * i
cpu_num = os.cpu_count()
progress_to_be_done = round(factorial / cpu_num)
progress_list = [progress_to_be_done] * cpu_num
th0 = threading.Thread(target=repeated_guesses, args=(password,))
th1 = threading.Thread(target=bar_thing)
th0.start()
th1.start()
|
player.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: omi
# @Date: 2014-07-15 15:48:27
# @Last Modified by: omi
# @Last Modified time: 2015-01-30 18:05:08
'''
NetEase Cloud Music Player
'''
# Let's make some noise
import subprocess
import threading
import time
import os
import random
import re
from ui import Ui
from storage import Storage
from api import NetEase
from cache import Cache
from config import Config
import logger
log = logger.getLogger(__name__)
class Player(object):
def __init__(self):
self.config = Config()
self.ui = Ui()
self.popen_handler = None
# stop flag, prevents the playback thread from starting
self.playing_flag = False
self.pause_flag = False
self.process_length = 0
self.process_location = 0
self.process_first = False
self.storage = Storage()
self.info = self.storage.database['player_info']
self.songs = self.storage.database['songs']
self.playing_id = -1
self.cache = Cache()
self.notifier = self.config.get_item('notifier')
self.mpg123_parameters = self.config.get_item('mpg123_parameters')
self.end_callback = None
self.playing_song_changed_callback = None
def popen_recall(self, onExit, popenArgs):
'''
Runs the given args in subprocess.Popen, and then calls the function
onExit when the subprocess completes.
onExit is a callable object, and popenArgs is a lists/tuple of args
that would give to subprocess.Popen.
'''
def runInThread(onExit, arg):
para = ['mpg123', '-R']
para[1:1] = self.mpg123_parameters
self.popen_handler = subprocess.Popen(para,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.popen_handler.stdin.write('V ' + str(self.info[
'playing_volume']) + '\n')
if arg:
self.popen_handler.stdin.write('L ' + arg + '\n')
else:
self.next_idx()
onExit()
return
self.process_first = True
while True:
if self.playing_flag is False:
break
strout = self.popen_handler.stdout.readline()
if re.match(r'^@F.*$', strout):
process_data = strout.split(' ')
process_location = float(process_data[4])
if self.process_first:
self.process_length = process_location
self.process_first = False
self.process_location = 0
else:
self.process_location = self.process_length - process_location # NOQA
continue
elif strout[:2] == '@E':
# get an alternative url from the new api
sid = popenArgs['song_id']
new_url = NetEase().songs_detail_new_api([sid])[0]['url']
if new_url is None:
log.warning(('Song {} is unavailable '
'due to copyright issue').format(sid))
break
log.warning(
'Song {} is not compatible with old api.'.format(sid))
popenArgs['mp3_url'] = new_url
self.popen_handler.stdin.write('\nL ' + new_url + '\n')
self.popen_handler.stdout.readline()
elif strout == '@P 0\n':
self.popen_handler.stdin.write('Q\n')
self.popen_handler.kill()
break
if self.playing_flag:
self.next_idx()
onExit()
return
def getLyric():
if 'lyric' not in self.songs[str(self.playing_id)].keys():
self.songs[str(self.playing_id)]['lyric'] = []
if len(self.songs[str(self.playing_id)]['lyric']) > 0:
return
netease = NetEase()
lyric = netease.song_lyric(self.playing_id)
if lyric == [] or lyric == '未找到歌词':
return
lyric = lyric.split('\n')
self.songs[str(self.playing_id)]['lyric'] = lyric
return
def gettLyric():
if 'tlyric' not in self.songs[str(self.playing_id)].keys():
self.songs[str(self.playing_id)]['tlyric'] = []
if len(self.songs[str(self.playing_id)]['tlyric']) > 0:
return
netease = NetEase()
tlyric = netease.song_tlyric(self.playing_id)
if tlyric == [] or tlyric == '未找到歌词翻译':
return
tlyric = tlyric.split('\n')
self.songs[str(self.playing_id)]['tlyric'] = tlyric
return
def cacheSong(song_id, song_name, artist, song_url):
def cacheExit(song_id, path):
self.songs[str(song_id)]['cache'] = path
self.cache.add(song_id, song_name, artist, song_url, cacheExit)
self.cache.start_download()
if 'cache' in popenArgs.keys() and os.path.isfile(popenArgs['cache']):
thread = threading.Thread(target=runInThread,
args=(onExit, popenArgs['cache']))
else:
thread = threading.Thread(target=runInThread,
args=(onExit, popenArgs['mp3_url']))
cache_thread = threading.Thread(
target=cacheSong,
args=(popenArgs['song_id'], popenArgs['song_name'], popenArgs[
'artist'], popenArgs['mp3_url']))
cache_thread.start()
thread.start()
lyric_download_thread = threading.Thread(target=getLyric, args=())
lyric_download_thread.start()
tlyric_download_thread = threading.Thread(target=gettLyric, args=())
tlyric_download_thread.start()
# returns immediately after the thread starts
return thread
def get_playing_id(self):
return self.playing_id
def recall(self):
if self.info['idx'] >= len(self.info[
'player_list']) and self.end_callback is not None:
log.debug('Callback')
self.end_callback()
if self.info['idx'] < 0 or self.info['idx'] >= len(self.info[
'player_list']):
self.info['idx'] = 0
self.stop()
return
self.playing_flag = True
self.pause_flag = False
item = self.songs[self.info['player_list'][self.info['idx']]]
self.ui.build_playinfo(item['song_name'], item['artist'],
item['album_name'], item['quality'],
time.time())
if self.notifier:
self.ui.notify('Now playing', item['song_name'],
item['album_name'], item['artist'])
self.playing_id = item['song_id']
self.popen_recall(self.recall, item)
def generate_shuffle_playing_list(self):
del self.info['playing_list'][:]
for i in range(0, len(self.info['player_list'])):
self.info['playing_list'].append(i)
random.shuffle(self.info['playing_list'])
self.info['ridx'] = 0
def new_player_list(self, type, title, datalist, offset):
self.info['player_list_type'] = type
self.info['player_list_title'] = title
self.info['idx'] = offset
del self.info['player_list'][:]
del self.info['playing_list'][:]
self.info['ridx'] = 0
for song in datalist:
self.info['player_list'].append(str(song['song_id']))
if str(song['song_id']) not in self.songs.keys():
self.songs[str(song['song_id'])] = song
else:
database_song = self.songs[str(song['song_id'])]
if (database_song['song_name'] != song['song_name'] or
database_song['quality'] != song['quality']):
self.songs[str(song['song_id'])] = song
def append_songs(self, datalist):
for song in datalist:
self.info['player_list'].append(str(song['song_id']))
if str(song['song_id']) not in self.songs.keys():
self.songs[str(song['song_id'])] = song
else:
database_song = self.songs[str(song['song_id'])]
cond = any([database_song[k] != song[k]
for k in ('song_name', 'quality', 'mp3_url')])
if cond:
if 'cache' in self.songs[str(song['song_id'])].keys():
song['cache'] = self.songs[str(song['song_id'])][
'cache']
self.songs[str(song['song_id'])] = song
if len(datalist) > 0 and self.info['playing_mode'] == 3 or self.info[
'playing_mode'] == 4:
self.generate_shuffle_playing_list()
def play_and_pause(self, idx):
# same playlist and same idx -> same song: pause/resume it
if self.info['idx'] == idx:
if self.pause_flag:
self.resume()
else:
self.pause()
else:
self.info['idx'] = idx
# if it's playing
if self.playing_flag:
self.switch()
# start new play
else:
self.recall()
# play another
def switch(self):
self.stop()
# wait process be killed
time.sleep(0.1)
self.recall()
def stop(self):
if self.playing_flag and self.popen_handler:
self.playing_flag = False
self.popen_handler.stdin.write('Q\n')
try:
self.popen_handler.kill()
except OSError as e:
log.error(e)
return
def pause(self):
if not self.playing_flag and not self.popen_handler:
return
self.pause_flag = True
self.popen_handler.stdin.write('P\n')
item = self.songs[self.info['player_list'][self.info['idx']]]
self.ui.build_playinfo(item['song_name'],
item['artist'],
item['album_name'],
item['quality'],
time.time(),
pause=True)
def resume(self):
self.pause_flag = False
self.popen_handler.stdin.write('P\n')
item = self.songs[self.info['player_list'][self.info['idx']]]
self.ui.build_playinfo(item['song_name'], item['artist'],
item['album_name'], item['quality'],
time.time())
self.playing_id = item['song_id']
def _swap_song(self):
plist = self.info['playing_list']
now_songs = plist.index(self.info['idx'])
plist[0], plist[now_songs] = plist[now_songs], plist[0]
def _is_idx_valid(self):
return 0 <= self.info['idx'] < len(self.info['player_list'])
def _inc_idx(self):
if self.info['idx'] < len(self.info['player_list']):
self.info['idx'] += 1
def _dec_idx(self):
if self.info['idx'] > 0:
self.info['idx'] -= 1
def _need_to_shuffle(self):
playing_list = self.info['playing_list']
ridx = self.info['ridx']
idx = self.info['idx']
if ridx >= len(playing_list) or playing_list[ridx] != idx:
return True
else:
return False
def next_idx(self):
if not self._is_idx_valid():
self.stop()
return
playlist_len = len(self.info['player_list'])
playinglist_len = len(self.info['playing_list'])
# Playing mode. 0 is ordered. 1 is ordered loop.
# 2 is single song loop. 3 is single random. 4 is random loop
if self.info['playing_mode'] == 0:
self._inc_idx()
elif self.info['playing_mode'] == 1:
self.info['idx'] = (self.info['idx'] + 1) % playlist_len
elif self.info['playing_mode'] == 2:
self.info['idx'] = self.info['idx']
elif self.info['playing_mode'] == 3 or self.info['playing_mode'] == 4:
if self._need_to_shuffle():
self.generate_shuffle_playing_list()
playinglist_len = len(self.info['playing_list'])
# When you regenerate playing list
# you should keep previous song same.
try:
self._swap_song()
except Exception as e:
log.error(e)
self.info['ridx'] += 1
# Out of border
if self.info['playing_mode'] == 4:
self.info['ridx'] %= playinglist_len
if self.info['ridx'] >= playinglist_len:
self.info['idx'] = playlist_len
else:
self.info['idx'] = self.info['playing_list'][self.info['ridx']]
else:
self.info['idx'] += 1
if self.playing_song_changed_callback is not None:
self.playing_song_changed_callback()
def next(self):
self.stop()
time.sleep(0.01)
self.next_idx()
self.recall()
def prev_idx(self):
if not self._is_idx_valid():
self.stop()
return
playlist_len = len(self.info['player_list'])
playinglist_len = len(self.info['playing_list'])
# Playing mode. 0 is ordered. 1 is ordered loop.
# 2 is single song loop. 3 is single random. 4 is random loop
if self.info['playing_mode'] == 0:
self._dec_idx()
elif self.info['playing_mode'] == 1:
self.info['idx'] = (self.info['idx'] - 1) % playlist_len
elif self.info['playing_mode'] == 2:
self.info['idx'] = self.info['idx']
elif self.info['playing_mode'] == 3 or self.info['playing_mode'] == 4:
if self._need_to_shuffle():
self.generate_shuffle_playing_list()
playinglist_len = len(self.info['playing_list'])
self.info['ridx'] -= 1
if self.info['ridx'] < 0:
if self.info['playing_mode'] == 3:
self.info['ridx'] = 0
else:
self.info['ridx'] %= playinglist_len
self.info['idx'] = self.info['playing_list'][self.info['ridx']]
else:
self.info['idx'] -= 1
if self.playing_song_changed_callback is not None:
self.playing_song_changed_callback()
def prev(self):
self.stop()
time.sleep(0.01)
self.prev_idx()
self.recall()
def shuffle(self):
self.stop()
time.sleep(0.01)
self.info['playing_mode'] = 3
self.generate_shuffle_playing_list()
self.info['idx'] = self.info['playing_list'][self.info['ridx']]
self.recall()
def volume_up(self):
self.info['playing_volume'] = self.info['playing_volume'] + 7
if (self.info['playing_volume'] > 100):
self.info['playing_volume'] = 100
if not self.playing_flag:
return
self.popen_handler.stdin.write('V ' + str(self.info[
'playing_volume']) + '\n')
def volume_down(self):
self.info['playing_volume'] = self.info['playing_volume'] - 7
if (self.info['playing_volume'] < 0):
self.info['playing_volume'] = 0
if not self.playing_flag:
return
self.popen_handler.stdin.write('V ' + str(self.info[
'playing_volume']) + '\n')
def update_size(self):
try:
self.ui.update_size()
item = self.songs[self.info['player_list'][self.info['idx']]]
if self.playing_flag:
self.ui.build_playinfo(item['song_name'], item['artist'],
item['album_name'], item['quality'],
time.time())
if self.pause_flag:
self.ui.build_playinfo(item['song_name'],
item['artist'],
item['album_name'],
item['quality'],
time.time(),
pause=True)
except Exception as e:
log.error(e)
pass
def cacheSong1time(self, song_id, song_name, artist, song_url):
def cacheExit(song_id, path):
self.songs[str(song_id)]['cache'] = path
self.cache.enable = False
self.cache.enable = True
self.cache.add(song_id, song_name, artist, song_url, cacheExit)
self.cache.start_download()
|
kbsync.py
|
# Keyboard Synchronization
import arachnoid as ara
#import win32api, win32con
from pynput import keyboard
import os, sys
print('PID: {}'.format(ara.get_pid()))
IP_ADDR = '192.168.1.94'#'43.33'
PORT_NUMBER = 5555
#KB_LAYOUT = win32api.GetKeyboardLayout()
'''
def get_key_strokes(from_=0, to=250):
return list(filter(lambda key: win32api.GetAsyncKeyState(key), range(from_, to)))
def simple_key_press(key):
win32api.keybd_event(key, 0, 0, 0)
ara.time.sleep(.05)
win32api.keybd_event(key, 0, win32con.KEYEVENTF_KEYUP, 0)
def hold_key_press(keys):
# press keys in order (e.g. control + shift + a)
for key in keys:
win32api.keybd_event(key, 0, 0, 0)
ara.time.sleep(.05)
# release keys in reverse order
for key in keys[::-1]:
win32api.keybd_event(key, 0, win32con.KEYEVENTF_KEYUP, 0)
ara.time.sleep(.1)
def change_keyboard_layout(layout):
win32api.LoadKeyboardLayout(layout, 1)
'''
def on_press(key):
global server
if type(key) == keyboard.Key:
key_code = key.value.vk
else:
assert type(key) == keyboard.KeyCode
key_code = key.vk
server.tasks.append(b'<KEY-PRESS %i>' % key_code)
#print('pressed {}'.format(key_code))
def on_release(key):
global server
if key == keyboard.Key.esc:
return False
#key_code = key.vk
#server.tasks.append(b'<KEY-RELEASE %i>' % key_code)
if not server.alive:
return False
def simple_key_press(key_code):
key = keyboard.KeyCode(key_code)
controller.press(key)
def sync_keyboard_client(client):
open('OK Flag.txt', 'w').write('1')
special_values = vars(keyboard.Key)
while client.alive and 'OK Flag.txt' in os.listdir():
if client.tasks:
msg = client.tasks[0]
client.tasks.pop(0)
else:
msg = client.bempty_flag
client.client.sendall(b'<>')
response = client.client.recv(client.max_buffer_size)
if response != client.bempty_flag:
response = response.decode('utf8')
tag = response[1:response.index(' ')]
if tag == 'KEY-PRESS':
key_code = int(response[10:-1])
simple_key_press(key_code)
#print('pressing key {}'.format(key_code))
# elif tag == 'KEY-COMBO': ...
else:
print('Unknown tag: {}'.format(tag))
def client_side():
client = ara.Spider(ip=IP_ADDR, port=PORT_NUMBER, verbose=1)
client.connect()
thread = ara.Thread(target=client.loop2)
thread.start()
thread2 = ara.Thread(target=sync_keyboard_client, args=(client,))
thread2.start()
input('stop')
thread.join()
thread2.join()
os.system('taskkill /f /pid {}'.format(ara.get_pid()))
def server_side():
global server
server = ara.Web(host=IP_ADDR, port=PORT_NUMBER, verbose=1)
server.tasks = []
server.init()
def read_keyboard_info(server):
if server.tasks:
return server.tasks.pop(0)
else:
return None
'''
def send_keyboard_info(server): # send mouse info to server
global last_pos
MAX_KEY_CHUNK_SIZE = 30 # one key is defined by a group of n key, up to MAX_KEY_CHUNK_SIZE times the same key
while True:
key_strokes = get_key_strokes()
if key_strokes:
chunk_size = 0
last_key_stroke = key_strokes[0]
server.tasks.append(b'<KEY-PRESS %i>' % last_key_stroke)
print('counted {} times {} key'.format(key_strokes.count(last_key_stroke), last_key_stroke))
for key in key_strokes:
if key == last_key_stroke:
chunk_size += 1
else:
server.tasks.append(b'<KEY-PRESS %i>' % key)
last_key_stroke = key
chunk_size = 0
if chunk_size >= MAX_KEY_CHUNK_SIZE: # >= because if the next one is not last_key_stroke, the ky won't repeat. So, the key repeats only if chunk_size > MAX_KEY_CHUNK_SIZE (next iteration, if key == last_key_stroke)
chunk_size = 0
ara.time.sleep(.01)
'''
def send_keyboard_info():
with keyboard.Listener(
on_press=on_press,
on_release=on_release) as listener:
listener.join()
thread = ara.Thread(target=lambda : server.start(read_f=lambda : read_keyboard_info(server)))
thread2 = ara.Thread(target=send_keyboard_info)
thread.start()
thread2.start()
input('stop')
os.system('taskkill /f /pid {}'.format(ara.get_pid()))
if __name__ == '__main__':
if sys.argv[-1] == 'server':
server_side()
elif sys.argv[-1] == 'client':
controller = keyboard.Controller()
client_side()
else:
raise ValueError('Unknown value for keyboard sync role...')
|
python_cheatsheet.py
|
import sys
import math
import random
import threading
import time
from functools import reduce
# ----- INTRO -----
# Python tutorial from Derek Banas
# Python files end with the extension .py
# Print to the console
# Python statements terminate with a newline
print("Hello World")
# Accept user input and store it in a variable
# name = input("What is your name ")
# print("Hi ", name)
# If you want to extend a statement to multiple
# lines you can use parentheses or \
v1 = (
1 + 2
+ 3
)
v1 = 1 + 2 \
+ 3
# Put multiple statements on 1 line
v1 = 5; v1 = v1 - 1
"""
Multi-line
Comment
"""
# ----- VARIABLES -----
# Variables are names assigned to values
# The 1st character must be _ or a letter
# Then you can use letters, numbers or _
# Variable names are case sensitive
v2 = 1
V2 = 2 # v2 is different from V2
# You can assign multiple variables
v3 = v4 = 20
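# You can also unpack several values in one statement (illustrative example)
v5, v6 = 30, 40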
# ----- DATA TYPES -----
# Data in Python is dynamically typed and that
# data type can change
# All data is an object which I cover later
# The basic types are integers, floats,
# complex numbers, strings, booleans
# Python doesn't have a character type
# How to get the type
print(type(10))
# There is no limit to the size of integers
# This is a way to get a practical max size
print(sys.maxsize)
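# Added illustration (not in the original tutorial): integers really are
# arbitrary precision, so values far beyond sys.maxsize work fine
print(2 ** 100)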
# Floats are values with decimal values
# This is how to get a max float
print(sys.float_info.max)
# But, they are accurate to 15 digits
f1 = 1.1111111111111111
f2 = 1.1111111111111111
f3 = f1 + f2
print(f3)
# Complex numbers are [real part]+[imaginary part]j
cn1 = 5 + 6j
# Booleans are either True or False
b1 = True
# Strings are surrounded by ' or "
str1 = "Escape Sequences \' \t \" \\ and \n"
str2 = '''Triple quoted strings can contain ' and "'''
# You can cast to different types with int, float,
# str, chr
print("Cast ", type(int(5.4))) # to int
print("Cast 2 ", type(str(5.4))) # to string
print("Cast 3 ", type(chr(97))) # to string
print("Cast 4 ", type(ord('a'))) # to int
# ----- OUTPUT -----
# You can define a separator for print
print(12, 21, 1974, sep='/')
# Eliminate newline
print("No Newline", end='')
# String formatting %e for exponent
print("\n%04d %s %.2f %c" % (1, "Derek", 1.234, 'A'))
# ----- MATH -----
print("5 + 2 =", 5 + 2)
print("5 - 2 =", 5 - 2)
print("5 * 2 =", 5 * 2)
print("5 / 2 =", 5 / 2)
print("5 % 2 =", 5 % 2)
print("5 ** 2 =", 5 ** 2)
print("5 // 2 =", 5 // 2)
# Shortcuts
i1 = 2
i1 += 1
print("i1 ", i1)
# Math Functions
print("abs(-1) ", abs(-1))
print("max(5, 4) ", max(5, 4))
print("min(5, 4) ", min(5, 4))
print("pow(2, 2) ", pow(2, 2))
print("ceil(4.5) ", math.ceil(4.5))
print("floor(4.5) ", math.floor(4.5))
print("round(4.5) ", round(4.5))
print("exp(1) ", math.exp(1)) # e**x
print("log(e) ", math.log(math.exp(1)))
print("log(100) ", math.log(100, 10)) # Base 10 Log
print("sqrt(100) ", math.sqrt(100))
print("sin(0) ", math.sin(0))
print("cos(0) ", math.cos(0))
print("tan(0) ", math.tan(0))
print("asin(0) ", math.asin(0))
print("acos(0) ", math.acos(0))
print("atan(0) ", math.atan(0))
print("sinh(0) ", math.sinh(0))
print("cosh(0) ", math.cosh(0))
print("tanh(0) ", math.tanh(0))
print("asinh(0) ", math.asinh(0))
print("acosh(pi) ", math.acosh(math.pi))
print("atanh(0) ", math.atanh(0))
print("hypot(0) ", math.hypot(10, 10)) # sqrt(x*x + y*y)
print("radians(0) ", math.radians(0))
print("degrees(pi) ", math.degrees(math.pi))
# Generate a random int
print("Random", random.randint(1, 101))
# ----- NaN & inf -----
# inf is infinity
print(math.inf > 0)
# NaN is used to represent a number that can't
# be defined
print(math.inf - math.inf)
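# Added sketch: math.nan gives you a NaN directly and math.isnan tests for it
print(math.isnan(math.nan))               # True
print(math.isnan(math.inf - math.inf))    # True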
# ----- CONDITIONALS -----
# Comparison Operators : < > <= >= == !=
# if, else & elif execute different code depending
# on conditions
age = 30
if age > 21:
# Python uses indentation to define all the
# code that executes if the above is true
print("You can drive a tractor trailer")
elif age >= 16:
print("You can drive a car")
else:
print("You can't drive")
# Make more complex conditionals with logical operators
# Logical Operators : and or not
if age < 5:
print("Stay Home")
elif (age >= 5) and (age <= 6):
print("Kindergarten")
elif (age > 6) and (age <= 17):
print("Grade %d", (age - 5))
else:
print("College")
# Ternary operator in Python
# condition_true if condition else condition_false
canVote = True if age >= 18 else False
# ----- STRINGS -----
# Raw strings ignore escape sequences
print(r"I'll be ignored \n")
# Combine strings with +
print("Hello " + "You")
# Get string length
str3 = "Hello You"
print("Length ", len(str3))
# Character at index
print("1st ", str3[0])
# Last character
print("Last ", str3[-1])
# 1st 3 chrs
print("1st 3 ", str3[0:3]) # Start, up to not including
# Get every other character
print("Every Other ", str3[0:-1:2]) # Last is a step
# You can't change an index value like this
# str3[0] = "a" because strings are immutable
# (Can't Change)
# You could do this
str3 = str3.replace("Hello", "Goodbye")
print(str3)
# You could also slice front and back and replace
# what you want to change
str3 = str3[:8] + "y" + str3[9:]
print(str3)
# Test if string in string
print("you" in str3)
# Test if not in
print("you" not in str3)
# Find first index for match or -1
print("You Index ", str3.find("you"))
# Trim white space from right and left
# also lstrip and rstrip
print(" Hello ".strip())
# Convert a list into a string and separate with
# spaces
print(" ".join(["Some", "Words"]))
# Convert string into a list with a defined separator
# or delimiter
print("A, string".split(", "))
# Formatted output with f-string
int1 = int2 = 5
print(f'{int1} + {int2} = {int1 + int2}')
# To lower and upper case
print("A String".lower())
print("A String".upper())
# Is letter or number
print("abc123".isalnum())
# Is characters
print("abc".isalpha())
# Is numbers
print("abc".isdigit())
# ----- LISTS -----
# Lists are mutable and can hold pieces of data
# of varying data types, or even functions
l1 = [1, 3.14, "Derek", True]
# Get length
print("Length ", len(l1))
# Get value at index
print("1st", l1[0])
print("Last", l1[-1])
# Change value
l1[0] = 2
# Change multiple values
l1[2:4] = ["Bob", False]
# Insert at index without deleting
# Also l1.insert(2, "Paul")
l1[2:2] = ["Paul", 9]
# Add to end (Also l1.extend([5, 6]))
l2 = l1 + ["Egg", 4]
# Remove a value
l2.remove("Paul")
# Remove at index
l2.pop(0)
print("l2", l2)
# Add to beginning (Also l1.insert(0, "Egg"))
l2 = ["Egg", 4] + l1
# Multidimensional list
l3 = [[1, 2], [3, 4]]
print("[1, 1]", l3[1][1])
# Does value exist
print("1 Exists", (1 in l1))
# Min & Max
print("Min ", min([1, 2, 3]))
print("Max ", max([1, 2, 3]))
# Slice out parts
print("1st 2", l1[0:2])
print("Every Other ", l1[0:-1:2])
print("Reverse ", l1[::-1])
# ----- LOOPS -----
# While : Execute while condition is True
w1 = 1
while w1 < 5:
print(w1)
w1 += 1
w2 = 0
while w2 <= 20:
if w2 % 2 == 0:
print(w2)
elif w2 == 9:
# Forces the loop to end all together
break
else:
        # Shorthand for w2 = w2 + 1
w2 += 1
# Skips to the next iteration of the loop
continue
w2 += 1
# Cycle through list
l4 = [1, 3.14, "Derek", True]
while len(l4):
print(l4.pop(0))
# For Loop
# Allows you to perform an action a set number of times
# Range performs the action 10 times 0 - 9
# end="" eliminates newline
for x in range(0, 10):
print(x, ' ', end="")
print('\n')
# Cycle through list
l4 = [1, 3.14, "Derek", True]
for x in l4:
print(x)
# You can also define a list of numbers to
# cycle through
for x in [2, 4, 6]:
print(x)
# You can double up for loops to cycle through lists
num_list = [[1, 2, 3], [10, 20, 30], [100, 200, 300]]
# ----- ITERATORS -----
# You can pass an object to iter() which returns
# an iterator which allows you to cycle
l5 = [6, 9, 12]
itr = iter(l5)
print(next(itr)) # Grab next value
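# Added sketch: each next() call advances the iterator; once it is exhausted,
# next() raises StopIteration (which is how for loops know when to stop)
print(next(itr))  # 9
print(next(itr))  # 12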
# ----- RANGES -----
# The range() function creates integer iterables
print(list(range(0, 5)))
# You can define step
print(list(range(0, 10, 2)))
for x in range(0, 3):
for y in range(0, 3):
print(num_list[x][y])
# ----- TUPLES -----
# Tuples are just like lists except they are
# immutable
t1 = (1, 3.14, "Derek", False)
# Get length
print("Length ", len(t1))
# Get value / values
print("1st", t1[0])
print("Last", t1[-1])
print("1st 2", t1[0:2])
print("Every Other ", t1[0:-1:2])
print("Reverse ", t1[::-1])
# Everything you can do with lists you can do with
# tuples as long as you don't change values
# ----- DICTIONARIES -----
# Dictionaries are collections of key / value pairs
# Values can be any data type; keys must be immutable (hashable)
# Duplicate keys aren't allowed
heroes = {
"Superman": "Clark Kent",
"Batman": "Bruce Wayne"
}
villains = dict([
("Lex Luthor", "Lex Luthor"),
("Loki", "Loki")
])
print("Length", len(heroes))
# Get value by key
# Also heroes.get("Superman")
print(heroes["Superman"])
# Add more
heroes["Flash"] = "Barry Allan"
# Change a value
heroes["Flash"] = "Barry Allen"
# Get list of tuples
print(list(heroes.items()))
# Get list of keys and values
print(list(heroes.keys()))
print(list(heroes.values()))
# Delete
del heroes["Flash"]
# Remove a key and return it
print(heroes.pop("Batman"))
# Search for key
print("Superman" in heroes)
# Cycle through a dictionary
for k in heroes:
print(k)
for v in heroes.values():
print(v)
# Formatted print with dictionary mapping
d1 = {"name": "Bread", "price": .88}
print("%(name)s costs $%(price).2f" % d1)
# ----- SETS -----
# Sets are unordered collections of unique values
# The set itself can change, but the values it holds
# must be immutable
s1 = set(["Derek", 1])
s2 = {"Paul", 1}
# Size
print("Length", len(s2))
# Join sets
s3 = s1 | s2
print(s3)
# Add value
s3.add("Doug")
# Remove value
s3.discard("Derek")
# Remove random value
print("Random", s3.pop())
# Add values in s2 to s3
s3 |= s2
# Return common values (You can include multiple
# sets as arguments)
print(s1.intersection(s2))
# All unique values
print(s1.symmetric_difference(s2))
# Values in s1 but not in s2
print(s1.difference(s2))
# Clear values
s3.clear()
# Frozen sets can't be edited
s4 = frozenset(["Paul", 7])
# ----- FUNCTIONS -----
# Functions provide code reuse, organization
# and much more
# Add 2 values using 1 as default
# You can define the data type using function
# annotations
def get_sum(num1: int = 1, num2: int = 1):
return num1 + num2
print(get_sum(5, 4))
# Accept multiple values
def get_sum2(*args):
sum = 0
for arg in args:
sum += arg
return sum
print(get_sum2(1, 2, 3, 4))
# Return multiple values
def next_2(num):
return num + 1, num + 2
i1, i2 = next_2(5)
print(i1, i2)
# A function that makes a function that
# multiplies by the given value
def mult_by(num):
    # You can create anonymous (unnamed) functions
# with lambda
return lambda x: x * num
print("3 * 5 =", (mult_by(3)(5)))
# Pass a function to a function
def mult_list(lst, func):  # renamed parameter so it doesn't shadow the built-in list
    for x in lst:
        print(func(x))
mult_by_4 = mult_by(4)
mult_list(list(range(0, 5)), mult_by_4)
# Create list of functions
power_list = [lambda x: x ** 2,
lambda x: x ** 3,
lambda x: x ** 4]
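# Added sketch: the stored lambdas can be called later, for example all on
# the same value
for power_func in power_list:
    print(power_func(2))  # 4, 8, 16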
# ----- MAP -----
# Map is used to execute a function on a list
one_to_4 = range(1, 5)
times2 = lambda x: x * 2
print(list(map(times2, one_to_4)))
# ----- FILTER -----
# Filter selects items based on a function
# Print out the even values from a list
print(list(filter((lambda x: x % 2 == 0), range(1, 11))))
# ----- REDUCE -----
# Reduce receives a list and returns a single
# result
# Add up the values in a list
print(reduce((lambda x, y: x + y), range(1, 6)))
# ----- EXCEPTION HANDLING -----
# You can handle errors that would otherwise
# crash your program
# By giving the while a value of True it will
# cycle until a break is reached
while True:
# If we expect an error can occur surround
# potential error with try
try:
number = int(input("Please enter a number : "))
break
# The code in the except block provides
# an error message to set things right
# We can either target a specific error
# like ValueError
except ValueError:
print("You didn't enter a number")
# We can target all other errors with a
# default
except:
print("An unknown error occurred")
print("Thank you for entering a number")
# ----- FILE IO -----
# We can save and read data from files
# We open the file with the "with" statement, which guarantees
# the file will be closed even if the program crashes
# mode w overwrites file
# mode a appends
# File Operators:
# 'r' - read file
# 'w' - write file
# 'a' - append file
with open("mydata.txt", mode="w", encoding="utf-8") \
as myFile:
# You can write to the file with write
# It doesn't add a newline
myFile.write("Some random text\nMore random text\nAnd some more")
# Open a file for reading
with open("mydata.txt", encoding="utf-8") as myFile:
# Use read() to get everything at once
print(myFile.read())
# Find out if the file is closed
print(myFile.closed)
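# Added sketch: mode "a" (mentioned above) appends to the file instead of
# overwriting it
with open("mydata.txt", mode="a", encoding="utf-8") as myFile:
    myFile.write("\nAn appended line")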
# using with operator
# reading from text file all lines with auto closing using with
with open("test.txt") as f:
lines = f.readlines()
# reading a complete text file content to a string
with open("test.txt") as f:
lines = f.read()
# No f.close() needed here - the with statement already closed the file
# get line by line of the text file to a string variable
with open("test.txt") as f:
for line in f:
print(line)
# write a list to a file
lines = ['Readme', 'How to write text files in Python']
with open("test2.txt", 'w') as f:
f.write('\n'.join(lines))
# note: '\n'.join(lines) puts a newline between the items when writing the file
# ----- CLASSES OBJECTS -----
# Real world objects have
# attributes : height, weight
# capabilities : run, eat
# Classes are blueprints for creating objects
class Square:
# init is used to set values for each Square
def __init__(self, height="0", width="0"):
self.height = height
self.width = width
# This is the getter
# self is used to refer to an object that
# we don't possess a name for
@property
def height(self):
print("Retrieving the height")
# Put a __ before this private field
return self.__height
# This is the setter
@height.setter
def height(self, value):
# We protect the height from receiving
# a bad value
if value.isdigit():
# Put a __ before this private field
self.__height = value
else:
print("Please only enter numbers for height")
# This is the getter
@property
def width(self):
print("Retrieving the width")
return self.__width
# This is the setter
@width.setter
def width(self, value):
if value.isdigit():
self.__width = value
else:
print("Please only enter numbers for width")
def get_area(self):
return int(self.__width) * int(self.__height)
# Create a Square object
square = Square()
square.height = "10"
square.width = "10"
print("Area", square.get_area())
# ----- INHERITANCE & POLYMORPHISM-----
# When a class inherits from another it gets all
# its fields and methods and can change as needed
class Animal:
def __init__(self, name="unknown", weight=0):
self.__name = name
self.__weight = weight
    # This is the getter for the private __name field
    @property
    def name(self):
        return self.__name
def make_noise(self):
return "Grrrrr"
# Used to cast to a string type
def __str__(self):
return "{} is a {} and says {}".format (self.__name, type(self).__name__, self.make_noise())
# Magic methods are used for operator
# overloading
# Here I'll define how to evaluate greater
# than between 2 Animal objects
def __gt__(self, animal2):
if self.__weight > animal2.__weight:
return True
else:
return False
# Other Magic Methods
# __eq__ : Equal
# __ne__ : Not Equal
# __lt__ : Less Than
# __gt__ : Greater Than
# __le__ : Less Than or Equal
# __ge__ : Greater Than or Equal
# __add__ : Addition
# __sub__ : Subtraction
# __mul__ : Multiplication
# __div__ : Division
# __mod__ : Modulus
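# (a short added sketch of __eq__ and __add__ follows the Animal / Dog demo below)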
# Dog inherits everything from Animal
class Dog(Animal):
def __init__(self, name="unknown", owner="unknown", weight=0):
# Have the super class handle initializing
Animal.__init__(self, name, weight)
self.__owner = owner
# Overwrite str
def __str__(self):
# How to call super class methods
return super().__str__() + " and is owned by " + \
self.__owner
animal = Animal("Spot", 100)
print(animal)
dog = Dog("Bowser", "Bob", 150)
print(dog)
# Test the magic method
print(animal > dog)
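# Added sketch (not part of the original tutorial): two more magic methods
# from the list above, __eq__ and __add__, on a tiny standalone class
class Weight:
    def __init__(self, kg):
        self.kg = kg
    def __eq__(self, other):
        return self.kg == other.kg
    def __add__(self, other):
        return Weight(self.kg + other.kg)
print(Weight(10) == Weight(10))      # True
print((Weight(10) + Weight(5)).kg)   # 15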
# Polymorphism in Python works differently from
# other languages in that functions accept any
# object and expect that object to provide the
# needed method
# This isn't something to dwell on. Just know
# that if you call on a method for an object
# that the method just needs to exist for
# that object to work.
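# Added sketch of that duck typing idea: any object with a make_noise()
# method works with this function, no shared base class required
def print_noise(creature):
    print(creature.make_noise())
print_noise(animal)
print_noise(dog)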
# ----- THREADS -----
# Threads are blocks of code that takes turns
# executing
def execute_thread(i):
# strftime or string formatted time allows you to
# define how the time is displayed.
# You could include the date with
# strftime("%Y-%m-%d %H:%M:%S", gmtime())
# Print when the thread went to sleep
print("Thread {} sleeps at {}".format(i,
time.strftime("%H:%M:%S", time.gmtime())))
# Generate a random sleep period of between 1 and
# 5 seconds
rand_sleep_time = random.randint(1, 5)
# Pauses execution of code in this function for
# a few seconds
time.sleep(rand_sleep_time)
# Print out info after the sleep time
print("Thread {} stops sleeping at {}".format(i,
time.strftime("%H:%M:%S", time.gmtime())))
for i in range(10):
# Each time through the loop a Thread object is created
# You pass it the function to execute and any
# arguments to pass to that method
# The arguments passed must be a sequence which
# is why we need the comma with 1 argument
thread = threading.Thread(target=execute_thread, args=(i,))
thread.start()
# Display active threads
# The extra 1 is this for loop executing in the main
# thread
print("Active Threads :", threading.activeCount())
# Returns a list of all active thread objects
print("Thread Objects :", threading.enumerate())
# Regular expressions allow you to locate and change
# strings in very powerful ways.
# They work in almost exactly the same way in every
# programming language as well.
# ----- REGEX (Regular Expressions) -------
# Regular Expressions (Regex) are used to
# 1. Search for a specific string in a large amount of data
# 2. Verify that a string has the proper format (Email, Phone #)
# 3. Find a string and replace it with another string
# 4. Format data into the proper form for importing for example
# import the Regex module
import re
# ---------- Was a Match Found ----------
# Search for ape in the string
if re.search("ape", "The ape was at the apex"):
print("There is an ape")
# ---------- Get All Matches ----------
# findall() returns a list of matches
# . is used to match any 1 character or space
allApes = re.findall("ape.", "The ape was at the apex")
for i in allApes:
print(i)
# finditer returns an iterator of matching objects
# You can use span to get the location
theStr = "The ape was at the apex"
for i in re.finditer("ape.", theStr):
# Span returns a tuple
locTuple = i.span()
print(locTuple)
# Slice the match out using the tuple values
print(theStr[locTuple[0]:locTuple[1]])
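# Added sketch: re.sub covers use case 3 above, replacing every match of a
# pattern with another string
print(re.sub("ape", "gorilla", theStr))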
# Here I'll show you how to work with SQLite databases
# in Python
# A database makes it easy for you to organize your
# data for storage and fast searching
import sqlite3
import sys
import csv
def printDB():
# To retrieve data from a table use SELECT followed
# by the items to retrieve and the table to
# retrieve from
try:
result = theCursor.execute("SELECT id, FName, LName, Age, Address, Salary, HireDate FROM Employees")
# You receive a list of lists that hold the result
for row in result:
print("ID :", row[0])
print("FName :", row[1])
print("LName :", row[2])
print("Age :", row[3])
print("Address :", row[4])
print("Salary :", row[5])
print("HireDate :", row[6])
except sqlite3.OperationalError:
print("The Table Doesn't Exist")
except:
print("Couldn't Retrieve Data From Database")
# ---------- END OF FUNCTIONS ----------
# connect() will open an SQLite database, or if it
# doesn't exist it will create it
# The file appears in the same directory as this
# Python file
db_conn = sqlite3.connect('test.db')
print("Database Created")
# A cursor is used to traverse the records of a result
theCursor = db_conn.cursor()
# execute() executes a SQL command
# We organize our data in tables by defining their
# name and the data type for the data
# We define the table name
# A primary key is a unique value that differentiates
# each row of data in our table
# The primary key will auto increment each time we
# add a new Employee
# If a piece of data is marked as NOT NULL, that means
# it must have a value to be valid
# NULL is NULL and stands in for no value
# INTEGER is an integer
# TEXT is a string of variable length
# REAL is a float
# BLOB is used to store binary data
# You can delete a table if it exists like this
# db_conn.execute("DROP TABLE IF EXISTS Employees")
# db_conn.commit()
try:
db_conn.execute("CREATE TABLE Employees(ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, FName TEXT NOT NULL, LName TEXT NOT NULL, Age INT NOT NULL, Address TEXT, Salary REAL, HireDate TEXT);")
db_conn.commit()
print("Table Created")
except sqlite3.OperationalError:
print("Table couldn't be Created")
# To insert data into a table we use INSERT INTO
# followed by the table name and the item name
# and the data to assign to those items
db_conn.execute("INSERT INTO Employees (FName, LName, Age, Address, Salary, HireDate)"
"VALUES ('Derek', 'Banas', 41, '123 Main St', '500,000', date('now'))")
db_conn.commit()
print("Employee Entered")
# Print out all the data in the database
printDB()
# Closes the database connection
db_conn.close()
# ——- myfunc.py ——-
# ---------- RECURSIVE FUNCTIONS ----------
# A function that refers to itself is a recursive function
# Calculating factorials is commonly done with a recursive
# function 3! = 3 * 2 * 1
def factorial(num):
# Every recursive function must contain a condition
# when it ceases to call itself
if num <= 1:
return 1
else:
result = num * factorial(num - 1)
return result
# 1st : result = 4 * factorial(3) = 4 * 6 = 24
# 2nd : result = 3 * factorial(2) = 3 * 2 = 6
# 3rd : result = 2 * factorial(1) = 2 * 1 = 2
# ——— MODULES ———
import myfunc
print(myfunc.factorial(4))
# OR
from myfunc import factorial
print(factorial(4))
# ——— GUI DEVELOPMENT WITH TKINTER ———
from tkinter import *
from tkinter import ttk
class Calculator:
# Stores the current value to display in the entry
calc_value = 0.0
# Will define if this was the last math button clicked
div_trigger = False
mult_trigger = False
add_trigger = False
sub_trigger = False
# Called anytime a number button is pressed
def button_press(self, value):
# Get the current value in the entry
entry_val = self.number_entry.get()
# Put the new value to the right of it
# If it was 1 and 2 is pressed it is now 12
        # (new digits are always appended on the right of the current entry)
entry_val += value
# Clear the entry box
self.number_entry.delete(0, "end")
# Insert the new value going from left to right
self.number_entry.insert(0, entry_val)
# Returns True or False if the string is a float
def isfloat(self, str_val):
try:
# If the string isn't a float float() will throw a
# ValueError
float(str_val)
# If there is a value you want to return use return
return True
except ValueError:
return False
# Handles logic when math buttons are pressed
def math_button_press(self, value):
# Only do anything if entry currently contains a number
if self.isfloat(str(self.number_entry.get())):
# make false to cancel out previous math button click
self.add_trigger = False
self.sub_trigger = False
self.mult_trigger = False
self.div_trigger = False
# Get the value out of the entry box for the calculation
self.calc_value = float(self.entry_value.get())
# Set the math button click so when equals is clicked
# that function knows what calculation to use
if value == "/":
print("/ Pressed")
self.div_trigger = True
elif value == "*":
print("* Pressed")
self.mult_trigger = True
elif value == "+":
print("+ Pressed")
self.add_trigger = True
else:
print("- Pressed")
self.sub_trigger = True
# Clear the entry box
self.number_entry.delete(0, "end")
# Performs a mathematical operation by taking the value before
# the math button is clicked and the current value. Then perform
# the right calculation by checking what math button was clicked
# last
def equal_button_press(self):
# Make sure a math button was clicked
if self.add_trigger or self.sub_trigger or self.mult_trigger or self.div_trigger:
if self.add_trigger:
solution = self.calc_value + float(self.entry_value.get())
elif self.sub_trigger:
solution = self.calc_value - float(self.entry_value.get())
elif self.mult_trigger:
solution = self.calc_value * float(self.entry_value.get())
else:
solution = self.calc_value / float(self.entry_value.get())
print(self.calc_value, " ", float(self.entry_value.get())," ", solution)
# Clear the entry box
self.number_entry.delete(0, "end")
self.number_entry.insert(0, solution)
def __init__(self, root):
# Will hold the changing value stored in the entry
self.entry_value = StringVar(root, value="")
# Define title for the app
root.title("Calculator")
# Defines the width and height of the window
root.geometry("430x220")
# Block resizing of Window
root.resizable(width=False, height=False)
# Customize the styling for the buttons and entry
style = ttk.Style()
style.configure("TButton",
font="Serif 15",
padding=10)
style.configure("TEntry",
font="Serif 18",padding=10)
# Create the text entry box
self.number_entry = ttk.Entry(root,
textvariable=self.entry_value, width=50)
self.number_entry.grid(row=0, columnspan=4)
# ----- 1st Row -----
self.button7 = ttk.Button(root, text="7", command=lambda: self.button_press('7')).grid(row=1, column=0)
self.button8 = ttk.Button(root, text="8", command=lambda: self.button_press('8')).grid(row=1, column=1)
self.button9 = ttk.Button(root, text="9", command=lambda: self.button_press('9')).grid(row=1, column=2)
self.button_div = ttk.Button(root, text="/", command=lambda: self.math_button_press('/')).grid(row=1, column=3)
# ----- 2nd Row -----
self.button4 = ttk.Button(root, text="4", command=lambda: self.button_press('4')).grid(row=2, column=0)
self.button5 = ttk.Button(root, text="5", command=lambda: self.button_press('5')).grid(row=2, column=1)
self.button6 = ttk.Button(root, text="6", command=lambda: self.button_press('6')).grid(row=2, column=2)
self.button_mult = ttk.Button(root, text="*", command=lambda: self.math_button_press('*')).grid(row=2, column=3)
# ----- 3rd Row -----
self.button1 = ttk.Button(root, text="1", command=lambda: self.button_press('1')).grid(row=3, column=0)
self.button2 = ttk.Button(root, text="2", command=lambda: self.button_press('2')).grid(row=3, column=1)
self.button3 = ttk.Button(root, text="3", command=lambda: self.button_press('3')).grid(row=3, column=2)
self.button_add = ttk.Button(root, text="+", command=lambda: self.math_button_press('+')).grid(row=3, column=3)
# ----- 4th Row -----
        self.button_clear = ttk.Button(root, text="AC", command=lambda: self.number_entry.delete(0, "end")).grid(row=4, column=0)  # AC now clears the entry
self.button0 = ttk.Button(root, text="0", command=lambda: self.button_press('0')).grid(row=4, column=1)
self.button_equal = ttk.Button(root, text="=", command=lambda: self.equal_button_press()).grid(row=4, column=2)
self.button_sub = ttk.Button(root, text="-", command=lambda: self.math_button_press('-')).grid(row=4, column=3)
# Get the root window object
root = Tk()
# Create the calculator
calc = Calculator(root)
# Run the app until exited
root.mainloop()
# ---- DATE TIME ------
from datetime import date
today = date.today()
# dd/mm/YY
d1 = today.strftime("%d/%m/%Y")
print("d1 =", d1)
# Textual month, day and year
d2 = today.strftime("%B %d, %Y")
print("d2 =", d2)
# mm/dd/y
d3 = today.strftime("%m/%d/%y")
print("d3 =", d3)
# Month abbreviation, day and year
d4 = today.strftime("%b-%d-%Y")
print("d4 =", d4)
from datetime import datetime
# datetime object containing current date and time
now = datetime.now()
print("now =", now)
# dd/mm/YY H:M:S
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
print("date and time =", dt_string)
# ---- ZIP --------
import zipfile, os
from pathlib import Path
# archiving data to a new zip archive
myPath = Path("../") / "from"
print(myPath)
# use "w" for new and "a" to append to a zip archive
newZip = zipfile.ZipFile(Path("/Users/stephan_wink/Downloads") / "example.zip", "w")
for folderName, subfolders, filenames in os.walk(Path.cwd()):
print(f"The current folder is: {folderName}")
for filename in filenames:
print(f"Zipping file: {filename}")
        newZip.write(os.path.join(folderName, filename), compress_type=zipfile.ZIP_DEFLATED)  # join with the folder so files in subfolders are found
newZip.close()
# archive information
print(newZip.namelist())
fileName = newZip.namelist()[0]
# file information
info = newZip.getinfo(fileName)
print(info.file_size) # original file size
print(info.compress_size) # compressed file size
print(info.compress_type) # compression type
# ----- ARG Parse -----
# commandline argument parsing
# example 1
import argparse
parser = argparse.ArgumentParser()
parser.parse_args()
#this results in the following:
#$ python3 prog.py --help
#usage: prog.py [-h]
#options:
# -h, --help show this help message and exit
#---------------------------------------------------------------
# example 2
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("echo", help="echo the string you use here")
args = parser.parse_args()
print(args.echo)
#this results in the following:
#$ python3 prog.py
#usage: prog.py [-h] echo
#prog.py: error: the following arguments are required: echo
#$ python3 prog.py --help
#usage: prog.py [-h] echo
#positional arguments:
# echo echo the string you use here
#options:
# -h, --help show this help message and exit
#$ python3 prog.py foo
#foo
#---------------------------------------------------------------
# example 3
import argparse
parser = argparse.ArgumentParser(description = "this is the description of the program")
parser.add_argument("-e", "--emul", help="emulation mode active", action="store_true")
parser.add_argument("-i", "--input", type=str, help="input file")
parser.add_argument("-o", "--output", type=str, help="output file")
parser.add_argument("-t", "--target", type=str, help="target selection")
args = parser.parse_args()
if args.emul:
print("emulation mode on")
if args.input:
print(args.input)
if args.output:
print(args.output)
if args.target:
print(args.target)
#this results in the following output:
#stephan_wink@winkste-mbp scripts % python3 cc_argparse.py -e -i "c:\input" -o "c:\output" -t "x86"
#emulation mode on
#c:\input
#c:\output
#x86
# --- Path / Pathlib / OS -----
# special directories
# "./" -> current directory
# "../" -> one folder up (parent directory)
from pathlib import Path
# connect paths with the "/" operator
p = Path('spam') / 'bacon' / 'egg'
# Note: one item has to be of class Path
# How to join a path structure always OS system conform
var = os.path.join(os.getcwd(), 'temp', 'file.txt')
print(var)
# This can also be split again
print(os.path.split(var))
# getting the current working directory
Path.cwd()
# getting the home directory
Path.home()
# items of a path
p = Path.cwd() / "test" / "spam.c"
print(f"Path: {p}")
print(f"Anchor: {p.anchor}")
print(f"Parent: {p.parent}")
print(f"Parents: {p.parents[0]}, {p.parents[1]}")
print(f"Stem: {p.stem}")
print(f"Suffix: {p.suffix}")
print(f"Drive: {p.drive}")
# glob function to search in folders, search can use
# * for any, or ? for one character placeholder: *.?x?
# matches .exe, .txt
l = list(Path.cwd().glob('*.py'))
# checking if path or files exist:
p = Path.cwd() / "test" / "spam.c"
p.exists() # true if path exists
p.is_file() # true if exists and is file
p.is_dir() # true if exists and is folder
import os
# changing the working directory
os.chdir("../")
# create a new directory
os.mkdir("./test")
# Note: generates "FileExistsError" exception if the file already
# exists.
# check that a path is absolute
Path.cwd().is_absolute() # == True
Path("../").is_absolute() # == False
# separation of path in dir and file(base)
print(f"OS Base: {os.path.basename(p)}")
print(f"Dir: {os.path.dirname(p)}")
# separation of dir and file in a tuple
os.path.split(p)
# the os related separator
print(os.sep)
# list all elements in a folder
os.listdir(Path.cwd()) # here the current folder
# this can be used to iterate it
# get the size of a file
os.path.getsize(Path.cwd() / "file.txt")
# Calculating the size of a folder
# Note: getsize on a directory returns the size of the directory entry
# itself, not the total size of the files it contains
print(os.path.getsize(os.getcwd()))
# --- SHUTIL ----
import shutil, os
from pathlib import Path
# copy one file to another location and renames it
p = Path.cwd()
src = Path("../") / "from" / "test.txt"
dest = Path("../") / "to" / "copytest2.txt"
shutil.copy(src, dest)
# moves the file to another location and renames it
shutil.move(src, dest)
# copy a complete folder to a different location
p = Path.cwd()
src = Path("../") / "from"
dest = Path("../") / "to" / "copytest_backup"
shutil.copytree(src, dest)
# methods for PERMANENTLY deleting files and folders
os.unlink(src) # will delete the file src
os.rmdir(src) # will delete the folder at src, folder must be empty
shutil.rmtree(src) # will remove the folder and all files
# --- send2trash ----
# delete files
# send files to trash using send2trash
import send2trash
p = Path.cwd()
src = Path("../") / "from" / "test.txt"
send2trash.send2trash(src)
# walking a folder and its subfolders with os.walk function
import os
from pathlib import Path
src = Path.cwd()
for folderName, subfolders, filenames in os.walk(src):
print(f"The current folder is: {folderName}")
for subfolder in subfolders:
print(f"SUBFOLDER OF {folderName}: {subfolder}")
for filename in filenames:
print(f"FILE INSIDE {folderName}: {filename}")
print("")
# ---- SHELVE ------
# import shelve from standard library
import shelve
# using the with operator
with shelve.open('test') as db:
db['color'] = 'blue, no yelloowwwww!'
# deleting a key and data from a shelve file
with shelve.open('test') as db:
del db['color']
# deleting all keys in a shelve file
with shelve.open('test') as db:
db.clear()
# open a new or existing shelve file:
sf = shelve.open("test")
# add any parameter with a key to the shelve file
sf["l"] = ["one", "two", "three"]
# list all keys stored in a shelve file
list(sf.keys())
# read parameter from shelve file using the key:
lr = sf["l"]
# close the shelve file
sf.close()
# --- pretty print / pprint ----
# import the pretty print module
import pprint
# define the data you want to make available as python file
ml = [{'name:': "Tim"}, {'name:': "Tobi"}]
# open the new file
f = open("test.py", 'w')
# write the data to the python file using pprint pformat function
f.write("names = " + pprint.pformat(ml) + '\n')
# close the python file
f.close()
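# Added sketch: pprint.pprint prints nested data structures in a readable layout
pprint.pprint(ml)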
# --- pyinputplus / improved user input ---
#pip install pyinputplus
import pyinputplus as pyip
# input for a string value
help(pyip.inputStr)
resp = pyip.inputStr("Enter string: ")
#input for a number value
help(pyip.inputNum)
resp = pyip.inputNum("Enter number: ")
#check range, accepted entries: 4,5
resp = pyip.inputNum("Enter number: ", min=4, lessThan=6)
#check range, accepted entries: 4,5
resp = pyip.inputNum("Enter number: ", max=5, greaterThan=3)
#for input as optional, use argument blank=True
resp = pyip.inputNum("Enter number: ", blank=True)
# limit, timeout and default
resp = pyip.inputNum(">", min=3, limit=2, default=3)
# without "default", an exception will be raised!
resp = pyip.inputNum(">", min=3, timeout=2, default=3)
#input for a password value
help(pyip.inputPassword)
resp = pyip.inputPassword("Enter PWD: ")
#input for a password value
help(pyip.inputPassword)
resp = pyip.inputPassword("Enter PWD: ")
#guided inputs thru menu or choice
resp = pyip.inputMenu(['cat', 'dog', 'moose'], numbered=True)
resp = pyip.inputChoice(['cat', 'dog', 'moose'])
#yes,no decision check
response = pyip.inputYesNo('Do you want to erase (y/n)? ')
# clipboard with pyperclip
import pyperclip
# copy to clipboard
pyperclip.copy('The text to be copied to the clipboard.')
# get from clipboard
var = pyperclip.paste()
# --- CALL/EXECUTE/RUN other programs from python -----
import os
# using the os.system function, this has got several limitations
os.system("python scripts/zipper.py")
# using the os.popen function
dir_listing = os.popen("ls").readlines()  # renamed so it doesn't shadow the built-in dir
print(dir_listing)
# using the subprocess function
import subprocess
x = subprocess.Popen(['touch', 'xyz'])
print(x) # <subprocess.Popen object at 0x7f29882f6f50>
x.poll()
print(x.returncode)
# pipe the output of the process
process = subprocess.Popen(['ls','-l'], stdout=subprocess.PIPE)
print(process.stdout.read())
# wait for the subprocess to finish
process.wait()
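# Added sketch: subprocess.run (Python 3.5+; capture_output needs 3.7+) is the
# usual high-level interface - it waits for the command and returns a
# CompletedProcess object
result = subprocess.run(['ls', '-l'], capture_output=True, text=True)
print(result.returncode)
print(result.stdout)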
|
plot_from_pp_geop_height_pot_temp_and_wind_diff_mean_state.py
|
"""
Load pp, plot and save
8km difference
"""
import os, sys
#%matplotlib inline
#%pylab inline
import matplotlib
#matplotlib.use('Agg')
# Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
#from matplotlib import figure
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import iris.unit as unit
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
from dateutil import tz
#import multiprocessing as mp
import gc
import types
import scipy.interpolate
import pdb
save_path='/nfs/a90/eepdw/Figures/EMBRACE/'
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_title.py')
model_name_convert_legend = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_legend.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/unrotate_pole.py')
pp_file_contourf = 'temp_on_p_levs_mean'
pp_file_contour ='408_on_p_levs_mean'
plot_diag='temp'
#plot_diags=['sp_hum']
plot_levels = [925]
#experiment_ids = ['dkmbq', 'dklyu']
experiment_ids = ['djzny', 'djznw', 'djznq', 'djzns', 'dklwu', 'dklzq'] # All minus large 2
#Experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
#experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklwu', 'dklzq', 'dkbhu',] # All 12
#experiment_ids = ['dkbhu', 'dkjxq']
#experiment_ids = ['dkmbq', 'dklyu', 'djznw', 'djzny', 'djznq', 'djzns', 'dklwu', 'dklzq'] # All minus large 2
#experiment_ids = ['dklyu, dkmgw']
experiment_ids = ['dkmgw', 'dklyu']
#experiment_ids = ['dklyu']
diff_id='dkmbq'
#min_contour = 0
#max_contour = 3
#tick_interval=0.3
#clevs = np.linspace(min_contour, max_contour,64)
#cmap=cm.s3pcpn_l
cmap = plt.cm.RdBu_r
#ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
pp_file_path = '/nfs/a90/eepdw/Data/EMBRACE/'
degs_crop_top = 1.7
degs_crop_bottom = 2.5
from iris.coord_categorisation import add_categorised_coord
# def add_hour_of_day(cube, coord, name='hour'):
# add_categorised_coord(cube, name, coord,
# lambda coord, x: coord.units.num2date(x).hour)
figprops = dict(figsize=(8,8), dpi=100)
#cmap=cm.s3pcpn_l
un = unit.Unit('hours since 1970-01-01 00:00:00',calendar='gregorian')
dx, dy = 10, 10
divisor=10 # for lat/lon rounding
lon_high = 101.866
lon_low = 64.115
lat_high = 33.
lat_low =-6.79
lon_low_tick=lon_low -(lon_low%divisor)
lon_high_tick=math.ceil(lon_high/divisor)*divisor
lat_low_tick=lat_low - (lat_low%divisor)
lat_high_tick=math.ceil(lat_high/divisor)*divisor
def main():
for p_level in plot_levels:
# Set pressure height contour min/max
if p_level == 925:
clev_min = -72.
clev_max = 72.
elif p_level == 850:
clev_min = -72.
clev_max = 72.
elif p_level == 700:
clev_min = -72.
clev_max = 72.
elif p_level == 500:
clev_min = -72.
clev_max = 72.
else:
            print('Contour min/max not set for this pressure level')
# Set potential temperature min/max
if p_level == 925:
clevpt_min = -3.
clevpt_max = 3.
elif p_level == 850:
clevpt_min = -3.
clevpt_max = 3.
elif p_level == 700:
clevpt_min = -3.
clevpt_max = 3.
elif p_level == 500:
clevpt_min = -3.
clevpt_max = 3.
else:
            print('Potential temperature min/max not set for this pressure level')
# Set specific humidity min/max
if p_level == 925:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p_level == 850:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p_level == 700:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p_level == 500:
clevsh_min = -0.0025
clevsh_max = 0.0025
else:
            print('Specific humidity min/max not set for this pressure level')
#clevs_col = np.arange(clev_min, clev_max)
clevs_lin = np.arange(clev_min, clev_max, 4.)
p_level_constraint = iris.Constraint(pressure=p_level)
#for plot_diag in plot_diags:
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
diffmin1 = diff_id[:-1]
pfile = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s_%s.pp' % (expmin1, experiment_id, experiment_id, pp_file_contourf)
pfile_diff = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s_%s.pp' % (diffmin1, diff_id, diff_id, pp_file_contourf)
pcube_contourf = iris.load_cube(pfile, p_level_constraint)
#pcube_contourf=iris.analysis.maths.multiply(pcube_contourf,3600)
pcube_contourf_diff = iris.load_cube(pfile_diff, p_level_constraint)
#pcube_contourf_diff=iris.analysis.maths.multiply(pcube_contourf_diff,3600)
#pdb.set_trace()
height_pp_file = '%s_%s.pp' % (experiment_id, pp_file_contour)
height_pfile = '%s%s/%s/%s' % (pp_file_path, expmin1, experiment_id, height_pp_file)
height_pp_file_diff = '%s_%s.pp' % (diff_id, pp_file_contour)
height_pfile_diff = '%s%s/%s/%s' % (pp_file_path, diffmin1, diff_id, height_pp_file_diff)
pcube_contour = iris.load_cube(height_pfile, p_level_constraint)
pcube_contour_diff = iris.load_cube(height_pfile_diff, p_level_constraint)
#pdb.set_trace()
pcube_contourf=pcube_contourf-pcube_contourf_diff
pcube_contour=pcube_contour-pcube_contour_diff
del pcube_contourf_diff, pcube_contour_diff
#pdb.set_trace()
#time_coords = pcube_contourf.coord('time')
#iris.coord_categorisation.add_day_of_year(pcube_contourf, time_coords, name='day_of_year')
#time_coords = pcube_contour.coord('time')
#iris.coord_categorisation.add_day_of_year(pcube_contour, time_coords, name='day_of_year')
fu = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s_30201_mean.pp' \
% (expmin1, experiment_id, experiment_id)
fu_diff = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s_30201_mean.pp' \
% (diffmin1, diff_id, diff_id)
#pdb.set_trace()
u_wind,v_wind = iris.load(fu, p_level_constraint)
u_wind_diff,v_wind_diff = iris.load(fu_diff, p_level_constraint)
u_wind = u_wind - u_wind_diff
v_wind = v_wind - v_wind_diff
del u_wind_diff, v_wind_diff
for t, time_cube in enumerate(pcube_contourf.slices(['grid_latitude', 'grid_longitude'])):
#pdb.set_trace()
#height_cube_slice = pcube_contour.extract(iris.Constraint(day_of_year=time_cube.coord('day_of_year').points))
height_cube_slice = pcube_contour
u_wind_slice = u_wind
v_wind_slice = v_wind
#pdb.set_trace()
                # Get time of averages for plot title
#h = un.num2date(np.array(time_cube.coord('time').points, dtype=float)[0]).strftime('%d%b')
#Convert to India time
# from_zone = tz.gettz('UTC')
# to_zone = tz.gettz('Asia/Kolkata')
# h_utc = un.num2date(np.array(time_cube.coord('day_of_year').points, dtype=float)[0]).replace(tzinfo=from_zone)
# h_local = h_utc.astimezone(to_zone).strftime('%H%M')
### Winds
cs_w = u_wind_slice.coord_system('CoordSystem')
lat_w = u_wind_slice.coord('grid_latitude').points
lon_w = u_wind_slice.coord('grid_longitude').points
lons_w, lats_w = np.meshgrid(lon_w, lat_w)
lons_w,lats_w = iris.analysis.cartography.unrotate_pole(lons_w,lats_w, cs_w.grid_north_pole_longitude, cs_w.grid_north_pole_latitude)
lon_w=lons_w[0]
lat_w=lats_w[:,0]
### Regrid winds to 2 degree spacing
lat_wind_1deg = np.arange(lat_low,lat_high, 2)
lon_wind_1deg = np.arange(lon_low,lon_high, 2)
#pdb.set_trace()
lons_wi, lats_wi = np.meshgrid(lon_wind_1deg, lat_wind_1deg)
fl_la_lo = (lats_w.flatten(),lons_w.flatten())
p_levs = u_wind_slice.coord('pressure').points
sc = np.searchsorted(p_levs, p_level)
u = scipy.interpolate.griddata(fl_la_lo, u_wind_slice.data.flatten(), (lats_wi, lons_wi), method='linear')
v = scipy.interpolate.griddata(fl_la_lo, v_wind_slice.data.flatten(), (lats_wi, lons_wi), method='linear')
################################### # PLOT ##############################################
fig = plt.figure(**figprops)
#cmap=plt.cm.RdBu_r
ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_low,lon_high,lat_low+degs_crop_bottom,lat_high-degs_crop_top))
m =\
Basemap(llcrnrlon=lon_low,llcrnrlat=lat_low,urcrnrlon=lon_high,urcrnrlat=lat_high, rsphere = 6371229)
#pdb.set_trace()
# lat = pcube_contourf.coord('grid_latitude').points
# lon = pcube_contourf.coord('grid_longitude').points
# cs = cube.coord_system('CoordSystem')
# lons, lats = np.meshgrid(lon, lat)
# lons, lats = iris.analysis.cartography.unrotate_pole\
# (lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
# x,y = m(lons,lats)
#x_w,y_w = m(lons_wi, lats_wi)
if plot_diag=='temp':
min_contour = clevpt_min
max_contour = clevpt_max
cb_label='K'
main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), potential temperature (colours),\
and wind (vectors)'
tick_interval=2
clev_number=max_contour-min_contour+1
elif plot_diag=='sp_hum':
min_contour = clevsh_min
max_contour = clevsh_max
cb_label='kg/kg'
main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), specific humidity (colours),\
and wind (vectors)'
tick_interval=0.002
clev_number=max_contour-min_contour+0.001
                #clevs = np.linspace(min_contour, max_contour, clev_number)  # superseded by the fixed 32-level linspace below
clevs = np.linspace(min_contour, max_contour, 32)
#clevs=np.linspace(-10.,10.,32)
# #clevs = np.linspace(-3, 3, 32)
# cont = plt.contourf(x,y,time_cube.data, clevs, cmap=cmap, extend='both')
#cont = iplt.contourf(time_cube, clevs, cmap=cmap, extend='both')
lat = time_cube.coord('grid_latitude').points
lon = time_cube.coord('grid_longitude').points
lons, lats = np.meshgrid(lon, lat)
cs = time_cube.coord_system('CoordSystem')
lons,lats = iris.analysis.cartography.unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
cont = plt.contourf(lons, lats, time_cube.data, clevs, cmap=cmap, extend='both')
#pdb.set_trace()
cs_lin = plt.contour(lons, lats, height_cube_slice.data, clevs_lin,colors='#262626',linewidths=1.)
plt.clabel(cs_lin, fontsize=14, fmt='%d', color='black')
#del time_cube
#plt.clabel(cont, fmt='%d')
#ax.stock_img()
ax.coastlines(resolution='110m', color='#262626')
gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
#gl.xlines = False
dx, dy = 10, 10
gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+dx,dx))
gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+dy,dy))
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 12, 'color':'#262626'}
#gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
gl.ylabel_style = {'size': 12, 'color':'#262626'}
x_w,y_w = m(lons_wi, lats_wi)
wind = m.quiver(x_w,y_w, u, v,scale=75, color='#262626' )
qk = plt.quiverkey(wind, 0.1, 0.1, 1, '5 m/s', labelpos='W')
cbar = fig.colorbar(cont, orientation='horizontal', pad=0.05, extend='both')
cbar.set_label('%s' % cb_label, fontsize=10, color='#262626')
cbar.set_label(time_cube.units, fontsize=10, color='#262626')
cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
cbar.set_ticklabels(['${%.1f}$' % i for i in ticks])
cbar.ax.tick_params(labelsize=10, color='#262626')
#main_title='Mean Rainfall for EMBRACE Period -%s UTC (%s IST)' % (h, h_local)
#main_title=time_cube.standard_name.title().replace('_',' ')
#model_info = re.sub(r'[(\']', ' ', model_info)
#model_info = re.sub(r'[\',)]', ' ', model_info)
#print model_info
file_save_name = '%s_minus_%s_%s_and_%s_%s_hPa_geop_height_and_wind' \
% (experiment_id, diff_id, pp_file_contour, pp_file_contourf, p_level)
save_dir = '%s%s/%s_and_%s' % (save_path, experiment_id, pp_file_contour, pp_file_contourf)
if not os.path.exists('%s' % save_dir): os.makedirs('%s' % (save_dir))
#plt.show()
plt.title('%s-%s' % (str(model_name_convert_legend.main(experiment_id)), str(model_name_convert_legend.main(diff_id))))
fig.savefig('%s/%s.png' % (save_dir, file_save_name), format='png', bbox_inches='tight')
#fig.savefig('%s/%s_short_title.png' % (save_dir, file_save_name) , format='png', bbox_inches='tight')
#plt.show()
#model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
#plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16)
#fig.savefig('%s/%s.png' % (save_dir, file_save_name), format='png', bbox_inches='tight')
fig.clf()
plt.close()
#del time_cube
gc.collect()
if __name__ == '__main__':
main()
#proc=mp.Process(target=worker)
#proc.daemon=True
#proc.start()
#proc.join()
|
parallel.py
|
# Parallel implementation for sampling a multi-order echelle spectrum. Because the likelihood
# calculation is independent for each order, the runtime is essentially constant regardless
# of how large a spectral range is used.
# Additionally, one could use this to fit multiple stars at once.
import argparse
parser = argparse.ArgumentParser(prog="parallel.py", description="Run Starfish"
" fitting model in parallel.")
parser.add_argument("input", help="*.yaml file specifying parameters.")
parser.add_argument("-r", "--run_index", help="Which run (of those running "
"concurrently) is this? All data will be written into this directory, "
"overwriting any that exists.")
parser.add_argument("-p", "--perturb", type=float, help="Randomly perturb the "
"starting position of the chain, as a multiple of the jump parameters.")
args = parser.parse_args()
from multiprocessing import Process, Pipe
import os
import numpy as np
from Starfish.model import StellarSampler, NuisanceSampler
from Starfish.spectrum import DataSpectrum, Mask, ChebyshevSpectrum
from Starfish.grid_tools import SPEX, TRES
from Starfish.emulator import Emulator
import Starfish.constants as C
from Starfish.covariance import get_dense_C, make_k_func
from scipy.special import j1
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.linalg import cho_factor, cho_solve
from numpy.linalg import slogdet
from astropy.stats.funcs import sigma_clip
import gc
import logging
from itertools import chain
from collections import deque
from operator import itemgetter
import yaml
import shutil
f = open(args.input)
config = yaml.load(f)
f.close()
outdir = config['outdir']
name = config['name']
base = outdir + name + "run{:0>2}/"
# This code is necessary for multiple simultaneous runs on odyssey
# so that different runs do not write into the same output directory
if args.run_index is None:
run_index = 0
while os.path.exists(base.format(run_index)) and (run_index < 40):
print(base.format(run_index), "exists")
run_index += 1
outdir = base.format(run_index)
else:
run_index = args.run_index
outdir = base.format(run_index)
#Delete this outdir, if it exists
if os.path.exists(outdir):
print("Deleting", outdir)
shutil.rmtree(outdir)
print("Creating ", outdir)
os.makedirs(outdir)
# Determine how many filenames are in config['data']. Always load as a list, even len == 1.
# If there are multiple datasets, this list will be longer than length 1
data = config["data"]
if not isinstance(data, list):
data = [data]
print("loading data spectra {}".format(data))
orders = config["orders"] #list of which orders to fit
order_ids = np.arange(len(orders))
DataSpectra = [DataSpectrum.open(data_file, orders=orders) for data_file in data]
# Number of different data sets we are fitting. Used for indexing purposes.
spectra = np.arange(len(DataSpectra))
INSTRUMENTS = {"TRES": TRES, "SPEX": SPEX}
#Instruments are provided as one per dataset
Instruments = [INSTRUMENTS[key]() for key in config["instruments"]]
masks = config.get("mask", None)
if masks is not None:
for mask, dataSpec in zip(masks, DataSpectra):
myMask = Mask(mask, orders=orders)
dataSpec.add_mask(myMask.masks)
for model_number in range(len(DataSpectra)):
for order in config['orders']:
order_dir = "{}{}/{}".format(outdir, model_number, order)
print("Creating ", order_dir)
os.makedirs(order_dir)
# Copy yaml file to outdir for archiving purposes
shutil.copy(args.input, outdir + "/input.yaml")
# Set up the logger
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", filename="{}log.log".format(
outdir), level=logging.DEBUG, filemode="w", datefmt='%m/%d/%Y %I:%M:%S %p')
def perturb(startingDict, jumpDict, factor=3.):
'''
Given a starting parameter dictionary loaded from a config file, perturb the
values as a multiple of the jump distribution. This is designed so that
not all chains start at exactly the same place.
Modifies the startingDict
'''
for key in startingDict.keys():
startingDict[key] += factor * np.random.normal(loc=0, scale=jumpDict[key])
stellar_Starting = config['stellar_params']
stellar_tuple = C.dictkeys_to_tuple(stellar_Starting)
# go through each item in stellar_tuple, and assign the appropriate covariance to it
stellar_MH_cov = np.array([float(config["stellar_jump"][key]) for key in stellar_tuple])**2 \
* np.identity(len(stellar_Starting))
fix_logg = config.get("fix_logg", None)
# Updating specific covariances to speed mixing
if config["use_cov"]:
stellar_cov = config["stellar_cov"]
factor = stellar_cov["factor"]
stellar_MH_cov[0, 1] = stellar_MH_cov[1, 0] = stellar_cov['temp_logg'] * factor
stellar_MH_cov[0, 2] = stellar_MH_cov[2, 0] = stellar_cov['temp_Z'] * factor
stellar_MH_cov[1, 2] = stellar_MH_cov[2, 1] = stellar_cov['logg_Z'] * factor
if fix_logg is None:
stellar_MH_cov[0, 5] = stellar_MH_cov[5, 0] = stellar_cov['temp_logOmega'] * factor
else:
stellar_MH_cov[0, 4] = stellar_MH_cov[4, 0] = stellar_cov['temp_logOmega'] * factor
def info(title):
'''
Print process information useful for debugging.
'''
print(title)
print('module name:', __name__)
if hasattr(os, 'getppid'): # only available on Unix
print('parent process:', os.getppid())
print('process id:', os.getpid())
class OrderModel:
def __init__(self, debug=False):
'''
This object contains all of the variables necessary for the partial
lnprob calculation for one echelle order. It is designed to first be
instantiated within the main processes and then forked to other
subprocesses. Once operating in the subprocess, the variables specific
to the order are loaded with an `INIT` message call, which tells which key
to initialize on in the `self.initialize()`.
'''
self.lnprob = -np.inf
self.lnprob_last = -np.inf
self.func_dict = {"INIT": self.initialize,
"DECIDE": self.decide_stellar,
"INST": self.instantiate,
"LNPROB": self.stellar_lnprob,
"GET_LNPROB": self.get_lnprob,
"FINISH": self.finish
}
self.debug = debug
def initialize(self, key):
'''
Initialize the OrderModel to the correct chunk of data (echelle order).
:param key: (spectrum_id, order_id)
:param type: (int, int)
This should only be called after all subprocess have been forked.
'''
        self.id = key
        self.spectrum_id, self.order_id = self.id
        self.instrument = Instruments[self.spectrum_id]
        self.DataSpectrum = DataSpectra[self.spectrum_id]
        self.wl = self.DataSpectrum.wls[self.order_id]
        self.fl = self.DataSpectrum.fls[self.order_id]
        self.sigma = self.DataSpectrum.sigmas[self.order_id]
        self.npoints = len(self.wl)
        self.mask = self.DataSpectrum.masks[self.order_id]
        self.order = self.DataSpectrum.orders[self.order_id]
        # Create the logger before its first use (it was previously referenced
        # before being assigned, which would raise an AttributeError)
        self.logger = logging.getLogger("{} {}".format(self.__class__.__name__, self.order))
        if self.debug:
            self.logger.setLevel(logging.DEBUG)
        else:
            self.logger.setLevel(logging.INFO)
        self.logger.info("Initializing model on Spectrum {}, order {}.".format(self.spectrum_id, self.order_id))
self.npoly = config["cheb_degree"]
self.ChebyshevSpectrum = ChebyshevSpectrum(self.DataSpectrum, self.order_id, npoly=self.npoly)
self.resid_deque = deque(maxlen=500) #Deque that stores the last residual spectra, for averaging
self.counter = 0
self.Emulator = Emulator.open(config["PCA_path"]) # Returns mu and var vectors
self.Emulator.determine_chunk_log(self.wl) # Truncates the grid to this wl format, power of 2
pg = self.Emulator.PCAGrid
self.wl_FFT = pg.wl
self.ncomp = pg.ncomp
self.PCOMPS = np.vstack((pg.flux_mean[np.newaxis,:], pg.flux_std[np.newaxis,:], pg.pcomps))
self.min_v = self.Emulator.min_v
self.ss = np.fft.rfftfreq(len(self.wl_FFT), d=self.min_v)
self.ss[0] = 0.01 # junk so we don't get a divide by zero error
self.pcomps = np.empty((self.ncomp, self.npoints))
self.flux_mean = np.empty((self.npoints,))
self.flux_std = np.empty((self.npoints,))
self.mus, self.vars = None, None
self.C_GP = None
self.data_mat = None
self.sigma_matrix = self.sigma**2 * np.eye(self.npoints)
self.prior = 0.0 # Modified and set by NuisanceSampler.lnprob
self.nregions = 0
self.exceptions = []
#TODO: perturb
#if args.perturb:
#perturb(stellar_Starting, config["stellar_jump"], factor=args.perturb)
cheb_MH_cov = float(config["cheb_jump"])**2 * np.ones((self.npoly,))
cheb_tuple = ("logc0",)
# add in new coefficients
for i in range(1, self.npoly):
cheb_tuple += ("c{}".format(i),)
# set starting position to 0
cheb_Starting = {k:0.0 for k in cheb_tuple}
# Design cov starting
cov_Starting = config['cov_params']
cov_tuple = C.dictkeys_to_cov_global_tuple(cov_Starting)
cov_MH_cov = np.array([float(config["cov_jump"][key]) for key in cov_tuple])**2
nuisance_MH_cov = np.diag(np.concatenate((cheb_MH_cov, cov_MH_cov)))
nuisance_starting = {"cheb": cheb_Starting, "cov": cov_Starting, "regions":{}}
# Because this initialization is happening on the subprocess, I think
# the random state should be fine.
# Update the outdir based upon id
self.noutdir = outdir + "{}/{}/".format(self.spectrum_id, self.order)
# Create the nuisance parameter sampler to run independently
self.sampler = NuisanceSampler(OrderModel=self, starting_param_dict=nuisance_starting, cov=nuisance_MH_cov,
debug=True, outdir=self.noutdir, order=self.order)
self.p0 = self.sampler.p0
# Update the nuisance parameters to the starting values so that we at
# least have a self.data_mat
self.logger.info("Updating nuisance parameter data products to starting values.")
self.update_nuisance(nuisance_starting)
self.lnprob = None
def instantiate(self, *args):
'''
Clear the old NuisanceSampler, instantiate the regions using the stored
residual spectra, and create a new NuisanceSampler.
'''
# threshold for sigma clipping
sigma=config["sigma_clip"]
# array that specifies if a pixel is already covered.
# to start, it should be all False
covered = np.zeros((self.npoints,), dtype='bool')
#average all of the spectra in the deque together
residual_array = np.array(self.resid_deque)
if len(self.resid_deque) == 0:
raise RuntimeError("No residual spectra stored yet.")
else:
residuals = np.average(residual_array, axis=0)
# run the sigma_clip algorithm until converged, and we've identified the outliers
filtered_data = sigma_clip(residuals, sig=sigma, iters=None)
mask = filtered_data.mask
wl = self.wl
sigma0 = config['region_priors']['sigma0']
logAmp = config["region_params"]["logAmp"]
sigma = config["region_params"]["sigma"]
# Sort in decreasing strength of residual
self.nregions = 0
regions = {}
region_mus = {}
for w, resid in sorted(zip(wl[mask], np.abs(residuals[mask])), key=itemgetter(1), reverse=True):
if w in wl[covered]:
continue
else:
# check to make sure region is not *right* at the edge of the echelle order
if w <= np.min(wl) or w >= np.max(wl):
continue
else:
# instantiate region and update coverage
# Default amp and sigma values
regions[self.nregions] = {"logAmp":logAmp, "sigma":sigma, "mu":w}
region_mus[self.nregions] = w # for evaluating the mu prior
self.nregions += 1
# determine the stretch of wl covered by this new region
ind = (wl >= (w - sigma0)) & (wl <= (w + sigma0))
# update the covered regions
covered = covered | ind
# Take the current nuisance positions as a starting point, and add the regions
starting_dict = self.sampler.params.copy()
starting_dict["regions"] = regions
region_mus = np.array([region_mus[i] for i in range(self.nregions)])
# Setup the priors
region_priors = config["region_priors"]
region_priors.update({"mus":region_mus})
prior_params = {"regions":region_priors}
# do all this crap again
cheb_MH_cov = float(config["cheb_jump"])**2 * np.ones((self.npoly,))
cov_MH_cov = np.array([float(config["cov_jump"][key]) for key in self.sampler.cov_tup])**2
region_MH_cov = [float(config["region_jump"][key])**2 for key in C.cov_region_parameters]
regions_MH_cov = np.array([region_MH_cov for i in range(self.nregions)]).flatten()
nuisance_MH_cov = np.diag(np.concatenate((cheb_MH_cov, cov_MH_cov, regions_MH_cov)))
print(starting_dict)
print("cov shape {}".format(nuisance_MH_cov.shape))
# Initialize a new sampler, replacing the old one
self.sampler = NuisanceSampler(OrderModel=self, starting_param_dict=starting_dict, cov=nuisance_MH_cov,
debug=True, outdir=self.noutdir, prior_params=prior_params, order=self.order)
self.p0 = self.sampler.p0
# Update the nuisance parameters to the starting values so that we at least have a self.data_mat
print("Updating nuisance parameter data products to starting values.")
self.update_nuisance(starting_dict)
self.lnprob = self.evaluate()
# To speed up convergence, try just doing a bunch of nuisance runs before
# going into the iteration pattern
print("Doing nuisance burn-in for {} samples".format(config["nuisance_burn"]))
self.independent_sample(config["nuisance_burn"])
def get_lnprob(self, *args):
'''
Return the *current* value of lnprob.
Intended to be called from the master process (StellarSampler.sample), to
query the child processes for their current value of lnprob.
'''
return self.lnprob
def stellar_lnprob(self, params):
'''
Update the model to the parameters and then evaluate the lnprob.
Intended to be called from the master process via the command "LNPROB".
'''
try:
self.update_stellar(params)
lnp = self.evaluate() # Also sets self.lnprob to new value
return lnp
except C.ModelError:
self.logger.debug("ModelError in stellar parameters, sending back -np.inf {}".format(params))
return -np.inf
def evaluate(self):
'''
Return the lnprob using the current version of the DataCovariance matrix
and other intermediate products.
'''
self.lnprob_last = self.lnprob
X = (self.ChebyshevSpectrum.k * self.flux_std * np.eye(self.npoints)).dot(self.pcomps.T)
CC = X.dot(self.C_GP.dot(X.T)) + self.data_mat
R = self.fl - self.ChebyshevSpectrum.k * self.flux_mean - X.dot(self.mus)
try:
factor, flag = cho_factor(CC)
except np.linalg.LinAlgError as e:
self.logger.debug("self.sampler.params are {}".format(self.sampler.params))
raise C.ModelError("Can't Cholesky factor {}".format(e))
logdet = np.sum(2 * np.log((np.diag(factor))))
self.lnprob = -0.5 * (np.dot(R, cho_solve((factor, flag), R)) + logdet) + self.prior
if self.counter % 100 == 0:
self.resid_deque.append(R)
self.counter += 1
return self.lnprob
def revert_stellar(self):
'''
Revert the status of the model from a rejected stellar proposal.
'''
self.logger.debug("Reverting stellar parameters")
self.lnprob = self.lnprob_last
self.flux_mean = self.flux_mean_last
self.flux_std = self.flux_std_last
self.pcomps = self.pcomps_last
self.mus, self.vars = self.mus_last, self.vars_last
self.C_GP = self.C_GP_last
def update_stellar(self, params):
'''
Update the model to the current stellar parameters.
'''
self.logger.debug("Updating stellar parameters to {}".format(params))
# Store the current accepted values before overwriting with new proposed values.
self.flux_mean_last = self.flux_mean
self.flux_std_last = self.flux_std
self.pcomps_last = self.pcomps
self.mus_last, self.vars_last = self.mus, self.vars
self.C_GP_last = self.C_GP
#TODO: Possible speedups:
# 1. Store the PCOMPS pre-FFT'd
# Shift the velocity
vz = params["vz"]
# Local, shifted copy
wl_FFT = self.wl_FFT * np.sqrt((C.c_kms + vz) / (C.c_kms - vz))
# FFT and convolve operations
vsini = params["vsini"]
if vsini < 0.2:
raise C.ModelError("vsini must be positive")
FF = np.fft.rfft(self.PCOMPS, axis=1)
# Determine the stellar broadening kernel
ub = 2. * np.pi * vsini * self.ss
sb = j1(ub) / ub - 3 * np.cos(ub) / (2 * ub ** 2) + 3. * np.sin(ub) / (2 * ub ** 3)
# set zeroth frequency to 1 separately (DC term)
sb[0] = 1.
# institute velocity and instrumental taper
FF_tap = FF * sb
# do ifft
pcomps_full = np.fft.irfft(FF_tap, len(wl_FFT), axis=1)
# Spectrum resample operations
if min(self.wl) < min(wl_FFT) or max(self.wl) > max(wl_FFT):
raise RuntimeError("Data wl grid ({:.2f},{:.2f}) must fit within the range of wl_FFT ({"
":.2f},{:.2f})".format(min(self.wl), max(self.wl), min(wl_FFT), max(wl_FFT)))
# Take the output from the FFT operation (pcomps_full), and stuff them
# into respective data products
for lres, hres in zip(chain([self.flux_mean, self.flux_std], self.pcomps), pcomps_full):
interp = InterpolatedUnivariateSpline(wl_FFT, hres, k=5)
lres[:] = interp(self.wl)
del interp
gc.collect()
# Adjust flux_mean and flux_std by Omega
Omega = 10**params["logOmega"]
self.flux_mean *= Omega
self.flux_std *= Omega
# Now update the parameters from the emulator
pars = np.array([params["temp"], params["logg"], params["Z"]])
# If pars are outside the grid, Emulator will raise C.ModelError
self.mus, self.vars = self.Emulator(pars)
self.C_GP = self.vars * np.eye(self.ncomp)
def decide_stellar(self, yes):
'''
Interpret the decision from the master process to either revert the
stellar model (rejected parameters) or move on (accepted parameters).
'''
if yes:
# accept and move on
self.logger.debug("Deciding to accept stellar parameters")
else:
# revert and move on
self.logger.debug("Deciding to revert stellar parameters")
self.revert_stellar()
# Proceed with independent sampling
self.independent_sample(1)
def update_nuisance(self, params):
'''
Update the nuisance parameters and data covariance matrix.
:param params: large dictionary containing cheb, cov, and regions
'''
self.logger.debug("Updating nuisance parameters to {}".format(params))
# Read off the Chebyshev parameters and update
self.ChebyshevSpectrum.update(params["cheb"])
# Create the full data covariance matrix.
l = params["cov"]["l"]
sigAmp = params["cov"]["sigAmp"]
# Check to make sure the global covariance parameters make sense
if sigAmp < 0.1:
raise C.ModelError("sigAmp shouldn't be lower than 0.1, something is wrong.")
max_r = 6.0 * l # [km/s]
# Check all regions, take the max
if self.nregions > 0:
regions = params["regions"]
keys = sorted(regions)
sigmas = np.array([regions[key]["sigma"] for key in keys]) #km/s
#mus = np.array([regions[key]["mu"] for key in keys])
max_reg = 4.0 * np.max(sigmas)
#If this is a larger distance than the global length, replace it
max_r = max_reg if max_reg > max_r else max_r
#print("Max_r now set by regions {}".format(max_r))
# print("max_r is {}".format(max_r))
# Create a partial function which returns the proper element.
k_func = make_k_func(params)
# Store the previous data matrix in case we want to revert later
self.data_mat_last = self.data_mat
self.data_mat = get_dense_C(self.wl, k_func=k_func, max_r=max_r) + sigAmp*self.sigma_matrix
def revert_nuisance(self, *args):
'''
Revert all products from the nuisance parameters, including the data
covariance matrix.
'''
self.logger.debug("Reverting nuisance parameters")
self.lnprob = self.lnprob_last
self.ChebyshevSpectrum.revert()
self.data_mat = self.data_mat_last
def clear_resid_deque(self):
'''
Clear the accumulated residual spectra.
'''
self.resid_deque.clear()
def independent_sample(self, niter):
'''
Do the independent sampling specific to this echelle order, using the
attached self.sampler (NuisanceSampler).
:param niter: number of iterations to complete before returning to master process.
'''
self.logger.debug("Beginning independent sampling on nuisance parameters")
if self.lnprob:
# If we have a current value, pass it to the sampler
self.p0, self.lnprob, state = self.sampler.run_mcmc(pos0=self.p0, N=niter, lnprob0=self.lnprob)
else:
# Otherwise, start from the beginning
self.p0, self.lnprob, state = self.sampler.run_mcmc(pos0=self.p0, N=niter)
self.logger.debug("Finished independent sampling on nuisance parameters")
# Don't return anything to the master process.
def finish(self, *args):
'''
Wrap up the sampling and write the samples to disk.
'''
print(self.sampler.acceptance_fraction)
print(self.sampler.acor)
self.sampler.write()
self.sampler.plot() # triangle_plot=True
print("There were {} exceptions.".format(len(self.exceptions)))
# print out the values of each region key.
for exception in self.exceptions:
regions = exception["regions"]
keys = sorted(regions)
for key in keys:
print(regions[key])
cov = exception["cov"]
print(cov)
print("\n\n")
def brain(self, conn):
'''
The infinite loop of the subprocess, which continues to listen for
messages on the pipe.
'''
self.conn = conn
alive = True
while alive:
#Keep listening for messages put on the Pipe
alive = self.interpret()
#Once self.interpret() returns `False`, this loop will die.
self.conn.send("DEAD")
def interpret(self):
'''
Interpret the messages being put into the Pipe, and do something with
them. Messages are always sent in a 2-arg tuple (fname, arg)
Right now we only expect one function and one argument but this could
be generalized to **args.
'''
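# Illustrative sketch (not part of the original code): the master process is
# expected to drive this loop by sending (fname, arg) tuples over the Pipe.
# The exact payloads below are assumptions based on self.func_dict above:
#
#   pconn.send(("INIT", (spectrum_id, order_id)))  # load one echelle order
#   pconn.send(("LNPROB", stellar_params))         # evaluate and return lnprob
#   pconn.send(("GET_LNPROB", None))               # query the current lnprob
#   pconn.send(("FINISH", None))                   # write samples to disk
#   pconn.send(("DIE", None))                      # unknown key -> kill signal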
#info("brain")
fname, arg = self.conn.recv() # Waits here to receive a new message
self.logger.debug("{} received message {}".format(os.getpid(), (fname, arg)))
func = self.func_dict.get(fname, False)
if func:
response = func(arg)
else:
self.logger.info("Given an unknown function {}, assuming kill signal.".format(fname))
return False
# Functions only return a response other than None when they want them
# communicated back to the master process.
# Some commands sent to the child processes do not require a response
# to the main process.
if response:
self.logger.debug("{} sending back {}".format(os.getpid(), response))
self.conn.send(response)
return True
# We create one OrderModel in the main process. When the process forks, each
# subprocess now has its own independent OrderModel instance.
# Then, each forked model will be customized using an INIT command passed
# through the PIPE.
model = OrderModel(debug=True)
# Comment out these following lines to profile
# Fork a subprocess for each key: (spectra, order)
pconns = {} # Parent connections
cconns = {} # Child connections
ps = {}
for spectrum in spectra:
for order_id in order_ids:
pconn, cconn = Pipe()
key = (spectrum, order_id)
pconns[key], cconns[key] = pconn, cconn
p = Process(target=model.brain, args=(cconn,))
p.start()
ps[key] = p
# Initialize all of the orders to a specific DataSpectrum and echelle order
for key, pconn in pconns.items():
pconn.send(("INIT", key))
# From here on, this script operates on the master process only.
if args.perturb:
perturb(stellar_Starting, config["stellar_jump"], factor=args.perturb)
def profile_code():
'''
Test hook designed to be used by cprofile or kernprof. Does not include any
network latency from communicating or synchronizing between processes
because we run on just one process.
'''
#Evaluate one complete iteration from delivery of stellar parameters from master process
#Master proposal
stellar_Starting.update({"logg":4.29})
model.stellar_lnprob(stellar_Starting)
#Assume we accepted
model.decide_stellar(True)
#Right now, assumes Kurucz order 23
def main():
# Uncomment these lines to profile
# #Initialize the current model for profiling purposes
# model.initialize((0, 0))
# import cProfile
# cProfile.run("profile_code()", "prof")
# import sys; sys.exit()
mySampler = StellarSampler(pconns=pconns, starting_param_dict=stellar_Starting,
cov=stellar_MH_cov, outdir=outdir, debug=True, fix_logg=fix_logg)
mySampler.run_mcmc(mySampler.p0, config['burn_in'])
#mySampler.reset()
self.logger.info("Instantiating Regions")
# Now that we are burned in, instantiate any regions
for key, pconn in pconns.items():
pconn.send(("INST", None))
mySampler.run_mcmc(mySampler.p0, config['samples'])
print(mySampler.acceptance_fraction)
print(mySampler.acor)
mySampler.write()
mySampler.plot() #triangle_plot = True
# Kill all of the orders
for pconn in pconns.values():
pconn.send(("FINISH", None))
pconn.send(("DIE", None))
# Join on everything and terminate
for p in ps.values():
p.join()
p.terminate()
import sys;sys.exit()
if __name__=="__main__":
main()
# All subprocesses will inherit pipe file descriptors created in the master process.
# http://www.pushingbits.net/posts/python-multiprocessing-with-pipes/
# thus, to really close a pipe, you need to close it in every subprocess.
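# A minimal sketch of that idea (illustrative only, not used above): each child
# closes the parent's end of the Pipe it inherited, and the parent closes the
# child ends it no longer needs, so that EOF propagates correctly.
#
#   def child(child_conn, parent_conn):
#       parent_conn.close()           # drop the inherited parent end
#       msg = child_conn.recv()
#       child_conn.close()
#
#   parent_conn, child_conn = Pipe()
#   p = Process(target=child, args=(child_conn, parent_conn))
#   p.start()
#   child_conn.close()                # parent keeps only parent_conn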
|
AddSquaresMultiThreadingMany.py
|
import time, os
import threading as th
import multiprocessing as mp
import random
#Number of random numbers to create
NUMBEROFNUMBERS = 8
def square_number(idx, number, results):
pid = os.getpid()
threadName = th.current_thread().name
processName = mp.current_process().name
#Print Starting Statement
print(f"{pid} * {processName} * {threadName} ---> Starting....")
#Multiply Numbers Together
results[idx]=number*number
#Waste time counting to a very large number ;)
n=5000000
while n>0:
n -= 1
#Print ending statement
print(f"{pid} * {processName} * {threadName} ---> Finished. Square is: ", str(number * number))
if __name__=="__main__":
#Start Timer
start = time.time()
#Number List To Process
NUMBERLIST = []
#Create random numbers to process
for i in range(0,NUMBEROFNUMBERS):
n = random.randint(1,10)
NUMBERLIST.append(n)
#Store details of each job ready to run
jobs = []
#Create Empty Array For Results
results = mp.Array('i', len(NUMBERLIST))
#Calculate the Square of Each Number
for idx, num in enumerate(NUMBERLIST):
p = mp.Process(target=square_number, args=(idx, num, results))
p.start()
jobs.append(p)
#Wait for all processes to complete
for p in jobs:
p.join()
#Output Sum of Squares
print("Sum of Squares is: ", sum(results[:]))
#Stop Timer
end = time.time()
#Output Total Time
print('Time taken in seconds -', end - start)
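#For comparison, a sketch of the same sum-of-squares using a process Pool
#(illustrative only, not part of the original exercise). Pool.map returns the
#squares directly, so the shared mp.Array is not needed:
#
# def square(n):
#     return n * n
#
# if __name__ == "__main__":
#     with mp.Pool() as pool:
#         print("Sum of Squares is: ", sum(pool.map(square, NUMBERLIST)))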
|
bot.py
|
import os
import re
import time
from functools import wraps
from threading import Thread
from slackclient import SlackClient
import redis as _redis
from hermes.util import Util
class Bot:
default_config = {
"SLACK_TOKEN": os.environ.get("SLACK_TOKEN"),
"REDIS_HOST": os.environ.get("REDIS_HOST", "localhost"),
"REDIS_PORT": int(os.environ.get("REDIS_PORT", 6379)),
"REDIS_DB": int(os.environ.get("REDIS_DB", 0)),
}
def __init__(self, config=None):
self.commands = {}
self.jobs = {}
self.regexes = {}
self.config = {**self.default_config, **(config or {})}  # merge user config over defaults
if not self.config["SLACK_TOKEN"]:
raise Exception("SLACK_TOKEN must be set")
self.slack_client = SlackClient(self.config["SLACK_TOKEN"])
self.connect_redis()
self.util = Util(self)
self.util.update_users()
self.util.update_channels()
def connect_redis(self):
self.redis = _redis.StrictRedis(
host=self.config["REDIS_HOST"],
port=self.config["REDIS_PORT"],
db=self.config["REDIS_DB"],
)
def _job_runner(self):
while True:
for timer in self.jobs.keys():
if int(time.time()) % timer == 0:  # compare whole seconds; a float modulo is almost never exactly 0
for job in self.jobs[timer]:
try:
job()
except Exception as e:
print(f"Error: {e}")
time.sleep(1)
def run(self):
"""Run the bot
Creates a worker thread for jobs, then connects to Slack's RTM API
"""
self.job_runner = Thread(target=self._job_runner)
self.job_runner.start()
if self.slack_client.rtm_connect(with_team_state=False, auto_reconnect=True):
while True:
for event in self.slack_client.rtm_read():
if event["type"] == "message" and "text" in event:
if event.get("subtype") != "bot_message":
# Process the message.
words = event.get("text").split()
# Command mode, find a matching command.
if words and words[0].startswith("!"):
command = self.commands.get(words[0][1:])
if command:
try:
command(
event, words[1:] if len(words) > 1 else []
)
except Exception as e:
print(f"Error: {e}")
for regex in self.regexes:
matches = re.findall(regex, " ".join(words))
if matches:
for match in matches:
if not match:
# Throw away empty matches
continue
try:
self.regexes[regex](event, match)
except Exception as e:
print(f"Error: {e}")
def register_command(self, name, f):
print(f"Registering command: {name}")
self.commands[name] = f
def command(self):
"""Defines a command function.
Command functions take two arguments:
- the slack event itself
- a (possibly empty) list of arguments.
"""
def decorator(f):
self.register_command(f.__name__, f)
return f
return decorator
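# Illustrative usage sketch (assumes a Bot instance named `bot`; the command
# name "ping" and the reply body are examples, not part of this module):
#
#   @bot.command()
#   def ping(event, args):
#       bot.slack_client.api_call(
#           "chat.postMessage", channel=event["channel"], text="pong", as_user=True
#       )
#
# A user typing "!ping" in a channel would then dispatch to this handler.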
def require_perm(self, level):
def decorate(func):
@wraps(func)
def wrapper(*args, **kwargs):
if self.util.get_perm(args[0].get("user", "")) >= level:
return func(*args, **kwargs)
else:
channel = args[0].get("channel", "")
if channel:
self.slack_client.api_call(
"chat.postMessage",
channel=channel,
text="You aren't allowed to do that.",
as_user=True,
)
return lambda args, kwargs: None
return wrapper
return decorate
def register_job(self, timer, f):
print(f"Registering job {f.__name__} to run every {timer} seconds")
self.jobs.setdefault(timer, []).append(f)
def job(self, timer):
"""Defines a job function
Job functions run, roughly, on a timer.
"""
def decorator(f):
self.register_job(timer, f)
return f
return decorator
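# Illustrative usage sketch (assumes a Bot instance named `bot`; the 60-second
# interval and the print body are examples only):
#
#   @bot.job(60)
#   def heartbeat():
#       print("still alive")
#
# _job_runner() will then call heartbeat() roughly once a minute.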
def register_regex(self, regex, f):
print(f"Registering regex {regex}")
self.regexes[regex] = f
def regex(self, regex):
"""Defines a regex function
These are like command functions, and take two arguments:
- the slack event itself
- a single string containing the matched string
For a single event, this may be called multiple times with multiple matches.
"""
def decorator(f):
self.register_regex(regex, f)
return f
return decorator
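# Illustrative usage sketch (assumes a Bot instance named `bot`; the pattern is
# an example only). The handler receives the event and one matched string, and
# may fire several times for a single message:
#
#   @bot.regex(r"https?://\S+")
#   def on_link(event, match):
#       print(f"saw a link: {match}")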
|
pool_thread.py
|
from multiprocessing import Pool
import datetime
et = datetime.datetime.now()
# def f(x):
# return x*x
# if __name__ == '__main__':
# p = Pool(5)
# print(p.map(f, [1, 2, 3]))
#from __future__ import print_function
# from multiprocessing import Process
# import os
# def info(title):
# print (title)
# print ('module name:', __name__)
# if hasattr(os, 'getppid'): # only available on Unix
# print ('parent process:', os.getppid())
# print ('process id:', os.getpid())
# def f(name):
# info('function f')
# print ('hello', name)
# if __name__ == '__main__':
# info('main line')
# p = Process(target=f, args=('bob',))
# p.start()
# p.join()
# from multiprocessing import Process, Queue
# def f(q):
# q.put([42, None, 'hello'])
# if __name__ == '__main__':
# q = Queue()
# p = Process(target=f, args=(q,))
# p.start()
# print (q.get()) # prints "[42, None, 'hello']"
# p.join()
# from multiprocessing import Process, Lock
# def f(l, i):
# global K,LOCK
# l.acquire()
# K = K+i
# print(K)
# l.release()
# if __name__ == '__main__':
# global K
# K = 0
# lock = Lock()
# LOCK = 1
# for num in range(3):
# Process(target=f, args=(lock, num)).start()
# print("K = ",K)
# print("\nMicro-Seconds : ",(et-datetime.datetime.now()).microseconds)
# print("K = ",K)
|
__init__.py
|
#
# Copyright (C) 2018-2019 Nippon Telegraph and Telephone Corporation.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from grpc import aio
from . import taish_pb2
from . import taish_pb2_grpc
import asyncio
from queue import Queue
from threading import Thread
import time
import functools
import inspect
from typing import Any, Optional
DEFAULT_SERVER_ADDRESS = "localhost"
DEFAULT_SERVER_PORT = "50051"
def is_async_func(func: Any) -> bool:
if inspect.iscoroutinefunction(func):
return True
if isinstance(func, functools.partial):
return is_async_func(func.func)
return False
def set_default_serialize_option(req):
req.serialize_option.human = True
req.serialize_option.value_only = True
req.serialize_option.json = False
class TAIException(Exception):
def __init__(self, code, msg):
self.code = code
self.msg = msg
def check_metadata(metadata):
code = int(metadata.get("tai-status-code", 0))
if code:
msg = metadata.get("tai-status-msg", "")
raise TAIException(code, msg)
class TAIObject(object):
def __init__(self, client, object_type, obj):
self.client = client
self.object_type = object_type
self.obj = obj
@property
def oid(self):
return self.obj.oid
def list_attribute_metadata(self):
return self.client.list_attribute_metadata(self.object_type, self.oid)
def get_attribute_metadata(self, attr):
return self.client.get_attribute_metadata(self.object_type, attr, oid=self.oid)
def get_attribute_capability(self, attr, json=False):
return self.client.get_attribute_capability(self.oid, attr, json)
def set(self, attr_id, value):
return self.client.set(self.object_type, self.oid, attr_id, value)
def set_multiple(self, attributes):
return self.client.set_multiple(self.object_type, self.oid, attributes)
def get(self, attr_id, with_metadata=False, value=None, json=False):
return self.client.get(
self.object_type, self.oid, attr_id, with_metadata, value, json
)
def get_multiple(self, attributes, with_metadata=False, json=False):
return self.client.get_multiple(
self.object_type, self.oid, attributes, with_metadata, json
)
def monitor(self, attr_id, callback, json=False):
return self.client.monitor(self, attr_id, callback, json)
class NetIf(TAIObject):
def __init__(self, client, obj, module):
super().__init__(client, taish_pb2.NETIF, obj)
self._module = module
@property
def module(self):
return self._module
@property
def index(self):
return self.obj.index
class HostIf(TAIObject):
def __init__(self, client, obj, module):
super().__init__(client, taish_pb2.HOSTIF, obj)
self._module = module
@property
def module(self):
return self._module
@property
def index(self):
return self.obj.index
class Module(TAIObject):
def __init__(self, client, obj):
super().__init__(client, taish_pb2.MODULE, obj)
@property
def netifs(self):
return [NetIf(self.client, obj, self) for obj in self.obj.netifs]
@property
def hostifs(self):
return [HostIf(self.client, obj, self) for obj in self.obj.hostifs]
@property
def present(self):
return self.obj.present
@property
def location(self):
return self.obj.location
def _get_if(self, index, objs, cls):
for obj in objs:
if obj.index == index:
return cls(self.client, obj, self)
return None
def get_netif(self, index=0):
obj = self._get_if(index, self.obj.netifs, NetIf)
if not obj:
raise TAIException(-1, f"netif {index} not found")
return obj
def get_hostif(self, index=0):
obj = self._get_if(index, self.obj.hostifs, HostIf)
if not obj:
raise TAIException(-1, f"hostif {index} not found")
return obj
async def create_netif(self, index=0, attrs=None):
if attrs is None:
attrs = []
attrs.append(("index", index))
await self.client.create(taish_pb2.NETIF, attrs, self.oid)
self.obj = (await self.client.list())[self.location]
return self.get_netif(index)
async def create_hostif(self, index=0, attrs=None):
if attrs is None:
attrs = []
attrs.append(("index", index))
await self.client.create(taish_pb2.HOSTIF, attrs, self.oid)
self.obj = (await self.client.list())[self.location]
return self.get_hostif(index)
class AsyncClient(object):
def __init__(self, address=DEFAULT_SERVER_ADDRESS, port=DEFAULT_SERVER_PORT):
self.channel = aio.insecure_channel(f"{address}:{port}")
self.stub = taish_pb2_grpc.TAIStub(self.channel)
async def close(self):
await self.channel.close()
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self.close()
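# Illustrative usage sketch (not part of the library itself). The module
# location "0" and the attribute name "admin-status" are assumptions; use
# whatever locations and attributes your TAI implementation exposes:
#
#   async def example():
#       async with AsyncClient() as client:
#           modules = await client.list()
#           module = await client.get_module("0")
#           netif = module.get_netif(0)
#           print(await netif.get("admin-status"))
#
#   asyncio.run(example())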
async def list(self):
req = taish_pb2.ListModuleRequest()
ret = {}
async for res in self.stub.ListModule(req):
ret[res.module.location] = Module(self, res.module)
return ret
async def list_attribute_metadata(self, object_type, oid=0, location=""):
req = taish_pb2.ListAttributeMetadataRequest()
req.object_type = object_type
req.oid = oid
req.location = location
return [res.metadata async for res in self.stub.ListAttributeMetadata(req)]
async def get_attribute_capability(self, oid, attr, json=False):
if type(attr) == int:
attr_id = attr
elif type(attr) == str:
meta = await self.get_attribute_metadata(0, attr, oid=oid)
attr_id = meta.attr_id
else:
attr_id = attr.attr_id
req = taish_pb2.GetAttributeCapabilityRequest()
set_default_serialize_option(req)
req.oid = oid
req.attr_id = attr_id
req.serialize_option.json = json
c = self.stub.GetAttributeCapability(req)
res = await c
check_metadata(await c.trailing_metadata())
return res.capability
async def get_attribute_metadata(self, object_type, attr, oid=0, location=""):
req = taish_pb2.GetAttributeMetadataRequest()
req.object_type = object_type
req.oid = oid
req.location = location
set_default_serialize_option(req)
if type(attr) == int:
req.attr_id = attr
elif type(attr) == str:
req.attr_name = attr
else:
raise Exception("invalid argument")
c = self.stub.GetAttributeMetadata(req)
res = await c
check_metadata(await c.trailing_metadata())
return res.metadata
async def get_module(self, location):
req = taish_pb2.ListModuleRequest()
ret = {}
async for res in self.stub.ListModule(req):
m = res.module
if m.location != location:
continue
if not m.present:
raise TAIException(-1, f"module {location} not present")
if not m.oid:
raise TAIException(-1, f"module {location} not created yet")
return Module(self, m)
raise TAIException(-1, f"no module {location} found")
async def create_module(self, location, attrs=None):
if attrs is None:
attrs = []
attrs.append(("location", location))
await self.create(taish_pb2.MODULE, attrs)
return await self.get_module(location)
async def create(self, object_type, attrs, module_id=0):
if module_id != 0:
location = await self.get(taish_pb2.MODULE, module_id, "location")
else:
for attr in attrs:
key, value = attr
if key == "location":
location = value
break
else:
raise TAIException(0xE, "mandatory-attribute-missing")
if type(object_type) == str:
if object_type == "module":
object_type = taish_pb2.MODULE
elif object_type == "netif":
object_type = taish_pb2.NETIF
elif object_type == "hostif":
object_type = taish_pb2.HOSTIF
req = taish_pb2.CreateRequest()
req.object_type = object_type
req.module_id = module_id
set_default_serialize_option(req)
for attr in attrs:
attr_id, value = attr
meta = await self.get_attribute_metadata(
object_type, attr_id, location=location
)
attr_id = meta.attr_id
a = taish_pb2.Attribute()
a.attr_id = attr_id
a.value = str(value)
req.attrs.append(a)
c = self.stub.Create(req)
res = await c
check_metadata(await c.trailing_metadata())
return res.oid
async def remove(self, oid):
req = taish_pb2.RemoveRequest()
req.oid = oid
c = self.stub.Remove(req)
res = await c
check_metadata(await c.trailing_metadata())
async def set(self, object_type, oid, attr_id, value):
return await self.set_multiple(object_type, oid, [(attr_id, value)])
async def set_multiple(self, object_type, oid, attributes):
req = taish_pb2.SetAttributeRequest()
req.oid = oid
for attr in attributes:
attr_id = attr[0]
if type(attr_id) == int:
pass
elif type(attr_id) == str:
metadata = await self.get_attribute_metadata(
object_type, attr_id, oid=oid
)
attr_id = metadata.attr_id
else:
attr_id = attr_id.attr_id
a = taish_pb2.Attribute()
a.attr_id = attr_id
a.value = str(attr[1])
req.attributes.append(a)
set_default_serialize_option(req)
c = self.stub.SetAttribute(req)
await c
check_metadata(await c.trailing_metadata())
async def get(
self, object_type, oid, attr, with_metadata=False, value=None, json=False
):
v = await self.get_multiple(
object_type, oid, [(attr, value)], with_metadata, json
)
return v[0]
async def get_multiple(
self, object_type, oid, attributes, with_metadata=False, json=False
):
req = taish_pb2.GetAttributeRequest()
req.oid = oid
for attr in attributes:
value = None
if type(attr) == tuple:
value = attr[1]
attr = attr[0]
attr_id = attr
if type(attr) == int:
if with_metadata:
meta = await self.get_attribute_metadata(
object_type, attr_id, oid=oid
)
elif type(attr) == str:
meta = await self.get_attribute_metadata(object_type, attr, oid=oid)
attr_id = meta.attr_id
else:
attr_id = attr.attr_id
meta = attr
a = taish_pb2.Attribute()
a.attr_id = attr_id
if value:
a.value = str(value)
req.attributes.append(a)
set_default_serialize_option(req)
req.serialize_option.json = json
c = self.stub.GetAttribute(req)
res = await c
check_metadata(await c.trailing_metadata())
ret = []
for attr in res.attributes:
value = attr.value
if with_metadata:
ret.append((value, meta))
else:
ret.append(value)
return ret
async def monitor(self, obj, attr_id, callback, json=False):
m = await self.get_attribute_metadata(obj.object_type, attr_id, oid=obj.oid)
if m.usage != "<notification>":
raise Exception(
"the type of attribute {} is not notification".format(attr_id)
)
req = taish_pb2.MonitorRequest()
req.oid = obj.oid
req.notification_attr_id = m.attr_id
set_default_serialize_option(req)
req.serialize_option.json = json
c = self.stub.Monitor(req)
async for msg in c:
if is_async_func(callback):
await callback(obj, m, msg)
else:
callback(obj, m, msg)
async def set_log_level(self, l, api="unspecified"):
if l == "debug":
level = taish_pb2.DEBUG
elif l == "info":
level = taish_pb2.INFO
elif l == "notice":
level = taish_pb2.NOTICE
elif l == "warn":
level = taish_pb2.WARN
elif l == "error":
level = taish_pb2.ERROR
elif l == "critical":
level = taish_pb2.CRITICAL
else:
raise Exception(
"invalid log level: {}. choose from [debug, info, notice, warn, error, critical]".format(
l
)
)
if api == "module":
api = taish_pb2.MODULE_API
elif api == "netif":
api = taish_pb2.NETIF_API
elif api == "hostif":
api = taish_pb2.HOSTIF_API
elif api == "unspecified":
api = taish_pb2.UNSPECIFIED_API
else:
raise Exception(
"invalid api type. choose from [module, netif, hostif, unspecified]"
)
req = taish_pb2.SetLogLevelRequest()
req.level = level
req.api = api
await self.stub.SetLogLevel(req)
class Client(object):
def __init__(self, addr=DEFAULT_SERVER_ADDRESS, port=DEFAULT_SERVER_PORT):
self.in_q = Queue()
self.out_q = Queue()
self.t = Thread(target=self.loop, args=(addr, port))
self.t.daemon = True
self.t.start()
self.addr = addr
self.port = port
def loop(self, addr, port):
async def _loop():
c = AsyncClient(addr, port)
while True:
i = self.in_q.get()
if i == None:
return
fname, args, kwargs = i
try:
f = getattr(c, fname)
ret = await f(*args, **kwargs)
except Exception as e:
ret = e
if isinstance(ret, TAIObject):
ret.client = self
self.out_q.put(ret)
asyncio.run(_loop())
def close(self):
self.in_q.put(None)
self.t.join()
def monitor(self, *args, **kwargs):
try:
c = AsyncClient(self.addr, self.port)
loop = asyncio.get_event_loop()
task = loop.create_task(c.monitor(*args, **kwargs))
return loop.run_forever()
except KeyboardInterrupt:
task.cancel()
def __getattr__(self, name):
def f(*args, **kwargs):
self.in_q.put((name, args, kwargs))
ret = self.out_q.get()
if isinstance(ret, Exception):
raise ret
return ret
f.__name__ = name
return f
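# Illustrative usage sketch (not part of the library itself): the synchronous
# Client proxies attribute access through the worker thread above, so the same
# AsyncClient API can be called without an event loop. "0" is an assumed
# module location:
#
#   client = Client()
#   modules = client.list()
#   module = client.get_module("0")
#   print(module.location, module.present)
#   client.close()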
|
utils.py
|
# From http://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/
import cv2
from threading import Thread
class WebcamVideoStream:
def __init__(self, src, width, height):
# initialize the video camera stream and read the first frame
# from the stream
src = -1
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
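# Illustrative usage sketch (not part of the original snippet). Note that, as
# written above, __init__ overrides the passed src with -1 (auto-select the
# first available camera):
#
#   stream = WebcamVideoStream(src=0, width=640, height=480).start()
#   frame = stream.read()   # most recent frame grabbed by the worker thread
#   stream.stop()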
|
ue_mac.py
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import threading
from typing import List
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, set_ev_cls
from ryu.lib.packet import packet
from ryu.lib.packet import ether_types, dhcp
from ryu.ofproto.inet import IPPROTO_TCP, IPPROTO_UDP
from lte.protos.pipelined_pb2 import FlowResponse, SetupFlowsResult, \
UEMacFlowRequest
from magma.pipelined.app.base import MagmaController, ControllerType
from magma.pipelined.app.inout import INGRESS
from magma.pipelined.directoryd_client import update_record
from magma.pipelined.imsi import encode_imsi, decode_imsi
from magma.pipelined.openflow import flows
from magma.pipelined.app.ipfix import IPFIXController
from magma.pipelined.bridge_util import BridgeTools
from magma.pipelined.openflow.exceptions import MagmaOFError
from magma.pipelined.openflow.magma_match import MagmaMatch
from magma.pipelined.openflow.registers import IMSI_REG, load_passthrough
class UEMacAddressController(MagmaController):
"""
UE MAC Address Controller
This controller controls table 0 which is the first table every packet
touches. It matches on UE MAC address and sets IMSI metadata
"""
APP_NAME = "ue_mac"
APP_TYPE = ControllerType.SPECIAL
def __init__(self, *args, **kwargs):
super(UEMacAddressController, self).__init__(*args, **kwargs)
self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
self.next_table = \
self._service_manager.get_table_num(INGRESS)
self.arpd_controller_fut = kwargs['app_futures']['arpd']
self.arp_contoller = None
self._loop = kwargs['loop']
self._datapath = None
tbls = self._service_manager.allocate_scratch_tables(self.APP_NAME, 2)
self._passthrough_set_tbl = tbls[0]
self._dhcp_learn_scratch = tbls[1]
self._li_port = None
self._imsi_set_tbl_num = \
self._service_manager.INTERNAL_IMSI_SET_TABLE_NUM
self._ipfix_sample_tbl_num = \
self._service_manager.INTERNAL_IPFIX_SAMPLE_TABLE_NUM
self._app_set_tbl_num = self._service_manager.INTERNAL_APP_SET_TABLE_NUM
if 'li_local_iface' in kwargs['config']:
self._li_port = \
BridgeTools.get_ofport(kwargs['config']['li_local_iface'])
self._dpi_port = \
BridgeTools.get_ofport(kwargs['config']['dpi']['mon_port'])
def initialize_on_connect(self, datapath):
self.delete_all_flows(datapath)
self._datapath = datapath
self._install_default_flows()
def cleanup_on_disconnect(self, datapath):
self.delete_all_flows(datapath)
def handle_restart(self, ue_requests: List[UEMacFlowRequest]
) -> SetupFlowsResult:
"""
Re-install the UE MAC flows for the given UE requests after a restart.
"""
# TODO Potentially we can run a diff logic but I don't think there is
# benefit(we don't need stats here)
self.delete_all_flows(self._datapath)
self._install_default_flows()
for ue_req in ue_requests:
self.add_ue_mac_flow(ue_req.sid.id, ue_req.mac_addr)
self._loop.call_soon_threadsafe(self._setup_arp, ue_requests)
self.init_finished = True
return SetupFlowsResult(result=SetupFlowsResult.SUCCESS)
def _setup_arp(self, ue_requests: List[UEMacFlowRequest]):
if self.arp_contoller or self.arpd_controller_fut.done():
if not self.arp_contoller:
self.arp_contoller = self.arpd_controller_fut.result()
self.arp_contoller.handle_restart(ue_requests)
def delete_all_flows(self, datapath):
flows.delete_all_flows_from_table(datapath, self.tbl_num)
flows.delete_all_flows_from_table(datapath, self._passthrough_set_tbl)
flows.delete_all_flows_from_table(datapath, self._dhcp_learn_scratch)
flows.delete_all_flows_from_table(datapath, self._imsi_set_tbl_num)
def add_ue_mac_flow(self, sid, mac_addr):
# TODO report add flow result back to sessiond
if self._datapath is None:
return FlowResponse(result=FlowResponse.FAILURE)
uplink_match = MagmaMatch(eth_src=mac_addr)
self._add_resubmit_flow(sid, uplink_match,
priority=flows.UE_FLOW_PRIORITY,
next_table=self._passthrough_set_tbl)
downlink_match = MagmaMatch(eth_dst=mac_addr)
self._add_resubmit_flow(sid, downlink_match,
priority=flows.UE_FLOW_PRIORITY,
next_table=self._passthrough_set_tbl)
# For handling internal ipfix pkt sampling
if self._service_manager.is_app_enabled(IPFIXController.APP_NAME):
self._add_resubmit_flow(sid, uplink_match,
priority=flows.UE_FLOW_PRIORITY,
tbl_num=self._imsi_set_tbl_num,
next_table=self._ipfix_sample_tbl_num)
self._add_resubmit_flow(sid, downlink_match,
priority=flows.UE_FLOW_PRIORITY,
tbl_num=self._imsi_set_tbl_num,
next_table=self._ipfix_sample_tbl_num)
return FlowResponse(result=FlowResponse.SUCCESS)
def delete_ue_mac_flow(self, sid, mac_addr):
# TODO report add flow result back to sessiond
if self._datapath is None:
return
uplink_match = MagmaMatch(eth_src=mac_addr)
self._delete_resubmit_flow(sid, uplink_match)
downlink_match = MagmaMatch(eth_dst=mac_addr)
self._delete_resubmit_flow(sid, downlink_match)
if self._service_manager.is_app_enabled(IPFIXController.APP_NAME):
self._delete_resubmit_flow(sid, uplink_match,
tbl_num=self._imsi_set_tbl_num)
self._delete_resubmit_flow(sid, downlink_match,
tbl_num=self._imsi_set_tbl_num)
def add_arp_response_flow(self, imsi, yiaddr, chaddr):
if self.arp_contoller or self.arpd_controller_fut.done():
if not self.arp_contoller:
self.arp_contoller = self.arpd_controller_fut.result()
self.arp_contoller.add_ue_arp_flows(self._datapath,
yiaddr, chaddr)
self.logger.debug("From DHCP learn: IMSI %s, has ip %s and mac %s",
imsi, yiaddr, chaddr)
# Associate IMSI to IPv4 addr in directory service
threading.Thread(target=update_record, args=(str(imsi),
yiaddr)).start()
else:
self.logger.error("ARPD controller not ready, ARP learn FAILED")
def _add_resubmit_flow(self, sid, match, action=None,
priority=flows.DEFAULT_PRIORITY,
next_table=None, tbl_num=None):
parser = self._datapath.ofproto_parser
if action is None:
actions = []
else:
actions = [action]
if next_table is None:
next_table = self.next_table
if tbl_num is None:
tbl_num = self.tbl_num
# Add IMSI metadata
if sid:
actions.append(parser.NXActionRegLoad2(dst=IMSI_REG,
value=encode_imsi(sid)))
flows.add_resubmit_next_service_flow(self._datapath, tbl_num,
match, actions=actions,
priority=priority,
resubmit_table=next_table)
def _delete_resubmit_flow(self, sid, match, action=None, tbl_num=None):
parser = self._datapath.ofproto_parser
if action is None:
actions = []
else:
actions = [action]
if tbl_num is None:
tbl_num = self.tbl_num
# Add IMSI metadata
actions.append(
parser.NXActionRegLoad2(dst=IMSI_REG, value=encode_imsi(sid)))
flows.delete_flow(self._datapath, tbl_num, match, actions=actions)
def _add_dns_passthrough_flows(self):
parser = self._datapath.ofproto_parser
# Set so packet skips enforcement and send to egress
action = load_passthrough(parser)
# Install UDP flows for DNS
ulink_match_udp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_dst=53)
self._add_resubmit_flow(None, ulink_match_udp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
dlink_match_udp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_src=53)
self._add_resubmit_flow(None, dlink_match_udp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
# Install TCP flows for DNS
ulink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP,
tcp_dst=53)
self._add_resubmit_flow(None, ulink_match_tcp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
dlink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP,
tcp_src=53)
self._add_resubmit_flow(None, dlink_match_tcp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
# Install TCP flows for DNS over tls
ulink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP,
tcp_dst=853)
self._add_resubmit_flow(None, ulink_match_tcp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
dlink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP,
tcp_src=853)
self._add_resubmit_flow(None, dlink_match_tcp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
def _add_dhcp_passthrough_flows(self):
ofproto, parser = self._datapath.ofproto, self._datapath.ofproto_parser
# Set so packet skips enforcement controller
action = load_passthrough(parser)
uplink_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_src=68,
udp_dst=67)
self._add_resubmit_flow(None, uplink_match, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
downlink_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_src=67,
udp_dst=68)
# Set so triggers packetin and we can learn the ip to do arp response
self._add_resubmit_flow(None, downlink_match, action,
flows.PASSTHROUGH_PRIORITY, next_table=self._dhcp_learn_scratch,
tbl_num=self._passthrough_set_tbl)
# Install default flow for dhcp learn scratch
flows.add_output_flow(self._datapath, self._dhcp_learn_scratch,
match=MagmaMatch(), actions=[],
priority=flows.PASSTHROUGH_PRIORITY,
output_port=ofproto.OFPP_CONTROLLER,
copy_table=self.next_table,
max_len=ofproto.OFPCML_NO_BUFFER)
def _add_uplink_arp_allow_flow(self):
arp_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_ARP)
flows.add_resubmit_next_service_flow(self._datapath, self.tbl_num,
arp_match, actions=[],
priority=flows.DEFAULT_PRIORITY,
resubmit_table=self.next_table)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _learn_arp_entry(self, ev):
"""
Learn action to process packet-in DHCP packets; DHCP ACK packets are
used to learn the ARP entry for the UE and install rules in the ARP
table. The DHCP packets are then sent through the pipeline.
"""
msg = ev.msg
if self._dhcp_learn_scratch != msg.table_id:
# Intended for other application
return
try:
encoded_imsi = _get_encoded_imsi_from_packetin(msg)
# Decode the imsi to properly save in directoryd
imsi = decode_imsi(encoded_imsi)
except MagmaOFError as e:
# No packet direction, but intended for this table
self.logger.error("Error obtaining IMSI from pkt-in: %s", e)
return
pkt = packet.Packet(msg.data)
dhcp_header = pkt.get_protocols(dhcp.dhcp)[0]
# DHCP yiaddr is the client(UE) ip addr
# chaddr is the client mac address
self.add_arp_response_flow(imsi, dhcp_header.yiaddr, dhcp_header.chaddr)
def _install_default_flows(self):
"""
Install default flows
"""
# Allows arp packets from uplink(no eth dst set) to go to the arp table
self._add_uplink_arp_allow_flow()
self._add_dhcp_passthrough_flows()
self._add_dns_passthrough_flows()
self._add_resubmit_flow(None, MagmaMatch(),
priority=flows.MINIMUM_PRIORITY,
tbl_num=self._passthrough_set_tbl)
if self._service_manager.is_app_enabled(IPFIXController.APP_NAME):
self._add_resubmit_flow(None, MagmaMatch(in_port=self._dpi_port),
priority=flows.PASSTHROUGH_PRIORITY,
next_table=self._app_set_tbl_num)
if self._li_port:
match = MagmaMatch(in_port=self._li_port)
flows.add_resubmit_next_service_flow(self._datapath, self.tbl_num,
match, actions=[], priority=flows.DEFAULT_PRIORITY,
resubmit_table=self.next_table)
# TODO We might want a default drop all rule with min priority, but
# adding it breaks all unit tests for this controller (needs work)
def _get_encoded_imsi_from_packetin(msg):
"""
Retrieve encoded imsi from the Packet-In message, or raise an exception if
it doesn't exist.
"""
imsi = msg.match.get(IMSI_REG)
if imsi is None:
raise MagmaOFError('IMSI not found in OFPMatch')
return imsi
|
mavlink_gcs_tests_master.py
|
# mavlink_camera_tests.py
#
# Air Cam Pro 2021
# This is tests for python mavlink for camera control
# It uses snippets and code from the sources given below
# REV 1.1 23-12-2021 1700
#
# Mark Jacobsen
# mark@syriaairlift.org
#
# example mavlink GUI
# https://github.com/markdjacobsen/hellomav
#
# Joystick readers and mavlink over UDP
# https://github.com/Smolyarov/u2/blob/master/tools/joystick/joy2mavlink.py
# https://gist.github.com/wwj718/8ebd3dcae6d04f869cf256069ba0dd42
# https://habr.com/ru/post/130788/
#
# This is a simple demonstration of how to begin building a ground control station (GCS)
# in Python using a GUI. It allows a user to select a serial port and baud rate, connect
# to a MAVLINK device, then send a command to arm the motor. Note that this does not
# include any exception handling.
#
# The GUI is built using wxPython. MAVLINK communication is done through the pymavlink
# library.
# Acknowledgements:
# Thank you to Andrew Tridgell, the mastermind behind pymavlink and MAVProxy
# Thread code from http://stackoverflow.com/questions/730645/python-wxpython-doing-work-continuously-in-the-background
# Serial port code taken from http://stackoverflow.com/questions/12090503/listing-available-com-ports-with-python
# UDP http://snakeproject.ru/rubric/article.php?art=python_udp_network_server_client
# AirCamPro :- 21/10/21 support android kivy serial driver
#
# when you install pymavlink you also need to use mavgen to generate the libraries
# instructions are shown here
# https://mavlink.io/en/mavgen_python/
# https://github.com/ArduPilot/pymavlink/blob/master/mavutil.py
# ref to multi-threading using asyncio
#
# https://python-scripts.com/sleep#threading-event
# vasile.buldumac@ati.utm.md
#
# sudo apt-get install python3-dev python3-opencv python3-wxgtk4.0 python3-pip python3-matplotlib python3-lxml
# sudo apt-get install libxml++2.6-dev
# sudo pip install dronekit
# ================== Compatible Joysticks =========================================
# X-Box 360 Controller (name: "Xbox 360 Controller")
# Playstation 4 Controller (name: "PS4 Controller")
# X-Box 360 Controller (name: "Controller (XBOX 360 For Windows)")
#
from pymavlink import mavutil # ref:- https://www.ardusub.com/developers/pymavlink.html
import wx
import os, sys, serial, glob, threading  # os is needed for the ANDROID_BOOTLOGO checks below
# for serial message out packing
import struct
# this is included for android serial and to detect the android platform using kivy
# ref:- https://github.com/frmdstryr/kivy-android-serial
# install kivy with the following in your conda environment
# conda install kivy -c conda-forge
# from kivy.utils import platform
# from kvserial.driver import CdcAcmSerialPort
# to list ports using the serial library
from serial.tools import list_ports
BUTTON_CONNECT = 10
BUTTON_ARM = 20
# ethernet UDP communication and joystick
#
# python3 -m pip install -U pygame --user
import socket
import pygame
JOYSTICK_UDP_PORT = 14556
JOY_SCALE = 1000
MAX_SCALE = 32767
X_MAX = MAX_SCALE
Y_MAX = MAX_SCALE
MAV_TARGET = 110
MAV_SOURCE = 30
# import pymavlink.dialects.v10.lapwing as mavlink
# this is a custom dialect which i cant find
from mavlink_python_libs import com1 as commonV1
import com1 as mavdefs
import math
import time
from mypymavlink import mavutilcust as custommav
#
# multithreading control via asyncio
#
import asyncio
import time
# ============== control Raspberry Pi IO ===============
# sudo apt-get install rpi.gpio
#
#import RPi.GPIO as GPIO
# to use Raspberry Pi board pin numbers
#GPIO.setmode(GPIO.BOARD)
# set up the GPIO channels - one input and one output here
#GPIO.setup(11, GPIO.IN)
#GPIO.setup(12, GPIO.OUT)
#---------------------------------------------------------------------------
class fifo(object):
def __init__(self):
self.buf = []
def write(self, data):
self.buf += data
return len(data)
def read(self):
return self.buf.pop(0)
# Create a wx.Frame object for the interface
class MAVFrame():
RCV_COMMAND = mavutil.mavlink.MAV_CMD_REQUEST_MESSAGE
RPM2 = 0
ACK_RESULT = 0
DEFAULT_SYS_ID = 1
# camera informations (default camera routines will retrieve this)
time_boot_ms = 1213
firmware_version = 12
focal_length = 11
sensor_size_h = 300
sensor_size_v = 400
flags = 4
resolution_h = 300
resolution_v = 400
cam_definition_version = 2
vendor_name = "sony"
model_name = "alpha750"
lens_id = 1
cam_definition_uri = "http://10.0.2.51/cam_defs"
# camera settings
mode_id = 3 # Camera mode
zoomLevel = 7 # Current zoom level (0.0 to 100.0, NaN if not known)*/
focusLevel = 9
# storage informations
total_capacity = 1.2 # [MiB] Total capacity. If storage is not ready (STORAGE_STATUS_READY) value will be ignored.
used_capacity = 1.1 # [MiB] Used capacity. If storage is not ready (STORAGE_STATUS_READY) value will be ignored.
available_capacity = 0.1 # [MiB] Available storage capacity. If storage is not ready (STORAGE_STATUS_READY) value will be ignored.
read_speed = 0.67 # [MiB/s] Read speed.
write_speed = 0.76 # [MiB/s] Write speed.
storage_id = 1 # Storage ID (1 for first, 2 for second, etc.)
storage_count = 2 # Number of storage devices
status = mavutil.mavlink.STORAGE_STATUS_READY
# camera capture status
image_interval = 3.3 # [s] Image capture interval
recording_time_ms = 10000 # [ms] Time since recording started
available_capacity = 0.34 # [MiB] Available storage capacity.
image_status = 1 # Current status of image capturing (0: idle, 1: capture in progress, 2: interval set but idle, 3: interval set and capture in progress)
video_status = 1 # Current status of video capturing (0: idle, 1: capture in progress)
image_count = 11
# video stream
framerate = 30.0 # [Hz] Frame rate.
bitrate = 3000 # [bits/s] Bit rate.
flags = 3 # Bitmap of stream status flags.
resolution_h = 300 # [pix] Horizontal resolution.
resolution_v = 400 # [pix] Vertical resolution.
rotation = 90 # [deg] Video image rotation clockwise.
hfov = 45 # [deg] Horizontal Field of view.
stream_id = 2 # Video Stream ID (1 for first, 2 for second, etc.)
count = 4 # Number of streams available.
type = mavutil.mavlink.VIDEO_STREAM_TYPE_MPEG_TS_H264 # Type of stream.
videoname = "vid_001"
video_uri = "http://10.0.0.56/vids/001.mov"
# camera image captured
time_utc = 667700 # [us] Timestamp (time since UNIX epoch) in UTC. 0 for unknown.
lat = 30 # [degE7] Latitude where image was taken
lon = 40 # [degE7] Longitude where capture was taken
alt = 11 # [mm] Altitude (MSL) where image was taken
relative_alt = 12 # [mm] Altitude above ground
q = [1,0,0,0] # Quaternion of camera orientation (w, x, y, z order, zero-rotation is 0, 0, 0, 0)
image_index = 4 # Zero based index of this image (image count since armed -1)
camera_id = 1 # Camera ID (1 for first, 2 for second, etc.)
capture_result = 1 # Boolean indicating success (1) or failure (0) while capturing this image.
file_url = "http://10.1.2.3/img/1.jpg"
ACK_ERROR = 0
errRCV_COMMAND = 0
errRPM2 = 0
# task control flag
task_control_1 = 0
# global constants
GOT_ERROR = 1
GOT_SUCCESS = 2
GOT_BAD = 3
GOT_UNFORMAT = 4
# used to decide what is being requested from the calling (GCS) station
type_of_msg = 0
def __del__(self):
class_name = self.__class__.__name__
print('{} Deleted'.format(class_name))
def on_click_connect(self,e):
#"""
#Process a click on the CONNECT button
#Attempt to connect to the MAV using the specified port and baud rate,
#then subscribe a function called check_heartbeat that will listen for
#a heartbeat message, as well as a function that will print all incoming
#MAVLink messages to the console.
#"""
port = self.cb_port.GetValue()
baud = int(self.cb_baud.GetValue())
self.textOutput.AppendText("Connecting to " + port + " at " + str(baud) + " baud\n")
self.master = mavutil.mavlink_connection(port, baud=baud)
self.thread = threading.Thread(target=self.process_messages)
self.thread.setDaemon(True)
self.thread.start()
self.master.message_hooks.append(self.check_heartbeat)
self.master.message_hooks.append(self.check_rcv_data_msg)
self.master.message_hooks.append(self.log_message)
print("Connecting to " + port + " at " + str(baud) + "baud")
self.textOutput.AppendText("Waiting for APM heartbeat\n")
return
def on_click_arm(self,e):
#"""
#Process a click on the ARM button
#Send an arm message to the MAV, then subscribe a function called
#check_arm_ack that will listen for a positive confirmation of arming.
# """
self.textOutput.AppendText("Arming motor\n")
print("******arming motor*********")
self.master.arducopter_arm()
self.master.message_hooks.append(self.check_arm_ack)
def log_message(self,caller,msg):
if msg.get_type() != 'BAD_DATA':
print(str(msg))
return
def process_messages(self):
#"""
#This runs continuously. The mavutil.recv_match() function will call mavutil.post_message()
#any time a new message is received, and will notify all functions in the master.message_hooks list.
#"""
while True:
msg = self.master.recv_match(blocking=True)
if not msg:
return
if msg.get_type() == "BAD_DATA":
if mavutil.all_printable(msg.data):
sys.stdout.write(msg.data)
sys.stdout.flush()
def check_heartbeat(self,caller,msg):
#"""
#Listens for a heartbeat message
#Once this function is subscribed to the dispatcher, it listens to every
#incoming MAVLINK message and watches for a 'HEARTBEAT' message. Once
#that message is received, the function updates the GUI and then
# unsubscribes itself.
#" ""
if msg.get_type() == 'HEARTBEAT':
self.textOutput.AppendText("Heartbeat received from APM (system %u component %u)\n" % (self.master.target_system, self.master.target_component))
self.master.message_hooks.remove(self.check_heartbeat)
def check_arm_ack(self, caller, msg):
#"""
#Listens for confirmation of motor arming
#Once this function is subscribed to the dispatcher, it listens to every
#incoming MAVLINK message and watches for the "Motor armed!" confirmation.
#Once the message is received, the function updates the GUI and then
#unsubscribes itself.
#"""
if msg.get_type() == 'STATUSTEXT':
if "Throttle armed" in msg.text:
self.textOutput.AppendText("Motor armed!")
self.master.message_hooks.remove(self.check_arm_ack)
def check_rcv_data_msg(self, caller, msg):
if msg.get_type() == 'RC_CHANNELS':
self.textOutput.AppendText("RC Channel message (system %u component %u)\n" % (self.master.target_system, self.master.target_component))
self.textOutput.AppendText("chan1 %u chan2 %u\n" % (msg.chan1_raw, msg.chan2_raw))
self.master.message_hooks.remove(self.check_rcv_data_msg)
elif msg.get_type() == 'COMMAND_LONG':
self.textOutput.AppendText("Long message received (system %u component %u)\n" % (self.master.target_system, self.master.target_component))
self.textOutput.AppendText("Command %u p1 %u p2 %u p3 %u p4 %u \n" % (msg.command, msg.param1, msg.param2, msg.param3, msg.param4))
self.textOutput.AppendText("p5 %u p6 %u p7 %u \n" % (msg.param5, msg.param6, msg.param7))
self.master.message_hooks.remove(self.check_rcv_data_msg)
elif msg.get_type() == 'CAMERA_IMAGE_CAPTURED':
self.textOutput.AppendText("Cam Cap message received (system %u component %u)\n" % (self.master.target_system, self.master.target_component))
self.textOutput.AppendText("lat %u lon %u alt %u\n" % (msg.lat, msg.lon, msg.alt))
self.textOutput.AppendText("URL %s\n" % (msg.file_url))
self.master.message_hooks.remove(self.check_rcv_data_msg)
def OnClose(self, e):
self._mgr.UnInit()
self.Close()
def serial_ports(self):
#"""Lists all available serial ports
#:raises EnvironmentError:
# On unsupported or unknown platforms
#:returns:
# A list of available serial ports
#"""
if 'ANDROID_BOOTLOGO' in os.environ: # detect android first as if using sys alone, it returns linux
#if platform == 'android': using kivy instead
            ports = ['/dev/ttyACM0']
else:
if sys.platform.startswith('win'):
ports = ['COM' + str(i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('linux2') or sys.platform.startswith('cygwin'): # check this shows /dev/ttyAMA0 on raspberry pi.
# this is to exclude your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'): # apple mac support if using darwin
ports = glob.glob('/dev/tty.*')
else:
                ports = [p.device for p in list_ports.comports()]  # comports() returns ListPortInfo objects; keep just the device names (covers the rest e.g. riscos atheos os2 freebsd aix etc)
if len(ports) == 0:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
if 'ANDROID_BOOTLOGO' in os.environ: # device android
s = CdcAcmSerialPort(port)
s.close()
result.append(port)
else:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
    def print_red(self, text, value):
        print("\033[31m %s : %6.3f\033[0m" % (text, value))
    def print_yellow(self, text, value):
        print("\033[33m %s : %6.3f\033[0m" % (text, value))
    def print_2_yellow(self, text, value1, value2):
        # %s so mixed types (ints, tuples, strings from pygame events) can be printed
        print("\033[33m %s : %s %s\033[0m" % (text, value1, value2))
    def print_3_yellow(self, text, value1, value2, value3):
        print("\033[33m %s : %s %s %s\033[0m" % (text, value1, value2, value3))
    def print_3_blue(self, text, value1, value2, value3):
        print("\033[34m %s %s %s %s\033[0m" % (text, value1, value2, value3))
    def print_blue(self, text, value):
        print("\033[34m %s : %6.3f\033[0m" % (text, value))
    def joystickInit(self):
        # initialise pygame before any display calls, and keep the objects on self
        # so processJoystickSendMavlink() can use them
        pygame.init()
        # Set the width and height of the screen [width,height]
        size = [500, 700]
        self.screen = pygame.display.set_mode(size)
        pygame.display.set_caption("----- My test of mavlink and joystick -----")
        # Used to manage how fast the screen updates
        self.clock = pygame.time.Clock()
        # Initialize the joysticks
        pygame.joystick.init()
        self.joystick = pygame.joystick.Joystick(0)
        self.joystick.init()
        # Get ready to print
        self.textPrint = TextPrint()
def initUDPSocket(self,bind):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# bind the socket if this is a server (pass bind==1)
if bind == 1:
host = 'localhost'
port = JOYSTICK_UDP_PORT
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
addr = (host,port)
sock.bind(addr)
sock.setblocking(0)
return sock
def closeUDPSocket(self,udp_socket):
udp_socket.close()
def serverReadUDPSocket(self,udp_socket,port):
conn, addr = udp_socket.recvfrom(port)
return conn,addr
def clientReadUDPSocket(self,udp_socket,port):
dataV = udp_socket.recvfrom(port)
return dataV
    def joyMavlinkInit(self):
        # keep the MAVLink encoder on self so processJoystickSendMavlink() can pack with it
        self.mav = mavutil.mavlink.MAVLink(fifo())
        self.mav.srcSystem = MAV_SOURCE # set to master
        return self.mav
def blockMouseDown(self,block_flag):
if block_flag:
pygame.event.set_blocked(MOUSEBUTTONDOWN)
else:
pygame.event.set_allowed(MOUSEBUTTONDOWN)
def blockMouseUp(self,block_flag):
if block_flag:
pygame.event.set_blocked(MOUSEBUTTONUP)
else:
pygame.event.set_allowed(MOUSEBUTTONUP)
    def checkMouseDwnBlock(self):
        print('MOUSEBUTTONDOWN is blocked: ', pygame.event.get_blocked(MOUSEBUTTONDOWN))
    def checkMouseUpBlock(self):
        print('MOUSEBUTTONUP is blocked: ', pygame.event.get_blocked(MOUSEBUTTONUP))
    def write_mav_serial_data(self, serial, x):
        # x is an already-packed MAVLink message buffer (bytes), so write it directly
        serial.write(x)
    def write_pack_serial_data(self, serial, x, y, z, roll, pitch, yaw):
        # little-endian frame: a one-byte 'S' marker followed by six signed shorts
        serial.write(struct.pack('<chhhhhh', b'S', x, y, z, roll, pitch, yaw))
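    # Illustrative only: '<chhhhhh' above packs a fixed 13-byte frame, e.g. with
    # hypothetical values struct.pack('<chhhhhh', b'S', 100, 0, 0, 0, 0, 0) gives
    # the 1-byte 'S' marker followed by six little-endian int16 fields.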
    def test_linear(self, serial, lenght=200, times=1000, delta=0.05):
        for angle in range(1, times, 5):
            a = angle * math.pi / 180
            self.write_pack_serial_data(serial, int(lenght * math.cos(a)), int(lenght * math.sin(a)), 0, 0, 0, 0)
            time.sleep(delta)
        self.write_pack_serial_data(serial, 0, 0, 0, 0, 0, 0)
    def test_angles(self, serial, lenght=200, times=1000, delta=0.05):
        for angle in range(1, times, 5):
            a = angle * math.pi / 180
            self.write_pack_serial_data(serial, 0, 0, 0, 0, int(30 * math.cos(a)), int(30 * math.sin(-a)))
            time.sleep(delta)
        self.write_pack_serial_data(serial, 0, 0, 0, 0, 0, 0)
    def test_yaw(self, serial, lenght=200, times=1000, delta=0.05):
        for angle in range(1, times, 5):
            a = angle * math.pi / 180
            self.write_pack_serial_data(serial, int(lenght * math.cos(a)), 0, 0, int(30 * math.sin(a)), 0, 0)
            time.sleep(delta)
        self.write_pack_serial_data(serial, 0, 0, 0, 0, 0, 0)
    def processJoystickSendMavlink(self, sock):
        # uses the objects stored on self by joystickInit() and joyMavlinkInit()
        msgbuf = None
        done = False
        # -------- Main Program Loop -----------
        while not done:
            btns = 0
            thrust = 0.0
            rudder = 0.0
            # EVENT PROCESSING STEP
            for event in pygame.event.get(): # User did something
                self.screen.fill(WHITE)
                self.textPrint.reset()
# Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
# QUIT - none
# ACTIVEEVENT - gain, state
# KEYDOWN - unicode, key, mod
# KEYUP - key, mod
# MOUSEMOTION - pos, rel, buttons
# MOUSEBUTTONUP - pos, button
# MOUSEBUTTONDOWN - pos, button
# JOYAXISMOTION - joy, axis, value
# JOYBALLMOTION - joy, ball, rel
# JOYHATMOTION - joy, hat, value
# JOYBUTTONUP - joy, button
# JOYBUTTONDOWN - joy, button
# VIDEORESIZE - size, w, h
# VIDEOEXPOSE - none
# USEREVENT – code
                if event.type == pygame.QUIT:
                    done = True
                elif event.type == pygame.MOUSEBUTTONDOWN:
                    self.print_2_yellow("Mouse button down pressed.", event.button, event.pos)
                elif event.type == pygame.MOUSEBUTTONUP:
                    self.print_2_yellow("Mouse button up released.", event.button, event.pos)
                elif event.type == pygame.JOYBUTTONDOWN:
                    self.print_2_yellow("Joystick button down pressed.", event.button, event.joy)
                elif event.type == pygame.JOYBUTTONUP:
                    self.print_2_yellow("Joystick button up released.", event.button, event.joy)
                elif event.type == pygame.JOYAXISMOTION:
                    self.print_3_yellow("Joystick axis motion.", event.joy, event.axis, event.value)
                elif event.type == pygame.JOYBALLMOTION:
                    self.print_3_yellow("Joystick ball motion.", event.joy, event.ball, event.rel)
                elif event.type == pygame.JOYHATMOTION:
                    self.print_3_yellow("Joystick hat motion", event.joy, event.hat, event.value)
                elif event.type == pygame.VIDEORESIZE:
                    self.print_3_blue("video re-size.", event.size, event.w, event.h)
                elif event.type == pygame.KEYDOWN:
                    self.print_3_yellow("key down ", event.unicode, event.key, event.mod)
                elif event.type == pygame.KEYUP:
                    self.print_2_yellow("key up ", event.key, event.mod)
            # Get the name from the OS for the controller/joystick
            name = self.joystick.get_name()
            print("Joystick name: {}".format(name))
            # get the buttons
            buttons = self.joystick.get_numbuttons()
            for i in range(buttons):
                button = self.joystick.get_button(i)
                print("Button {:>2} value: {}".format(i, button))
            # get the hats
            # Hat switch. All or nothing for direction, not like joysticks.
            # Value comes back in an array.
            hats = self.joystick.get_numhats()
            print("Number of hats: {}".format(hats))
            self.textPrint.indent()
            for i in range(hats):
                hat = self.joystick.get_hat(i)
                print("Hat {} value: {}".format(i, str(hat)))
            # Getting available devices
            for dev_id in range(pygame.joystick.get_count()):
                print("devices list : %d %s" % (dev_id, pygame.joystick.Joystick(dev_id).get_name()))
            # Get thrust and brake first
            # mix 2 shifts in single channels
            thr = (self.joystick.get_axis(5) + 1) / 2
            brk = -(self.joystick.get_axis(2) + 1) / 2
            thrust = thr + brk
            self.print_yellow("Thrust value ", thrust)
            # this is the x axis
            rudder = self.joystick.get_axis(0)
            self.print_blue("Rudder value ", rudder)
            # now collect all buttons
            btns = 0
            for i in range(self.joystick.get_numbuttons()):
                btns |= self.joystick.get_button(i) << i
            # Usually axis run in pairs, up/down for one, and left/right for
            # the other.
            axes = self.joystick.get_numaxes()
            print("Number of axes: {}".format(axes))
            self.textPrint.indent()
            for i in range(axes):
                axis = self.joystick.get_axis(i)
                print("Axis {} value: {:>6.3f}".format(i, axis))
            self.textPrint.unindent()
            # Update events in pygame
            pygame.event.pump()
            # pack acquired data and throw it to socket
            msg = mavutil.mavlink.MAVLink_manual_control_message(target=MAV_TARGET, x=X_MAX, y=Y_MAX, z=round(thrust * JOY_SCALE), r=round(rudder * JOY_SCALE), buttons=btns)
            msgbuf = msg.pack(self.mav)  # packed with the MAVLink instance created in joyMavlinkInit()
            try:
                jid = self.joystick.get_instance_id()
            except AttributeError:
                # get_instance_id() is an SDL2 method
                jid = self.joystick.get_id()
            print("Joystick {}".format(jid))
            try:
                guid = self.joystick.get_guid()
            except AttributeError:
                # get_guid() is an SDL2 method
                pass
            else:
                print("GUID: {}".format(guid))
            # Limit the loop to 25 frames per second
            self.clock.tick(25)
            if msgbuf:
                # send the message on the UDP port to the local listener created by initUDPSocket(bind=1)
                sock.sendto(msgbuf, ('localhost', JOYSTICK_UDP_PORT))
# send the message on serial
# write_mav_serial_data(serial, msgbuf)
# Close the window and quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.joystick.quit()
pygame.quit()
# make a mavlink connection using mavutil like ardusub does....
#
# Create the connection and return it for use with the other functions
#
    def makeMAVlinkConn(self):
        try:
            the_connection = mavutil.mavlink_connection('tcp:10.0.2.51:5760', autoreconnect=True, source_system=mavutil.mavlink.MAV_COMP_ID_CAMERA, source_component=mavutil.mavlink.MAV_COMP_ID_AUTOPILOT1)
            return the_connection, True
        except Exception as err_msg:
            print("Failed to connect : %s" % (err_msg))
            return None, False
    def makeNewMAVlinkConn(self, id):
        try:
            the_connection = mavutil.mavlink_connection('tcp:10.0.2.51:5760', autoreconnect=True, source_system=id, source_component=mavutil.mavlink.MAV_COMP_ID_AUTOPILOT1)
            return the_connection, True
        except Exception as err_msg:
            print("Failed to connect : %s" % (err_msg))
            return None, False
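    # Illustrative usage sketch (names are hypothetical, nothing here calls it):
    #
    #   frame = MAVFrame()
    #   conn, ok = frame.makeMAVlinkConn()
    #   if ok:
    #       frame.mavlink_rcv_heartbeat(conn)        # block until the vehicle heartbeats
    #       frame.mavlink_send_GCS_heartbeat(conn)   # announce ourselves as a camera GCS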
# Send heartbeat from a GCS (types are define as enum in the dialect file).
#
def mavlink_send_GCS_heartbeat(self, the_conection):
print(" heartbeat.............................. %s\n"%(mavutil.mavlink.MAV_TYPE_CAMERA))
the_conection.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_CAMERA, mavutil.mavlink.MAV_AUTOPILOT_INVALID, 0, 0, mavutil.mavlink.MAV_STATE_ACTIVE)
# Send heartbeat from a MAVLink application.
#
    def mavlink_send_OBC_heartbeat2(self, the_connection):
        the_connection.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_CAMERA, mavutil.mavlink.MAV_AUTOPILOT_GENERIC, 0, 0, 0)
# Receive heartbeat from a MAVLink application.
#
def mavlink_rcv_heartbeat(self, the_connection):
the_connection.wait_heartbeat()
# Sets a value to the rc channel
#
def mavlink_set_rc_channel_pwm(self, the_connection, channel_id, pwm=1500):
#""" Set RC channel pwm value
#Args:
# channel_id (TYPE): Channel ID
# pwm (int, optional): Channel pwm value 1100-1900
#"""
if channel_id < 1 or channel_id > 18:
print("Channel does not exist.")
return
# Mavlink 2 supports up to 18 channels:
# https://mavlink.io/en/messages/common.html#RC_CHANNELS_OVERRIDE
rc_channel_values = [65535 for _ in range(18)]
rc_channel_values[channel_id - 1] = pwm
# target_system
# target_component
# RC channel list, in microseconds.
the_connection.mav.rc_channels_override_send( the_connection.target_system, the_connection.target_component, *rc_channel_values )
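    # Illustrative only (hypothetical values): hold RC channel 3 at mid-stick on
    # an existing connection `conn` created by makeMAVlinkConn():
    #
    #   frame.mavlink_set_rc_channel_pwm(conn, 3, 1500)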
# drives a gimbal axis controller to the pitch roll yaw specified
#
def gimbal_move_to( self, the_connection, tilt, roll=0, pan=0):
#"""
#Moves gimbal to given position
#Args:
# tilt (float): tilt angle in centidegrees (0 is forward)
# roll (float, optional): pan angle in centidegrees (0 is forward)
# pan (float, optional): pan angle in centidegrees (0 is forward)
#"""
the_connection.mav.command_long_send(the_connection.target_system, the_connection.target_component, mavutil.mavlink.MAV_CMD_DO_MOUNT_CONTROL, 1, tilt, roll, pan, 0, 0, 0, mavutil.mavlink.MAV_MOUNT_MODE_MAVLINK_TARGETING)
def mavlink10(self,connID):
# '''return True if using MAVLink 1.0 or later'''
return float(connID.WIRE_PROTOCOL_VERSION) >= 1
def mavlink20(self,connID):
# '''return True if using MAVLink 2.0 or later'''
return float(connID.WIRE_PROTOCOL_VERSION) >= 2
def mavlink_set_relay(self, the_connection, relay_pin=0, state=True):
# Set relay_pin to value of state
#if self.mavlink10():
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_DO_SET_RELAY, # command
0, # Confirmation
relay_pin, # Relay Number
int(state), # state (1 to indicate arm)
0, # param3 (all other params meaningless)
0, # param4
0, # param5
0, # param6
0) # param7
#else:
# print("Setting relays not supported.")
# ref:- https://mavlink.io/en/messages/common.html#MAV_CMD
def mavlink_video_stop_capture(self, the_connection, streamNo):
#if self.mavlink10():
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_VIDEO_STOP_CAPTURE, # command
0, # Confirmation
streamNo, # stream number
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
def mavlink_video_start_capture(self, the_connection, streamNo, freq):
#if self.mavlink10():
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_VIDEO_START_CAPTURE, # command
0, # Confirmation
streamNo, # stream number
freq, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
def mavlink_image_stop_capture(self, the_connection):
#if self.mavlink10():
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_IMAGE_STOP_CAPTURE, # command
0, # Confirmation
0, # param1
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
def mavlink_image_start_capture(self, the_connection, interval, totalImages, seqNo):
#if self.mavlink10():
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_IMAGE_START_CAPTURE, # command
0, # Confirmation
0, # param1
interval, # Desired elapsed time between two consecutive pictures (in seconds)
totalImages, # Total number of images to capture. 0 to capture forever/until MAV_CMD_IMAGE_STOP_CAPTURE.
seqNo, # Capture sequence number starting from 1. This is only valid for single-capture (param3 == 1), otherwise set to 0. Increment the capture ID for each capture command to prevent double captures when a command is re-transmitted
0, # param5
0, # param6
0) # param7
def mavlink_video_stop_streaming(self, the_connection, streamNo):
#if self.mavlink10():
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_VIDEO_STOP_STREAMING, # command
0, # Confirmation
streamNo, # stream number
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
def mavlink_video_start_streaming(self, the_connection, streamNo):
#if self.mavlink10():
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_VIDEO_START_STREAMING, # command
0, # Confirmation
streamNo, # stream number
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
# suitable variables to drive CamMode
#
MAV_CAMERA_MODE_IMAGE = 0
MAV_CAMERA_MODE_VIDEO = 1
MAV_CAMERA_MODE_IMAGE_SURVEY = 2
    def mavlink_video_set_camera_mode(self, the_connection, camMode):
        #if self.mavlink10():
        the_connection.mav.command_long_send(
            the_connection.target_system,               # target_system
            the_connection.target_component,            # target_component
            mavutil.mavlink.MAV_CMD_SET_CAMERA_MODE,    # command
            0,                                          # Confirmation
            0,                                          # param1
            camMode,                                    # param2 - camera mode (CAMERA_MODE)
            0,                                          # param3
            0,                                          # param4
            0,                                          # param5
            0,                                          # param6
            0)                                          # param7
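    # Illustrative only: put the camera into video mode on an existing connection
    # `conn`, using the MAV_CAMERA_MODE_* constants above:
    #
    #   frame.mavlink_video_set_camera_mode(conn, frame.MAV_CAMERA_MODE_VIDEO)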
# suitable variables to drive CamZoomType
#
MAV_ZOOM_TYPE_STEP = 0 # Zoom one step increment (-1 for wide, 1 for tele)
MAV_ZOOM_TYPE_CONTINUOUS = 1 # Continuous zoom up/down until stopped (-1 for wide, 1 for tele, 0 to stop zooming)
MAV_ZOOM_TYPE_RANGE = 2 # Zoom value as proportion of full camera range (a value between 0.0 and 100.0)
    MAV_ZOOM_TYPE_FOCAL_LENGTH = 3 # Zoom value/variable focal length in millimetres
    def mavlink_video_set_camera_zoom(self, the_connection, camZoomType, camZoomValue):
        #if self.mavlink10():
        the_connection.mav.command_long_send(
            the_connection.target_system,               # target_system
            the_connection.target_component,            # target_component
            mavutil.mavlink.MAV_CMD_SET_CAMERA_ZOOM,    # command (was MAV_CMD_SET_CAMERA_MODE, which does not take zoom parameters)
            0,                                          # Confirmation
            camZoomType,                                # param1
            camZoomValue,                               # param2
            0,                                          # param3
            0,                                          # param4
            0,                                          # param5
            0,                                          # param6
            0)                                          # param7
MAV_FOCUS_TYPE_STEP = 0 # Focus one step increment (-1 for focusing in, 1 for focusing out towards infinity).
MAV_FOCUS_TYPE_CONTINUOUS = 1 # Continuous focus up/down until stopped (-1 for focusing in, 1 for focusing out towards infinity, 0 to stop focusing)
MAV_FOCUS_TYPE_RANGE = 2 # Focus value as proportion of full camera focus range (a value between 0.0 and 100.0)
MAV_FOCUS_TYPE_METERS = 3 # Focus value in metres
def mavlink_video_set_camera_focus(self, the_connection, camFocusType, camFocusValue):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavdefs.MAV_CMD_SET_CAMERA_FOCUS, # command
0, # Confirmation
camFocusType, # param1
camFocusValue, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
except Exception as err_msg:
print("Failed to send message to set focus : %s" % (err_msg))
def mavlink_do_digicam_configure(self, the_connection, camMode, camShutterSpeed, camAperture, camISO, camExposure, camCommandIdentity, camEngineCutOff):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONFIGURE, # command
0, # Confirmation
camMode, # param1
camShutterSpeed, # param2
camAperture, # param3
camISO, # param4
camExposure, # param5
camCommandIdentity, # param6
camEngineCutOff) # param7
except Exception as err_msg:
print("Failed to send message to configure digicam : %s" % (err_msg))
def mavlink_do_digicam_control(self, the_connection, camSessionControl, camZoomAbsolute, camZoomRelative, camFocus, camShootCommand, camCommandIdentity, camShotID):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONTROL, # command
0, # Confirmation
camSessionControl, # param1
camZoomAbsolute, # param2
camZoomRelative, # param3
camFocus, # param4
camShootCommand, # param5
camCommandIdentity, # param6
camShotID) # param7
except Exception as err_msg:
print("Failed to send do_digicam_control : %s" % (err_msg))
def mavlink_do_video_control(self, the_connection, camID, camTransmission, camInterval, camRecording):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_DO_CONTROL_VIDEO, # command
0, # Confirmation
camID, # param1
camTransmission, # param2
camInterval, # param3
camRecording, # param4
0, # param5
0, # param6
0) # param7
except Exception as err_msg:
print("Failed to send do_video_control : %s" % (err_msg))
def mavlink_get_camera_settings(self, the_connection):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_REQUEST_CAMERA_SETTINGS, # command
0, # Confirmation
1, # param1
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
except Exception as err_msg:
print("Failed to send command long for camera_settings : %s" % (err_msg))
def mavlink_get_new_camera_data(self, the_connection):
#if self.mavlink10():
print("MAV_CMD_REQUEST message sent to sys %s comp 100"%(the_connection.target_system))
#the_connection.target_component, # target_component
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
100, # target_component
mavutil.mavlink.MAV_CMD_REQUEST_MESSAGE, # command
0, # Confirmation
259, # param1
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
print("sent the camera data........................ request")
except Exception as err_msg:
print("Failed to send command long for camera_information : %s" % (err_msg))
def mavlink_get_storage_info(self, the_connection, StoId):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_REQUEST_STORAGE_INFORMATION, # command
0, # Confirmation
StoId, # param1
1, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
except Exception as err_msg:
print("Failed to send command long for storage_information : %s" % (err_msg))
def mavlink_get_capture_status(self, the_connection):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_REQUEST_CAMERA_CAPTURE_STATUS, # command
0, # Confirmation
1, # param1
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
except Exception as err_msg:
print("Failed to send command long for capture_status_information : %s" % (err_msg))
def mavlink_get_stream_info(self, the_connection):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_REQUEST_VIDEO_STREAM_INFORMATION, # command
0, # Confirmation
1, # param1
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
except Exception as err_msg:
print("Failed to send command long for video_stream_information : %s" % (err_msg))
def mavlink_reset_camera(self, the_connection):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_RESET_CAMERA_SETTINGS, # command
0, # Confirmation
1, # param1
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
except Exception as err_msg:
print("Failed to send command long for reset_camera : %s" % (err_msg))
def mavlink_set_camera_trig_interval(self, the_connection, camTriggerCycle, camShutterIntegration):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_DO_SET_CAM_TRIGG_INTERVAL, # command
0, # Confirmation
camTriggerCycle, # param1
camShutterIntegration, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
except Exception as err_msg:
print("Failed to send command long for camera_trig_interval : %s" % (err_msg))
def mavlink_set_camera_to_quaternion(self, the_connection, q1, q2, q3, q4):
#if self.mavlink10():
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_DO_MOUNT_CONTROL_QUAT, # command
0, # Confirmation
q1, # param1
q2, # param2
q3, # param3
q4, # param4
0, # param5
0, # param6
0) # param7
    def mavlink_send_camera_information(self, the_connection):
        #if self.mavlink10():
        # CAMERA_INFORMATION is a broadcast telemetry message, not a command, so the
        # generated sender takes the message fields directly (field order per common.xml)
        the_connection.mav.camera_information_send(
            self.time_boot_ms,
            self.vendor_name,
            self.model_name,
            self.firmware_version,
            self.focal_length,
            self.sensor_size_h,
            self.sensor_size_v,
            self.resolution_h,
            self.resolution_v,
            self.lens_id,
            self.flags,
            self.cam_definition_version,
            self.cam_definition_uri)
    def mavlink_send_camera_settings(self, the_connection):
        #if self.mavlink10():
        # field order follows the CAMERA_SETTINGS message definition in common.xml
        the_connection.mav.camera_settings_send(
            self.time_boot_ms,
            self.mode_id,     # Camera mode
            self.zoomLevel,   # Current zoom level (0.0 to 100.0, NaN if not known)
            self.focusLevel)
    def mavlink_send_storage_information(self, the_connection):
        #if self.mavlink10():
        # field order follows the STORAGE_INFORMATION message definition in common.xml
        the_connection.mav.storage_information_send(
            self.time_boot_ms,
            self.storage_id,          # Storage ID (1 for first, 2 for second, etc.)
            self.storage_count,       # Number of storage devices
            self.status,
            self.total_capacity,      # [MiB] Total capacity. If storage is not ready (STORAGE_STATUS_READY) value will be ignored.
            self.used_capacity,       # [MiB] Used capacity. If storage is not ready (STORAGE_STATUS_READY) value will be ignored.
            self.available_capacity,  # [MiB] Available storage capacity. If storage is not ready (STORAGE_STATUS_READY) value will be ignored.
            self.read_speed,          # [MiB/s] Read speed.
            self.write_speed)         # [MiB/s] Write speed.
    def mavlink_send_camera_capture_status(self, the_connection):
        #if self.mavlink10():
        # field order follows the CAMERA_CAPTURE_STATUS message definition in common.xml
        the_connection.mav.camera_capture_status_send(
            self.time_boot_ms,        # [ms] Timestamp (time since system boot).
            self.image_status,        # Current status of image capturing (0: idle, 1: capture in progress, 2: interval set but idle, 3: interval set and capture in progress)
            self.video_status,        # Current status of video capturing (0: idle, 1: capture in progress)
            self.image_interval,      # [s] Image capture interval
            self.recording_time_ms,   # [ms] Time since recording started
            self.available_capacity,  # [MiB] Available storage capacity.
            self.image_count)
    def mavlink_send_video_stream_information(self, the_connection):
        #if self.mavlink10():
        # field order follows the VIDEO_STREAM_INFORMATION message definition in common.xml
        the_connection.mav.video_stream_information_send(
            self.stream_id,     # Video Stream ID (1 for first, 2 for second, etc.)
            self.count,         # Number of streams available.
            self.type,          # Type of stream.
            self.flags,         # Bitmap of stream status flags.
            self.framerate,     # [Hz] Frame rate.
            self.resolution_h,  # [pix] Horizontal resolution.
            self.resolution_v,  # [pix] Vertical resolution.
            self.bitrate,       # [bits/s] Bit rate.
            self.rotation,      # [deg] Video image rotation clockwise.
            self.hfov,          # [deg] Horizontal Field of view.
            self.videoname,
            self.video_uri)
    def mavlink_send_camera_image_captured(self, the_connection):
        #if self.mavlink10():
        # this reports a CAMERA_IMAGE_CAPTURED message (field order per common.xml);
        # the original call went through video_stream_information_send by mistake
        the_connection.mav.camera_image_captured_send(
            self.time_boot_ms,    # [ms] Timestamp (time since system boot).
            self.time_utc,        # [us] Timestamp (time since UNIX epoch) in UTC. 0 for unknown.
            self.camera_id,       # Camera ID (1 for first, 2 for second, etc.)
            self.lat,             # [degE7] Latitude where image was taken
            self.lon,             # [degE7] Longitude where capture was taken
            self.alt,             # [mm] Altitude (MSL) where image was taken
            self.relative_alt,    # [mm] Altitude above ground
            self.q,               # Quaternion of camera orientation (w, x, y, z order, zero-rotation is 1, 0, 0, 0)
            self.image_index,     # Zero based index of this image (image count since armed -1)
            self.capture_result,  # Boolean indicating success (1) or failure (0) while capturing this image.
            self.file_url)
# process the incoming messages received
#
    def process_messages_from_connection(self, the_connection):
        """
        Polls the connection a few times with recv_match() and handles each incoming
        message, recording any camera command so the asyncio tasks can act on it and
        reply with an ACK.
        """
loop = 4
while loop >= 1:
print("im receiving.............")
# wait heartbeat (only the GCS does this )
# m = the_connection.recv_match(type="HEARTBEAT", blocking=True, timeout=5)
#
# you can also use type lists like this
# type=['COMMAND_LONG,RC_CHANNELS']
#
            msg = the_connection.recv_match(blocking=True, timeout=5)
            if not msg:
                # recv_match() returns None on timeout
                return
            if (the_connection.target_system == msg.get_srcSystem()): # check this and eliminate spurious messages if needed
                print(f"data read {msg.get_type()}")
                print(f"connection {the_connection.target_system} == {msg.get_srcSystem()}")
                last_timestamp = msg._timestamp
if msg.get_type() == "BAD_DATA":
self.ACK_ERROR = self.GOT_BAD
self.errRCV_COMMAND = 0
self.errRPM2 = 0
if mavutil.all_printable(msg.data):
sys.stdout.write(msg.data)
sys.stdout.flush()
elif msg.get_type() == 'COMMAND_ACK':
print("========================= ACK RECEIVED ======================================== cmd=%s res=%s" % (msg.command,msg.result))
elif msg.get_type() == 'RC_CHANNELS':
print("RC Channel message (system %u component %u)\n" % (the_connection.target_system, the_connection.target_component))
elif msg.get_type() == 'COMMAND_LONG':
print("Long message received (system %u component %u)\n" % (the_connection.target_system, the_connection.target_component))
print("Command %u p1 %u p2 %u p3 %u p4 %u \n" % (msg.command, msg.param1, msg.param2, msg.param3, msg.param4))
print("p5 %u p6 %u p7 %u \n" % (msg.param5, msg.param6, msg.param7))
if (self.ACK_RESULT == 0):
self.RCV_COMMAND = msg.command
                        if (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_REQUEST_MESSAGE):
                            self.RPM2 = msg.param1
                            # requested message ids; the 65000..65005 codes are the ones
                            # checked later in processMavlinkMessageData()
                            if (self.RPM2 == mavutil.mavlink.MAVLINK_MSG_ID_CAMERA_INFORMATION): # camera_information
                                self.type_of_msg = 65000
                            elif (self.RPM2 == mavutil.mavlink.MAVLINK_MSG_ID_CAMERA_SETTINGS): # camera_settings
                                self.type_of_msg = 65001
                            elif (self.RPM2 == mavutil.mavlink.MAVLINK_MSG_ID_STORAGE_INFORMATION): # storage information
                                self.type_of_msg = 65002
                            elif (self.RPM2 == mavutil.mavlink.MAVLINK_MSG_ID_CAMERA_CAPTURE_STATUS): # camera capture status
                                self.type_of_msg = 65003
                            elif (self.RPM2 == mavutil.mavlink.MAVLINK_MSG_ID_CAMERA_IMAGE_CAPTURED): # retrieve lost images
                                self.type_of_msg = 65004
                                self.Got_Param1 = msg.param2
                            elif (self.RPM2 == 269): # video stream information
                                self.type_of_msg = 65005
                            else:
                                self.type_of_msg = 0
                        elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_DO_SET_RELAY):
                            self.type_of_msg = mavutil.mavlink.MAV_CMD_DO_SET_RELAY
                            self.Got_Param1 = msg.param1
                            self.Got_Param2 = msg.param2
elif (self.RCV_COMMAND == mavutil.mavlink. MAV_CMD_VIDEO_START_CAPTURE):
self.type_of_msg = mavutil.mavlink.MAV_CMD_VIDEO_START_CAPTURE;
self.Got_Param1 = msg.param1
                        elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_VIDEO_STOP_CAPTURE):
                            self.type_of_msg = mavutil.mavlink.MAV_CMD_VIDEO_STOP_CAPTURE
                            self.Got_Param1 = msg.param1
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_IMAGE_START_CAPTURE):
self.type_of_msg = mavutil.mavlink.MAV_CMD_IMAGE_START_CAPTURE;
self.Got_Param1 = msg.param2
self.Got_Param2 = msg.param3
self.Got_Param3 = msg.param4
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_IMAGE_STOP_CAPTURE):
self.type_of_msg = mavutil.mavlink.MAV_CMD_IMAGE_STOP_CAPTURE;
self.Got_Param1 = msg.param3
self.Got_Param2 = msg.param4
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_VIDEO_START_STREAMING):
self.type_of_msg = mavutil.mavlink.MAV_CMD_VIDEO_START_STREAMING;
self.Got_Param1 = msg.param1
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_VIDEO_STOP_STREAMING):
                            self.type_of_msg = mavutil.mavlink.MAV_CMD_VIDEO_STOP_STREAMING
self.Got_Param1 = msg.param1
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_SET_CAMERA_MODE):
self.type_of_msg = mavutil.mavlink.MAV_CMD_SET_CAMERA_MODE;
self.Got_Param1 = msg.param2
elif (self.RCV_COMMAND == mavdefs.MAV_CMD_SET_CAMERA_ZOOM):
self.type_of_msg = mavdefs.MAV_CMD_SET_CAMERA_ZOOM;
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
self.Got_Param3 = msg.param3
elif (self.RCV_COMMAND == mavdefs.MAV_CMD_SET_CAMERA_FOCUS):
self.type_of_msg = mavdefs.MAV_CMD_SET_CAMERA_FOCUS;
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
elif (self.RCV_COMMAND == mavdefs.MAV_CMD_DO_DIGICAM_CONFIGURE):
self.type_of_msg = mavdefs.MAV_CMD_DO_DIGICAM_CONFIGURE;
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
self.Got_Param3 = msg.param3
self.Got_Param4 = msg.param4
self.Got_Param5 = msg.param5
self.Got_Param6 = msg.param6
self.Got_Param7 = msg.param7
elif (self.RCV_COMMAND == mavdefs.MAV_CMD_DO_DIGICAM_CONTROL):
self.type_of_msg = mavdefs.MAV_CMD_DO_DIGICAM_CONTROL;
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
self.Got_Param3 = msg.param3
self.Got_Param4 = msg.param4
self.Got_Param5 = msg.param5
self.Got_Param6 = msg.param6
self.Got_Param7 = msg.param7
print("DO DIGICAM CONTROL")
elif (self.RCV_COMMAND == mavdefs.MAV_CMD_DO_CONTROL_VIDEO):
self.type_of_msg = mavdefs.MAV_CMD_DO_CONTROL_VIDEO;
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
self.Got_Param3 = msg.param3
self.Got_Param4 = msg.param4
elif (self.RCV_COMMAND == mavdefs.MAV_CMD_DO_SET_CAM_TRIGG_INTERVAL):
self.type_of_msg = mavdefs.MAV_CMD_DO_SET_CAM_TRIGG_INTERVAL;
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
elif (self.RCV_COMMAND == mavdefs.MAV_CMD_RESET_CAMERA_SETTINGS):
self.type_of_msg = mavdefs.MAV_CMD_RESET_CAMERA_SETTINGS;
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
elif (self.RCV_COMMAND == mavdefs.MAV_CMD_DO_MOUNT_CONTROL_QUAT):
self.type_of_msg = mavdefs.MAV_CMD_DO_MOUNT_CONTROL_QUAT;
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
self.Got_Param3 = msg.param3
self.Got_Param4 = msg.param4
elif (self.RCV_COMMAND == mavdefs.MAV_CMD_DO_GIMBAL_MANAGER_PITCHYAW):
self.type_of_msg = mavdefs.MAV_CMD_DO_GIMBAL_MANAGER_PITCHYAW;
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
self.Got_Param3 = msg.param3
self.Got_Param4 = msg.param4
self.Got_Param5 = msg.param5
self.Got_Param6 = msg.param6
elif (self.RCV_COMMAND == mavdefs.MAV_CMD_DO_TRIGGER_CONTROL):
self.type_of_msg = mavdefs.MAV_CMD_DO_TRIGGER_CONTROL;
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
self.Got_Param3 = msg.param3
elif (self.RCV_COMMAND == 2004): # MAV_CMD_CAMERA_TRACK_POINT=2004
self.type_of_msg = 2004;
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
self.Got_Param3 = msg.param3
elif (self.RCV_COMMAND == 2005): # MAV_CMD_CAMERA_TRACK_RECTANGLE=2005
self.type_of_msg = 2005;
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
self.Got_Param3 = msg.param3
elif (self.RCV_COMMAND == 2010): # MAV_CMD_CAMERA_STOP_TRACKING=2010
self.type_of_msg = 2010;
elif (self.RCV_COMMAND == mavdefs.MAV_CMD_STORAGE_FORMAT):
self.type_of_msg = mavdefs.MAV_CMD_STORAGE_FORMAT;
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
else:
self.RPM2 = 0
self.type_of_msg = self.RCV_COMMAND
self.ACK_RESULT = mavutil.mavlink.MAV_RESULT_ACCEPTED
else:
self.ACK_ERROR = self.GOT_ERROR
self.errRCV_COMMAND = msg.command
self.errRPM2 = msg.param1
elif msg.get_type() == 'CAMERA_IMAGE_CAPTURED':
print("Cam Cap message received (system %u component %u)\n" % (the_connection.target_system, the_connection.target_component))
print("lat %u lon %u alt %u\n" % (msg.lat, msg.lon, msg.alt))
print("URL %u)\n" % (msg.file_url))
elif msg.get_type() == 'GPS_RAW_INT':
the_connection.mav.gps_raw_int_send( 1000, 1, 22, 21, 1, 3, 1, 2, 3, 5)
elif msg.get_type() == 'CAMERA_FEEDBACK':
print("Camera Feedback")
the_connection.mav.camera_feedback_send( 1000, 1, 1, 22, 21, 10, 30, 21, 2, 3, 5, 2, 3)
elif msg.get_type() == 'REQUEST_DATA_STREAM':
print("REQUEST DATA STREAM :: start %u id %u req_rte %u\n" % (msg.start_stop, msg.req_stream_id, msg.req_message_rate))
elif msg.get_type() == 'STATUSTEXT':
print("STATUSTEXT :: text %s " % (msg.text))
elif msg.get_type() == 'HEARTBEAT':
print("HEARTBEAT :: src %s type %s auto %s sys %s" % (msg.get_srcSystem(), msg.type,msg.autopilot,msg.system_status))
elif msg.get_type() == 'CAMERA_INFORMATION':
print("CAMERA INFO WAS GOT: url is %s\n" % (msg.cam_definition_uri))
elif msg.get_type() == 'VIDEO_STREAM_INFORMATION':
print("VID INFO :: %s\n" % (msg.uri))
else:
print(f"unsupported command :: {msg.get_type()}")
#time.sleep(0.05)
loop = loop - 1
    def mavlink_send_ack_command(self, the_connection, cmd, rpm2, pro, res):
        #if self.mavlink10():
        # COMMAND_ACK field order is: command, result, progress, result_param2,
        # target_system, target_component (the last four are MAVLink 2 extensions)
        the_connection.mav.command_ack_send(
            cmd,                              # command
            res,                              # result
            pro,                              # progress
            rpm2,                             # result_param2
            the_connection.target_system,     # target_system
            the_connection.target_component)  # target_component
def raspberry_pi3_set_relay(self, rNum, rState):
#if self.mavlink10():
if (rNum == 12): # only relay we defined at the top
if (rState == True):
GPIO.output(rNum, GPIO.HIGH)
else:
GPIO.output(rNum, GPIO.LOW)
return True
else:
return False
# ----------- for sony api to camera -----------------------------------
#
# ref :- https://github.com/petabite/libsonyapi
#
# on raspberry pi os you might need this first
# sudo apt-get install python3-pip
# sudo apt-get install python3-setuptools
#
# git clone https://github.com/petabite/libsonyapi.git
# cd libsonyapi
# python setup.py install
#
import numpy as np
from libsonyapi.camera import Camera
from libsonyapi.actions import Actions
# ----------- for image saving -----------------------------------------
from PIL import Image
import queue
lifo_queue = queue.LifoQueue(maxsize=20)
fifo_queue = queue.Queue(maxsize=20)
class pySony():
# connect to the sony camera and print all info
#
    def createCameraInstance(self):
        camera = Camera()
        camera_info = camera.info()
        print(camera_info)
        print(camera.do("getAvailableApiList")['result'])
        print(camera.name)
        print(camera.api_version)
        return camera
def startMovieRec(self,camera):
res = camera.do(Actions.startMovieRec)
return res
def stopMovieRec(self,camera):
res = camera.do(Actions.stopMovieRec)
return res
def takeOnePicture(self,camera):
res = camera.do(Actions.actTakePicture) # take a picture
if res.get('result') is not None:
print("Result", res)
    def saveOnePicture(self, camera):
        res = camera.do(Actions.actTakePicture)
        if res.get('result') is not None:
            self.stopLiveView(camera)
            photo_path = res['result'][0][0]
            photo_name = photo_path.split('/')[-1]
            im = Image.open(requests.get(photo_path, stream=True).raw)
            im.save('images/' + photo_name)
            lifo_queue.put(photo_name)
            fifo_queue.put(photo_name)
            return photo_path
def getLastPicture(self,camera):
return lifo_queue.get()
def getFirstPicture(self,camera):
return fifo_queue.get()
    def listAllPictures(self):
        while not fifo_queue.empty():
            print(fifo_queue.get(), end=' ')
def startLiveView(self,camera):
res = camera.do(Actions.startLiveview)
print("Liveview", res)
return res
def stopLiveView(self,camera):
res = camera.do(Actions.stopLiveview)
print("Liveview", res)
return res
def startRecMode(self,camera):
res = camera.do(Actions.startRecMode)
print("Rec Mode ", res)
return res
    def stopRecMode(self, camera):
        res = camera.do(Actions.stopRecMode)
        print("Rec Mode ", res)
        return res
def startContShooting(self,camera):
res = camera.do(Actions.startContShooting)
print("Cont Shooting ", res)
return res
def stopContShooting(self,camera):
res = camera.do(Actions.stopContShooting)
print("Cont Shooting ", res)
return res
def startMovieRec(self,camera):
res = camera.do(Actions.startMovieRec)
print("Movie ", res)
return res
def stopMovieRec(self,camera):
res = camera.do(Actions.stopMovieRec)
print("Movie ", res)
return res
def startAudioRec(self,camera):
res = camera.do(Actions.startAudioRec)
print("Audio ", res)
return res
def stopAudioRec(self,camera):
res = camera.do(Actions.stopAudioRec)
print("Audio ", res)
return res
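# --------------------------------------------------------------------
# Minimal usage sketch for the pySony wrapper above (illustrative only and never
# called from this file): connect to a libsonyapi-compatible body on the local
# network, switch it to record mode and save one still.
#
def sony_capture_example():
    cam_if = pySony()
    camera = cam_if.createCameraInstance()
    cam_if.startRecMode(camera)
    photo_path = cam_if.saveOnePicture(camera)
    print("saved picture fetched from", photo_path)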
# ----------- micaSense RedEye Camera -----------------------------------
#
#
#
#
# for http request
#
# if you have minimal you might need sudo apt install libpython2.7-stdlib
#pip install requests
import requests
import json
class redEye():
HTTP_SUCCESS_RETURN = 200
# Functions
#
#def print_myJson( self, cap_data ):
# if (cap_data.status_code == self.HTTP_SUCCESS_RETURN):
# print cap_data.json()
# else:
# print "http REST API error"
# Post a message to the camera commanding a capture, block until complete
#
    def redEyeCapture(self):
        capture_params = {'store_capture': True, 'block': True}
        capture_data = requests.post("http://192.168.10.254/capture", json=capture_params)
        #print_myJson( capture_data )
        return capture_data.status_code
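#
# Illustrative only: trigger one blocking capture on the MicaSense camera at its
# default address and check the HTTP status code returned by redEyeCapture():
#
#   eye = redEye()
#   if eye.redEyeCapture() == redEye.HTTP_SUCCESS_RETURN:
#       print("capture stored on camera")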
#
# Send the Camera Request
#
async def sendCameraRequest(fm, cID, sleep=0):
print("sending the camera request 2")
fm.mavlink_get_new_camera_data(cID)
print("sending the camera request 3")
while sleep > 0:
await asyncio.sleep(1)
print(f'{sleep} seconds')
sleep -= 1
#
# Send the Stream Info Request
#
async def sendStreamInfoRequest(fm, cID, sleep=0):
fm.mavlink_get_stream_info(cID)
while sleep > 0:
await asyncio.sleep(1)
print(f'{sleep} seconds')
sleep -= 1
#
# Send the Capture Status Request
#
async def sendCapStatusRequest(fm, cID, sleep=0):
fm.mavlink_get_capture_status(cID)
while sleep > 0:
await asyncio.sleep(1)
print(f'{sleep} seconds')
sleep -= 1
#
# Send the Camera Settings Request
#
async def sendCamSettingsRequest(fm, cID, sleep=0):
fm.mavlink_get_camera_settings(cID)
while sleep > 0:
await asyncio.sleep(1)
print(f'{sleep} seconds')
sleep -= 1
#
# Send the Storage Info Request
#
async def sendStorageInfoRequest(fm, cID, sID=1, sleep=0):
fm.mavlink_get_storage_info(cID, sID)
while sleep > 0:
await asyncio.sleep(1)
print(f'{sleep} seconds')
sleep -= 1
#
# Send the Video Info Request
#
async def sendVideoControlRequest(fm, cID, camID, camTransmission=1, camInterval=2, camRecording=3, sleep=0):
fm.mavlink_do_video_control(cID, camID, camTransmission, camInterval, camRecording)
while sleep > 0:
await asyncio.sleep(1)
print(f'{sleep} seconds')
sleep -= 1
#
# Send the Camera Trig Request
#
async def sendCamTrigRequest(fm, cID, camTriggerCycle=5, camShutterIntegration=4, sleep=0):
fm.mavlink_set_camera_trig_interval(cID, camTriggerCycle, camShutterIntegration)
while sleep > 0:
await asyncio.sleep(1)
print(f'{sleep} seconds')
sleep -= 1
#
# Send the Reset Cam Request
#
async def sendResetCamRequest(fm, cID, sleep=0):
fm.mavlink_reset_camera(cID)
while sleep > 0:
await asyncio.sleep(1)
print(f'{sleep} seconds')
sleep -= 1
#
# Send the video focus
#
async def sendVideoFocusRequest(fm, cID, camFocusType=9, camFocusValue=300, sleep=0):
fm.mavlink_video_set_camera_focus(cID, camFocusType, camFocusValue)
while sleep > 0:
await asyncio.sleep(1)
print(f'{sleep} seconds')
sleep -= 1
#
# Send the digicam configure
#
async def sendDigiCamConfigureRequest(fm, cID, camMode=2, camShutterSpeed=3, camAperture=4, camISO=5, camExposure=6, camCommandIdentity=7, camEngineCutOff=8, sleep=0):
fm.mavlink_do_digicam_configure(cID, camMode, camShutterSpeed, camAperture, camISO, camExposure, camCommandIdentity, camEngineCutOff)
while sleep > 0:
await asyncio.sleep(1)
print(f'{sleep} seconds')
sleep -= 1
#
# Send the digicam control
#
async def sendDigiCamControlRequest(fm, cID, camSessionControl=11, camZoomAbsolute=12, camZoomRelative=13, camFocus=14, camShootCommand=15, camCommandIdentity=16, camShotID=666, sleep=0):
fm.mavlink_do_digicam_control(cID, camSessionControl, camZoomAbsolute, camZoomRelative, camFocus, camShootCommand, camCommandIdentity, camShotID)
while sleep > 0:
await asyncio.sleep(1)
print(f'{sleep} seconds')
sleep -= 1
#
# The continuous reading task (runs the blocking receive loop)
#
async def readMavlinkIncomingData(fm, cID):
fm.process_messages_from_connection(cID)
#
# The ACK send thread
#
async def sendMavlinkAckData(fm, cID, sleep, cmd, rpm2, pro, res):
fm.mavlink_send_ack_command(cID, cmd, rpm2, pro, res)
while sleep > 0:
await asyncio.sleep(1)
print(f'{sleep} seconds')
sleep -= 1
#
# Handles ACK-ing an error raised during collection: the sender is either told to come again later (temporarily rejected) or that the request failed
#
async def execptionMavlinkErrorAckData(fm, cID):
while fm.task_control_1 > 0:
await asyncio.sleep(1)
if (fm.ACK_ERROR == fm.GOT_ERROR):
fm.mavlink_send_ack_command(cID, fm.errRCV_COMMAND, fm.errRPM2, 0, mavutil.mavlink.MAV_RESULT_TEMPORARILY_REJECTED)
fm.ACK_ERROR = 0
fm.errRCV_COMMAND = 0
fm.errRPM2 = 0
elif (fm.ACK_ERROR == fm.GOT_BAD):
fm.mavlink_send_ack_command(cID, fm.errRCV_COMMAND, fm.errRPM2, 0, mavutil.mavlink.MAV_RESULT_FAILED)
fm.ACK_ERROR = 0
#
# The MSG send thread
#
async def processMavlinkMessageData(fm, cID, sleep, sonycam=0, caminst=0, redeyecam=0 ):
# define what is sent in param1
myRedEyeCamera = 1
mySonyCamera = 2
mySonyCameraContShoot = 3
if (fm.type_of_msg == 65000):
#
## TODO :: Add the cmera retrieval class cam_data_result = fm.getCameraInfomationFromCam()
#
cam_data_result = fm.GOT_SUCCESS
if (cam_data_result == fm.GOT_SUCCESS):
fm.mavlink_send_camera_information(cID)
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == 65001):
#
## TODO :: Add the cmera retrieval class cam_data_result = fm.getCameraSettingsFromCam()
#
cam_data_result = fm.GOT_SUCCESS
if (cam_data_result == fm.GOT_SUCCESS):
fm.mavlink_send_camera_settings(cID)
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == 65002):
#
## TODO :: Add the cmera retrieval class cam_data_result = fm.getStorageInfomationFromCam()
#
cam_data_result = fm.GOT_SUCCESS
if (cam_data_result == fm.GOT_SUCCESS):
fm.mavlink_send_storage_information(cID)
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
        elif (cam_data_result == fm.GOT_UNFORMAT):
fm.mavlink_send_storage_information(cID)
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == 65003):
#
## TODO :: Add the cmera retrieval class cam_data_result = fm.getCameraCaptureStatusFromCam()
#
cam_data_result = fm.GOT_SUCCESS
if (cam_data_result == fm.GOT_SUCCESS):
fm.mavlink_send_camera_capture_status(cID)
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == 65004):
#
## TODO :: Add the cmera retrieval class cam_data_result = fm.getCameraCaptureInformationFromCam(self.Got_Param1)
#
cam_data_result = fm.GOT_SUCCESS
if (cam_data_result == fm.GOT_SUCCESS):
            fm.mavlink_send_camera_image_captured(cID)  # was mavlink_send_camera_capture_information(), which is not defined above
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == 65005):
#
## TODO :: Add the cmera retrieval class cam_data_result = fm.getVideoStreamInformationFromCam()
#
cam_data_result = fm.GOT_SUCCESS
if (cam_data_result == fm.GOT_SUCCESS):
fm.mavlink_send_video_stream_information(cID)
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == mavutil.mavlink.MAV_CMD_DO_SET_RELAY):
#
## Sets the relay No passed from the mavlink command to the state requested
#
relay_data_result = fm.raspberry_pi3_set_relay(fm.Got_Param1,fm.Got_Param2)
if (relay_data_result == True):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == mavutil.mavlink.MAV_CMD_VIDEO_STOP_CAPTURE):
#
#
## TODO :: add the camera control
#
# if (self.Got_Param1 == mySonyCamera):
# cam_action_result = sonycam.stopMovieRec(caminst)
# if not cam_action_result.find("OK") == -1:
# cam_action_result = fm.GOT_SUCCESS
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == mavutil.mavlink.MAV_CMD_VIDEO_START_CAPTURE):
#
#
## TODO :: add the camera control
#
# if (self.Got_Param1 == mySonyCamera):
# cam_action_result = sonycam.startMovieRec(caminst)
# if not cam_action_result.find("OK") == -1:
# cam_action_result = fm.GOT_SUCCESS
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == mavutil.mavlink.MAV_CMD_IMAGE_START_CAPTURE):
#
#
## TODO :: add the camera control
#
# if (self.Got_Param1 == myRedEyeCamera):
# cam_action_result = redeyecam.redEyeCapture()
# if (cam_action_result == redeyecam.HTTP_SUCCESS_RETURN):
# cam_action_result = fm.GOT_SUCCESS
# elif (self.Got_Param1 == mySonyCamera):
# cam_action_result = sonycam.saveOnePicture(caminst)
# if not cam_action_result.find("OK") == -1:
# cam_action_result = fm.GOT_SUCCESS
# elif (self.Got_Param1 == mySonyCameraContShoot):
# cam_action_result = sonycam.startContShooting(caminst)
# if not cam_action_result.find("OK") == -1:
# cam_action_result = fm.GOT_SUCCESS
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == mavutil.mavlink.MAV_CMD_IMAGE_STOP_CAPTURE):
#
#
## TODO :: add the camera control
#
# if (self.Got_Param1 == mySonyCameraContShoot):
# cam_action_result = sonycam.stopContShooting(caminst)
# if not cam_action_result.find("OK") == -1:
# cam_action_result = fm.GOT_SUCCESS
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == mavutil.mavlink.MAV_CMD_VIDEO_STOP_STREAMING):
#
#
## TODO :: add the camera control
#
# if (self.Got_Param1 == mySonyCam):
# cam_action_result = sonycam.stopLiveView(caminst)
# if not cam_action_result.find("OK") == -1:
# cam_action_result = fm.GOT_SUCCESS
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == mavutil.mavlink.MAV_CMD_VIDEO_START_STREAMING):
#
## Sets the relay No passed from the mavlink command to the state requested
#
## TODO :: add the camera control
#
# if (self.Got_Param1 == mySonyCam):
# cam_action_result = sonycam.startLiveView(caminst)
# if not cam_action_result.find("OK") == -1:
# cam_action_result = fm.GOT_SUCCESS
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == mavutil.mavlink.MAV_CMD_SET_CAMERA_MODE):
#
## Sets the relay No passed from the mavlink command to the state requested
#
## TODO :: add the camera control
#
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == mavutil.mavlink.MAV_CMD_SET_CAMERA_ZOOM):
#
## Sets the relay No passed from the mavlink command to the state requested
#
## TODO :: add the camera control
#
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == mavutil.mavlink.MAV_CMD_SET_CAMERA_FOCUS):
#
## Sets the relay No passed from the mavlink command to the state requested
#
## TODO :: add the camera control
#
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONFIGURE):
#
## Sets the relay No passed from the mavlink command to the state requested
#
## TODO :: add the camera control
#
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONTROL):
#
## Sets the relay No passed from the mavlink command to the state requested
#
## TODO :: add the camera control
#
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == mavutil.mavlink.MAV_CMD_DO_CONTROL_VIDEO):
#
## Sets the relay No passed from the mavlink command to the state requested
#
## TODO :: add the camera control
#
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == mavutil.mavlink.MAV_CMD_DO_SET_CAM_TRIGG_INTERVAL):
#
## Sets the relay No passed from the mavlink command to the state requested
#
## TODO :: add the camera control
#
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == mavutil.mavlink.MAV_CMD_RESET_CAMERA_SETTINGS):
#
## Sets the relay No passed from the mavlink command to the state requested
#
## TODO :: add the camera control
#
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == mavutil.mavlink.MAV_CMD_DO_MOUNT_CONTROL_QUAT):
#
## Sets the relay No passed from the mavlink command to the state requested
#
## TODO :: add the camera control
#
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == mavutil.mavlink.MAV_CMD_DO_GIMBAL_MANAGER_PITCHYAW):
#
## Sets the relay No passed from the mavlink command to the state requested
#
## TODO :: add the camera control
#
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == mavutil.mavlink.MAV_CMD_DO_TRIGGER_CONTROL):
#
## Sets the relay No passed from the mavlink command to the state requested
#
## TODO :: add the camera control
#
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == 2004):
#
## Sets the relay No passed from the mavlink command to the state requested
#
## TODO :: add the camera control
#
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == 2005):
#
## Sets the relay No passed from the mavlink command to the state requested
#
## TODO :: add the camera control
#
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == 2010):
#
## Sets the relay No passed from the mavlink command to the state requested
#
## TODO :: add the camera control
#
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
elif (fm.type_of_msg == mavutil.mavlink.MAV_CMD_STORAGE_FORMAT):
#
## Sets the relay No passed from the mavlink command to the state requested
#
## TODO :: add the camera control
#
#
cam_action_result = fm.GOT_SUCCESS
if (cam_action_result == fm.GOT_SUCCESS):
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
else:
fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
while sleep > 0:
await asyncio.sleep(1)
print(f'{sleep} seconds')
sleep -= 1
fm.task_control_1 = 0
#
# The main thread to run this is the camera receiver client
#
async def main():
frame = MAVFrame()
state = False
while (state == False):
try:
cID,state = frame.makeMAVlinkConn()
except Exception as e:
print("Error Trap :: ", e.__class__, " occurred.")
# wait heartbeat
# look for the camera
#
state = False
xx = 1
while xx == 1:
print("receiving....")
        m = cID.recv_match(type="HEARTBEAT", blocking=True, timeout=5)
        if m is None:
            # recv_match() returns None on timeout, keep waiting
            continue
        id = m.get_srcSystem()
        print("got a heart..... %s type %s " % (id, m.type))
#if ((( m.autopilot == mavutil.mavlink.MAV_AUTOPILOT_INVALID ) and ( m.type == mavutil.mavlink.MAV_TYPE_CAMERA )) and (id == mavutil.mavlink.MAV_COMP_ID_CAMERA)) :
if (( m.autopilot == mavutil.mavlink.MAV_AUTOPILOT_INVALID ) and ( m.type == mavutil.mavlink.MAV_TYPE_CAMERA )) :
xx = 2
frame.RCV_COMMAND = 0
frame.RPM2 = 0
frame.ACK_RESULT = 0
#read_task_1 = asyncio.create_task(readMavlinkIncomingData(frame, cID))
print("started reader")
while True:
read_task_1 = asyncio.create_task(readMavlinkIncomingData(frame, cID))
#await read_task_1
print(f"Started in main : {time.strftime('%X')}")
snd_task_1 = asyncio.create_task(sendCameraRequest(frame, cID, 1))
print("................................. sending camera request ....................................................................")
await snd_task_1
#snd_task_2 = asyncio.create_task(sendStreamInfoRequest(frame, cID, 1))
#print("................................. sending stream request ....................................................................")
#await snd_task_2
#snd_task_3 = asyncio.create_task(sendCapStatusRequest(frame, cID, 1))
#print("................................. sending cap status request ....................................................................")
#await snd_task_3
#snd_task_4 = asyncio.create_task(sendCamSettingsRequest(frame, cID, 1))
#print("................................. sending cam settings request ....................................................................")
#await snd_task_4
#snd_task_5 = asyncio.create_task(sendStorageInfoRequest(frame, cID, 8, 1))
#print("................................. sending storage info request ....................................................................")
#await snd_task_5
#snd_task_6 = asyncio.create_task(sendVideoControlRequest(frame, cID, 1, 2, 3, 1))
#print("................................. sending video control request ....................................................................")
#await snd_task_6
#snd_task_7 = asyncio.create_task(sendCamTrigRequest(frame, cID, 15, 14, 1))
#print("................................. sending cam trigg request ....................................................................")
#await snd_task_7
#snd_task_8 = asyncio.create_task(sendResetCamRequest(frame, cID, 1))
#print("................................. sending cam reset camera request ....................................................................")
#await snd_task_8
#snd_task_9 = asyncio.create_task(sendVideoFocusRequest(frame, cID, 9, 300, 1))
#print("................................. sending video focus request ....................................................................")
#await snd_task_9
#snd_task_10 = asyncio.create_task(sendDigiCamConfigureRequest(frame, cID, 12, 13, 14, 15, 16, 17, 18, 1))
#print("................................. sending digi cam configure request ....................................................................")
#await snd_task_10
#snd_task_11 = asyncio.create_task(sendDigiCamControlRequest(frame, cID, 211, 212, 213, 214, 215, 216, 666, 1))
#print("................................. sending digi cam control request ....................................................................")
#await snd_task_11
read_task_1.cancel()
print(f"Ended: {time.strftime('%X')}")
if __name__ == '__main__':
# Create our wxPython application and show our frame
# This is the mavlink test reader it should activate the thread upon click
#
# app = wx.App()
# frame = MAVFrame(None)
# frame.Show()
# app.MainLoop()
# This is joystick test application to send over UDP port joystick position
#
# frame = MAVFrame(None)
# frame.joystickInit()
# udpS = frame.initUDPSocket()
# frame.joyMavlinkInit()
# frame.processJoystickSendMavlink(udpS)
# frame.closeUDPSocket(udpS)
#
# This is an example sending app for mavlink camera control messages going out
#
# frame = MAVFrame()
# connID = frame.makeMAVlinkConn()
# print("connect %s"%(connID))
# frame.mavlink_send_GCS_heartbeat(connID)
# frame.mavlink_reset_camera(connID)
# frame.mavlink_video_set_camera_focus(connID, frame.MAV_FOCUS_TYPE_METERS, 1.5)
# frame.mavlink_image_start_capture(connID, 0.5, 2, 1)
# frame.mavlink_image_stop_capture(connID)
# frame.process_messages_from_connection(connID)
##
#
# ===================== Main Multi-Thread send/rcv Task ============================
#
asyncio.run(main())
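#
# -----------------------------------------------------------------------------
# Illustrative sketch only (defined but never called): the long elif chain in
# the command handler above repeats the same GOT_SUCCESS / ACK_RESULT pattern
# for every camera-related MAV_CMD.  One way to collapse it is a table-driven
# dispatcher.  The handler entries below are placeholders (assumptions), not
# real camera functions from this file; `fm` is assumed to be the same
# MAVFrame-style object used by the handler above.
# -----------------------------------------------------------------------------
def ack_camera_command_sketch(fm, type_of_msg, handlers=None):
    # handlers maps a MAV_CMD id to a zero-argument callable that returns
    # fm.GOT_SUCCESS on success; None means "not implemented yet".
    if handlers is None:
        handlers = {
            mavutil.mavlink.MAV_CMD_DO_MOUNT_CONTROL_QUAT: None,
            mavutil.mavlink.MAV_CMD_DO_GIMBAL_MANAGER_PITCHYAW: None,
            mavutil.mavlink.MAV_CMD_DO_TRIGGER_CONTROL: None,
            mavutil.mavlink.MAV_CMD_STORAGE_FORMAT: None,
        }
    if type_of_msg not in handlers:
        return False  # not a camera command this sketch knows about
    handler = handlers[type_of_msg]
    result = handler() if handler is not None else fm.GOT_SUCCESS
    if result == fm.GOT_SUCCESS:
        fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_IN_PROGRESS
    else:
        fm.ACK_RESULT = mavutil.mavlink.MAV_RESULT_FAILED
    return True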
|
test_distributed_sampling.py
|
import dgl
import unittest
import os
from dgl.data import CitationGraphDataset
from dgl.distributed import sample_neighbors, find_edges
from dgl.distributed import partition_graph, load_partition, load_partition_book
import sys
import multiprocessing as mp
import numpy as np
import backend as F
import time
from utils import get_local_usable_addr
from pathlib import Path
import pytest
from scipy import sparse as spsp
from dgl.distributed import DistGraphServer, DistGraph
def start_server(rank, tmpdir, disable_shared_mem, graph_name):
g = DistGraphServer(rank, "rpc_ip_config.txt", 1, 1,
tmpdir / (graph_name + '.json'), disable_shared_mem=disable_shared_mem)
g.start()
def start_sample_client(rank, tmpdir, disable_shared_mem):
gpb = None
if disable_shared_mem:
_, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_sampling.json', rank)
dgl.distributed.initialize("rpc_ip_config.txt")
dist_graph = DistGraph("test_sampling", gpb=gpb)
try:
sampled_graph = sample_neighbors(dist_graph, [0, 10, 99, 66, 1024, 2008], 3)
except Exception as e:
print(e)
sampled_graph = None
dgl.distributed.exit_client()
return sampled_graph
def start_find_edges_client(rank, tmpdir, disable_shared_mem, eids):
gpb = None
if disable_shared_mem:
_, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_find_edges.json', rank)
dgl.distributed.initialize("rpc_ip_config.txt")
dist_graph = DistGraph("test_find_edges", gpb=gpb)
try:
u, v = find_edges(dist_graph, eids)
except Exception as e:
print(e)
u, v = None, None
dgl.distributed.exit_client()
return u, v
def check_rpc_sampling(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
print(g.idtype)
num_parts = num_server
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=False)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
sampled_graph = start_sample_client(0, tmpdir, num_server > 1)
print("Done sampling")
for p in pserver_list:
p.join()
src, dst = sampled_graph.edges()
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
eids = g.edge_ids(src, dst)
assert np.array_equal(
F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
def check_rpc_find_edges_shuffle(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
partition_graph(g, 'test_find_edges', num_parts, tmpdir,
num_hops=1, part_method='metis', reshuffle=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_find_edges'))
p.start()
time.sleep(1)
pserver_list.append(p)
orig_nid = F.zeros((g.number_of_nodes(),), dtype=F.int64)
orig_eid = F.zeros((g.number_of_edges(),), dtype=F.int64)
for i in range(num_server):
part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_find_edges.json', i)
orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
orig_eid[part.edata[dgl.EID]] = part.edata['orig_id']
time.sleep(3)
eids = F.tensor(np.random.randint(g.number_of_edges(), size=100))
u, v = g.find_edges(orig_eid[eids])
du, dv = start_find_edges_client(0, tmpdir, num_server > 1, eids)
du = orig_nid[du]
dv = orig_nid[dv]
assert F.array_equal(u, du)
assert F.array_equal(v, dv)
#@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
#@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
@unittest.skip('Only support partition with shuffle')
def test_rpc_sampling():
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_sampling(Path(tmpdirname), 2)
def check_rpc_sampling_shuffle(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
sampled_graph = start_sample_client(0, tmpdir, num_server > 1)
print("Done sampling")
for p in pserver_list:
p.join()
orig_nid = F.zeros((g.number_of_nodes(),), dtype=F.int64)
orig_eid = F.zeros((g.number_of_edges(),), dtype=F.int64)
for i in range(num_server):
part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_sampling.json', i)
orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
orig_eid[part.edata[dgl.EID]] = part.edata['orig_id']
src, dst = sampled_graph.edges()
src = orig_nid[src]
dst = orig_nid[dst]
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
eids = g.edge_ids(src, dst)
eids1 = orig_eid[sampled_graph.edata[dgl.EID]]
assert np.array_equal(F.asnumpy(eids1), F.asnumpy(eids))
def create_random_hetero():
num_nodes = {'n1': 1010, 'n2': 1000, 'n3': 1020}
etypes = [('n1', 'r1', 'n2'),
('n1', 'r2', 'n3'),
('n2', 'r3', 'n3')]
edges = {}
for etype in etypes:
src_ntype, _, dst_ntype = etype
arr = spsp.random(num_nodes[src_ntype], num_nodes[dst_ntype], density=0.001, format='coo',
random_state=100)
edges[etype] = (arr.row, arr.col)
g = dgl.heterograph(edges, num_nodes)
g.nodes['n1'].data['feat'] = F.ones((g.number_of_nodes('n1'), 10), F.float32, F.cpu())
return g
def start_hetero_sample_client(rank, tmpdir, disable_shared_mem):
gpb = None
if disable_shared_mem:
_, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_sampling.json', rank)
dgl.distributed.initialize("rpc_ip_config.txt")
dist_graph = DistGraph("test_sampling", gpb=gpb)
assert 'feat' in dist_graph.nodes['n1'].data
assert 'feat' not in dist_graph.nodes['n2'].data
assert 'feat' not in dist_graph.nodes['n3'].data
if gpb is None:
gpb = dist_graph.get_partition_book()
try:
nodes = {'n3': [0, 10, 99, 66, 124, 208]}
sampled_graph = sample_neighbors(dist_graph, nodes, 3)
nodes = gpb.map_to_homo_nid(nodes['n3'], 'n3')
block = dgl.to_block(sampled_graph, nodes)
block.edata[dgl.EID] = sampled_graph.edata[dgl.EID]
except Exception as e:
print(e)
block = None
dgl.distributed.exit_client()
return block, gpb
def check_rpc_hetero_sampling_shuffle(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = create_random_hetero()
num_parts = num_server
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
block, gpb = start_hetero_sample_client(0, tmpdir, num_server > 1)
print("Done sampling")
for p in pserver_list:
p.join()
orig_nid_map = F.zeros((g.number_of_nodes(),), dtype=F.int64)
orig_eid_map = F.zeros((g.number_of_edges(),), dtype=F.int64)
for i in range(num_server):
part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_sampling.json', i)
F.scatter_row_inplace(orig_nid_map, part.ndata[dgl.NID], part.ndata['orig_id'])
F.scatter_row_inplace(orig_eid_map, part.edata[dgl.EID], part.edata['orig_id'])
src, dst = block.edges()
# These are global Ids after shuffling.
shuffled_src = F.gather_row(block.srcdata[dgl.NID], src)
shuffled_dst = F.gather_row(block.dstdata[dgl.NID], dst)
shuffled_eid = block.edata[dgl.EID]
# Get node/edge types.
etype, _ = gpb.map_to_per_etype(shuffled_eid)
src_type, _ = gpb.map_to_per_ntype(shuffled_src)
dst_type, _ = gpb.map_to_per_ntype(shuffled_dst)
etype = F.asnumpy(etype)
src_type = F.asnumpy(src_type)
dst_type = F.asnumpy(dst_type)
# These are global Ids in the original graph.
orig_src = F.asnumpy(F.gather_row(orig_nid_map, shuffled_src))
orig_dst = F.asnumpy(F.gather_row(orig_nid_map, shuffled_dst))
orig_eid = F.asnumpy(F.gather_row(orig_eid_map, shuffled_eid))
etype_map = {g.get_etype_id(etype):etype for etype in g.etypes}
etype_to_eptype = {g.get_etype_id(etype):(src_ntype, dst_ntype) for src_ntype, etype, dst_ntype in g.canonical_etypes}
for e in np.unique(etype):
src_t = src_type[etype == e]
dst_t = dst_type[etype == e]
assert np.all(src_t == src_t[0])
assert np.all(dst_t == dst_t[0])
# Check the node Ids and edge Ids.
orig_src1, orig_dst1 = g.find_edges(orig_eid[etype == e], etype=etype_map[e])
assert np.all(F.asnumpy(orig_src1) == orig_src[etype == e])
assert np.all(F.asnumpy(orig_dst1) == orig_dst[etype == e])
# Check the node types.
src_ntype, dst_ntype = etype_to_eptype[e]
assert np.all(src_t == g.get_ntype_id(src_ntype))
assert np.all(dst_t == g.get_ntype_id(dst_ntype))
# Wait non shared memory graph store
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
@pytest.mark.parametrize("num_server", [1, 2])
def test_rpc_sampling_shuffle(num_server):
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_sampling_shuffle(Path(tmpdirname), num_server)
check_rpc_hetero_sampling_shuffle(Path(tmpdirname), num_server)
def check_standalone_sampling(tmpdir, reshuffle):
g = CitationGraphDataset("cora")[0]
num_parts = 1
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=reshuffle)
os.environ['DGL_DIST_MODE'] = 'standalone'
dgl.distributed.initialize("rpc_ip_config.txt")
dist_graph = DistGraph("test_sampling", part_config=tmpdir / 'test_sampling.json')
sampled_graph = sample_neighbors(dist_graph, [0, 10, 99, 66, 1024, 2008], 3)
src, dst = sampled_graph.edges()
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
eids = g.edge_ids(src, dst)
assert np.array_equal(
F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
dgl.distributed.exit_client()
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_standalone_sampling():
import tempfile
os.environ['DGL_DIST_MODE'] = 'standalone'
with tempfile.TemporaryDirectory() as tmpdirname:
check_standalone_sampling(Path(tmpdirname), False)
check_standalone_sampling(Path(tmpdirname), True)
def start_in_subgraph_client(rank, tmpdir, disable_shared_mem, nodes):
gpb = None
dgl.distributed.initialize("rpc_ip_config.txt")
if disable_shared_mem:
_, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_in_subgraph.json', rank)
dist_graph = DistGraph("test_in_subgraph", gpb=gpb)
try:
sampled_graph = dgl.distributed.in_subgraph(dist_graph, nodes)
except Exception as e:
print(e)
sampled_graph = None
dgl.distributed.exit_client()
return sampled_graph
def check_rpc_in_subgraph_shuffle(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
partition_graph(g, 'test_in_subgraph', num_parts, tmpdir,
num_hops=1, part_method='metis', reshuffle=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_in_subgraph'))
p.start()
time.sleep(1)
pserver_list.append(p)
nodes = [0, 10, 99, 66, 1024, 2008]
time.sleep(3)
sampled_graph = start_in_subgraph_client(0, tmpdir, num_server > 1, nodes)
for p in pserver_list:
p.join()
orig_nid = F.zeros((g.number_of_nodes(),), dtype=F.int64)
orig_eid = F.zeros((g.number_of_edges(),), dtype=F.int64)
for i in range(num_server):
part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_in_subgraph.json', i)
orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
orig_eid[part.edata[dgl.EID]] = part.edata['orig_id']
src, dst = sampled_graph.edges()
src = orig_nid[src]
dst = orig_nid[dst]
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
subg1 = dgl.in_subgraph(g, orig_nid[nodes])
src1, dst1 = subg1.edges()
assert np.all(np.sort(F.asnumpy(src)) == np.sort(F.asnumpy(src1)))
assert np.all(np.sort(F.asnumpy(dst)) == np.sort(F.asnumpy(dst1)))
eids = g.edge_ids(src, dst)
eids1 = orig_eid[sampled_graph.edata[dgl.EID]]
assert np.array_equal(F.asnumpy(eids1), F.asnumpy(eids))
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_rpc_in_subgraph():
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_in_subgraph_shuffle(Path(tmpdirname), 2)
if __name__ == "__main__":
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
os.environ['DGL_DIST_MODE'] = 'standalone'
check_standalone_sampling(Path(tmpdirname), True)
check_standalone_sampling(Path(tmpdirname), False)
os.environ['DGL_DIST_MODE'] = 'distributed'
check_rpc_sampling(Path(tmpdirname), 2)
check_rpc_sampling(Path(tmpdirname), 1)
check_rpc_find_edges_shuffle(Path(tmpdirname), 2)
check_rpc_find_edges_shuffle(Path(tmpdirname), 1)
check_rpc_in_subgraph_shuffle(Path(tmpdirname), 2)
check_rpc_sampling_shuffle(Path(tmpdirname), 1)
check_rpc_sampling_shuffle(Path(tmpdirname), 2)
check_rpc_hetero_sampling_shuffle(Path(tmpdirname), 1)
check_rpc_hetero_sampling_shuffle(Path(tmpdirname), 2)
|
my_module.py
|
import os
import rospy
import rospkg
import threading
from qt_gui.plugin import Plugin
from python_qt_binding import loadUi
from python_qt_binding import QtWidgets
from std_msgs.msg import Bool
class MyPlugin(Plugin):
def __init__(self, context):
super(MyPlugin, self).__init__(context)
# Give QObjects reasonable names
self.setObjectName('MyPlugin')
# Process standalone plugin command-line arguments
from argparse import ArgumentParser
parser = ArgumentParser()
# Add argument(s) to the parser.
parser.add_argument("-q", "--quiet", action="store_true",
dest="quiet",
help="Put plugin in silent mode")
args, unknowns = parser.parse_known_args(context.argv())
if not args.quiet:
print('arguments: %s' % args)
print('unknowns: %s' % unknowns)
# Create QWidget
self._widget = QtWidgets.QWidget()
# Get path to UI file which should be in the "resource" folder of this package
ui_file = os.path.join(rospkg.RosPack().get_path('robot_rqt_plugin'), 'resource', 'MyPlugin.ui')
# Extend the widget with all attributes and children from UI file
loadUi(ui_file, self._widget)
# Give QObjects reasonable names
self._widget.setObjectName('MyPluginUi')
# Show _widget.windowTitle on left-top of each plugin (when
# it's set in _widget). This is useful when you open multiple
# plugins at once. Also if you open multiple instances of your
# plugin at once, these lines add number to make it easy to
# tell from pane to pane.
if context.serial_number() > 1:
self._widget.setWindowTitle(self._widget.windowTitle() + (' (%d)' % context.serial_number()))
# Add widget to the user interface
context.add_widget(self._widget)
# Sets up the functions to be called on clicking on buttons
self._widget.hard_stop.clicked[bool].connect(self._hardstop)
self._widget.soft_stop.clicked[bool].connect(self._softstop)
self._widget.control.clicked[bool].connect(self._switchmode)
# Sets up the variables and the text label
self.mode_val = False
self.soft_stop = False
self.decide_text()
# Sets the topics and the publishing rate
self.control_publisher = rospy.Publisher("control_mode", Bool, queue_size=0)
self.stop_publisher = rospy.Publisher("soft_stop", Bool, queue_size=0)
self.rate = rospy.Rate(10)
# Starts a thread to run in parallel publishing messages
threading.Thread(target=self.publish_manually).start()
def _hardstop(self):
# Kills all the running nodes
os.system("rosnode kill -a")
# Updates the text label
self._widget.hard.setText('All nodes killed')
def _softstop(self):
# Changes the value of the soft_stop variable
# False means not stopped and True means stopped
self.soft_stop = not self.soft_stop
# Updates the text labels
self.decide_text()
def _switchmode(self):
# Changes the value of the mode_val variable
# False means automatic and True means manual
self.mode_val = not self.mode_val
# Updates the text labels
self.decide_text()
def publish_manually(self):
# Runs till killed
while not rospy.is_shutdown():
# Publishes the value of the variables to the topics
self.stop_publisher.publish(Bool(self.soft_stop))
self.control_publisher.publish(Bool(self.mode_val))
# Publishes at the set rate
self.rate.sleep()
def decide_text(self):
# Sets the value for the text labels
self._widget.mode.setText('Manual' if self.mode_val else 'Automatic')
self._widget.soft.setText('Enabled' if self.soft_stop else 'Disabled')
# def shutdown_plugin(self):
# # unregister all publishers here
# pass
#
# def save_settings(self, plugin_settings, instance_settings):
# # save intrinsic configuration, usually using:
# # instance_settings.set_value(k, v)
# pass
#
# def restore_settings(self, plugin_settings, instance_settings):
# # restore intrinsic configuration, usually using:
# # v = instance_settings.value(k)
# pass
|
main3.py
|
import tensorflow as tf
from gym import wrappers
import make_env
import numpy as np
import random
from ReplayMemory import ReplayMemory
from ExplorationNoise import OrnsteinUhlenbeckActionNoise as OUNoise
from actorcriticv2 import ActorNetwork,CriticNetwork
#from actorcriticv1 import Brain, Worker
from Train import train
# from Distributed_Train import *
import argparse
from keras.models import load_model
import os
import threading, queue, time
class Brain(object):
def __init__(self, modelFolder):
self.actors = None
self.critics = None
self.ave_n = None
self.env_n = None
self.modelFolder = modelFolder
def update(self):
global global_step, global_step_max
while not coord.should_stop():
if global_step < global_step_max:
update_event.wait()
# print("Brain working!")
#global global_queue
s_batch, a_batch, r_batch, d_batch, s2_batch = [], [], [], [], []
for i in range(global_queue.qsize()):
data = global_queue.get()
s_batch.append(data[0])
a_batch.append(data[1])
r_batch.append(data[2])
d_batch.append(data[3])
s2_batch.append(data[4])
s_batch = np.array(s_batch)
a_batch = np.array(a_batch)
r_batch = np.array(r_batch)
d_batch = np.array(d_batch)
s2_batch = np.array(s2_batch)
# print("batch size:", s_batch.shape, s2_batch.shape)
action_dims_done = 0
for i in range(self.ave_n):
actor = self.actors[i]
critic = self.critics[i]
if True:
a = []
for j in range(self.ave_n):
state_batch_j = np.asarray([x for x in s_batch[:,j]]) #batch processing will be much more efficient even though reshaping will have to be done
a.append(self.actors[j].predict_target(state_batch_j))
a_temp = np.transpose(np.asarray(a),(1,0,2))
a_for_critic = np.asarray([x.flatten() for x in a_temp])
s2_batch_i = np.asarray([x for x in s2_batch[:,i]]) # Checked till this point, should be fine.
targetQ = critic.predict_target(s2_batch_i,a_for_critic) # Should work, probably
yi = []
for k in range(int(args['minibatch_size'])):
if d_batch[:,i][k]:
yi.append(r_batch[:,i][k])
else:
yi.append(r_batch[:,i][k] + critic.gamma*targetQ[k])
s_batch_i = np.asarray([x for x in s_batch[:,i]])
critic.train(s_batch_i,np.asarray([x.flatten() for x in a_batch[:, 0: self.ave_n, :]]),np.asarray(yi))
actions_pred = []
for j in range(self.ave_n):
state_batch_j = np.asarray([x for x in s2_batch[:,j]])
actions_pred.append(self.actors[j].predict(state_batch_j)) # Should work till here, roughly, probably
a_temp = np.transpose(np.asarray(actions_pred),(1,0,2))
a_for_critic_pred = np.asarray([x.flatten() for x in a_temp])
s_batch_i = np.asarray([x for x in s_batch[:,i]])
grads = critic.action_gradients(s_batch_i,a_for_critic_pred)[:,action_dims_done:action_dims_done + actor.action_dim]
actor.train(s_batch_i,grads)
actor.update_target()
critic.update_target()
action_dims_done = action_dims_done + actor.action_dim
# Only DDPG agent
for i in range(self.ave_n, self.env_n):
actor = self.actors[i]
critic = self.critics[i]
if True:
s_batch_i = np.asarray([x for x in s_batch[:,i]])
action = np.asarray(actor.predict_target(s_batch_i))
action_for_critic = np.asarray([x.flatten() for x in action])
s2_batch_i = np.asarray([x for x in s2_batch[:, i]])
targetQ = critic.predict_target(s2_batch_i, action_for_critic)
y_i = []
for k in range(int(args['minibatch_size'])):
# If ep is end
if d_batch[:, i][k]:
y_i.append(r_batch[:, i][k])
else:
y_i.append(r_batch[:, i][k] + critic.gamma * targetQ[k])
s_batch_i= np.asarray([x for x in s_batch[:, i]])
critic.train(s_batch_i, np.asarray([x.flatten() for x in a_batch[:, i]]), np.asarray(y_i))
action_for_critic_pred = actor.predict(s2_batch_i)
gradients = critic.action_gradients(s_batch_i, action_for_critic_pred)[:, :]
actor.train(s_batch_i, gradients)
actor.update_target()
critic.update_target()
global_step += 1
if global_step % (100*50) == 0 and global_step != 0:
directory = self.modelFolder + "ep" + str(global_step) + "/"
if not os.path.exists(directory):
os.makedirs(directory)
print("Starting saving model weights to folder every 200 episodes")
for i in range(self.env_n):
# saveModel(actors[i], i, args["modelFolder"])
saveWeights(self.actors[i], i, directory)
print("Model weights saved to folder")
update_event.clear() # updating finished
rolling_event.set()
###########################
##### WORKER ########
###########################
class Worker(object):
# init
def __init__(self, wid, n, max_episode_len, batch_size, seed, noise):
self.wid = wid
self.env = make_env.make_env("simple_tag")
print("Initiate worker ", wid)
self.env.seed(int(seed))
self.brain = brain
self.agent_num = n
self.max_episode_len = max_episode_len
self.batch_size = batch_size
self.noise = noise
def work(self):
global global_step_max, global_step
while not coord.should_stop():
s = self.env.reset()
episode_reward = np.zeros((self.agent_num,))
start = time.time()
# print("env", s[0])
for stp in range(200):
if not rolling_event.is_set():
rolling_event.wait()
# self.env.render()
actions = []
global graph
with graph.as_default():
# print("s0:", s[0])
for i in range(self.agent_num):
# print("Taking actions")
actor = self.brain.actors[i]
# print("wid:", self.wid, " actor!", i)
state_input = np.reshape(s[i],(-1,actor.state_dim))
# print(state_input)
actions.append(actor.act(state_input, self.noise[i]()).reshape(actor.action_dim,))
s2, r, done, _ = self.env.step(actions)
episode_reward += r
if global_queue.qsize() < self.batch_size:
global_queue.put([s, actions, r, done, s2])
# global_step += 1
s = s2
if stp == self.max_episode_len - 1:
if self.wid == 0:
showAveReward(self.wid, episode_reward, self.agent_num, stp, start)
break
if global_queue.qsize() > self.batch_size - 1:
rolling_event.clear()
update_event.set()
if global_step >= global_step_max:
coord.request_stop()
break
def build_summaries(n):
losses = [tf.Variable(0.) for i in range(n)]
for i in range(n):
tf.summary.scalar("Loss_Agent" + str(i), losses[i])
summary_vars = losses
summary_ops = tf.summary.merge_all()
return summary_ops, summary_vars
def getFromQueue():
s_batch, a_batch, r_batch, d_batch, s2_batch = [], [], [], [], []
for i in range(global_queue.qsize()):
data = global_queue.get()
s_batch.append(data[0])
a_batch.append(data[1])
r_batch.append(data[2])
d_batch.append(data[3])
s2_batch.append(data[4])
return s_batch, a_batch, r_batch, d_batch, s2_batch
class Controller(object):
def __init__(self):
self.update_event = update_event
self.rolling_event = rolling_event
self.update_event.clear()
self.rolling_event.set()
self.coord = tf.train.Coordinator()
def distributed_train(sess, env, args, actors, critics, noise, ave_n):
# callbacks = []
# train_names = ['train_loss', 'train_mae']
# callback = TensorBoard(args['summary_dir'])
for actor in actors:
actor.update_target()
for critic in critics:
critic.update_target()
worker_num = 4
global update_event, rolling_event
update_event.clear()
rolling_event.set()
brain.actors = actors
brain.critics = critics
brain.ave_n = ave_n
brain.env_n = env.n
workers = [Worker(i, env.n, 200, 64, 1234+i, noise) for i in range(worker_num)]
global_queue = queue.Queue()
threads = []
for worker in workers:
t = threading.Thread(target=worker.work, args=())
threads.append(t)
threads.append(threading.Thread(target=brain.update, args=()))
for t in threads:
t.start()
#time.sleep(0.2)
# print("before worker")
coord.join(threads)
def saveModel(actor, i, pathToSave):
actor.mainModel.save(pathToSave + str(i) + ".h5")
def saveWeights(actor, i, pathToSave):
actor.mainModel.save_weights(pathToSave + str(i) + "_weights.h5")
def showReward(episode_reward, n, ep, start):
reward_string = ""
for re in episode_reward:
reward_string += " {:5.2f} ".format(re)
print ('|Episode: {:4d} | Time: {:2d} | Rewards: {:s}'.format(ep, int(time.time() - start), reward_string))
def showAveReward(wid, episode_reward, n, ep, start):
reward_string = ""
for re in episode_reward:
reward_string += " {:5.2f} ".format(re / ep)
global global_step
print ('Global step: {:6.0f} | Worker: {:d} | Rewards: {:s}'.format(global_step, wid, reward_string))
def write_log(callback, names, logs, batch_no):
for name, value in zip(names, logs):
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
callback.writer.add_summary(summary, batch_no)
callback.writer.flush()
def main(args):
if not os.path.exists(args["modelFolder"]):
os.makedirs(args["modelFolder"])
if not os.path.exists(args["summary_dir"]):
os.makedirs(args["summary_dir"])
#with tf.device("/gpu:0"):
# MADDPG for Ave Agent
# DDPG for Good Agent
with tf.Session() as sess:
env = make_env.make_env('simple_tag')
np.random.seed(int(args['random_seed']))
tf.set_random_seed(int(args['random_seed']))
env.seed(int(args['random_seed']))
#with tf.device('/cpu:0'):
#if args["runTest"]:
#run()
#import sys
#sys.exit("test over!")
# Calculate good and ave agents number
ave_n = 0
good_n = 0
for i in env.agents:
if i.adversary:
ave_n += 1
else:
good_n += 1
print("adversary ", ave_n, "target ", good_n)
# print("ave_n", ave_n)
n = env.n
actors = []
critics = []
brains = []
exploration_noise = []
observation_dim = []
action_dim = []
total_action_dim = 0
# Adversary agents' action spaces
for i in range(ave_n):
total_action_dim = total_action_dim + env.action_space[i].n
print("total_action_dim", total_action_dim)
for i in range(n):
observation_dim.append(env.observation_space[i].shape[0])
action_dim.append(env.action_space[i].n) # assuming discrete action space here -> otherwise change to something like env.action_space[i].shape[0]
actors.append(ActorNetwork(sess,observation_dim[i],action_dim[i],float(args['actor_lr']),float(args['tau'])))
# critics.append(CriticNetwork(sess,n,observation_dim[i],total_action_dim,float(args['critic_lr']),float(args['tau']),float(args['gamma'])))
if i < ave_n:
#MADDPG - centralized Critic
critics.append(CriticNetwork(sess,n,observation_dim[i],total_action_dim,float(args['critic_lr']),float(args['tau']),float(args['gamma'])))
else:
# DDPG
critics.append(CriticNetwork(sess,n,observation_dim[i],action_dim[i],float(args['critic_lr']),float(args['tau']),float(args['gamma'])))
exploration_noise.append(OUNoise(mu = np.zeros(action_dim[i])))
"""
print("Test predict")
s = env.reset()
# print(s[0])
actions = []
for index in range(len(actors)):
state_input = np.reshape(s[index],(-1,actors[index].state_dim))
actions.append(actors[index].predict(state_input))
actors[index].predict_target(state_input)
actions1 = actions[:ave_n]
actions2 = actions[ave_n:]
a_temp1 = np.transpose(np.asarray(actions1),(1,0,2))
a_for_critic1 = np.asarray([x.flatten() for x in a_temp1])
a_temp2 = np.transpose(np.asarray(actions2),(1,0,2))
a_for_critic2 = np.asarray([x.flatten() for x in a_temp2])
for index in range(len(critics)):
state_input = np.reshape(s[index],(-1,actors[index].state_dim))
if index < ave_n:
critics[index].predict_target(state_input, a_for_critic1)
#critics[index].predict(state_input, a_for_critic1)
else:
critics[index].predict_target(state_input, a_for_critic2)
#critics[index].predict(state_input, a_for_critic2)
"""
# if args['use_gym_monitor']:
# if not args['render_env']:
# envMonitor = wrappers.Monitor(env, args['monitor_dir'], video_callable=False, force=True)
# else:
# envMonitor = wrappers.Monitor(env, args['monitor_dir'], force=True)
# n brains
if False:
for i in range(n):
observation_dim.append(env.observation_space[i].shape[0])
action_dim.append(env.action_space[i].n)
brains.append(Brain(sess, observation_dim[i], action_dim[i], float(args['actor_lr']), float(args['tau']), \
observation_dim[i], total_action_dim, float(args['critic_lr']), float(args['tau']),float(args['gamma'])))
exploration_noise.append(OUNoise(mu = np.zeros(action_dim[i])))
# learn()
if args["runTest"]:
# , force=True
# env = wrappers.Monitor(env, args["monitor_dir"], force=True)
for i in range(n):
# load model
actors[i].mainModel.load_weights(args["modelFolder"]+str(i)+'_weights'+'.h5')
# episode 4754
import time
# time.sleep(3)
for ep in range(10):
s = env.reset()
reward = 0.0
for step in range(200):
time.sleep(0.01)
env.render()
actions = []
for i in range(env.n):
state_input = np.reshape(s[i],(-1,env.observation_space[i].shape[0]))
noise = OUNoise(mu = np.zeros(5))
# predict_action = actors[i].predict(state_input) #+ exploration_noise[i]()
# actions.append(predict_action.reshape(env.action_space[i].n,))
# +noise()
actions.append((actors[i].predict(np.reshape(s[i],(-1, actors[i].mainModel.input_shape[1])))).reshape(actors[i].mainModel.output_shape[1],))
#print("{}".format(actions))
s, r, d, s2 = env.step(actions)
for i in range(env.n):
reward += r[i]
if np.all(d):
break
print("Episode: {:d} | Reward: {:f}".format(ep, reward))
env.close()
import sys
sys.exit("test over!")
if False:
import time
# , force=True
# env = wrappers.Monitor(env, args["monitor_dir"], force=True)
for ep in range(10):
# load model
s = env.reset()
for j in range(env.n):
actors[j].mainModel.load_weights(args["modelFolder"]+ str(j) +'_weights'+'.h5')
for step in range(300):
reward = 0.0
# time.sleep(0.05)
env.render()
actions = []
for i in range(env.n):
state_input = np.reshape(s[i],(-1,env.observation_space[i].shape[0]))
noise = OUNoise(mu = np.zeros(5))
# predict_action = actors[i].predict(state_input) #+ exploration_noise[i]()
# actions.append(predict_action.reshape(env.action_space[i].n,))
# +noise()
actions.append((actors[i].predict(np.reshape(s[i],(-1, actors[i].mainModel.input_shape[1])))).reshape(actors[i].mainModel.output_shape[1],))
s, r, d, s2 = env.step(actions)
for i in range(env.n):
reward += r[i]
if np.all(d):
break
print("Episode: {:d} | Reward: {:f}".format(ep, reward))
else:
if True:
train(sess,env,args,actors,critics,exploration_noise, ave_n)
else:
global graph, global_queue, update_event, rolling_event, global_step_max, global_step, coord, brain
graph = tf.get_default_graph()
global_queue = queue.Queue()
update_event, rolling_event = threading.Event(), threading.Event()
global_step_max, global_step = 200*1000, 0
coord = tf.train.Coordinator()
brain = Brain(args["modelFolder"])
distributed_train(sess, env, args, actors, critics, exploration_noise, ave_n)
#if args['use_gym_monitor']:
# envMonitor.monitor.close()
# Training stop
def run():
env = make_env.make_env('simple_tag')
n = env.n
exploration_noise = []
actors = []
for i in range(n):
# load model
actors.append(load_model(args["modelFolder"] + str(i) + ".h5"))
exploration_noise.append(OUNoise(mu = np.zeros(env.action_space[i].n)))
# test for 100 episode
noise = OUNoise(mu = np.zeros(5))
import time
for ep in range(50):
s = env.reset()
#if ep == 0:
#print([i.state.p_pos for i in env.world.borders])
reward = 0.0
for step in range(100):
# time.sleep(0.05)
env.render()
actions = []
for i in range(env.n):
state_input = np.reshape(s[i],(-1,env.observation_space[i].shape[0]))
predict_action = actors[i].predict(state_input) #+ noise()
actions.append(predict_action.reshape(env.action_space[i].n,))
s, r, d, s2 = env.step(actions)
for i in range(env.n):
reward += r[i]
if np.all(d):
break
print("Episode: {:5.2f} | Reward: {:f}".format(ep, reward))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='provide arguments for DDPG agent')
# agent parameters
parser.add_argument('--actor-lr', help='actor network learning rate', default=0.0001)
parser.add_argument('--critic-lr', help='critic network learning rate', default=0.001)
parser.add_argument('--gamma', help='discount factor for critic updates', default=0.99)
parser.add_argument('--tau', help='soft target update parameter', default=0.01)
parser.add_argument('--buffer-size', help='max size of the replay buffer', default=1000000)
parser.add_argument('--minibatch-size', help='size of minibatch for minibatch-SGD', default=64)
# run parameters
#parser.add_argument('--env', help='choose the gym env- tested on {Pendulum-v0}', default='MountainCarContinuous-v0')
parser.add_argument('--random-seed', help='random seed for repeatability', default=1234)
parser.add_argument('--max-episodes', help='max num of episodes to do while training', default=10000)
parser.add_argument('--max-episode-len', help='max length of 1 episode', default=200)
parser.add_argument('--render-env', help='render the gym env', action='store_true')
parser.add_argument('--use-gym-monitor', help='record gym results', action='store_true')
parser.add_argument('--monitor-dir', help='directory for storing gym results', default='./results/videos/video1')
parser.add_argument('--summary-dir', help='directory for storing tensorboard info', default='./results/3vs1_hard/tfdata/')
parser.add_argument('--modelFolder', help='the folder which saved model data', default="./results/3vs1_hard/weights/")
parser.add_argument('--runTest', help='use saved model to run', default=True)
parser.set_defaults(render_env=False)
parser.set_defaults(use_gym_monitor=False)
args = vars(parser.parse_args())
#pp.pprint(args)
## Distributed
main(args)
|
main.queue.py
|
from time import sleep
import threading
from storage_mysqldb import connect_db, create_table, insert_db
from input_thread import getPatientInfo, readSensorData
import alert_system
import output
import queue
def getAllInfo():
while True:
sleep(2)
# get data
PatientInfo = getPatientInfo()
SensorData = readSensorData(PatientInfo)
# putting data into queues.
patientInfoQueue.put(PatientInfo)
sensorDataQueue.put(SensorData)
alertQueue.put(SensorData)
def manageDatabase():
while True:
PatientInfo = patientInfoQueue.get()
SensorData = sensorDataQueue.get()
# insert data from the queues to database.
insert_db(db, PatientInfo, SensorData)
print("Insert data successful")
def alert():
while True:
SensorData = alertQueue.get()
# output alert messages
alert_json = alert_system.alertCheck(SensorData)
patient1 = output.patient()
patient1.recieveFromAlert(alert_json)
patient1.send_alert_to_UI()
db = connect_db()
create_table(db) # only use this for the first time
patientInfoQueue = queue.Queue()
sensorDataQueue = queue.Queue()
alertQueue = queue.Queue()
threadGetInfo = threading.Thread(target=getAllInfo)
threadManageDatabase = threading.Thread(target=manageDatabase)
threadAlert = threading.Thread(target=alert)
threadGetInfo.start()
threadManageDatabase.start()
threadAlert.start()
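#
# -----------------------------------------------------------------------------
# Illustrative note (sketch only, not part of the original flow): the three
# worker threads above are non-daemon, so the process keeps running even after
# the main thread finishes and a Ctrl-C will not bring it down cleanly.  If
# that behaviour is unwanted (an assumption about intent, not something this
# file states), they could be marked as daemon threads before being started:
#
#   threadGetInfo.daemon = True
#   threadManageDatabase.daemon = True
#   threadAlert.daemon = True
# -----------------------------------------------------------------------------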
|
load_test_client.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A loadtest script which sends request via GRPC to TF inference server."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import io
import json
import threading
import time
import grpc
import numpy as np
from PIL import Image
import queue as Queue
from ratelimiter import RateLimiter
import requests
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
tf.app.flags.DEFINE_integer('num_requests', 20, 'Total # of requests sent.')
tf.app.flags.DEFINE_integer('qps', 4, 'Desired client side request QPS')
tf.app.flags.DEFINE_float('request_timeout', 300.0,
'Timeout for inference request.')
tf.app.flags.DEFINE_string('model_name', '',
'Name of the model being served on the ModelServer')
tf.app.flags.DEFINE_string(
'tpu', '',
'Inference server ip address and port (grpc://<tpu_ip_address>:8470) or'
'the name of the Cloud TPU for Cluster Resolvers. If it is a tpu name, it'
'will be resolved to ip address and port. Otherwise, the provided (proxy)'
'ip address and port will be directly used.')
tf.app.flags.DEFINE_boolean('grpc', True, 'True if gRPC, False if REST.')
tf.app.flags.DEFINE_string('image_path', '', 'The path of local image.')
tf.app.flags.DEFINE_string('input_name', 'input',
'The name of the model input tensor.')
tf.app.flags.DEFINE_string('image_format', 'jpeg',
'The image format for generated image (png, jpeg)')
tf.app.flags.DEFINE_integer('batch_size', 8, 'Per request batch size.')
tf.app.flags.DEFINE_integer('image_size', 224,
'Height and width of the image (square image).')
tf.app.flags.DEFINE_integer('channels', 3, 'Load image number of channels.')
tf.app.flags.DEFINE_string(
'api_key', '',
'API Key for ESP service if authenticating external requests.')
FLAGS = tf.app.flags.FLAGS
class Worker(object):
"""A loadtest worker which sends RPC request."""
__slots__ = ('_id', '_request', '_stub', '_queue', '_success', '_start_time',
'_end_time', '_qps', '_num_requests', '_metadata')
def __init__(self, index, request, stub, queue, qps, num_requests, metadata):
self._id = index
self._request = request
self._stub = stub
self._queue = queue
self._qps = qps
self._num_requests = num_requests
self._success = None
self._start_time = None
self._end_time = None
self._metadata = metadata
def start(self):
"""Start to send request."""
def _callback(resp_future):
"""Callback for aynchronous inference request sent."""
exception = resp_future.exception()
if exception:
self._success = False
tf.logging.error(exception)
else:
self._success = True
self._end_time = time.time()
self._queue.get()
self._queue.task_done()
processed_count = self._num_requests - self._queue.qsize()
if processed_count % self._qps == 0:
tf.logging.info('received {} responses'.format(processed_count))
def _send_rpc():
self._start_time = time.time()
resp_future = self._stub.Predict.future(self._request,
FLAGS.request_timeout,
metadata=self._metadata)
resp_future.add_done_callback(_callback)
_send_rpc()
def cancel(self):
self._rpc.StartCancel()
@property
def success_count(self):
return int(self._success)
@property
def error_count(self):
return int(not self._success)
@property
def latency(self):
if not (self._start_time and self._end_time):
raise Exception('Request is not complete yet.')
return self._end_time - self._start_time
def run_grpc_load_test(num_requests, qps, request, stub):
"""Loadtest the server gRPC endpoint with constant QPS.
Args:
num_requests: The total number of requests.
qps: The number of requests being sent per second.
request: The PredictRequest proto.
stub: The model server stub to which send inference requests.
"""
metadata = []
if FLAGS.api_key:
metadata.append(('x-api-key', FLAGS.api_key))
rate_limiter = RateLimiter(max_calls=qps, period=1)
q = Queue.Queue()
for i in range(num_requests):
q.put(i)
workers = []
start = time.time()
for i in range(num_requests):
worker = Worker(i, request, stub, q, qps, num_requests, metadata)
workers.append(worker)
if i % qps == 0:
tf.logging.info('sent {} requests.'.format(i))
with rate_limiter:
worker.start()
# block until all workers are done
q.join()
acc_time = time.time() - start
success_count = 0
error_count = 0
latency = []
for w in workers:
success_count += w.success_count
error_count += w.error_count
latency.append(w.latency)
tf.logging.info('num_qps:{} requests/second: {} #success:{} #error:{} '
'latencies: [p50:{:.5f} p90:{:.5f} p99:{:.5f}]'.format(
qps, num_requests / acc_time, success_count, error_count,
np.percentile(latency, 50), np.percentile(latency, 90),
np.percentile(latency, 99)))
def generate_image():
array = np.uint8(
np.random.rand(FLAGS.image_size, FLAGS.image_size, FLAGS.channels) * 255)
pil_image = Image.fromarray(array)
image_io = io.BytesIO()
pil_image.save(image_io, format=FLAGS.image_format)
return image_io.getvalue()
def get_image_payload():
image = None
if FLAGS.image_path:
tf.logging.info('Building request with image: {}'.format(FLAGS.image_path))
image = open(FLAGS.image_path, 'rb').read()
else:
tf.logging.info('Generating fake image with shape=[{},{},{}]'.format(
FLAGS.image_size, FLAGS.image_size, FLAGS.channels))
image = generate_image()
return image
def generate_grpc_request():
"""Generate gRPC inference request with payload."""
request = predict_pb2.PredictRequest()
request.model_spec.name = FLAGS.model_name
request.model_spec.signature_name = 'serving_default'
image = get_image_payload()
request.inputs[FLAGS.input_name].CopyFrom(
tf.contrib.util.make_tensor_proto(
[image] * FLAGS.batch_size, shape=[FLAGS.batch_size]))
return request
def run_rest_load_test(num_requests, qps, server_ip, payload):
"""Run inference load test against REST endpoint."""
address = 'http://{}/v1/models/{}:predict'.format(server_ip, FLAGS.model_name)
# List appends are thread safe
success = []
error = []
latency = []
def _make_rest_call():
"""Send REST POST request to Tensorflow Serving endpoint."""
start_time = time.time()
resp = requests.post(address, data=payload)
latency.append(time.time() - start_time)
if len(latency) % qps == 0:
tf.logging.info('received {} responses.'.format(len(latency)))
if resp.status_code == 200:
success.append(1)
else:
tf.logging.error(resp.json())
error.append(1)
resp.close()
rate_limiter = RateLimiter(max_calls=qps, period=1)
start_time = time.time()
thread_lst = []
for i in range(num_requests):
with rate_limiter:
thread = threading.Thread(target=_make_rest_call)
thread_lst.append(thread)
thread.start()
if i % qps == 0:
tf.logging.info('sent {} requests.'.format(i))
for thread in thread_lst:
thread.join()
acc_time = time.time() - start_time
tf.logging.info('num_qps:{} requests/second: {} #success:{} #error:{} '
'latencies: [p50:{:.5f} p90:{:.5f} p99:{:.5f}]'.format(
qps, num_requests / acc_time, sum(success), sum(error),
np.percentile(latency, 50), np.percentile(latency, 90),
np.percentile(latency, 99)))
def generate_rest_payload():
"""Generate REST inference request's payload."""
encoded_image = base64.encodestring(get_image_payload())
inputs = [{'b64': encoded_image}] * FLAGS.batch_size
payload = json.dumps({
'signature_name': 'serving_default',
'inputs': inputs,
})
return payload
def main(argv):
del argv
tpu_address = FLAGS.tpu
if not any(pref in FLAGS.tpu for pref in ['http://', 'grpc://']):
tpu_address = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu).master()
tpu_address = '{}:{}'.format(tpu_address[:-len(':1234')],
'8470' if FLAGS.grpc else '8473')
tpu_address = tpu_address[len('abcd://'):]
tf.logging.info('ModelServer at: {}'.format(tpu_address))
if FLAGS.grpc:
grpc_channel = grpc.insecure_channel(tpu_address)
stub = prediction_service_pb2_grpc.PredictionServiceStub(grpc_channel)
run_grpc_load_test(FLAGS.num_requests, FLAGS.qps, generate_grpc_request(),
stub)
else:
payload = generate_rest_payload()
run_rest_load_test(FLAGS.num_requests, FLAGS.qps, tpu_address, payload)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run(main)
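#
# -----------------------------------------------------------------------------
# Illustrative invocation only (not executed here): the flag names come from
# the tf.app.flags definitions above; the server address and model name are
# placeholders for a real deployment.
#
#   python load_test_client.py --tpu=grpc://<server_ip>:8470 \
#       --model_name=<your_model> --num_requests=100 --qps=8 --batch_size=8
#
# Passing --grpc=False drives the REST path (run_rest_load_test) with the same
# flags instead of the gRPC path (run_grpc_load_test).
# -----------------------------------------------------------------------------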
|
gps3threaded.py
|
#!/usr/bin/env python3.5
# coding=utf-8
"""Threaded gps3 client"""
from __future__ import print_function
from threading import Thread
from time import sleep
try: # This kludge to get around imports with files and directories the same name.
import gps3 # Python 3
except ImportError:
from . import gps3 # Python 2
__author__ = 'Moe'
__copyright__ = 'Copyright 2016 Moe'
__license__ = 'MIT'
__version__ = '0.2.3'
HOST = '127.0.0.1' # gpsd
GPSD_PORT = 2947 # defaults
PROTOCOL = 'json' # "
class GPS3mechanism(object):
"""Create threaded data stream as updated object attributes
"""
def __init__(self):
self.socket = gps3.GPSDSocket()
self.data_stream = gps3.DataStream()
def stream_data(self, host=HOST, port=GPSD_PORT, enable=True, gpsd_protocol=PROTOCOL, devicepath=None):
""" Connect and command, point and shoot, flail and bail
"""
self.socket.connect(host, port)
self.socket.watch(enable, gpsd_protocol, devicepath)
def unpack_data(self, usnap=.2): # 2/10th second sleep between empty requests
""" Iterates over socket response and unpacks values of object attributes.
Sleeping here has the greatest response to cpu cycles short of blocking sockets
"""
for new_data in self.socket:
if new_data:
self.data_stream.unpack(new_data)
else:
sleep(usnap) # Sleep in seconds after an empty look up.
def run_thread(self, usnap=.2, daemon=True):
"""run thread with data
"""
# self.stream_data() # Unless other changes are made this would limit to localhost only.
try:
gps3_data_thread = Thread(target=self.unpack_data, args=(usnap,), daemon=daemon)
except TypeError:
# threading.Thread() only accepts daemon argument in Python 3.3
gps3_data_thread = Thread(target=self.unpack_data, args=(usnap,))
gps3_data_thread.setDaemon(daemon)
gps3_data_thread.start()
def stop(self):
""" Stop as much as possible, as gracefully as possible, if possible.
"""
self.stream_data(enable=False) # Stop data stream, thread is on its own so far.
print('Process stopped by user')
print('Good bye.') # You haven't gone anywhere, re-start it all with 'self.stream_data()'
if __name__ == '__main__':
from misc import add_args
args = add_args()
gps3_thread = GPS3mechanism() # The thread triumvirate
gps3_thread.stream_data(host=args.host, port=args.port, gpsd_protocol=args.gpsd_protocol)
gps3_thread.run_thread(usnap=.2) # Throttle sleep between empty lookups in seconds defaults = 0.2 of a second.
seconds_nap = int(args.seconds_nap) # Threaded Demo loop 'seconds_nap' is not the same as 'usnap'
while True:
for nod in range(0, seconds_nap):
print('{:.0%} wait period of {} seconds'.format(nod / seconds_nap, seconds_nap), end='\r')
sleep(1)
print('\nGPS3 Thread still functioning at {}'.format(gps3_thread.data_stream.TPV['time']))
print('Lat:{} Lon:{} Speed:{} Course:{}\n'.format(gps3_thread.data_stream.TPV['lat'],
gps3_thread.data_stream.TPV['lon'],
gps3_thread.data_stream.TPV['speed'],
gps3_thread.data_stream.TPV['track']))
#
######
# END
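#
# -----------------------------------------------------------------------------
# Illustrative sketch only (defined but never called): the same GPSDSocket /
# DataStream pair can be driven without the thread wrapper above.  Host, port
# and protocol fall back to the defaults defined at the top of this file.
# -----------------------------------------------------------------------------
def poll_position_once_sketch(host=HOST, port=GPSD_PORT, gpsd_protocol=PROTOCOL):
    socket = gps3.GPSDSocket()
    data_stream = gps3.DataStream()
    socket.connect(host, port)
    socket.watch(True, gpsd_protocol, None)
    for new_data in socket:
        if new_data:
            data_stream.unpack(new_data)
            # May still be 'n/a' until gpsd reports an actual fix.
            return data_stream.TPV['lat'], data_stream.TPV['lon']
        sleep(.2)  # same empty-lookup throttle used by unpack_data() above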
|
app.py
|
from flask import Flask, render_template, request, redirect, url_for, jsonify
from dataclasses import dataclass
from flask_sqlalchemy import SQLAlchemy
import pyperclip
import psutil
import time
import json
from datetime import datetime
from multiprocessing import Process, Value
from hashlib import sha256
import signal
import sys
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///clippy.db'
db = SQLAlchemy(app)
@dataclass
class ClipboardItem(db.Model):
id: int
text: str
length: int
location: str
date: str
id = db.Column(db.Integer, primary_key=True)
text = db.Column(db.String(9999), nullable=False)
length = db.Column(db.Integer, nullable=False)
location = db.Column(db.String(2083))
date = db.Column(db.DateTime, nullable=False, default=datetime.now)
def get_hash(text):
return sha256(text.encode('utf-8')).hexdigest()
def listen(text):
while True:
check = pyperclip.paste().strip()
if check != text:
text = check
print("Copying Text...")
add(text)
time.sleep(0.5)
@app.route('/')
def index():
# items = ClipboardItem.query.all()
items = ClipboardItem.query.order_by(ClipboardItem.date.desc()).all()
# json_items = jsonify(items)
return render_template('index.html', items=items)
# json_data = jsonify(items)
# return jsonify(json_data.get_data(as_text=True))
@app.route('/json')
def post_json():
# items = ClipboardItem.query.all()
items = ClipboardItem.query.order_by(ClipboardItem.date.desc()).all()
return jsonify(items)
def add(text):
for _ in db.session.execute("SELECT * FROM clipboard_item WHERE text =:text", {"text":text }):
# db.session.execute("DELETE FROM clipboard_item WHERE text =:text", {"text":text })
print("Action Blocked: Entry Exists...")
break
else:
print('Inserting Entry...')
dummy = ClipboardItem(text=text, length=len(text), location="null", date=datetime.now())
db.session.add(dummy)
db.session.commit()
# dummy = ClipboardItem(text=text, length=len(text), location="null", date=datetime.now())
# db.session.add(dummy)
# db.session.commit()
@app.route('/deleteAll', methods=['POST'])
def removeAll():
print("Deleting All Entries...")
db.session.execute('DELETE FROM clipboard_item')
db.session.commit()
return redirect(url_for('index'))
@app.route('/itemAction', methods=['POST'])
def item_actions():
if request.method == "POST":
# I. Delete Individual Item
if request.form.get('action') and request.form.get('action')[:10] == 'deleteitem':
print("Deleting Entry ID {} ...".format(request.form.get('action')[10:]))
db.session.execute("DELETE FROM clipboard_item where id =:id", {"id":int(request.form.get('action')[10:]) })
# II. Delete Selected Items
elif request.form.getlist("copy_id"):
print("Deleting Selected Entries...")
print(request.form.getlist("copy_id"))
ClipboardItem.query.filter(ClipboardItem.id.in_(request.form.getlist("copy_id"))).delete(synchronize_session=False)
db.session.commit()
return redirect(url_for('index'))
@app.route('/toggle')
def toggle_listen():
if p.status() == "stopped":
print("Resuming Listen...")
p.resume()
else:
print("Pausing Listen...")
p.suspend()
return redirect(url_for('index'))
def signal_handler(sig, frame):
print("Closing Application...")
if p and p.status() == "stopped":
p.resume()
sys.exit(0)
if __name__ == "__main__":
listener = Process(target=listen, args=(pyperclip.paste().strip(),))
listener.start()
signal.signal(signal.SIGINT, signal_handler)
p = psutil.Process(listener.pid)
app.run(debug=True, use_reloader=False)
listener.join()
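#
# -----------------------------------------------------------------------------
# Illustrative sketch only (defined but never called): a minimal client for
# the routes defined above.  It assumes the app is served on Flask's default
# local address (http://127.0.0.1:5000) and that the `requests` package is
# installed; both are assumptions, not something this file sets up.
# -----------------------------------------------------------------------------
def example_clippy_client(base="http://127.0.0.1:5000"):
    import requests
    # Dump every stored clipboard entry as JSON (served by the /json route).
    print(requests.get(base + "/json").json())
    # Pause or resume the clipboard listener process (the /toggle route).
    requests.get(base + "/toggle")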
|
mainwindow.py
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific Python Development Environment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
import errno
import gc
import logging
import os
import os.path as osp
import re
import signal
import socket
import subprocess
import sys
import threading
import traceback
import importlib
logger = logging.getLogger(__name__)
#==============================================================================
# Keeping a reference to the original sys.exit before patching it
#==============================================================================
ORIGINAL_SYS_EXIT = sys.exit
#==============================================================================
# Check requirements
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
requirements.check_spyder_kernels()
#==============================================================================
# Windows only: support for hiding console window when started with python.exe
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
is_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Qt imports
#==============================================================================
from qtpy import API, PYQT5
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QByteArray, QCoreApplication, QPoint, QSize, Qt,
QThread, QTimer, QUrl, Signal, Slot,
qInstallMessageHandler)
from qtpy.QtGui import QColor, QDesktopServices, QIcon, QKeySequence, QPixmap
from qtpy.QtWidgets import (QAction, QApplication, QDockWidget, QMainWindow,
QMenu, QMessageBox, QShortcut, QSplashScreen,
QStyleFactory, QWidget, QDesktopWidget)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
# For issue 7447
try:
from qtpy.QtQuick import QQuickWindow, QSGRendererInterface
except Exception:
QQuickWindow = QSGRendererInterface = None
# To catch font errors in QtAwesome
from qtawesome.iconic_font import FontError
#==============================================================================
# Proper high DPI scaling is available in Qt >= 5.6.0. This attribute must
# be set before creating the application.
#==============================================================================
from spyder.config.main import CONF
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling,
CONF.get('main', 'high_dpi_scaling'))
#==============================================================================
# Create our QApplication instance here because it's needed to render the
# splash screen created below
#==============================================================================
from spyder.utils.qthelpers import qapplication, MENU_SEPARATOR
from spyder.config.base import get_image_path
MAIN_APP = qapplication()
if PYQT5:
APP_ICON = QIcon(get_image_path("spyder.svg"))
else:
APP_ICON = QIcon(get_image_path("spyder.png"))
MAIN_APP.setWindowIcon(APP_ICON)
#==============================================================================
# Create splash screen out of MainWindow to reduce perceived startup time.
#==============================================================================
from spyder.config.base import _, get_image_path, DEV, running_under_pytest
if not running_under_pytest():
SPLASH = QSplashScreen(QPixmap(get_image_path('splash.svg')))
SPLASH_FONT = SPLASH.font()
SPLASH_FONT.setPixelSize(10)
SPLASH.setFont(SPLASH_FONT)
SPLASH.show()
SPLASH.showMessage(_("Initializing..."), Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute, QColor(Qt.white))
QApplication.processEvents()
else:
SPLASH = None
#==============================================================================
# Local utility imports
#==============================================================================
from spyder import (__version__, __project_url__, __forum_url__,
__trouble_url__, __website_url__, get_versions)
from spyder.config.base import (get_conf_path, get_module_source_path, STDERR,
get_debug_level, MAC_APP_NAME, get_home_dir,
running_in_mac_app, get_module_path,
reset_config_files)
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.utils import IMPORT_EXT, is_gtk_desktop
from spyder.app.cli_options import get_options
from spyder import dependencies
from spyder.py3compat import (is_text_string, to_text_string,
PY3, qbytearray_to_str, configparser as cp)
from spyder.utils import encoding, programs
from spyder.utils import icon_manager as ima
from spyder.utils.programs import is_module_installed
from spyder.utils.misc import select_port, getcwd_or_home, get_python_executable
from spyder.plugins.help.utils.sphinxify import CSS_PATH, DARK_CSS_PATH
from spyder.config.gui import is_dark_font_color
#==============================================================================
# Local gui imports
#==============================================================================
# NOTE: Move (if possible) import's of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
try:
from spyder.utils.environ import WinUserEnvDialog
except ImportError:
WinUserEnvDialog = None # analysis:ignore
from spyder.utils.qthelpers import (create_action, add_actions, get_icon,
add_shortcut_to_tooltip,
create_module_bookmark_actions,
create_program_action, DialogManager,
create_python_script_action, file_uri)
from spyder.config.gui import get_shortcut
from spyder.otherplugins import get_spyderplugins_mods
from spyder.app import tour
#==============================================================================
# Third-party library imports
#==============================================================================
import qdarkstyle
#==============================================================================
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
#==============================================================================
CWD = getcwd_or_home()
#==============================================================================
# Utility functions
#==============================================================================
def get_python_doc_path():
"""
Return Python documentation path
(Windows: return the PythonXX.chm path if available)
"""
if os.name == 'nt':
doc_path = osp.join(sys.prefix, "Doc")
if not osp.isdir(doc_path):
return
python_chm = [path for path in os.listdir(doc_path)
if re.match(r"(?i)Python[0-9]{3,6}.chm", path)]
if python_chm:
return file_uri(osp.join(doc_path, python_chm[0]))
else:
vinf = sys.version_info
doc_path = '/usr/share/doc/python%d.%d/html' % (vinf[0], vinf[1])
python_doc = osp.join(doc_path, "index.html")
if osp.isfile(python_doc):
return file_uri(python_doc)
def set_opengl_implementation(option):
"""
Set the OpenGL implementation used by Spyder.
See issue 7447 for the details.
"""
if option == 'software':
QCoreApplication.setAttribute(Qt.AA_UseSoftwareOpenGL)
if QQuickWindow is not None:
QQuickWindow.setSceneGraphBackend(QSGRendererInterface.Software)
elif option == 'desktop':
QCoreApplication.setAttribute(Qt.AA_UseDesktopOpenGL)
if QQuickWindow is not None:
QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL)
elif option == 'gles':
QCoreApplication.setAttribute(Qt.AA_UseOpenGLES)
if QQuickWindow is not None:
QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL)
def setup_logging(cli_options):
"""Setup logging with cli options defined by the user."""
if cli_options.debug_info or get_debug_level() > 0:
levels = {2: logging.INFO, 3: logging.DEBUG}
log_level = levels[get_debug_level()]
log_format = '%(asctime)s [%(levelname)s] [%(name)s] -> %(message)s'
if cli_options.debug_output == 'file':
log_file = 'spyder-debug.log'
else:
log_file = None
logging.basicConfig(level=log_level,
format=log_format,
filename=log_file,
filemode='w+')
def qt_message_handler(msg_type, msg_log_context, msg_string):
"""
Qt warning messages are intercepted by this handler.
On some operating systems, warning messages might be displayed
even if the actual message does not apply. This filter adds a
blacklist for messages that are being printed for no apparent
reason. Anything else will get printed in the internal console.
In DEV mode, all messages are printed.
"""
BLACKLIST = [
'QMainWidget::resizeDocks: all sizes need to be larger than 0',
]
if DEV or msg_string not in BLACKLIST:
print(msg_string) # spyder: test-skip
qInstallMessageHandler(qt_message_handler)
# =============================================================================
# Dependencies
# =============================================================================
QDARKSTYLE_REQVER = '>=2.6.4'
dependencies.add("qdarkstyle", _("Dark style for the entire interface"),
required_version=QDARKSTYLE_REQVER)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = QMainWindow.AllowTabbedDocks|QMainWindow.AllowNestedDocks
CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
BOOKMARKS = (
('Python2', "https://docs.python.org/2/index.html",
_("Python2 documentation")),
('Python3', "https://docs.python.org/3/index.html",
_("Python3 documentation")),
('numpy', "https://docs.scipy.org/doc/",
_("Numpy and Scipy documentation")),
('matplotlib', "https://matplotlib.org/contents.html",
_("Matplotlib documentation")),
('PyQt5',
"http://pyqt.sourceforge.net/Docs/PyQt5/",
_("PyQt5 Reference Guide")),
('PyQt5',
"http://pyqt.sourceforge.net/Docs/PyQt5/class_reference.html",
_("PyQt5 API Reference")),
('winpython', "https://winpython.github.io/",
_("WinPython"))
)
DEFAULT_LAYOUTS = 4
# Signals
restore_scrollbar_position = Signal()
all_actions_defined = Signal()
sig_pythonpath_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent") # Related to interactive tour
sig_moved = Signal("QMoveEvent") # Related to interactive tour
sig_layout_setup_ready = Signal(object) # Related to default layouts
def __init__(self, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if PYQT5:
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
self.default_style = str(qapp.style().objectName())
self.dialog_manager = DialogManager()
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
self.open_project = options.project
self.window_title = options.window_title
logger.info("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
                # Make Spyder quit when pressing Ctrl+C in the console
# In DEV Ctrl+C doesn't quit, because it helps to
# capture the traceback when spyder freezes
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Shortcut management data
self.shortcut_data = []
# Loading Spyder path
self.path = []
self.not_active_path = []
self.project_path = []
if osp.isfile(self.SPYDER_PATH):
self.path, _x = encoding.readlines(self.SPYDER_PATH)
self.path = [name for name in self.path if osp.isdir(name)]
if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
self.not_active_path, _x = \
encoding.readlines(self.SPYDER_NOT_ACTIVE_PATH)
self.not_active_path = \
[name for name in self.not_active_path if osp.isdir(name)]
self.remove_path_from_sys_path()
self.add_path_to_sys_path()
# Plugins
self.console = None
self.workingdirectory = None
self.editor = None
self.explorer = None
self.help = None
self.onlinehelp = None
self.projects = None
self.outlineexplorer = None
self.historylog = None
self.ipyconsole = None
self.variableexplorer = None
self.plots = None
self.findinfiles = None
self.thirdparty_plugins = []
# Tour # TODO: Should I consider it a plugin?? or?
self.tour = None
self.tours_available = None
# File switcher
self.fileswitcher = None
        # Check for updates Thread and Worker; references needed to prevent
# segfaulting
self.check_updates_action = None
self.thread_updates = None
self.worker_updates = None
self.give_updates_feedback = True
# Preferences
from spyder.preferences.appearance import AppearanceConfigPage
from spyder.preferences.general import MainConfigPage
from spyder.preferences.shortcuts import ShortcutsConfigPage
from spyder.preferences.runconfig import RunConfigPage
from spyder.preferences.maininterpreter import MainInterpreterConfigPage
from spyder.preferences.languageserver import LSPManagerConfigPage
self.general_prefs = [MainConfigPage, AppearanceConfigPage,
ShortcutsConfigPage, MainInterpreterConfigPage,
RunConfigPage, LSPManagerConfigPage]
self.prefs_index = None
self.prefs_dialog_size = None
# Quick Layouts and Dialogs
from spyder.preferences.layoutdialog import (LayoutSaveDialog,
LayoutSettingsDialog)
self.dialog_layout_save = LayoutSaveDialog
self.dialog_layout_settings = LayoutSettingsDialog
# Actions
self.lock_interface_action = None
self.show_toolbars_action = None
self.close_dockwidget_action = None
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
self.maximize_action = None
self.fullscreen_action = None
# Menu bars
self.file_menu = None
self.file_menu_actions = []
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
self.consoles_menu = None
self.consoles_menu_actions = []
self.projects_menu = None
self.projects_menu_actions = []
self.tools_menu = None
self.tools_menu_actions = []
self.external_tools_menu = None # We must keep a reference to this,
        # otherwise the external tools menu is lost after leaving the setup method
self.external_tools_menu_actions = []
self.view_menu = None
self.plugins_menu = None
self.plugins_menu_actions = []
self.toolbars_menu = None
self.help_menu = None
self.help_menu_actions = []
# Status bar widgets
self.mem_status = None
self.cpu_status = None
# Toolbars
self.visible_toolbars = []
self.toolbarslist = []
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.edit_toolbar = None
self.edit_toolbar_actions = []
self.search_toolbar = None
self.search_toolbar_actions = []
self.source_toolbar = None
self.source_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.layout_toolbar = None
self.layout_toolbar_actions = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
# Set window title
self.set_window_title()
        if set_windows_appusermodelid is not None:
res = set_windows_appusermodelid()
logger.info("appusermodelid: %s", res)
# Setting QTimer if running in travis
test_travis = os.environ.get('TEST_CI_APP', None)
if test_travis is not None:
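            # Automatically quit the app after 30 seconds so CI test runs
            # terminate on their own.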
global MAIN_APP
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(MAIN_APP.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = SPLASH
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.interface_locked = CONF.get('main', 'panes_locked')
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
self.state_before_maximizing = None
self.current_quick_layout = None
self.previous_layout_settings = None # TODO: related to quick layouts
self.last_plugin = None
self.fullscreen_flag = None # isFullscreen does not work as expected
# The following flag remember the maximized state even when
# the window is in fullscreen mode:
self.maximized_flag = None
# The following flag is used to restore window's geometry when
# toggling out of fullscreen mode in Windows.
self.saved_normal_geometry = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See issue 4132
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError as e:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: #555555\'><b>netsh winsock reset"
"</b></span><br>"))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
# Apply preferences
self.apply_settings()
# To set all dockwidgets tabs to be on top (in case we want to do it
# in the future)
# self.setTabPosition(Qt.AllDockWidgetAreas, QTabWidget.North)
logger.info("End of MainWindow constructor")
#---- Window setup
def create_toolbar(self, title, object_name, iconsize=24):
"""Create and return toolbar with *title* and *object_name*"""
toolbar = self.addToolBar(title)
toolbar.setObjectName(object_name)
toolbar.setIconSize(QSize(iconsize, iconsize))
self.toolbarslist.append(toolbar)
return toolbar
def setup(self):
"""Setup main window"""
logger.info("*** Start of MainWindow setup ***")
logger.info("Applying theme configuration...")
ui_theme = CONF.get('appearance', 'ui_theme')
color_scheme = CONF.get('appearance', 'selected')
if ui_theme == 'dark':
dark_qss = qdarkstyle.load_stylesheet_from_environment()
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
elif ui_theme == 'automatic':
if not is_dark_font_color(color_scheme):
dark_qss = qdarkstyle.load_stylesheet_from_environment()
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
else:
css_path = CSS_PATH
else:
css_path = CSS_PATH
logger.info("Creating core actions...")
self.close_dockwidget_action = create_action(
self, icon=ima.icon('close_pane'),
text=_("Close current pane"),
triggered=self.close_current_dockwidget,
context=Qt.ApplicationShortcut
)
self.register_shortcut(self.close_dockwidget_action, "_",
"Close pane")
self.lock_interface_action = create_action(
self,
_("Lock panes and toolbars"),
toggled=self.toggle_lock,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.lock_interface_action, "_",
"Lock unlock panes")
# custom layouts shortcuts
self.toggle_next_layout_action = create_action(self,
_("Use next layout"),
triggered=self.toggle_next_layout,
context=Qt.ApplicationShortcut)
self.toggle_previous_layout_action = create_action(self,
_("Use previous layout"),
triggered=self.toggle_previous_layout,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.toggle_next_layout_action, "_",
"Use next layout")
self.register_shortcut(self.toggle_previous_layout_action, "_",
"Use previous layout")
# File switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_fileswitcher,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_sc_to_tip=True)
self.file_toolbar_actions = [self.file_switcher_action,
self.symbol_finder_action]
def create_edit_action(text, tr_text, icon):
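            # Build the callback name from the action text (e.g. "Select All"
            # -> "selectAll") and let global_callback dispatch it to the
            # currently focused widget.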
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions = [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action]
namespace = None
logger.info("Creating toolbars...")
# File menu/toolbar
self.file_menu = self.menuBar().addMenu(_("&File"))
self.file_toolbar = self.create_toolbar(_("File toolbar"),
"file_toolbar")
# Edit menu/toolbar
self.edit_menu = self.menuBar().addMenu(_("&Edit"))
self.edit_toolbar = self.create_toolbar(_("Edit toolbar"),
"edit_toolbar")
# Search menu/toolbar
self.search_menu = self.menuBar().addMenu(_("&Search"))
self.search_toolbar = self.create_toolbar(_("Search toolbar"),
"search_toolbar")
# Source menu/toolbar
self.source_menu = self.menuBar().addMenu(_("Sour&ce"))
self.source_toolbar = self.create_toolbar(_("Source toolbar"),
"source_toolbar")
# Run menu/toolbar
self.run_menu = self.menuBar().addMenu(_("&Run"))
self.run_toolbar = self.create_toolbar(_("Run toolbar"),
"run_toolbar")
# Debug menu/toolbar
self.debug_menu = self.menuBar().addMenu(_("&Debug"))
self.debug_toolbar = self.create_toolbar(_("Debug toolbar"),
"debug_toolbar")
# Consoles menu/toolbar
self.consoles_menu = self.menuBar().addMenu(_("C&onsoles"))
self.consoles_menu.aboutToShow.connect(
self.update_execution_state_kernel)
# Projects menu
self.projects_menu = self.menuBar().addMenu(_("&Projects"))
self.projects_menu.aboutToShow.connect(self.valid_project)
# Tools menu
self.tools_menu = self.menuBar().addMenu(_("&Tools"))
# View menu
self.view_menu = self.menuBar().addMenu(_("&View"))
# Help menu
self.help_menu = self.menuBar().addMenu(_("&Help"))
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
logger.info("Creating Tools menu...")
# Tools + External Tools
prefs_action = create_action(self, _("Pre&ferences"),
icon=ima.icon('configure'),
triggered=self.edit_preferences,
context=Qt.ApplicationShortcut)
self.register_shortcut(prefs_action, "_", "Preferences",
add_sc_to_tip=True)
spyder_path_action = create_action(self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.path_manager_callback,
tip=_("Python Path Manager"),
menurole=QAction.ApplicationSpecificRole)
reset_spyder_action = create_action(
self, _("Reset Spyder to factory defaults"),
triggered=self.reset_spyder)
self.tools_menu_actions = [prefs_action, spyder_path_action]
if WinUserEnvDialog is not None:
winenv_action = create_action(self,
_("Current user environment variables..."),
icon='win_env.png',
tip=_("Show and edit current user environment "
"variables in Windows registry "
"(i.e. for all sessions)"),
triggered=self.win_env)
self.tools_menu_actions.append(winenv_action)
self.tools_menu_actions += [MENU_SEPARATOR, reset_spyder_action]
# External Tools submenu
self.external_tools_menu = QMenu(_("External Tools"))
self.external_tools_menu_actions = []
# WinPython control panel
self.wp_action = create_action(self, _("WinPython control panel"),
icon=get_icon('winpython.svg'),
triggered=lambda:
programs.run_python_script('winpython', 'controlpanel'))
if os.name == 'nt' and is_module_installed('winpython'):
self.external_tools_menu_actions.append(self.wp_action)
# Qt-related tools
additact = []
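        # Look for the Qt4-suffixed tools first and fall back to the plain
        # executable names, keeping only the ones that are actually installed.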
for name in ("designer-qt4", "designer"):
qtdact = create_program_action(self, _("Qt Designer"), name)
if qtdact:
break
for name in ("linguist-qt4", "linguist"):
            qtlact = create_program_action(self, _("Qt Linguist"), name)
if qtlact:
break
args = ['-no-opengl'] if os.name == 'nt' else []
for act in (qtdact, qtlact):
if act:
additact.append(act)
if additact and is_module_installed('winpython'):
self.external_tools_menu_actions += [None] + additact
# Guidata and Sift
logger.info("Creating guidata and sift entries...")
gdgq_act = []
# Guidata and Guiqwt don't support PyQt5 yet and they fail
# with an AssertionError when imported using those bindings
# (see issue 2274)
try:
from guidata import configtools
from guidata import config # analysis:ignore
guidata_icon = configtools.get_icon('guidata.svg')
guidata_act = create_python_script_action(self,
_("guidata examples"), guidata_icon,
"guidata",
osp.join("tests", "__init__"))
gdgq_act += [guidata_act]
except:
pass
try:
from guidata import configtools
from guiqwt import config # analysis:ignore
guiqwt_icon = configtools.get_icon('guiqwt.svg')
guiqwt_act = create_python_script_action(self,
_("guiqwt examples"), guiqwt_icon, "guiqwt",
osp.join("tests", "__init__"))
if guiqwt_act:
gdgq_act += [guiqwt_act]
sift_icon = configtools.get_icon('sift.svg')
sift_act = create_python_script_action(self, _("Sift"),
sift_icon, "guiqwt", osp.join("tests", "sift"))
if sift_act:
gdgq_act += [sift_act]
except:
pass
if gdgq_act:
self.external_tools_menu_actions += [None] + gdgq_act
# Maximize current plugin
self.maximize_action = create_action(self, '',
triggered=self.maximize_dockwidget,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.maximize_action, "_", "Maximize pane")
self.__update_maximize_action()
# Fullscreen mode
self.fullscreen_action = create_action(self,
_("Fullscreen mode"),
triggered=self.toggle_fullscreen,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.fullscreen_action, "_",
"Fullscreen mode", add_sc_to_tip=True)
# Main toolbar
self.main_toolbar_actions = [self.maximize_action,
self.fullscreen_action,
None,
prefs_action, spyder_path_action]
self.main_toolbar = self.create_toolbar(_("Main toolbar"),
"main_toolbar")
# Internal console plugin
logger.info("Loading internal console...")
from spyder.plugins.console.plugin import Console
self.console = Console(self, namespace, exitfunc=self.closing,
profile=self.profile,
multithreaded=self.multithreaded,
message=_("Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"))
self.console.register_plugin()
# Language Server Protocol Client initialization
self.set_splash(_("Starting Language Server Protocol manager..."))
from spyder.plugins.editor.lsp.manager import LSPManager
self.lspmanager = LSPManager(self)
# Working directory plugin
logger.info("Loading working directory...")
from spyder.plugins.workingdirectory.plugin import WorkingDirectory
self.workingdirectory = WorkingDirectory(self, self.init_workdir, main=self)
self.workingdirectory.register_plugin()
self.toolbarslist.append(self.workingdirectory.toolbar)
# Help plugin
if CONF.get('help', 'enable'):
self.set_splash(_("Loading help..."))
from spyder.plugins.help.plugin import Help
self.help = Help(self, css_path=css_path)
self.help.register_plugin()
# Outline explorer widget
if CONF.get('outline_explorer', 'enable'):
self.set_splash(_("Loading outline explorer..."))
from spyder.plugins.outlineexplorer.plugin import OutlineExplorer
self.outlineexplorer = OutlineExplorer(self)
self.outlineexplorer.register_plugin()
# Editor plugin
self.set_splash(_("Loading editor..."))
from spyder.plugins.editor.plugin import Editor
self.editor = Editor(self)
self.editor.register_plugin()
# Start LSP client
self.set_splash(_("Launching LSP Client for Python..."))
self.lspmanager.start_client(language='python')
# Populating file menu entries
quit_action = create_action(self, _("&Quit"),
icon=ima.icon('exit'),
tip=_("Quit"),
triggered=self.console.quit,
context=Qt.ApplicationShortcut)
self.register_shortcut(quit_action, "_", "Quit")
restart_action = create_action(self, _("&Restart"),
icon=ima.icon('restart'),
tip=_("Restart"),
triggered=self.restart,
context=Qt.ApplicationShortcut)
self.register_shortcut(restart_action, "_", "Restart")
self.file_menu_actions += [self.file_switcher_action,
self.symbol_finder_action, None,
restart_action, quit_action]
self.set_splash("")
# Namespace browser
self.set_splash(_("Loading namespace browser..."))
from spyder.plugins.variableexplorer.plugin import VariableExplorer
self.variableexplorer = VariableExplorer(self)
self.variableexplorer.register_plugin()
# Figure browser
self.set_splash(_("Loading figure browser..."))
from spyder.plugins.plots.plugin import Plots
self.plots = Plots(self)
self.plots.register_plugin()
# History log widget
if CONF.get('historylog', 'enable'):
self.set_splash(_("Loading history plugin..."))
from spyder.plugins.history.plugin import HistoryLog
self.historylog = HistoryLog(self)
self.historylog.register_plugin()
# IPython console
self.set_splash(_("Loading IPython console..."))
from spyder.plugins.ipythonconsole.plugin import IPythonConsole
self.ipyconsole = IPythonConsole(self, css_path=css_path)
self.ipyconsole.register_plugin()
# Explorer
if CONF.get('explorer', 'enable'):
self.set_splash(_("Loading file explorer..."))
from spyder.plugins.explorer.plugin import Explorer
self.explorer = Explorer(self)
self.explorer.register_plugin()
# Online help widget
try: # Qt >= v4.4
from spyder.plugins.onlinehelp.plugin import OnlineHelp
except ImportError: # Qt < v4.4
OnlineHelp = None # analysis:ignore
if CONF.get('onlinehelp', 'enable') and OnlineHelp is not None:
self.set_splash(_("Loading online help..."))
self.onlinehelp = OnlineHelp(self)
self.onlinehelp.register_plugin()
# Project explorer widget
self.set_splash(_("Loading project explorer..."))
from spyder.plugins.projects.plugin import Projects
self.projects = Projects(self)
self.projects.register_plugin()
self.project_path = self.projects.get_pythonpath(at_start=True)
# Find in files
if CONF.get('find_in_files', 'enable'):
from spyder.plugins.findinfiles.plugin import FindInFiles
self.findinfiles = FindInFiles(self)
self.findinfiles.register_plugin()
# Load other plugins (former external plugins)
        # TODO: Use this loop to load all internal plugins and remove
        # duplicated code
other_plugins = ['breakpoints', 'profiler', 'pylint']
for plugin_name in other_plugins:
if CONF.get(plugin_name, 'enable'):
module = importlib.import_module(
'spyder.plugins.{}'.format(plugin_name))
plugin = module.PLUGIN_CLASS(self)
if plugin.check_compatibility()[0]:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
# Third-party plugins
self.set_splash(_("Loading third-party plugins..."))
for mod in get_spyderplugins_mods():
try:
plugin = mod.PLUGIN_CLASS(self)
if plugin.check_compatibility()[0]:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
self.set_splash(_("Setting up main window..."))
# Help menu
trouble_action = create_action(self,
_("Troubleshooting..."),
triggered=self.trouble_guide)
dep_action = create_action(self, _("Dependencies..."),
triggered=self.show_dependencies,
icon=ima.icon('advanced'))
report_action = create_action(self,
_("Report issue..."),
icon=ima.icon('bug'),
triggered=self.report_issue)
support_action = create_action(self,
_("Spyder support..."),
triggered=self.google_group)
self.check_updates_action = create_action(self,
_("Check for updates..."),
triggered=self.check_updates)
# Spyder documentation
spyder_doc = 'https://docs.spyder-ide.org/'
doc_action = create_action(self, _("Spyder documentation"),
icon=ima.icon('DialogHelpButton'),
triggered=lambda:
programs.start_file(spyder_doc))
self.register_shortcut(doc_action, "_",
"spyder documentation")
if self.help is not None:
tut_action = create_action(self, _("Spyder tutorial"),
triggered=self.help.show_tutorial)
else:
tut_action = None
shortcuts_action = create_action(self, _("Shortcuts Summary"),
shortcut="Meta+F1",
triggered=self.show_shortcuts_dialog)
#----- Tours
self.tour = tour.AnimatedTour(self)
self.tours_menu = QMenu(_("Interactive tours"), self)
self.tour_menu_actions = []
        # TODO: Only show intro tour for now. When we are close to finishing
        # 3.0, we will finish and show the other tour
self.tours_available = tour.get_tours(0)
for i, tour_available in enumerate(self.tours_available):
self.tours_available[i]['last'] = 0
tour_name = tour_available['name']
def trigger(i=i, self=self): # closure needed!
return lambda: self.show_tour(i)
temp_action = create_action(self, tour_name, tip="",
triggered=trigger())
self.tour_menu_actions += [temp_action]
self.tours_menu.addActions(self.tour_menu_actions)
self.help_menu_actions = [doc_action, tut_action, shortcuts_action,
self.tours_menu,
MENU_SEPARATOR, trouble_action,
report_action, dep_action,
self.check_updates_action, support_action,
MENU_SEPARATOR]
# Python documentation
if get_python_doc_path() is not None:
pydoc_act = create_action(self, _("Python documentation"),
triggered=lambda:
programs.start_file(get_python_doc_path()))
self.help_menu_actions.append(pydoc_act)
# IPython documentation
if self.help is not None:
ipython_menu = QMenu(_("IPython documentation"), self)
intro_action = create_action(self, _("Intro to IPython"),
triggered=self.ipyconsole.show_intro)
quickref_action = create_action(self, _("Quick reference"),
triggered=self.ipyconsole.show_quickref)
guiref_action = create_action(self, _("Console help"),
triggered=self.ipyconsole.show_guiref)
add_actions(ipython_menu, (intro_action, guiref_action,
quickref_action))
self.help_menu_actions.append(ipython_menu)
# Windows-only: documentation located in sys.prefix/Doc
ipm_actions = []
def add_ipm_action(text, path):
"""Add installed Python module doc action to help submenu"""
# QAction.triggered works differently for PySide and PyQt
path = file_uri(path)
if not API == 'pyside':
slot=lambda _checked, path=path: programs.start_file(path)
else:
slot=lambda path=path: programs.start_file(path)
action = create_action(self, text,
icon='%s.png' % osp.splitext(path)[1][1:],
triggered=slot)
ipm_actions.append(action)
sysdocpth = osp.join(sys.prefix, 'Doc')
if osp.isdir(sysdocpth): # exists on Windows, except frozen dist.
for docfn in os.listdir(sysdocpth):
                pt = r'([a-zA-Z\_]*)(doc)?(-dev)?(-ref)?(-user)?\.(chm|pdf)'
match = re.match(pt, docfn)
if match is not None:
pname = match.groups()[0]
if pname not in ('Python', ):
add_ipm_action(pname, osp.join(sysdocpth, docfn))
# Installed Python modules submenu (Windows only)
if ipm_actions:
pymods_menu = QMenu(_("Installed Python modules"), self)
add_actions(pymods_menu, ipm_actions)
self.help_menu_actions.append(pymods_menu)
# Online documentation
web_resources = QMenu(_("Online documentation"), self)
webres_actions = create_module_bookmark_actions(self,
self.BOOKMARKS)
webres_actions.insert(2, None)
webres_actions.insert(5, None)
webres_actions.insert(8, None)
add_actions(web_resources, webres_actions)
self.help_menu_actions.append(web_resources)
# Qt assistant link
if sys.platform.startswith('linux') and not PYQT5:
qta_exe = "assistant-qt4"
else:
qta_exe = "assistant"
qta_act = create_program_action(self, _("Qt documentation"),
qta_exe)
if qta_act:
self.help_menu_actions += [qta_act, None]
# About Spyder
about_action = create_action(self,
_("About %s...") % "Spyder",
icon=ima.icon('MessageBoxInformation'),
triggered=self.about)
self.help_menu_actions += [MENU_SEPARATOR, about_action]
# Status bar widgets
from spyder.widgets.status import MemoryStatus, CPUStatus
self.mem_status = MemoryStatus(self, status)
self.cpu_status = CPUStatus(self, status)
self.apply_statusbar_settings()
# ----- View
# View menu
self.plugins_menu = QMenu(_("Panes"), self)
self.toolbars_menu = QMenu(_("Toolbars"), self)
self.quick_layout_menu = QMenu(_("Window layouts"), self)
self.quick_layout_set_menu()
self.view_menu.addMenu(self.plugins_menu) # Panes
add_actions(self.view_menu, (self.lock_interface_action,
self.close_dockwidget_action,
self.maximize_action,
MENU_SEPARATOR))
self.show_toolbars_action = create_action(self,
_("Show toolbars"),
triggered=self.show_toolbars,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.show_toolbars_action, "_",
"Show toolbars")
self.view_menu.addMenu(self.toolbars_menu)
self.view_menu.addAction(self.show_toolbars_action)
add_actions(self.view_menu, (MENU_SEPARATOR,
self.quick_layout_menu,
self.toggle_previous_layout_action,
self.toggle_next_layout_action,
MENU_SEPARATOR,
self.fullscreen_action))
if set_attached_console_visible is not None:
cmd_act = create_action(self,
_("Attached console window (debugging)"),
toggled=set_attached_console_visible)
cmd_act.setChecked(is_attached_console_visible())
add_actions(self.view_menu, (MENU_SEPARATOR, cmd_act))
# Adding external tools action to "Tools" menu
if self.external_tools_menu_actions:
external_tools_act = create_action(self, _("External Tools"))
external_tools_act.setMenu(self.external_tools_menu)
self.tools_menu_actions += [None, external_tools_act]
# Filling out menu/toolbar entries:
add_actions(self.file_menu, self.file_menu_actions)
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
add_actions(self.consoles_menu, self.consoles_menu_actions)
add_actions(self.projects_menu, self.projects_menu_actions)
add_actions(self.tools_menu, self.tools_menu_actions)
add_actions(self.external_tools_menu,
self.external_tools_menu_actions)
add_actions(self.help_menu, self.help_menu_actions)
add_actions(self.main_toolbar, self.main_toolbar_actions)
add_actions(self.file_toolbar, self.file_toolbar_actions)
add_actions(self.edit_toolbar, self.edit_toolbar_actions)
add_actions(self.search_toolbar, self.search_toolbar_actions)
add_actions(self.source_toolbar, self.source_toolbar_actions)
add_actions(self.debug_toolbar, self.debug_toolbar_actions)
add_actions(self.run_toolbar, self.run_toolbar_actions)
# Apply all defined shortcuts (plugins + 3rd-party plugins)
self.apply_shortcuts()
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
# Window set-up
logger.info("Setting up window...")
self.setup_layout(default=False)
# Show and hide shortcuts in menus for Mac.
# This is a workaround because we can't disable shortcuts
# by setting context=Qt.WidgetShortcut there
if sys.platform == 'darwin':
for name in ['file', 'edit', 'search', 'source', 'run', 'debug',
'projects', 'tools', 'plugins']:
menu_object = getattr(self, name + '_menu')
menu_object.aboutToShow.connect(
lambda name=name: self.show_shortcuts(name))
menu_object.aboutToHide.connect(
lambda name=name: self.hide_shortcuts(name))
if self.splash is not None:
self.splash.hide()
# Enabling tear off for all menus except help menu
if CONF.get('main', 'tear_off_menus'):
for child in self.menuBar().children():
if isinstance(child, QMenu) and child != self.help_menu:
child.setTearOffEnabled(True)
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
child.aboutToShow.connect(self.update_search_menu)
except TypeError:
pass
logger.info("*** End of MainWindow setup ***")
self.is_starting_up = False
def post_visible_setup(self):
"""Actions to be performed only after the main window's `show` method
was triggered"""
self.restore_scrollbar_position.emit()
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
        # In MacOS X 10.7 our app is not displayed after being initialized (I
        # don't know why because this doesn't happen when it's started from
        # the terminal), so we need to resort to this hack to make it appear.
if running_in_mac_app():
idx = __file__.index(MAC_APP_NAME)
app_path = __file__[:idx]
subprocess.call(['open', app_path + MAC_APP_NAME])
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if (CONF.get('main', 'single_instance') and not self.new_instance
and self.open_files_server):
t = threading.Thread(target=self.start_open_files_server)
t.setDaemon(True)
t.start()
        # Connect the window to the signal emitted by the previous server
# when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Create Plugins and toolbars submenus
self.create_plugins_menu()
self.create_toolbars_menu()
# Update toolbar visibility status
self.toolbars_visible = CONF.get('main', 'toolbars_visible')
self.load_last_visible_toolbars()
# Update lock status
self.lock_interface_action.setChecked(self.interface_locked)
# Hide Internal Console so that people don't use it instead of
# the External or IPython ones
if self.console.dockwidget.isVisible() and DEV is None:
self.console.toggle_view_action.setChecked(False)
self.console.dockwidget.hide()
# Show Help and Consoles by default
plugins_to_show = [self.ipyconsole]
if self.help is not None:
plugins_to_show.append(self.help)
for plugin in plugins_to_show:
if plugin.dockwidget.isVisible():
plugin.dockwidget.raise_()
# Show history file if no console is visible
if not self.ipyconsole.isvisible:
self.historylog.add_history(get_conf_path('history.py'))
if self.open_project:
self.projects.open_project(self.open_project)
else:
# Load last project if a project was active when Spyder
# was closed
self.projects.reopen_last_project()
# If no project is active, load last session
if self.projects.get_active_project() is None:
self.editor.setup_open_files()
# Check for spyder updates
if DEV is None and CONF.get('main', 'check_updates_on_startup'):
self.give_updates_feedback = False
self.check_updates(startup=True)
# Show dialog with missing dependencies
self.report_missing_dependencies()
# Raise the menuBar to the top of the main window widget's stack
# (Fixes issue 3887)
self.menuBar().raise_()
self.is_setting_up = False
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if get_debug_level():
title += u" [DEBUG MODE %d]" % get_debug_level()
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title)
def report_missing_dependencies(self):
"""Show a QMessageBox with a list of missing hard dependencies"""
missing_deps = dependencies.missing_dependencies()
if missing_deps:
QMessageBox.critical(self, _('Error'),
_("<b>You have missing dependencies!</b>"
"<br><br><tt>%s</tt><br><br>"
"<b>Please install them to avoid this message.</b>"
"<br><br>"
"<i>Note</i>: Spyder could work without some of these "
"dependencies, however to have a smooth experience when "
"using Spyder we <i>strongly</i> recommend you to install "
"all the listed missing dependencies.<br><br>"
"Failing to install these dependencies might result in bugs. "
"Please be sure that any found bugs are not the direct "
"result of missing dependencies, prior to reporting a new "
"issue."
) % missing_deps, QMessageBox.Ok)
def load_window_settings(self, prefix, default=False, section='main'):
"""Load window layout settings from userconfig-based configuration
with *prefix*, under *section*
default: if True, do not restore inner layout"""
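        # When restoring defaults, read the values shipped with Spyder instead
        # of the user's saved configuration.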
get_func = CONF.get_default if default else CONF.get
window_size = get_func(section, prefix+'size')
prefs_dialog_size = get_func(section, prefix+'prefs_dialog_size')
if default:
hexstate = None
else:
hexstate = get_func(section, prefix+'state', None)
pos = get_func(section, prefix+'position')
        # It's necessary to verify that the window position value is valid
        # for the current screen. See issue 3748
width = pos[0]
height = pos[1]
screen_shape = QApplication.desktop().geometry()
current_width = screen_shape.width()
current_height = screen_shape.height()
if current_width < width or current_height < height:
pos = CONF.get_default(section, prefix+'position')
is_maximized = get_func(section, prefix+'is_maximized')
is_fullscreen = get_func(section, prefix+'is_fullscreen')
return hexstate, window_size, prefs_dialog_size, pos, is_maximized, \
is_fullscreen
def get_window_settings(self):
"""Return current window settings
        Symmetric to the 'set_window_settings' setter"""
window_size = (self.window_size.width(), self.window_size.height())
is_fullscreen = self.isFullScreen()
if is_fullscreen:
is_maximized = self.maximized_flag
else:
is_maximized = self.isMaximized()
pos = (self.window_position.x(), self.window_position.y())
prefs_dialog_size = (self.prefs_dialog_size.width(),
self.prefs_dialog_size.height())
hexstate = qbytearray_to_str(self.saveState())
return (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen)
def set_window_settings(self, hexstate, window_size, prefs_dialog_size,
pos, is_maximized, is_fullscreen):
"""Set window settings
        Symmetric to the 'get_window_settings' accessor"""
self.setUpdatesEnabled(False)
self.window_size = QSize(window_size[0], window_size[1]) # width,height
self.prefs_dialog_size = QSize(prefs_dialog_size[0],
prefs_dialog_size[1]) # width,height
self.window_position = QPoint(pos[0], pos[1]) # x,y
self.setWindowState(Qt.WindowNoState)
self.resize(self.window_size)
self.move(self.window_position)
# Window layout
if hexstate:
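            # The window state is stored as a hex string, so decode it back
            # into a QByteArray before passing it to restoreState().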
self.restoreState( QByteArray().fromHex(
str(hexstate).encode('utf-8')) )
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow.
for widget in self.children():
if isinstance(widget, QDockWidget) and widget.isFloating():
self.floating_dockwidgets.append(widget)
widget.setFloating(False)
# Is fullscreen?
if is_fullscreen:
self.setWindowState(Qt.WindowFullScreen)
self.__update_fullscreen_action()
# Is maximized?
if is_fullscreen:
self.maximized_flag = is_maximized
elif is_maximized:
self.setWindowState(Qt.WindowMaximized)
self.setUpdatesEnabled(True)
def save_current_window_settings(self, prefix, section='main',
none_state=False):
"""Save current window settings with *prefix* in
the userconfig-based configuration, under *section*"""
win_size = self.window_size
prefs_size = self.prefs_dialog_size
CONF.set(section, prefix+'size', (win_size.width(), win_size.height()))
CONF.set(section, prefix+'prefs_dialog_size',
(prefs_size.width(), prefs_size.height()))
CONF.set(section, prefix+'is_maximized', self.isMaximized())
CONF.set(section, prefix+'is_fullscreen', self.isFullScreen())
pos = self.window_position
CONF.set(section, prefix+'position', (pos.x(), pos.y()))
        self.maximize_dockwidget(restore=True)  # Restore non-maximized layout
if none_state:
CONF.set(section, prefix + 'state', None)
else:
qba = self.saveState()
CONF.set(section, prefix + 'state', qbytearray_to_str(qba))
CONF.set(section, prefix+'statusbar',
not self.statusBar().isHidden())
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets"""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
# --- Layouts
def setup_layout(self, default=False):
"""Setup window layout"""
prefix = 'window' + '/'
settings = self.load_window_settings(prefix, default)
hexstate = settings[0]
self.first_spyder_run = False
if hexstate is None:
# First Spyder execution:
self.setWindowState(Qt.WindowMaximized)
self.first_spyder_run = True
self.setup_default_layouts('default', settings)
# Now that the initial setup is done, copy the window settings,
# except for the hexstate in the quick layouts sections for the
# default layouts.
# Order and name of the default layouts is found in config.py
section = 'quick_layouts'
get_func = CONF.get_default if default else CONF.get
order = get_func(section, 'order')
# restore the original defaults if reset layouts is called
if default:
CONF.set(section, 'active', order)
CONF.set(section, 'order', order)
CONF.set(section, 'names', order)
            for index, name in enumerate(order):
prefix = 'layout_{0}/'.format(index)
self.save_current_window_settings(prefix, section,
none_state=True)
# store the initial layout as the default in spyder
prefix = 'layout_default/'
section = 'quick_layouts'
self.save_current_window_settings(prefix, section, none_state=True)
self.current_quick_layout = 'default'
# Regenerate menu
self.quick_layout_set_menu()
self.set_window_settings(*settings)
for plugin in (self.widgetlist + self.thirdparty_plugins):
try:
plugin.initialize_plugin_in_mainwindow_layout()
except Exception as error:
print("%s: %s" % (plugin, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
def setup_default_layouts(self, index, settings):
"""Setup default layouts when run for the first time."""
self.setUpdatesEnabled(False)
first_spyder_run = bool(self.first_spyder_run) # Store copy
if first_spyder_run:
self.set_window_settings(*settings)
else:
if self.last_plugin:
if self.last_plugin.ismaximized:
self.maximize_dockwidget(restore=True)
if not (self.isMaximized() or self.maximized_flag):
self.showMaximized()
min_width = self.minimumWidth()
max_width = self.maximumWidth()
base_width = self.width()
self.setFixedWidth(base_width)
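            # The width is kept fixed while the dockwidgets are rearranged;
            # the original minimum/maximum widths are restored at the end of
            # this method.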
# IMPORTANT: order has to be the same as defined in the config file
MATLAB, RSTUDIO, VERTICAL, HORIZONTAL = range(self.DEFAULT_LAYOUTS)
# Define widgets locally
editor = self.editor
console_ipy = self.ipyconsole
console_int = self.console
outline = self.outlineexplorer
explorer_project = self.projects
explorer_file = self.explorer
explorer_variable = self.variableexplorer
plots = self.plots
history = self.historylog
finder = self.findinfiles
help_plugin = self.help
helper = self.onlinehelp
plugins = self.thirdparty_plugins
# Stored for tests
global_hidden_widgets = [finder, console_int, explorer_project,
helper] + plugins
global_hidden_toolbars = [self.source_toolbar, self.edit_toolbar,
self.search_toolbar]
# Layout definition
# --------------------------------------------------------------------
# Layouts are organized by columns, each column is organized by rows.
        # Widths have to add up to 1.0 (except if hidden); the heights in each
        # column have to add up to 1.0 as well
# Spyder Default Initial Layout
s_layout = {
'widgets': [
# Column 0
[[explorer_project]],
# Column 1
[[editor]],
# Column 2
[[outline]],
# Column 3
[[help_plugin, explorer_variable, plots, # Row 0
helper, explorer_file, finder] + plugins,
[console_int, console_ipy, history]] # Row 1
],
'width fraction': [0.05, # Column 0 width
0.55, # Column 1 width
0.05, # Column 2 width
0.45], # Column 3 width
'height fraction': [[1.0], # Column 0, row heights
[1.0], # Column 1, row heights
[1.0], # Column 2, row heights
[0.46, 0.54]], # Column 3, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': [],
}
# RStudio
r_layout = {
'widgets': [
# column 0
[[editor], # Row 0
[console_ipy, console_int]], # Row 1
# column 1
[[explorer_variable, plots, history, # Row 0
outline, finder] + plugins,
[explorer_file, explorer_project, # Row 1
help_plugin, helper]]
],
'width fraction': [0.55, # Column 0 width
0.45], # Column 1 width
'height fraction': [[0.55, 0.45], # Column 0, row heights
[0.55, 0.45]], # Column 1, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': [],
}
# Matlab
m_layout = {
'widgets': [
# column 0
[[explorer_file, explorer_project],
[outline]],
# column 1
[[editor],
[console_ipy, console_int]],
# column 2
[[explorer_variable, plots, finder] + plugins,
[history, help_plugin, helper]]
],
'width fraction': [0.10, # Column 0 width
0.45, # Column 1 width
0.45], # Column 2 width
'height fraction': [[0.55, 0.45], # Column 0, row heights
[0.55, 0.45], # Column 1, row heights
[0.55, 0.45]], # Column 2, row heights
'hidden widgets': global_hidden_widgets,
'hidden toolbars': [],
}
# Vertically split
v_layout = {
'widgets': [
# column 0
[[editor], # Row 0
[console_ipy, console_int, explorer_file, # Row 1
explorer_project, help_plugin, explorer_variable, plots,
history, outline, finder, helper] + plugins]
],
'width fraction': [1.0], # Column 0 width
'height fraction': [[0.55, 0.45]], # Column 0, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': [],
}
# Horizontally split
h_layout = {
'widgets': [
# column 0
[[editor]], # Row 0
# column 1
[[console_ipy, console_int, explorer_file, # Row 0
explorer_project, help_plugin, explorer_variable, plots,
history, outline, finder, helper] + plugins]
],
'width fraction': [0.55, # Column 0 width
0.45], # Column 1 width
'height fraction': [[1.0], # Column 0, row heights
[1.0]], # Column 1, row heights
'hidden widgets': [outline] + global_hidden_widgets,
'hidden toolbars': []
}
# Layout selection
layouts = {
'default': s_layout,
RSTUDIO: r_layout,
MATLAB: m_layout,
VERTICAL: v_layout,
HORIZONTAL: h_layout,
}
layout = layouts[index]
# Remove None from widgets layout
widgets_layout = layout['widgets']
widgets_layout_clean = []
for column in widgets_layout:
clean_col = []
for row in column:
clean_row = [w for w in row if w is not None]
if clean_row:
clean_col.append(clean_row)
if clean_col:
widgets_layout_clean.append(clean_col)
# Flatten widgets list
widgets = []
for column in widgets_layout_clean:
for row in column:
for widget in row:
widgets.append(widget)
# Make every widget visible
for widget in widgets:
widget.toggle_view(True)
widget.toggle_view_action.setChecked(True)
# We use both directions to ensure proper update when moving from
# 'Horizontal Split' to 'Spyder Default'
# This also seems to help on random cases where the display seems
# 'empty'
for direction in (Qt.Vertical, Qt.Horizontal):
# Arrange the widgets in one direction
for idx in range(len(widgets) - 1):
first, second = widgets[idx], widgets[idx+1]
if first is not None and second is not None:
self.splitDockWidget(first.dockwidget, second.dockwidget,
direction)
# Arrange the widgets in the other direction
for column in widgets_layout_clean:
for idx in range(len(column) - 1):
first_row, second_row = column[idx], column[idx+1]
self.splitDockWidget(first_row[0].dockwidget,
second_row[0].dockwidget,
Qt.Vertical)
# Tabify
for column in widgets_layout_clean:
for row in column:
for idx in range(len(row) - 1):
first, second = row[idx], row[idx+1]
self.tabify_plugins(first, second)
# Raise front widget per row
row[0].dockwidget.show()
row[0].dockwidget.raise_()
# Set dockwidget widths
width_fractions = layout['width fraction']
if len(width_fractions) > 1:
_widgets = [col[0][0].dockwidget for col in widgets_layout]
self.resizeDocks(_widgets, width_fractions, Qt.Horizontal)
# Set dockwidget heights
height_fractions = layout['height fraction']
for idx, column in enumerate(widgets_layout_clean):
if len(column) > 1:
_widgets = [row[0].dockwidget for row in column]
self.resizeDocks(_widgets, height_fractions[idx], Qt.Vertical)
# Hide toolbars
hidden_toolbars = global_hidden_toolbars + layout['hidden toolbars']
for toolbar in hidden_toolbars:
if toolbar is not None:
toolbar.close()
# Hide widgets
hidden_widgets = layout['hidden widgets']
for widget in hidden_widgets:
if widget is not None:
widget.dockwidget.close()
if first_spyder_run:
self.first_spyder_run = False
else:
self.setMinimumWidth(min_width)
self.setMaximumWidth(max_width)
if not (self.isMaximized() or self.maximized_flag):
self.showMaximized()
self.setUpdatesEnabled(True)
self.sig_layout_setup_ready.emit(layout)
return layout
@Slot()
def toggle_previous_layout(self):
""" """
self.toggle_layout('previous')
@Slot()
def toggle_next_layout(self):
""" """
self.toggle_layout('next')
def toggle_layout(self, direction='next'):
""" """
get = CONF.get
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
if len(active) == 0:
return
layout_index = ['default']
for name in order:
if name in active:
layout_index.append(names.index(name))
current_layout = self.current_quick_layout
dic = {'next': 1, 'previous': -1}
if current_layout is None:
# Start from default
current_layout = 'default'
if current_layout in layout_index:
current_index = layout_index.index(current_layout)
else:
current_index = 0
new_index = (current_index + dic[direction]) % len(layout_index)
self.quick_layout_switch(layout_index[new_index])
def quick_layout_set_menu(self):
""" """
get = CONF.get
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
ql_actions = []
ql_actions = [create_action(self, _('Spyder Default Layout'),
triggered=lambda:
self.quick_layout_switch('default'))]
for name in order:
if name in active:
index = names.index(name)
# closure required so lambda works with the default parameter
def trigger(i=index, self=self):
return lambda: self.quick_layout_switch(i)
qli_act = create_action(self, name, triggered=trigger())
# closure above replaces the following which stopped working
# qli_act = create_action(self, name, triggered=lambda i=index:
# self.quick_layout_switch(i)
ql_actions += [qli_act]
self.ql_save = create_action(self, _("Save current layout"),
triggered=lambda:
self.quick_layout_save(),
context=Qt.ApplicationShortcut)
self.ql_preferences = create_action(self, _("Layout preferences"),
triggered=lambda:
self.quick_layout_settings(),
context=Qt.ApplicationShortcut)
self.ql_reset = create_action(self, _('Reset to spyder default'),
triggered=self.reset_window_layout)
self.register_shortcut(self.ql_save, "_", "Save current layout")
self.register_shortcut(self.ql_preferences, "_", "Layout preferences")
ql_actions += [None]
ql_actions += [self.ql_save, self.ql_preferences, self.ql_reset]
self.quick_layout_menu.clear()
add_actions(self.quick_layout_menu, ql_actions)
if len(order) == 0:
self.ql_preferences.setEnabled(False)
else:
self.ql_preferences.setEnabled(True)
@Slot()
def reset_window_layout(self):
"""Reset window layout to default"""
answer = QMessageBox.warning(self, _("Warning"),
_("Window layout will be reset to default settings: "
"this affects window position, size and dockwidgets.\n"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.setup_layout(default=True)
def quick_layout_save(self):
"""Save layout dialog"""
get = CONF.get
set_ = CONF.set
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
dlg = self.dialog_layout_save(self, names)
if dlg.exec_():
name = dlg.combo_box.currentText()
if name in names:
answer = QMessageBox.warning(self, _("Warning"),
_("Layout <b>%s</b> will be \
overwritten. Do you want to \
continue?") % name,
QMessageBox.Yes | QMessageBox.No)
index = order.index(name)
else:
answer = True
if None in names:
index = names.index(None)
names[index] = name
else:
index = len(names)
names.append(name)
order.append(name)
# Always make a new layout active, even if it overwrites an inactive
# layout
if name not in active:
active.append(name)
if answer:
self.save_current_window_settings('layout_{}/'.format(index),
section='quick_layouts')
set_('quick_layouts', 'names', names)
set_('quick_layouts', 'order', order)
set_('quick_layouts', 'active', active)
self.quick_layout_set_menu()
def quick_layout_settings(self):
"""Layout settings dialog"""
get = CONF.get
set_ = CONF.set
section = 'quick_layouts'
names = get(section, 'names')
order = get(section, 'order')
active = get(section, 'active')
dlg = self.dialog_layout_settings(self, names, order, active)
if dlg.exec_():
set_(section, 'names', dlg.names)
set_(section, 'order', dlg.order)
set_(section, 'active', dlg.active)
self.quick_layout_set_menu()
def quick_layout_switch(self, index):
"""Switch to quick layout number *index*"""
section = 'quick_layouts'
try:
settings = self.load_window_settings('layout_{}/'.format(index),
section=section)
(hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen) = settings
# The default layouts will always be regenerated unless there was
# an overwrite, either by rewriting with same name, or by deleting
# and then creating a new one
if hexstate is None:
# The value for hexstate shouldn't be None for a custom saved
# layout (ie, where the index is greater than the number of
# defaults). See issue 6202.
if index != 'default' and index >= self.DEFAULT_LAYOUTS:
QMessageBox.critical(
self, _("Warning"),
_("Error opening the custom layout. Please close"
" Spyder and try again. If the issue persists,"
" then you must use 'Reset to Spyder default' "
"from the layout menu."))
return
self.setup_default_layouts(index, settings)
except cp.NoOptionError:
QMessageBox.critical(self, _("Warning"),
_("Quick switch layout #%s has not yet "
"been defined.") % str(index))
return
# TODO: is there any real use in calling the previous layout
# setting?
# self.previous_layout_settings = self.get_window_settings()
self.set_window_settings(*settings)
self.current_quick_layout = index
# make sure the flags are correctly set for visible panes
for plugin in (self.widgetlist + self.thirdparty_plugins):
action = plugin.toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
# --- Show/Hide toolbars
def _update_show_toolbars_action(self):
"""Update the text displayed in the menu entry."""
if self.toolbars_visible:
text = _("Hide toolbars")
tip = _("Hide toolbars")
else:
text = _("Show toolbars")
tip = _("Show toolbars")
self.show_toolbars_action.setText(text)
self.show_toolbars_action.setToolTip(tip)
def save_visible_toolbars(self):
"""Saves the name of the visible toolbars in the .ini file."""
toolbars = []
for toolbar in self.visible_toolbars:
toolbars.append(toolbar.objectName())
CONF.set('main', 'last_visible_toolbars', toolbars)
def get_visible_toolbars(self):
"""Collects the visible toolbars."""
toolbars = []
for toolbar in self.toolbarslist:
if toolbar.toggleViewAction().isChecked():
toolbars.append(toolbar)
self.visible_toolbars = toolbars
def load_last_visible_toolbars(self):
"""Loads the last visible toolbars from the .ini file."""
toolbars_names = CONF.get('main', 'last_visible_toolbars', default=[])
if toolbars_names:
dic = {}
for toolbar in self.toolbarslist:
dic[toolbar.objectName()] = toolbar
toolbars = []
for name in toolbars_names:
if name in dic:
toolbars.append(dic[name])
self.visible_toolbars = toolbars
else:
self.get_visible_toolbars()
self._update_show_toolbars_action()
@Slot()
def show_toolbars(self):
"""Show/Hides toolbars."""
value = not self.toolbars_visible
CONF.set('main', 'toolbars_visible', value)
if value:
self.save_visible_toolbars()
else:
self.get_visible_toolbars()
for toolbar in self.visible_toolbars:
toolbar.toggleViewAction().setChecked(value)
toolbar.setVisible(value)
self.toolbars_visible = value
self._update_show_toolbars_action()
# --- Other
def update_execution_state_kernel(self):
"""Handle execution state of the current console."""
try:
self.ipyconsole.update_execution_state_kernel()
except AttributeError:
return
def valid_project(self):
"""Handle an invalid active project."""
try:
path = self.projects.get_active_project_path()
except AttributeError:
return
if bool(path):
if not self.projects.is_valid_project(path):
if path:
QMessageBox.critical(
self,
_('Error'),
_("<b>{}</b> is no longer a valid Spyder project! "
"Since it is the current active project, it will "
"be closed automatically.").format(path))
self.projects.close_project()
def free_memory(self):
"""Free memory after event."""
gc.collect()
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu"""
for element in getattr(self, menu + '_menu_actions'):
if element and isinstance(element, QAction):
if element._shown_shortcut is not None:
element.setShortcut(element._shown_shortcut)
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu"""
for element in getattr(self, menu + '_menu_actions'):
if element and isinstance(element, QAction):
if element._shown_shortcut is not None:
element.setShortcut(QKeySequence())
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
from spyder.plugins.editor.widgets.editor import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
widget = QApplication.focusWidget()
textedit_properties = None
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
console = isinstance(widget, ControlWidget)
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Editor has focus and there is no file opened in it
if not console and not_readonly and not self.editor.is_file_opened():
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
self.undo_action.setEnabled( readwrite_editor \
and widget.document().isUndoAvailable() )
self.redo_action.setEnabled( readwrite_editor \
and widget.document().isRedoAvailable() )
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
# Disabling all actions except the last one
# (which is Find in files) to begin with
for child in self.search_menu.actions()[:-1]:
child.setEnabled(False)
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Find actions only trigger an effect in the Editor
if not console:
for action in self.search_menu.actions():
try:
action.setEnabled(True)
except RuntimeError:
pass
# Disable the replace action for read-only files
self.search_menu_actions[3].setEnabled(readwrite_editor)
def create_plugins_menu(self):
order = ['editor', 'ipython_console', 'variable_explorer',
'help', 'plots', None, 'explorer', 'outline_explorer',
'project_explorer', 'find_in_files', None, 'historylog',
'profiler', 'breakpoints', 'pylint', None,
'onlinehelp', 'internal_console', None]
for plugin in self.widgetlist:
action = plugin.toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
try:
name = plugin.CONF_SECTION
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
actions = order[:]
for action in order:
if type(action) is str:
actions.remove(action)
self.plugins_menu_actions = actions
add_actions(self.plugins_menu, actions)
def create_toolbars_menu(self):
order = ['file_toolbar', 'run_toolbar', 'debug_toolbar',
'main_toolbar', 'Global working directory', None,
'search_toolbar', 'edit_toolbar', 'source_toolbar']
for toolbar in self.toolbarslist:
action = toolbar.toggleViewAction()
name = toolbar.objectName()
try:
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
add_actions(self.toolbars_menu, order)
def createPopupMenu(self):
menu = QMenu('', self)
actions = self.help_menu_actions[:3] + \
[None, self.help_menu_actions[-1]]
add_actions(menu, actions)
return menu
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
logger.info(message)
self.splash.show()
self.splash.showMessage(message, Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute, QColor(Qt.white))
QApplication.processEvents()
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
try:
for plugin in (self.widgetlist + self.thirdparty_plugins):
if plugin.isAncestorOf(self.last_focused_widget):
plugin.visibility_changed(True)
QMainWindow.hideEvent(self, event)
except RuntimeError:
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
prefix = 'window' + '/'
self.save_current_window_settings(prefix)
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
for plugin in (self.widgetlist + self.thirdparty_plugins):
plugin.close_window()
if not plugin.closing_plugin(cancelable):
return False
self.dialog_manager.close_all()
if self.toolbars_visible:
self.save_visible_toolbars()
self.lspmanager.shutdown()
self.already_closed = True
return True
def add_dockwidget(self, child):
"""Add QDockWidget and toggleViewAction"""
dockwidget, location = child.create_dockwidget()
if CONF.get('main', 'vertical_dockwidget_titlebars'):
dockwidget.setFeatures(dockwidget.features()|
QDockWidget.DockWidgetVerticalTitleBar)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(child)
@Slot()
def close_current_dockwidget(self):
widget = QApplication.focusWidget()
for plugin in (self.widgetlist + self.thirdparty_plugins):
if plugin.isAncestorOf(widget):
plugin.toggle_view_action.setChecked(False)
break
def toggle_lock(self, value):
"""Lock/Unlock dockwidgets and toolbars"""
self.interface_locked = value
CONF.set('main', 'panes_locked', value)
# Apply lock to panes
for plugin in (self.widgetlist + self.thirdparty_plugins):
if self.interface_locked:
if plugin.dockwidget.isFloating():
plugin.dockwidget.setFloating(False)
plugin.dockwidget.setTitleBarWidget(QWidget())
else:
plugin.dockwidget.set_title_bar()
# Apply lock to toolbars
for toolbar in self.toolbarslist:
if self.interface_locked:
toolbar.setMovable(False)
else:
toolbar.setMovable(True)
def __update_maximize_action(self):
if self.state_before_maximizing is None:
text = _("Maximize current pane")
tip = _("Maximize current pane")
icon = ima.icon('maximize')
else:
text = _("Restore current pane")
tip = _("Restore pane to its original size")
icon = ima.icon('unmaximize')
self.maximize_action.setText(text)
self.maximize_action.setIcon(icon)
self.maximize_action.setToolTip(tip)
@Slot()
@Slot(bool)
def maximize_dockwidget(self, restore=False):
"""Shortcut: Ctrl+Alt+Shift+M
First call: maximize current dockwidget
Second call (or restore=True): restore original window layout"""
if self.state_before_maximizing is None:
if restore:
return
# Select plugin to maximize
self.state_before_maximizing = self.saveState()
focus_widget = QApplication.focusWidget()
for plugin in (self.widgetlist + self.thirdparty_plugins):
plugin.dockwidget.hide()
if plugin.isAncestorOf(focus_widget):
self.last_plugin = plugin
# Only plugins that have a dockwidget are part of widgetlist,
# so last_plugin can be None after the above "for" loop.
# For example, this happens if, after Spyder has started, focus
# is set to the Working directory toolbar (which doesn't have
# a dockwidget) and then you press the Maximize button
if self.last_plugin is None:
# Using the Editor as default plugin to maximize
self.last_plugin = self.editor
# Maximize last_plugin
self.last_plugin.dockwidget.toggleViewAction().setDisabled(True)
self.setCentralWidget(self.last_plugin)
self.last_plugin.ismaximized = True
# Workaround to solve an issue with editor's outline explorer:
# (otherwise the whole plugin is hidden and so is the outline explorer
# and the latter won't be refreshed if not visible)
self.last_plugin.show()
self.last_plugin.visibility_changed(True)
if self.last_plugin is self.editor:
# Automatically show the outline if the editor was maximized:
self.addDockWidget(Qt.RightDockWidgetArea,
self.outlineexplorer.dockwidget)
self.outlineexplorer.dockwidget.show()
else:
# Restore original layout (before maximizing current dockwidget)
self.last_plugin.dockwidget.setWidget(self.last_plugin)
self.last_plugin.dockwidget.toggleViewAction().setEnabled(True)
self.setCentralWidget(None)
self.last_plugin.ismaximized = False
self.restoreState(self.state_before_maximizing)
self.state_before_maximizing = None
self.last_plugin.get_focus_widget().setFocus()
self.__update_maximize_action()
def __update_fullscreen_action(self):
if self.fullscreen_flag:
icon = ima.icon('window_nofullscreen')
else:
icon = ima.icon('window_fullscreen')
if is_text_string(icon):
icon = get_icon(icon)
self.fullscreen_action.setIcon(icon)
@Slot()
def toggle_fullscreen(self):
if self.fullscreen_flag:
self.fullscreen_flag = False
if os.name == 'nt':
self.setWindowFlags(
self.windowFlags()
^ (Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint))
self.setGeometry(self.saved_normal_geometry)
self.showNormal()
if self.maximized_flag:
self.showMaximized()
else:
self.maximized_flag = self.isMaximized()
self.fullscreen_flag = True
self.saved_normal_geometry = self.normalGeometry()
if os.name == 'nt':
# Due to limitations of the Windows DWM, compositing is not
# handled correctly for OpenGL based windows when going into
# full screen mode, so we need to use this workaround.
# See Issue #4291.
self.setWindowFlags(self.windowFlags()
| Qt.FramelessWindowHint
| Qt.WindowStaysOnTopHint)
screen_number = QDesktopWidget().screenNumber(self)
if screen_number < 0:
screen_number = 0
r = QApplication.desktop().screenGeometry(screen_number)
self.setGeometry(
r.left() - 1, r.top() - 1, r.width() + 2, r.height() + 2)
self.showNormal()
else:
self.showFullScreen()
self.__update_fullscreen_action()
def add_to_toolbar(self, toolbar, widget):
"""Add widget actions to toolbar"""
actions = widget.toolbar_actions
if actions is not None:
add_actions(toolbar, actions)
@Slot()
def about(self):
"""Create About Spyder dialog with general information."""
versions = get_versions()
# Show Git revision for development version
revlink = ''
if versions['revision']:
rev = versions['revision']
revlink = " (<a href='https://github.com/spyder-ide/spyder/"\
"commit/%s'>Commit: %s</a>)" % (rev, rev)
msgBox = QMessageBox(self)
msgBox.setText(
"""
<b>Spyder {spyder_ver}</b> {revision}
<br>The Scientific Python Development Environment |
<a href="{website_url}">Spyder-IDE.org</a>
<br>Copyright © 2009-2019 Spyder Project Contributors and
<a href="{github_url}/blob/master/AUTHORS.txt">others</a>
<br>Distributed under the terms of the
<a href="{github_url}/blob/master/LICENSE.txt">MIT License</a>.
<p>Created by Pierre Raybaut; current maintainer is Carlos Cordoba.
<br>Developed by the
<a href="{github_url}/graphs/contributors">international
Spyder community</a>.
<br>Many thanks to all the Spyder beta testers and dedicated users.
<p>For help with Spyder errors and crashes, please read our
<a href="{trouble_url}">Troubleshooting Guide</a>, and for bug
reports and feature requests, visit our
<a href="{github_url}">Github site</a>.
For project discussion, see our
<a href="{forum_url}">Google Group</a>.
<p>This project is part of a larger effort to promote and
facilitate the use of Python for scientific and engineering
software development.
The popular Python distributions
<a href="https://www.anaconda.com/download/">Anaconda</a> and
<a href="https://winpython.github.io/">WinPython</a>
also contribute to this plan.
<p>Python {python_ver} {bitness}-bit | Qt {qt_ver} |
{qt_api} {qt_api_ver} | {os_name} {os_ver}
<small><p>Certain source files under other compatible permissive
licenses and/or originally by other authors.
Spyder 3 theme icons derived from
<a href="https://fontawesome.com/">Font Awesome</a> 4.7
(© 2016 David Gandy; SIL OFL 1.1) and
<a href="http://materialdesignicons.com/">Material Design</a>
(© 2014 Austin Andrews; SIL OFL 1.1).
Most Spyder 2 theme icons sourced from the
<a href="https://www.everaldo.com">Crystal Project iconset</a>
(© 2006-2007 Everaldo Coelho; LGPL 2.1+).
Other icons from
<a href="http://p.yusukekamiyamane.com/">Yusuke Kamiyamane</a>
(© 2013 Yusuke Kamiyamane; CC-BY 3.0),
the <a href="http://www.famfamfam.com/lab/icons/silk/">FamFamFam
Silk icon set</a> 1.3 (© 2006 Mark James; CC-BY 2.5), and
the <a href="https://www.kde.org/">KDE Oxygen icons</a>
(© 2007 KDE Artists; LGPL 3.0+).</small>
<p>See the <a href="{github_url}/blob/master/NOTICE.txt">NOTICE</a>
file for full legal information.
"""
.format(spyder_ver=versions['spyder'],
revision=revlink,
website_url=__website_url__,
github_url=__project_url__,
trouble_url=__trouble_url__,
forum_url=__forum_url__,
python_ver=versions['python'],
bitness=versions['bitness'],
qt_ver=versions['qt'],
qt_api=versions['qt_api'],
qt_api_ver=versions['qt_api_ver'],
os_name=versions['system'],
os_ver=versions['release'])
)
msgBox.setWindowTitle(_("About %s") % "Spyder")
msgBox.setStandardButtons(QMessageBox.Ok)
from spyder.config.gui import is_dark_interface
if PYQT5:
if is_dark_interface():
icon_filename = "spyder.svg"
else:
icon_filename = "spyder_dark.svg"
else:
if is_dark_interface():
icon_filename = "spyder.png"
else:
icon_filename = "spyder_dark.png"
app_icon = QIcon(get_image_path(icon_filename))
msgBox.setIconPixmap(app_icon.pixmap(QSize(64, 64)))
msgBox.setTextInteractionFlags(
Qt.LinksAccessibleByMouse | Qt.TextSelectableByMouse)
msgBox.exec_()
@Slot()
def show_dependencies(self):
"""Show Spyder's Dependencies dialog box"""
from spyder.widgets.dependencies import DependenciesDialog
dlg = DependenciesDialog(self)
dlg.set_data(dependencies.DEPENDENCIES)
dlg.exec_()
def render_issue(self, description='', traceback=''):
"""Render issue before sending it to Github"""
# Get component versions
versions = get_versions()
# Get git revision for development version
revision = ''
if versions['revision']:
revision = versions['revision']
# Make a description header in case no description is supplied
if not description:
description = "### What steps reproduce the problem?"
# Make error section from traceback and add appropriate reminder header
if traceback:
error_section = ("### Traceback\n"
"```python-traceback\n"
"{}\n"
"```".format(traceback))
else:
error_section = ''
issue_template = """\
## Description
{description}
{error_section}
## Versions
* Spyder version: {spyder_version} {commit}
* Python version: {python_version}
* Qt version: {qt_version}
* {qt_api_name} version: {qt_api_version}
* Operating System: {os_name} {os_version}
### Dependencies
```
{dependencies}
```
""".format(description=description,
error_section=error_section,
spyder_version=versions['spyder'],
commit=revision,
python_version=versions['python'],
qt_version=versions['qt'],
qt_api_name=versions['qt_api'],
qt_api_version=versions['qt_api_ver'],
os_name=versions['system'],
os_version=versions['release'],
dependencies=dependencies.status())
return issue_template
@Slot()
def report_issue(self, body=None, title=None, open_webpage=False):
"""Report a Spyder issue to github, generating body text if needed."""
if body is None:
from spyder.widgets.reporterror import SpyderErrorDialog
report_dlg = SpyderErrorDialog(self, is_report=True)
report_dlg.show()
else:
if open_webpage:
if PY3:
from urllib.parse import quote
else:
from urllib import quote # analysis:ignore
from qtpy.QtCore import QUrlQuery
url = QUrl(__project_url__ + '/issues/new')
query = QUrlQuery()
query.addQueryItem("body", quote(body))
if title:
query.addQueryItem("title", quote(title))
url.setQuery(query)
QDesktopServices.openUrl(url)
@Slot()
def trouble_guide(self):
"""Open Spyder troubleshooting guide in a web browser."""
url = QUrl(__trouble_url__)
QDesktopServices.openUrl(url)
@Slot()
def google_group(self):
"""Open Spyder Google Group in a web browser."""
url = QUrl(__forum_url__)
QDesktopServices.openUrl(url)
@Slot()
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
from spyder.plugins.editor.widgets.editor import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
getattr(widget, callback)()
else:
return
def redirect_internalshell_stdio(self, state):
if state:
self.console.shell.interpreter.redirect_stds()
else:
self.console.shell.interpreter.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def execute_in_external_console(self, lines, focus_to_editor):
"""
Execute lines in the IPython console and, if requested, set focus
to the Editor.
"""
console = self.ipyconsole
console.switch_to_plugin()
console.execute_code(lines)
if focus_to_editor:
self.editor.switch_to_plugin()
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
programs.start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
fname = encoding.to_unicode_from_fs(fname)
if osp.isfile(fname):
self.open_file(fname, external=True)
elif osp.isfile(osp.join(CWD, fname)):
self.open_file(osp.join(CWD, fname), external=True)
# ---- PYTHONPATH management, etc.
def get_spyder_pythonpath(self):
"""Return Spyder PYTHONPATH"""
active_path = [p for p in self.path if p not in self.not_active_path]
return active_path + self.project_path
def add_path_to_sys_path(self):
"""Add Spyder path to sys.path"""
for path in reversed(self.get_spyder_pythonpath()):
sys.path.insert(1, path)
def remove_path_from_sys_path(self):
"""Remove Spyder path from sys.path"""
for path in self.path + self.project_path:
while path in sys.path:
sys.path.remove(path)
@Slot()
def path_manager_callback(self):
"""Spyder path manager"""
from spyder.widgets.pathmanager import PathManager
self.remove_path_from_sys_path()
project_path = self.projects.get_pythonpath()
dialog = PathManager(self, self.path, project_path,
self.not_active_path, sync=True)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.exec_()
self.add_path_to_sys_path()
try:
encoding.writelines(self.path, self.SPYDER_PATH) # Saving path
encoding.writelines(self.not_active_path,
self.SPYDER_NOT_ACTIVE_PATH)
except EnvironmentError:
pass
self.sig_pythonpath_changed.emit()
def pythonpath_changed(self):
"""Projects PYTHONPATH contribution has changed"""
self.remove_path_from_sys_path()
self.project_path = self.projects.get_pythonpath()
self.add_path_to_sys_path()
self.sig_pythonpath_changed.emit()
@Slot()
def win_env(self):
"""Show Windows current user environment variables"""
self.dialog_manager.show(WinUserEnvDialog(self))
#---- Preferences
def apply_settings(self):
"""Apply settings changed in 'Preferences' dialog box"""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes Issue 2036
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
except:
pass
else:
style_name = CONF.get('appearance', 'windows_style',
self.default_style)
style = QStyleFactory.create(style_name)
if style is not None:
style.setProperty('name', style_name)
qapp.setStyle(style)
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
if CONF.get('main', 'animated_docks'):
default = default|QMainWindow.AnimatedDocks
self.setDockOptions(default)
self.apply_panes_settings()
self.apply_statusbar_settings()
if CONF.get('main', 'use_custom_cursor_blinking'):
qapp.setCursorFlashTime(CONF.get('main', 'custom_cursor_blinking'))
else:
qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
"""Update dockwidgets features settings"""
for plugin in (self.widgetlist + self.thirdparty_plugins):
features = plugin.FEATURES
if CONF.get('main', 'vertical_dockwidget_titlebars'):
features = features | QDockWidget.DockWidgetVerticalTitleBar
plugin.dockwidget.setFeatures(features)
plugin.update_margins()
def apply_statusbar_settings(self):
"""Update status bar widgets settings"""
show_status_bar = CONF.get('main', 'show_status_bar')
self.statusBar().setVisible(show_status_bar)
if show_status_bar:
for widget, name in ((self.mem_status, 'memory_usage'),
(self.cpu_status, 'cpu_usage')):
if widget is not None:
widget.setVisible(CONF.get('main', '%s/enable' % name))
widget.set_interval(CONF.get('main', '%s/timeout' % name))
else:
return
@Slot()
def edit_preferences(self):
"""Edit Spyder preferences"""
from spyder.preferences.configdialog import ConfigDialog
dlg = ConfigDialog(self)
dlg.size_change.connect(self.set_prefs_size)
if self.prefs_dialog_size is not None:
dlg.resize(self.prefs_dialog_size)
for PrefPageClass in self.general_prefs:
widget = PrefPageClass(dlg, main=self)
widget.initialize()
dlg.add_page(widget)
for plugin in [self.workingdirectory, self.editor,
self.projects, self.ipyconsole,
self.historylog, self.help, self.variableexplorer,
self.onlinehelp, self.explorer, self.findinfiles
]+self.thirdparty_plugins:
if plugin is not None:
try:
widget = plugin.create_configwidget(dlg)
if widget is not None:
dlg.add_page(widget)
except Exception:
traceback.print_exc(file=sys.stderr)
if self.prefs_index is not None:
dlg.set_current_index(self.prefs_index)
dlg.show()
dlg.check_all_settings()
dlg.pages_widget.currentChanged.connect(self.__preference_page_changed)
dlg.exec_()
def __preference_page_changed(self, index):
"""Preference page index has changed"""
self.prefs_index = index
def set_prefs_size(self, size):
"""Save preferences dialog size"""
self.prefs_dialog_size = size
#---- Shortcuts
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_sc_to_tip=False):
"""
Register QAction or QShortcut to Spyder main application,
with shortcut (context, name, default)
"""
self.shortcut_data.append( (qaction_or_qshortcut, context,
name, add_sc_to_tip) )
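# --- Illustrative sketch (not part of the original Spyder code). It shows
# how a plugin action would typically be registered through the
# register_shortcut() API above so that apply_shortcuts() below can bind
# the configured key sequence. The (context, name) pair, the callback and
# the standalone helper are hypothetical examples.
def _example_register_plugin_shortcut(main_window, run_callback):
    from qtpy.QtWidgets import QAction
    action = QAction(_("Run file"), main_window)
    action.triggered.connect(run_callback)
    # Queue the action; the key sequence itself is applied later by
    # apply_shortcuts() using get_shortcut(context, name)
    main_window.register_shortcut(action, context="editor", name="run file",
                                  add_sc_to_tip=True)
    return action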
def apply_shortcuts(self):
"""Apply shortcuts settings to all widgets/plugins"""
toberemoved = []
for index, (qobject, context, name,
add_sc_to_tip) in enumerate(self.shortcut_data):
keyseq = QKeySequence( get_shortcut(context, name) )
try:
if isinstance(qobject, QAction):
if sys.platform == 'darwin' and \
qobject._shown_shortcut == 'missing':
qobject._shown_shortcut = keyseq
else:
qobject.setShortcut(keyseq)
if add_sc_to_tip:
add_shortcut_to_tooltip(qobject, context, name)
elif isinstance(qobject, QShortcut):
qobject.setKey(keyseq)
except RuntimeError:
# Object has been deleted
toberemoved.append(index)
for index in sorted(toberemoved, reverse=True):
self.shortcut_data.pop(index)
@Slot()
def show_shortcuts_dialog(self):
from spyder.widgets.shortcutssummary import ShortcutsSummaryDialog
dlg = ShortcutsSummaryDialog(None)
dlg.exec_()
# -- Open files server
def start_open_files_server(self):
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
while 1:  # 'while 1' was marginally faster than 'while True' on Python 2
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See Issue 1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
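# --- Illustrative sketch (not part of the original Spyder code): how an
# external process can ask an already-running Spyder instance to open a
# file through the single-instance server above. It relies on the port
# saved to the config by start_open_files_server(); the file path is a
# hypothetical example.
def _example_send_file_to_running_spyder(fname='/tmp/example.py'):
    import socket
    port = CONF.get('main', 'open_files_port')
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        client.connect(('127.0.0.1', port))
        # The server reads a single recv(1024) and expects a UTF-8 path
        client.sendall(fname.encode('utf-8'))
        client.recv(1024)  # one-byte acknowledgement (req.sendall(b' '))
    finally:
        client.close()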
# ---- Quit and restart, and reset spyder defaults
@Slot()
def reset_spyder(self):
"""
Quit and reset Spyder and then Restart application.
"""
answer = QMessageBox.warning(self, _("Warning"),
_("Spyder will restart and reset to default settings: <br><br>"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.restart(reset=True)
@Slot()
def restart(self, reset=False):
"""
Quit and Restart Spyder application.
If reset is True, Spyder settings are reset to defaults on restart.
"""
# Get start path to use in restart script
spyder_start_directory = get_module_path('spyder')
restart_script = osp.join(spyder_start_directory, 'app', 'restart.py')
# Get any initial argument passed when spyder was started
# Note: Variables defined in bootstrap.py and spyder/app/start.py
env = os.environ.copy()
bootstrap_args = env.pop('SPYDER_BOOTSTRAP_ARGS', None)
spyder_args = env.pop('SPYDER_ARGS')
# Get current process and python running spyder
pid = os.getpid()
python = sys.executable
# Check if started with bootstrap.py
if bootstrap_args is not None:
spyder_args = bootstrap_args
is_bootstrap = True
else:
is_bootstrap = False
# Pass variables as environment variables (str) to restarter subprocess
env['SPYDER_ARGS'] = spyder_args
env['SPYDER_PID'] = str(pid)
env['SPYDER_IS_BOOTSTRAP'] = str(is_bootstrap)
env['SPYDER_RESET'] = str(reset)
if DEV:
if os.name == 'nt':
env['PYTHONPATH'] = ';'.join(sys.path)
else:
env['PYTHONPATH'] = ':'.join(sys.path)
# Build the command and popen arguments depending on the OS
if os.name == 'nt':
# Hide flashing command prompt
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
shell = False
else:
startupinfo = None
shell = True
command = '"{0}" "{1}"'
command = command.format(python, restart_script)
try:
if self.closing(True):
subprocess.Popen(command, shell=shell, env=env,
startupinfo=startupinfo)
self.console.quit()
except Exception as error:
# If there is an error with subprocess, Spyder should not quit and
# the error can be inspected in the internal console
print(error) # spyder: test-skip
print(command) # spyder: test-skip
# ---- Interactive Tours
def show_tour(self, index):
"""Show interactive tour."""
self.maximize_dockwidget(restore=True)
frames = self.tours_available[index]
self.tour.set_tour(index, frames, self)
self.tour.start_tour()
# ---- Global File Switcher
def open_fileswitcher(self, symbol=False):
"""Open file list management dialog box."""
if self.fileswitcher is not None and \
self.fileswitcher.is_visible:
self.fileswitcher.hide()
self.fileswitcher.is_visible = False
return
if symbol:
self.fileswitcher.plugin = self.editor
self.fileswitcher.set_search_text('@')
else:
self.fileswitcher.set_search_text('')
self.fileswitcher.show()
self.fileswitcher.is_visible = True
def open_symbolfinder(self):
"""Open symbol list management dialog box."""
self.open_fileswitcher(symbol=True)
def add_to_fileswitcher(self, plugin, tabs, data, icon):
"""Add a plugin to the File Switcher."""
if self.fileswitcher is None:
from spyder.widgets.fileswitcher import FileSwitcher
self.fileswitcher = FileSwitcher(self, plugin, tabs, data, icon)
else:
self.fileswitcher.add_plugin(plugin, tabs, data, icon)
self.fileswitcher.sig_goto_file.connect(
plugin.get_current_tab_manager().set_stack_index)
# ---- Check for Spyder Updates
def _check_updates_ready(self):
"""Called by WorkerUpdates when ready"""
from spyder.widgets.helperwidgets import MessageCheckBox
# `feedback` = False is used on startup, so only positive feedback is
# given. `feedback` = True is used after startup (when using the menu
# action), and gives feedback whether updates are found or not.
feedback = self.give_updates_feedback
# Get results from worker
update_available = self.worker_updates.update_available
latest_release = self.worker_updates.latest_release
error_msg = self.worker_updates.error
url_r = __project_url__ + '/releases'
url_i = 'https://docs.spyder-ide.org/installation.html'
# Define the custom QMessageBox
box = MessageCheckBox(icon=QMessageBox.Information,
parent=self)
box.setWindowTitle(_("Spyder updates"))
box.set_checkbox_text(_("Check for updates on startup"))
box.setStandardButtons(QMessageBox.Ok)
box.setDefaultButton(QMessageBox.Ok)
# Adjust the checkbox depending on the stored configuration
section, option = 'main', 'check_updates_on_startup'
check_updates = CONF.get(section, option)
box.set_checked(check_updates)
if error_msg is not None:
msg = error_msg
box.setText(msg)
box.set_check_visible(False)
box.exec_()
check_updates = box.is_checked()
else:
if update_available:
anaconda_msg = ''
if 'Anaconda' in sys.version or 'conda-forge' in sys.version:
anaconda_msg = _("<hr><b>IMPORTANT NOTE:</b> It seems "
"that you are using Spyder with "
"<b>Anaconda/Miniconda</b>. Please "
"<b>don't</b> use <code>pip</code> to "
"update it as that will probably break "
"your installation.<br><br>"
"Instead, please wait until new conda "
"packages are available and use "
"<code>conda</code> to perform the "
"update.<hr>")
msg = _("<b>Spyder %s is available!</b> <br><br>Please use "
"your package manager to update Spyder or go to our "
"<a href=\"%s\">Releases</a> page to download this "
"new version. <br><br>If you are not sure how to "
"proceed to update Spyder please refer to our "
" <a href=\"%s\">Installation</a> instructions."
"") % (latest_release, url_r, url_i)
msg += '<br>' + anaconda_msg
box.setText(msg)
box.set_check_visible(True)
box.exec_()
check_updates = box.is_checked()
elif feedback:
msg = _("Spyder is up to date.")
box.setText(msg)
box.set_check_visible(False)
box.exec_()
check_updates = box.is_checked()
# Update checkbox based on user interaction
CONF.set(section, option, check_updates)
# Enable check_updates_action after the thread has finished
self.check_updates_action.setDisabled(False)
# Provide feedback when clicking the menu if check on startup is on
self.give_updates_feedback = True
@Slot()
def check_updates(self, startup=False):
"""
Check for spyder updates on github releases using a QThread.
"""
from spyder.workers.updates import WorkerUpdates
# Disable check_updates_action while the thread is working
self.check_updates_action.setDisabled(True)
if self.thread_updates is not None:
self.thread_updates.terminate()
self.thread_updates = QThread(self)
self.worker_updates = WorkerUpdates(self, startup=startup)
self.worker_updates.sig_ready.connect(self._check_updates_ready)
self.worker_updates.sig_ready.connect(self.thread_updates.quit)
self.worker_updates.moveToThread(self.thread_updates)
self.thread_updates.started.connect(self.worker_updates.start)
self.thread_updates.start()
# --- For OpenGL
def _test_setting_opengl(self, option):
"""Get the current OpenGL implementation in use"""
if option == 'software':
return QCoreApplication.testAttribute(Qt.AA_UseSoftwareOpenGL)
elif option == 'desktop':
return QCoreApplication.testAttribute(Qt.AA_UseDesktopOpenGL)
elif option == 'gles':
return QCoreApplication.testAttribute(Qt.AA_UseOpenGLES)
#==============================================================================
# Utilities to create the 'main' function
#==============================================================================
def initialize():
"""Initialize Qt, patching sys.exit and eventually setting up ETS"""
# This doesn't create our QApplication, just holds a reference to
# MAIN_APP, created above to show our splash screen as early as
# possible
app = qapplication()
# --- Set application icon
app.setWindowIcon(APP_ICON)
#----Monkey patching QApplication
class FakeQApplication(QApplication):
"""Spyder's fake QApplication"""
def __init__(self, args):
self = app # analysis:ignore
@staticmethod
def exec_():
"""Do nothing because the Qt mainloop is already running"""
pass
from qtpy import QtWidgets
QtWidgets.QApplication = FakeQApplication
# ----Monkey patching sys.exit
def fake_sys_exit(arg=[]):
pass
sys.exit = fake_sys_exit
# ----Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
if PYQT5:
def spy_excepthook(type_, value, tback):
sys.__excepthook__(type_, value, tback)
sys.excepthook = spy_excepthook
# Removing arguments from sys.argv as in standard Python interpreter
sys.argv = ['']
# Selecting Qt4 backend for Enthought Tool Suite (if installed)
try:
from enthought.etsconfig.api import ETSConfig
ETSConfig.toolkit = 'qt4'
except ImportError:
pass
return app
class Spy(object):
"""
Inspect Spyder internals
Attributes:
app Reference to main QApplication object
window Reference to spyder.MainWindow widget
"""
def __init__(self, app, window):
self.app = app
self.window = window
def __dir__(self):
return list(self.__dict__.keys()) +\
[x for x in dir(self.__class__) if x[0] != '_']
def versions(self):
return get_versions()
def run_spyder(app, options, args):
"""
Create and show Spyder's main window
Start QApplication event loop
"""
#TODO: insert here
# Main window
main = MainWindow(options)
try:
main.setup()
except BaseException:
if main.console is not None:
try:
main.console.shell.exit_interpreter()
except BaseException:
pass
raise
main.show()
main.post_visible_setup()
if main.console:
main.console.shell.interpreter.namespace['spy'] = \
Spy(app=app, window=main)
# Open external files passed as args
if args:
for a in args:
main.open_external_file(a)
# Don't show icons in menus for Mac
if sys.platform == 'darwin':
QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
# Open external files with our Mac app
if running_in_mac_app():
app.sig_open_external_file.connect(main.open_external_file)
# To give focus again to the last focused widget after restoring
# the window
app.focusChanged.connect(main.change_last_focused_widget)
if not running_under_pytest():
app.exec_()
return main
#==============================================================================
# Main
#==============================================================================
def main():
"""Main function"""
# **** For Pytest ****
# We need to create MainWindow **here** to avoid passing pytest
# options to Spyder
if running_under_pytest():
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
options = Mock()
options.working_directory = None
options.profile = False
options.multithreaded = False
options.new_instance = False
options.project = None
options.window_title = None
options.opengl_implementation = None
options.debug_info = None
options.debug_output = None
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
app = initialize()
window = run_spyder(app, options, None)
return window
# **** Collect command line options ****
# Note regarding Options:
# It's important to collect options before monkey patching sys.exit,
# otherwise, argparse won't be able to exit if --help option is passed
options, args = get_options()
# **** Set OpenGL implementation to use ****
if options.opengl_implementation:
option = options.opengl_implementation
set_opengl_implementation(option)
else:
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
# **** Handle hide_console option ****
if options.show_console:
print("(Deprecated) --show console does nothing, now the default "
" behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize
or bool(get_debug_level()))
# **** Set debugging info ****
setup_logging(options)
# **** Create the application ****
app = initialize()
# **** Handle other options ****
if options.reset_config_files:
# <!> Remove all configuration files!
reset_config_files()
return
elif options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults(save=True)
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# **** Show crash dialog ****
if CONF.get('main', 'crash', False) and not DEV:
CONF.set('main', 'crash', False)
if SPLASH is not None:
SPLASH.hide()
QMessageBox.information(
None, "Spyder",
"Spyder crashed during last session.<br><br>"
"If Spyder does not start at all and <u>before submitting a "
"bug report</u>, please try to reset settings to defaults by "
"running Spyder with the command line option '--reset':<br>"
"<span style=\'color: #555555\'><b>spyder --reset</b></span>"
"<br><br>"
"<span style=\'color: #ff5555\'><b>Warning:</b></span> "
"this command will remove all your Spyder configuration files "
"located in '%s').<br><br>"
"If Spyder still fails to launch, you should consult our "
"comprehensive <b><a href=\"%s\">Troubleshooting Guide</a></b>, "
"which when followed carefully solves the vast majority of "
"crashes; also, take "
"the time to search for <a href=\"%s\">known bugs</a> or "
"<a href=\"%s\">discussions</a> matching your situation before "
"submitting a report to our <a href=\"%s\">issue tracker</a>. "
"Your feedback will always be greatly appreciated."
"" % (get_conf_path(), __trouble_url__, __project_url__,
__forum_url__, __project_url__))
# **** Create main window ****
mainwindow = None
try:
mainwindow = run_spyder(app, options, args)
except FontError as fontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('appearance', 'icon_theme', 'spyder 2')
except BaseException:
CONF.set('main', 'crash', True)
import traceback
traceback.print_exc(file=STDERR)
traceback.print_exc(file=open('spyder_crash.log', 'w'))
if mainwindow is None:
# An exception occurred
if SPLASH is not None:
SPLASH.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
|
exposition.py
|
#!/usr/bin/python
from __future__ import unicode_literals
import base64
import os
import socket
import sys
import threading
from contextlib import closing
from wsgiref.simple_server import make_server, WSGIRequestHandler
from prometheus_client import core
from prometheus_client import openmetrics
try:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
from urllib2 import build_opener, Request, HTTPHandler
from urllib import quote_plus
from urlparse import parse_qs, urlparse
except ImportError:
# Python 3
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from urllib.request import build_opener, Request, HTTPHandler
from urllib.parse import quote_plus, parse_qs, urlparse
CONTENT_TYPE_LATEST = str('text/plain; version=0.0.4; charset=utf-8')
'''Content type of the latest text format'''
PYTHON26_OR_OLDER = sys.version_info < (2, 7)
def make_wsgi_app(registry=core.REGISTRY):
'''Create a WSGI app which serves the metrics from a registry.'''
def prometheus_app(environ, start_response):
params = parse_qs(environ.get('QUERY_STRING', ''))
r = registry
encoder, content_type = choose_encoder(environ.get('HTTP_ACCEPT'))
if 'name[]' in params:
r = r.restricted_registry(params['name[]'])
output = encoder(r)
status = str('200 OK')
headers = [(str('Content-type'), content_type)]
start_response(status, headers)
return [output]
return prometheus_app
class _SilentHandler(WSGIRequestHandler):
"""WSGI handler that does not log requests."""
def log_message(self, format, *args):
"""Log nothing."""
def start_wsgi_server(port, addr='', registry=core.REGISTRY):
"""Starts a WSGI server for prometheus metrics as a daemon thread."""
app = make_wsgi_app(registry)
httpd = make_server(addr, port, app, handler_class=_SilentHandler)
t = threading.Thread(target=httpd.serve_forever)
t.daemon = True
t.start()
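# --- Illustrative usage sketch (not part of the original module): expose a
# counter on http://localhost:8000/ with the WSGI helper above. The metric
# name and port are arbitrary examples.
def _example_start_wsgi_exporter():
    import time
    from prometheus_client import Counter
    requests_total = Counter('example_requests_total',
                             'Example requests processed.')
    start_wsgi_server(8000)
    while True:
        requests_total.inc()
        time.sleep(1)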
def generate_latest(registry=core.REGISTRY):
'''Returns the metrics from the registry in latest text format as a string.'''
output = []
for metric in registry.collect():
mname = metric.name
mtype = metric.type
# Munging from OpenMetrics into Prometheus format.
if mtype == 'counter':
mname = mname + '_total'
elif mtype == 'info':
mname = mname + '_info'
mtype = 'gauge'
elif mtype == 'stateset':
mtype = 'gauge'
elif mtype == 'gaugehistogram':
# A gauge histogram is really a gauge,
# but this captures the structure better.
mtype = 'histogram'
elif mtype == 'unknown':
mtype = 'untyped'
output.append('# HELP {0} {1}'.format(
mname, metric.documentation.replace('\\', r'\\').replace('\n', r'\n')))
output.append('\n# TYPE {0} {1}\n'.format(mname, mtype))
for s in metric.samples:
if s.name == metric.name + '_created':
continue # Ignore OpenMetrics specific sample. TODO: Make these into a gauge.
if s.labels:
labelstr = '{{{0}}}'.format(','.join(
['{0}="{1}"'.format(
k, v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))
for k, v in sorted(s.labels.items())]))
else:
labelstr = ''
timestamp = ''
if s.timestamp is not None:
# Convert to milliseconds.
timestamp = ' {0:d}'.format(int(float(s.timestamp) * 1000))
output.append('{0}{1} {2}{3}\n'.format(
s.name, labelstr, core._floatToGoString(s.value), timestamp))
return ''.join(output).encode('utf-8')
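# --- Illustrative sketch (not part of the original module): render a private
# registry with generate_latest(). The metric is an arbitrary example.
def _example_generate_latest_text():
    from prometheus_client import CollectorRegistry, Gauge
    registry = CollectorRegistry()
    g = Gauge('example_last_success_unixtime',
              'Last time the example job succeeded.', registry=registry)
    g.set_to_current_time()
    return generate_latest(registry)  # bytes in the 0.0.4 text format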
def choose_encoder(accept_header):
accept_header = accept_header or ''
for accepted in accept_header.split(','):
if accepted == 'text/openmetrics; version=0.0.1':
return (openmetrics.exposition.generate_latest,
openmetrics.exposition.CONTENT_TYPE_LATEST)
return (generate_latest, CONTENT_TYPE_LATEST)
class MetricsHandler(BaseHTTPRequestHandler):
"""HTTP handler that gives metrics from ``core.REGISTRY``."""
registry = core.REGISTRY
def do_GET(self):
registry = self.registry
params = parse_qs(urlparse(self.path).query)
encoder, content_type = choose_encoder(self.headers.get('Accept'))
if 'name[]' in params:
registry = registry.restricted_registry(params['name[]'])
try:
output = encoder(registry)
except:
self.send_error(500, 'error generating metric output')
raise
self.send_response(200)
self.send_header('Content-Type', content_type)
self.end_headers()
self.wfile.write(output)
def log_message(self, format, *args):
"""Log nothing."""
@staticmethod
def factory(registry):
"""Returns a dynamic MetricsHandler class tied
to the passed registry.
"""
# This implementation relies on MetricsHandler.registry
# (defined above and defaulted to core.REGISTRY).
# As we have unicode_literals, we need to create a str()
# object for type().
cls_name = str('MetricsHandler')
MyMetricsHandler = type(cls_name, (MetricsHandler, object),
{"registry": registry})
return MyMetricsHandler
class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
"""Thread per request HTTP server."""
def start_http_server(port, addr='', registry=core.REGISTRY):
"""Starts an HTTP server for prometheus metrics as a daemon thread"""
CustomMetricsHandler = MetricsHandler.factory(registry)
httpd = _ThreadingSimpleServer((addr, port), CustomMetricsHandler)
t = threading.Thread(target=httpd.serve_forever)
t.daemon = True
t.start()
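# --- Illustrative sketch (not part of the original module): serve only the
# metrics of a dedicated registry instead of the global core.REGISTRY.
# The port and metric are arbitrary examples.
def _example_serve_custom_registry():
    from prometheus_client import CollectorRegistry, Gauge
    registry = CollectorRegistry()
    queue_size = Gauge('example_queue_size', 'Items currently queued.',
                       registry=registry)
    queue_size.set(42)
    start_http_server(9100, registry=registry)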
def write_to_textfile(path, registry):
'''Write metrics to the given path.
This is intended for use with the Node exporter textfile collector.
The path must end in .prom for the textfile collector to process it.'''
tmppath = '%s.%s.%s' % (path, os.getpid(), threading.current_thread().ident)
with open(tmppath, 'wb') as f:
f.write(generate_latest(registry))
# rename(2) is atomic.
os.rename(tmppath, path)
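# --- Illustrative sketch (not part of the original module): typical Node
# exporter textfile-collector usage of write_to_textfile(). The output path
# is a hypothetical example and must live in the directory the exporter
# watches (its --collector.textfile.directory flag); it must end in .prom.
def _example_write_textfile_metrics():
    from prometheus_client import CollectorRegistry, Gauge
    registry = CollectorRegistry()
    g = Gauge('example_batch_last_success_unixtime',
              'Last time the example batch job finished.', registry=registry)
    g.set_to_current_time()
    write_to_textfile('/var/lib/node_exporter/textfile/example_batch.prom',
                      registry)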
def default_handler(url, method, timeout, headers, data):
'''Default handler that implements HTTP/HTTPS connections.
Used by the push_to_gateway functions. Can be re-used by other handlers.'''
def handle():
request = Request(url, data=data)
request.get_method = lambda: method
for k, v in headers:
request.add_header(k, v)
resp = build_opener(HTTPHandler).open(request, timeout=timeout)
if resp.code >= 400:
raise IOError("error talking to pushgateway: {0} {1}".format(
resp.code, resp.msg))
return handle
def basic_auth_handler(url, method, timeout, headers, data, username=None, password=None):
'''Handler that implements HTTP/HTTPS connections with Basic Auth.
Sets auth headers using supplied 'username' and 'password', if set.
Used by the push_to_gateway functions. Can be re-used by other handlers.'''
def handle():
'''Handler that implements HTTP Basic Auth.
'''
if username is not None and password is not None:
auth_value = '{0}:{1}'.format(username, password).encode('utf-8')
auth_token = base64.b64encode(auth_value)
auth_header = b'Basic ' + auth_token
headers.append(['Authorization', auth_header])
default_handler(url, method, timeout, headers, data)()
return handle
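# --- Illustrative sketch (not part of the original module): bind credentials
# to basic_auth_handler with functools.partial so the result matches the
# (url, method, timeout, headers, data) signature that push_to_gateway()
# expects from a handler. Host, job and credentials are hypothetical.
def _example_push_with_basic_auth(registry):
    import functools
    handler = functools.partial(basic_auth_handler,
                                username='example-user',
                                password='example-pass')
    push_to_gateway('pushgateway.example.org:9091', job='example_batch',
                    registry=registry, handler=handler)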
def push_to_gateway(
gateway, job, registry, grouping_key=None, timeout=30,
handler=default_handler):
'''Push metrics to the given pushgateway.
`gateway` the url for your push gateway. Either of the form
'http://pushgateway.local', or 'pushgateway.local'.
Scheme defaults to 'http' if none is provided
`job` is the job label to be attached to all pushed metrics
`registry` is an instance of CollectorRegistry
`grouping_key` please see the pushgateway documentation for details.
Defaults to None
`timeout` is how long push will attempt to connect before giving up.
Defaults to 30s, can be set to None for no timeout.
`handler` is an optional function which can be provided to perform
requests to the 'gateway'.
Defaults to None, in which case an http or https request
will be carried out by a default handler.
If not None, the argument must be a function which accepts
the following arguments:
url, method, timeout, headers, and content
May be used to implement additional functionality not
supported by the built-in default handler (such as SSL
client certificates, and HTTP authentication mechanisms).
'url' is the URL for the request, the 'gateway' argument
described earlier will form the basis of this URL.
'method' is the HTTP method which should be used when
carrying out the request.
'timeout' requests not successfully completed after this
many seconds should be aborted. If timeout is None, then
the handler should not set a timeout.
'headers' is a list of ("header-name","header-value") tuples
which must be passed to the pushgateway in the form of HTTP
request headers.
The function should raise an exception (e.g. IOError) on
failure.
'content' is the data which should be used to form the HTTP
Message Body.
This overwrites all metrics with the same job and grouping_key.
This uses the PUT HTTP method.'''
_use_gateway('PUT', gateway, job, registry, grouping_key, timeout, handler)
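# --- Illustrative sketch (not part of the original module): push the metrics
# of a short-lived batch job with push_to_gateway(). The gateway address,
# job name and metric are hypothetical examples.
def _example_push_batch_job_metrics():
    from prometheus_client import CollectorRegistry, Gauge
    registry = CollectorRegistry()
    last_success = Gauge('example_job_last_success_unixtime',
                         'Last time the example batch job finished.',
                         registry=registry)
    # ...the actual batch work would run here...
    last_success.set_to_current_time()
    push_to_gateway('pushgateway.example.org:9091', job='example_batch',
                    registry=registry)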
def pushadd_to_gateway(
gateway, job, registry, grouping_key=None, timeout=30,
handler=default_handler):
'''PushAdd metrics to the given pushgateway.
`gateway` the url for your push gateway. Either of the form
'http://pushgateway.local', or 'pushgateway.local'.
Scheme defaults to 'http' if none is provided
`job` is the job label to be attached to all pushed metrics
`registry` is an instance of CollectorRegistry
`grouping_key` please see the pushgateway documentation for details.
Defaults to None
`timeout` is how long push will attempt to connect before giving up.
Defaults to 30s, can be set to None for no timeout.
`handler` is an optional function which can be provided to perform
requests to the 'gateway'.
Defaults to `default_handler`, which carries out a plain
http or https request.
See the 'prometheus_client.push_to_gateway' documentation
for implementation requirements.
This replaces metrics with the same name, job and grouping_key.
This uses the POST HTTP method.'''
_use_gateway('POST', gateway, job, registry, grouping_key, timeout, handler)
def delete_from_gateway(
gateway, job, grouping_key=None, timeout=30, handler=default_handler):
'''Delete metrics from the given pushgateway.
`gateway` the url for your push gateway. Either of the form
'http://pushgateway.local', or 'pushgateway.local'.
Scheme defaults to 'http' if none is provided
`job` is the job label to be attached to all pushed metrics
`grouping_key` please see the pushgateway documentation for details.
Defaults to None
`timeout` is how long delete will attempt to connect before giving up.
Defaults to 30s, can be set to None for no timeout.
`handler` is an optional function which can be provided to perform
requests to the 'gateway'.
Defaults to `default_handler`, which carries out a plain
http or https request.
See the 'prometheus_client.push_to_gateway' documentation
for implementation requirements.
This deletes metrics with the given job and grouping_key.
This uses the DELETE HTTP method.'''
_use_gateway('DELETE', gateway, job, None, grouping_key, timeout, handler)
def _use_gateway(method, gateway, job, registry, grouping_key, timeout, handler):
gateway_url = urlparse(gateway)
if not gateway_url.scheme or (PYTHON26_OR_OLDER and gateway_url.scheme not in ['http', 'https']):
gateway = 'http://{0}'.format(gateway)
url = '{0}/metrics/job/{1}'.format(gateway, quote_plus(job))
data = b''
if method != 'DELETE':
data = generate_latest(registry)
if grouping_key is None:
grouping_key = {}
url += ''.join(
'/{0}/{1}'.format(quote_plus(str(k)), quote_plus(str(v)))
for k, v in sorted(grouping_key.items()))
handler(
url=url, method=method, timeout=timeout,
headers=[('Content-Type', CONTENT_TYPE_LATEST)], data=data,
)()
def instance_ip_grouping_key():
'''Grouping key with instance set to the IP Address of this host.'''
with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as s:
s.connect(('localhost', 0))
return {'instance': s.getsockname()[0]}
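# Usage sketch (illustrative only; the gateway address and registry are
# assumptions): passing this grouping key keeps pushes from different hosts
# from overwriting each other on the pushgateway.
#
#   push_to_gateway('pushgateway.example.org:9091', job='batch',
#                   registry=registry, grouping_key=instance_ip_grouping_key())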
|
agent.py
|
# Copyright 2022 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Agent utilities, incl. choosing the move and running in separate process."""
import collections
import functools
import multiprocessing
from typing import Any, Callable
import flax
import jax
import numpy as np
import env_utils
@functools.partial(jax.jit, static_argnums=0)
def policy_action(
apply_fn: Callable[..., Any],
params: flax.core.frozen_dict.FrozenDict,
state: np.ndarray):
"""Forward pass of the network.
Args:
apply_fn: the apply function of the actor-critic model
params: the parameters of the actor-critic model
state: the input for the forward pass
Returns:
out: a tuple (log_probabilities, values)
"""
out = apply_fn({'params': params}, state)
return out
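# Call sketch (illustrative only): `model` is assumed to be a Flax actor-critic
# module and `state` a batched observation, e.g. shape (1, 84, 84, 4) for
# stacked Atari frames.
#
#   log_probabilities, values = policy_action(model.apply, params, state)
#
# apply_fn is a static argument of the jit above, so passing a different
# function object triggers a new trace and compilation.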
ExpTuple = collections.namedtuple(
'ExpTuple', ['state', 'action', 'reward', 'value', 'log_prob', 'done'])
class RemoteSimulator:
"""Wrap functionality for an agent emulating Atari in a separate process.
An object of this class is created for every agent.
"""
def __init__(self, game: str):
"""Start the remote process and create Pipe() to communicate with it."""
parent_conn, child_conn = multiprocessing.Pipe()
self.proc = multiprocessing.Process(
target=rcv_action_send_exp, args=(child_conn, game))
self.proc.daemon = True
self.conn = parent_conn
self.proc.start()
def rcv_action_send_exp(conn, game: str):
"""Run the remote agents.
Receive action from the main learner, perform one step of simulation and
send back collected experience.
"""
env = env_utils.create_env(game, clip_rewards=True)
while True:
obs = env.reset()
done = False
# Observations fetched from Atari env need additional batch dimension.
state = obs[None, ...]
while not done:
conn.send(state)
action = conn.recv()
obs, reward, done, _ = env.step(action)
next_state = obs[None, ...] if not done else None
experience = (state, action, reward, done)
conn.send(experience)
if done:
break
state = next_state
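# Learner-side sketch of the pipe protocol implemented above (illustrative only;
# num_agents, game and choose_actions are assumptions, not defined in this file):
#
#   simulators = [RemoteSimulator(game) for _ in range(num_agents)]
#   states = [sim.conn.recv() for sim in simulators]        # 1. states arrive first
#   actions = choose_actions(states)                        # 2. e.g. via policy_action
#   for sim, action in zip(simulators, actions):
#       sim.conn.send(action)                               # 3. one action per agent
#   experiences = [sim.conn.recv() for sim in simulators]   # 4. (state, action, reward, done)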
|
views.py
|
import logging
import re
import subprocess
import os
import sys
import traceback
import time
import json
import copy
import random
import numpy as np
import pandas as pd
import math
import base64
import urllib3
import hashlib
from io import BytesIO
from datetime import datetime, timedelta
from threading import Thread, Lock
from urllib.parse import urlencode, urlparse, urlunparse, parse_qs, unquote
from elasticsearch_dsl import Search
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.db.models import Count, Sum, F, Value, FloatField, Q, DateTimeField
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.cache import never_cache
from django.utils import timezone
from django.utils.cache import patch_response_headers
from django.core.cache import cache
from django.conf import settings as djangosettings
from django.db import connection
from django.template.loaders.app_directories import get_app_template_dirs
from django.template.defaulttags import register
from django.template.context_processors import csrf
from core import chainsql
import core.constants as const
import core.Customrenderer as Customrenderer
from core.common.utils import getPrefix, getContextVariables
from core.settings import defaultDatetimeFormat
from core.pandajob.SQLLookups import CastDate
from core.pandajob.models import Jobsactive4, Jobsdefined4, Jobswaiting4, Jobsarchived4, Jobsarchived, \
GetRWWithPrioJedi3DAYS, RemainedEventsPerCloud3dayswind, CombinedWaitActDefArch4, PandaJob
from core.schedresource.models import Schedconfig, SchedconfigJson
from core.common.models import Filestable4
from core.common.models import Datasets
from core.common.models import FilestableArch
from core.common.models import Users
from core.common.models import Jobparamstable
from core.common.models import JobsStatuslog
from core.common.models import Logstable
from core.common.models import Jobsdebug
from core.common.models import Cloudconfig
from core.common.models import Incidents
from core.common.models import Pandalog
from core.common.models import JediJobRetryHistory
from core.common.models import JediTasks
from core.common.models import JediTasksOrdered
from core.common.models import TasksStatusLog
from core.common.models import GetEventsForTask
from core.common.models import JediEvents
from core.common.models import JediDatasets
from core.common.models import JediDatasetContents
from core.common.models import JediWorkQueue
from core.oauth.models import BPUser
from core.compare.modelsCompare import ObjectsComparison
from core.filebrowser.ruciowrapper import ruciowrapper
from core.settings.local import dbaccess
from core.settings.local import PRODSYS
from core.settings.local import GRAFANA
from core.settings.config import DEPLOYMENT, DB_N_MAX_IN_QUERY, PRMON_LOGS_DIRECTIO_LOCATION
from core.libs.TaskProgressPlot import TaskProgressPlot
from core.libs.UserProfilePlot import UserProfilePlot
from core.libs.TasksErrorCodesAnalyser import TasksErrorCodesAnalyser
from core.oauth.utils import login_customrequired
from core.utils import is_json_request, extensibleURL, complete_request, is_wildcards, removeParam
from core.libs.dropalgorithm import insert_dropped_jobs_to_tmp_table, drop_job_retries
from core.libs.cache import getCacheEntry, setCacheEntry, set_cache_timeout, getCacheData
from core.libs.exlib import insert_to_temp_table, get_tmp_table_name, create_temporary_table
from core.libs.exlib import is_timestamp, get_file_info, convert_bytes, convert_hs06, dictfetchall
from core.libs.eventservice import event_summary_for_task
from core.libs.task import input_summary_for_task, datasets_for_task, \
get_task_params, humanize_task_params, get_hs06s_summary_for_task, cleanTaskList, get_task_flow_data, \
get_datasets_for_tasklist
from core.libs.task import get_dataset_locality, is_event_service_task, \
get_prod_slice_by_taskid, get_task_timewindow, get_task_time_archive_flag, get_logs_by_taskid, task_summary_dict, \
wg_task_summary
from core.libs.job import is_event_service, get_job_list, calc_jobs_metrics, add_job_category, \
job_states_count_by_param, is_job_active, get_job_queuetime, get_job_walltime, \
getSequentialRetries, getSequentialRetries_ES, getSequentialRetries_ESupstream, is_debug_mode, clean_job_list
from core.libs.eventservice import job_suppression
from core.libs.jobmetadata import addJobMetadata
from core.libs.error import errorInfo, getErrorDescription, get_job_error_desc
from core.libs.site import get_pq_metrics
from core.libs.bpuser import get_relevant_links, filterErrorData
from core.libs.user import prepare_user_dash_plots, get_panda_user_stats, humanize_metrics
from core.libs.elasticsearch import create_esatlas_connection, get_payloadlog
from core.libs.sqlcustom import escape_input, preprocess_wild_card_string
from core.libs.datetimestrings import datetime_handler, parse_datetime
from core.libs.jobconsumers import reconstruct_job_consumers
from core.libs.DateEncoder import DateEncoder
from core.libs.DateTimeEncoder import DateTimeEncoder
from core.pandajob.summary_error import errorSummaryDict, get_error_message_summary
from core.pandajob.summary_task import task_summary, job_summary_for_task, job_summary_for_task_light, \
get_job_state_summary_for_tasklist, get_top_memory_consumers
from core.pandajob.summary_site import cloud_site_summary, vo_summary, site_summary_dict
from core.pandajob.summary_wg import wg_summary
from core.pandajob.summary_wn import wn_summary
from core.pandajob.summary_os import objectstore_summary
from core.pandajob.summary_user import user_summary_dict
from core.pandajob.utils import job_summary_dict
from core.iDDS.algorithms import checkIfIddsTask
from core.dashboards.jobsummaryregion import get_job_summary_region, prepare_job_summary_region, prettify_json_output
from core.dashboards.jobsummarynucleus import get_job_summary_nucleus, prepare_job_summary_nucleus, get_world_hs06_summary
from core.dashboards.eventservice import get_es_job_summary_region, prepare_es_job_summary_region
from core.schedresource.utils import get_pq_atlas_sites, get_panda_queues, get_basic_info_for_pqs, \
get_panda_resource, get_pq_clouds, get_pq_object_store_path
inilock = Lock()
DateTimeField.register_lookup(CastDate)
try:
hostname = subprocess.getoutput('hostname')
if hostname.find('.') > 0: hostname = hostname[:hostname.find('.')]
except:
hostname = ''
cloudList = ['CA', 'CERN', 'DE', 'ES', 'FR', 'IT', 'ND', 'NL', 'RU', 'TW', 'UK', 'US']
statelist = ['pending', 'defined', 'waiting', 'assigned', 'throttled',
'activated', 'sent', 'starting', 'running', 'holding',
'transferring', 'merging', 'finished', 'failed', 'cancelled', 'closed']
sitestatelist = ['defined', 'waiting', 'assigned', 'throttled', 'activated', 'sent', 'starting', 'running', 'holding',
'merging', 'transferring', 'finished', 'failed', 'cancelled', 'closed']
eventservicestatelist = ['ready', 'sent', 'running', 'finished', 'cancelled', 'discarded', 'done', 'failed', 'fatal','merged', 'corrupted']
taskstatelist = ['registered', 'defined', 'assigning', 'ready', 'pending', 'scouting', 'scouted', 'running', 'prepared',
'done', 'failed', 'finished', 'aborting', 'aborted', 'finishing', 'topreprocess', 'preprocessing',
'tobroken', 'broken', 'toretry', 'toincexec', 'rerefine']
taskstatelist_short = ['reg', 'def', 'assgn', 'rdy', 'pend', 'scout', 'sctd', 'run', 'prep', 'done', 'fail', 'finish',
'abrtg', 'abrtd', 'finishg', 'toprep', 'preprc', 'tobrok', 'broken', 'retry', 'incexe', 'refine']
taskstatedict = []
for i in range(0, len(taskstatelist)):
tsdict = {'state': taskstatelist[i], 'short': taskstatelist_short[i]}
taskstatedict.append(tsdict)
errorcodelist = [
{'name': 'brokerage', 'error': 'brokerageerrorcode', 'diag': 'brokerageerrordiag'},
{'name': 'ddm', 'error': 'ddmerrorcode', 'diag': 'ddmerrordiag'},
{'name': 'exe', 'error': 'exeerrorcode', 'diag': 'exeerrordiag'},
{'name': 'jobdispatcher', 'error': 'jobdispatchererrorcode', 'diag': 'jobdispatchererrordiag'},
{'name': 'pilot', 'error': 'piloterrorcode', 'diag': 'piloterrordiag'},
{'name': 'sup', 'error': 'superrorcode', 'diag': 'superrordiag'},
{'name': 'taskbuffer', 'error': 'taskbuffererrorcode', 'diag': 'taskbuffererrordiag'},
{'name': 'transformation', 'error': 'transexitcode', 'diag': None},
]
_logger = logging.getLogger('bigpandamon')
notcachedRemoteAddress = ['188.184.185.129', '188.184.116.46']
LAST_N_HOURS_MAX = 0
# JOB_LIMIT = 0
# TFIRST = timezone.now()
# TLAST = timezone.now() - timedelta(hours=2400)
PLOW = 1000000
PHIGH = -1000000
standard_fields = ['processingtype', 'computingsite', 'jobstatus', 'prodsourcelabel', 'produsername', 'jeditaskid',
'workinggroup', 'transformation', 'cloud', 'homepackage', 'inputfileproject', 'inputfiletype',
'attemptnr', 'specialhandling', 'priorityrange', 'reqid', 'minramcount', 'eventservice',
'jobsubstatus', 'nucleus','gshare', 'resourcetype']
standard_sitefields = ['region', 'gocname', 'nickname', 'status', 'tier', 'comment_field', 'cloud', 'allowdirectaccess',
'allowfax', 'copytool', 'faxredirector', 'retry', 'timefloor']
standard_taskfields = [
'workqueue_id', 'tasktype', 'superstatus', 'status', 'corecount', 'taskpriority', 'currentpriority', 'username',
'transuses', 'transpath', 'workinggroup', 'processingtype', 'cloud', 'campaign', 'project', 'stream', 'tag',
'reqid', 'ramcount', 'nucleus', 'eventservice', 'gshare', 'container_name', 'attemptnr', 'site']
standard_errorfields = ['cloud', 'computingsite', 'eventservice', 'produsername', 'jeditaskid', 'jobstatus',
'processingtype', 'prodsourcelabel', 'specialhandling', 'taskid', 'transformation',
'workinggroup', 'reqid', 'computingelement']
VONAME = {'atlas': 'ATLAS', 'bigpanda': 'BigPanDA', 'htcondor': 'HTCondor', 'core': 'LSST', '': ''}
VOMODE = ' '
@register.filter(takes_context=True)
def get_count(dict, key):
return dict[key]['count']
@register.filter(takes_context=True)
def get_tk(dict, key):
return dict[key]['tk']
@register.filter(takes_context=True)
def get_item(dictionary, key):
return dictionary.get(key)
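# Template usage sketch for the filters above (variable names are assumptions,
# not taken from the real templates):
#
#   {{ summary|get_item:sitename }}    -> summary.get(sitename)
#   {{ summary|get_count:sitename }}   -> summary[sitename]['count']
#   {{ summary|get_tk:sitename }}      -> summary[sitename]['tk']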
@register.simple_tag(takes_context=True)
def get_renderedrow(context, **kwargs):
if kwargs['type']=="world_nucleussummary":
kwargs['statelist'] = statelist
return Customrenderer.world_nucleussummary(context, kwargs)
if kwargs['type']=="world_computingsitesummary":
kwargs['statelist'] = statelist
return Customrenderer.world_computingsitesummary(context, kwargs)
if kwargs['type'] == "region_sitesummary":
kwargs['statelist'] = statelist
return Customrenderer.region_sitesummary(context, kwargs)
def initRequest(request, callselfmon=True):
global VOMODE, ENV, hostname
ENV = {}
VOMODE = ''
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
VOMODE = 'atlas'
# VOMODE = 'devtest'
request.session['req_init_time'] = time.time()
request.session['IS_TESTER'] = False
if VOMODE == 'atlas':
if "MELLON_SAML_RESPONSE" in request.META and base64.b64decode(request.META['MELLON_SAML_RESPONSE']):
if "ADFS_FULLNAME" in request.META:
request.session['ADFS_FULLNAME'] = request.META['ADFS_FULLNAME']
if "ADFS_EMAIL" in request.META:
request.session['ADFS_EMAIL'] = request.META['ADFS_EMAIL']
if "ADFS_FIRSTNAME" in request.META:
request.session['ADFS_FIRSTNAME'] = request.META['ADFS_FIRSTNAME']
if "ADFS_LASTNAME" in request.META:
request.session['ADFS_LASTNAME'] = request.META['ADFS_LASTNAME']
if "ADFS_LOGIN" in request.META:
request.session['ADFS_LOGIN'] = request.META['ADFS_LOGIN']
user = None
try:
user = BPUser.objects.get(username=request.session['ADFS_LOGIN'])
request.session['IS_TESTER'] = user.is_tester
request.session['USER_ID'] = user.id
except BPUser.DoesNotExist:
user = BPUser.objects.create_user(username=request.session['ADFS_LOGIN'], email=request.session['ADFS_EMAIL'], first_name=request.session['ADFS_FIRSTNAME'], last_name=request.session['ADFS_LASTNAME'])
user.set_unusable_password()
user.save()
# if VOMODE == 'devtest':
# request.session['ADFS_FULLNAME'] = ''
# request.session['ADFS_EMAIL'] = ''
# request.session['ADFS_FIRSTNAME'] = ''
# request.session['ADFS_LASTNAME'] = ''
# request.session['ADFS_LOGIN'] = ''
# # user = None
# user = BPUser.objects.get(username=request.session['ADFS_LOGIN'])
# request.session['IS_TESTER'] = user.is_tester
# request.user = user
# print("IP Address for debug-toolbar: " + request.META['REMOTE_ADDR'])
viewParams = {}
# if not 'viewParams' in request.session:
request.session['viewParams'] = viewParams
# create a dict in the session to store trimmed urls; it is not persisted,
# because altering a nested dict does not mark the session as modified
request.session['urls_cut'] = {}
url = request.get_full_path()
u = urlparse(url)
query = parse_qs(u.query)
query.pop('timestamp', None)
try:
u = u._replace(query=urlencode(query, True))
except UnicodeEncodeError:
data = {
'errormessage': 'Error appeared while encoding URL!'
}
return False, render_to_response('errorPage.html', data, content_type='text/html')
request.session['urls_cut']['notimestampurl'] = urlunparse(u) + ('&' if len(query) > 0 else '?')
notimerangeurl = extensibleURL(request)
timerange_params = [
'days', 'hours',
'date_from', 'date_to',
'endtimerange', 'endtime_from', 'endtime_to',
'earlierthan', 'earlierthandays'
]
for trp in timerange_params:
notimerangeurl = removeParam(notimerangeurl, trp, mode='extensible')
request.session['urls_cut']['notimerangeurl'] = notimerangeurl
if 'timerange' in request.session:
del request.session['timerange']
#if 'USER' in os.environ and os.environ['USER'] != 'apache':
# request.session['debug'] = True
if 'debug' in request.GET and request.GET['debug'] == 'insider':
request.session['debug'] = True
djangosettings.DEBUG = True
elif djangosettings.DEBUG is True:
request.session['debug'] = True
else:
request.session['debug'] = False
djangosettings.DEBUG = False
if len(hostname) > 0: request.session['hostname'] = hostname
#self monitor
if callselfmon:
initSelfMonitor(request)
# Set default page lifetime in the http header, for the use of the front end cache
set_cache_timeout(request)
# Is it an https connection with a legit cert presented by the user?
if 'SSL_CLIENT_S_DN' in request.META or 'HTTP_X_SSL_CLIENT_S_DN' in request.META:
if 'SSL_CLIENT_S_DN' in request.META:
request.session['userdn'] = request.META['SSL_CLIENT_S_DN']
else:
request.session['userdn'] = request.META['HTTP_X_SSL_CLIENT_S_DN']
userrec = Users.objects.filter(dn__startswith=request.session['userdn']).values()
if len(userrec) > 0:
request.session['username'] = userrec[0]['name']
if DEPLOYMENT == 'ORACLE_ATLAS':
VOMODE = 'atlas'
request.session['viewParams']['MON_VO'] = 'ATLAS'
else:
VOMODE = DEPLOYMENT
#request.session['viewParams']['MON_VO'] = DEPLOYMENT
# remove xurls from session if it is kept from previous requests
if 'xurls' in request.session:
try:
del request.session['xurls']
except:
pass
requestParams = {}
request.session['requestParams'] = requestParams
allowedemptyparams = ('json', 'snap', 'dt', 'dialogs', 'pandaids', 'workersstats', 'keephtml')
if request.method == 'POST':
for p in request.POST:
if p in ('csrfmiddlewaretoken',): continue
pval = request.POST[p]
pval = pval.replace('+', ' ')
request.session['requestParams'][p.lower()] = pval
else:
for p in request.GET:
pval = request.GET[p]
#### script injection check ####
if 'script' in pval.lower() and ('</' in pval.lower() or '/>' in pval.lower()):
data = {
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
"errormessage": "Illegal value '%s' for %s" % (pval, p),
}
return False, render_to_response('errorPage.html', data, content_type='text/html')
pval = pval.replace('+', ' ')
pval = pval.replace("\'", '')
if p.lower() != 'batchid': # Special requester exception
pval = pval.replace('#', '')
## validate that parameters expected to be integers really are integers
if p.lower() in (
'days', 'hours', 'limit', 'display_limit', 'taskid', 'jeditaskid', 'jobsetid', 'reqid', 'corecount',
'taskpriority', 'priority', 'attemptnr', 'statenotupdated', 'tasknotupdated', 'corepower',
'wansourcelimit', 'wansinklimit', 'nqueue', 'nodes', 'queuehours', 'memory', 'maxtime', 'space',
'maxinputsize', 'timefloor', 'depthboost', 'idlepilotsupression', 'pilotlimit', 'transferringlimit',
'cachedse', 'stageinretry', 'stageoutretry', 'maxwdir', 'minmemory', 'maxmemory', 'minrss',
'maxrss', 'mintime', 'nlastnightlies'):
try:
requestVal = request.GET[p]
if '|' in requestVal:
values = requestVal.split('|')
for value in values:
i = int(value)
elif requestVal == 'Not specified':
# allow 'Not specified' value for int parameters
i = requestVal
else:
i = int(requestVal)
except:
data = {
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
"errormessage": "Illegal value '%s' for %s" % (pval, p),
}
return False, render_to_response('errorPage.html', data, content_type='text/html')
if p.lower() in ('date_from', 'date_to'):
try:
requestVal = request.GET[p]
i = parse_datetime(requestVal)
except:
data = {
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
"errormessage": "Illegal value '%s' for %s" % (pval, p),
}
return False, render_to_response('errorPage.html', data, content_type='text/html')
if p.lower() not in allowedemptyparams and len(pval) == 0:
data = {
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
"errormessage": "Empty value '%s' for %s" % (pval, p),
}
return False, render_to_response('errorPage.html', data, content_type='text/html')
if p.lower() in ('jobname', 'taskname', ) and len(pval) > 0 and ('%' in pval or '%s' in pval):
data = {
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
"errormessage": "Use * symbol for pattern search instead of % for {}".format(p),
}
return False, render_to_response('errorPage.html', data, content_type='text/html')
request.session['requestParams'][p.lower()] = pval
return True, None
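# Calling convention sketch (mirrors the views below): initRequest returns
# (True, None) on success or (False, rendered_error_response) on failure, so
# views short-circuit on the second element.
#
#   valid, response = initRequest(request)
#   if not valid:
#       return response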
def setupView(request, opmode='', hours=0, limit=-99, querytype='job', wildCardExt=False):
viewParams = {}
if not 'viewParams' in request.session:
request.session['viewParams'] = viewParams
extraQueryString = '(1=1) '
extraQueryFields = [] # params that goes directly to the wildcards processing
LAST_N_HOURS_MAX = 0
for paramName, paramVal in request.session['requestParams'].items():
try:
request.session['requestParams'][paramName] = unquote(paramVal)
except:
request.session['requestParams'][paramName] = paramVal
excludeJobNameFromWildCard = True
if 'jobname' in request.session['requestParams']:
if is_wildcards(request.session['requestParams']['jobname']):
excludeJobNameFromWildCard = False
processor_type = request.session['requestParams'].get('processor_type', None)
if processor_type:
if processor_type.lower() == 'cpu':
extraQueryString += " AND (cmtconfig not like '%%gpu%%')"
if processor_type.lower() == 'gpu':
extraQueryString += " AND (cmtconfig like '%%gpu%%')"
if 'site' in request.session['requestParams'] and (
request.session['requestParams']['site'] == 'hpc' or not is_wildcards(request.session['requestParams']['site'])):
extraQueryFields.append('site')
wildSearchFields = []
if querytype == 'job':
for field in Jobsactive4._meta.get_fields():
if field.get_internal_type() == 'CharField':
if not (field.name == 'jobstatus' or field.name == 'modificationhost'
or (excludeJobNameFromWildCard and field.name == 'jobname')):
wildSearchFields.append(field.name)
if querytype == 'task':
for field in JediTasks._meta.get_fields():
if field.get_internal_type() == 'CharField':
if not (field.name == 'modificationhost' or field.name in extraQueryFields):
wildSearchFields.append(field.name)
deepquery = False
fields = standard_fields
if 'limit' in request.session['requestParams']:
request.session['JOB_LIMIT'] = int(request.session['requestParams']['limit'])
elif limit != -99 and limit > 0:
request.session['JOB_LIMIT'] = limit
elif VOMODE == 'atlas':
request.session['JOB_LIMIT'] = 20000
else:
request.session['JOB_LIMIT'] = 10000
if VOMODE == 'atlas':
LAST_N_HOURS_MAX = 12
else:
LAST_N_HOURS_MAX = 7 * 24
if VOMODE == 'atlas':
if 'cloud' not in fields: fields.append('cloud')
if 'atlasrelease' not in fields: fields.append('atlasrelease')
if 'produsername' in request.session['requestParams'] or 'jeditaskid' in request.session[
'requestParams'] or 'user' in request.session['requestParams']:
if 'jobsetid' not in fields: fields.append('jobsetid')
if ('hours' not in request.session['requestParams']) and (
'days' not in request.session['requestParams']) and (
'jobsetid' in request.session['requestParams'] or 'taskid' in request.session[
'requestParams'] or 'jeditaskid' in request.session['requestParams']):
## Cases where deep query is safe. Unless the time depth is specified in URL.
if 'hours' not in request.session['requestParams'] and 'days' not in request.session['requestParams']:
deepquery = True
else:
if 'jobsetid' in fields: fields.remove('jobsetid')
else:
fields.append('vo')
if hours > 0:
## Call param overrides default hours, but not a param on the URL
LAST_N_HOURS_MAX = hours
## For site-specific queries, allow longer time window
if 'batchid' in request.session['requestParams'] and (hours is None or hours == 0):
LAST_N_HOURS_MAX = 12
if 'computingsite' in request.session['requestParams'] and hours is None:
LAST_N_HOURS_MAX = 12
if 'jobtype' in request.session['requestParams'] and request.session['requestParams']['jobtype'] == 'eventservice':
LAST_N_HOURS_MAX = 2 * 24
## hours specified in the URL takes priority over the above
if 'hours' in request.session['requestParams']:
LAST_N_HOURS_MAX = int(request.session['requestParams']['hours'])
if 'days' in request.session['requestParams']:
LAST_N_HOURS_MAX = int(request.session['requestParams']['days']) * 24
## Exempt single-job, single-task etc queries from time constraint
if 'hours' not in request.session['requestParams'] and 'days' not in request.session['requestParams']:
if 'jeditaskid' in request.session['requestParams']: deepquery = True
if 'taskid' in request.session['requestParams']: deepquery = True
if 'pandaid' in request.session['requestParams']: deepquery = True
if 'jobname' in request.session['requestParams']: deepquery = True
#if 'batchid' in request.session['requestParams']: deepquery = True
if deepquery:
opmode = 'notime'
hours = LAST_N_HOURS_MAX = 24 * 180
request.session['JOB_LIMIT'] = 999999
if opmode != 'notime':
if LAST_N_HOURS_MAX <= 72 and not ('date_from' in request.session['requestParams'] or 'date_to' in request.session['requestParams']
or 'earlierthan' in request.session['requestParams'] or 'earlierthandays' in request.session['requestParams']):
request.session['viewParams']['selection'] = ", last %s hours" % LAST_N_HOURS_MAX
else:
request.session['viewParams']['selection'] = ", last %d days" % (float(LAST_N_HOURS_MAX) / 24.)
# if JOB_LIMIT < 999999 and JOB_LIMIT > 0:
# viewParams['selection'] += ", <font style='color:#FF8040; size=-1'>Warning: limit %s per job table</font>" % JOB_LIMIT
request.session['viewParams']['selection'] += ". <b>Params:</b> "
# if 'days' not in requestParams:
# viewParams['selection'] += "hours=%s" % LAST_N_HOURS_MAX
# else:
# viewParams['selection'] += "days=%s" % int(LAST_N_HOURS_MAX/24)
if request.session['JOB_LIMIT'] < 100000 and request.session['JOB_LIMIT'] > 0:
request.session['viewParams']['selection'] += " <b>limit=</b>%s" % request.session['JOB_LIMIT']
else:
request.session['viewParams']['selection'] = ""
for param in request.session['requestParams']:
if request.session['requestParams'][param] == 'None': continue
if request.session['requestParams'][param] == '': continue
if param == 'display_limit': continue
if param == 'sortby': continue
if param == 'timestamp': continue
if param == 'limit' and request.session['JOB_LIMIT'] > 0: continue
request.session['viewParams']['selection'] += " <b>%s=</b>%s " % (
param, request.session['requestParams'][param])
startdate = None
if 'date_from' in request.session['requestParams']:
startdate = parse_datetime(request.session['requestParams']['date_from'])
if not startdate:
startdate = timezone.now() - timedelta(hours=LAST_N_HOURS_MAX)
# startdate = startdate.strftime(defaultDatetimeFormat)
enddate = None
endtime__castdate__range = None
if 'endtimerange' in request.session['requestParams']:
endtimerange = request.session['requestParams']['endtimerange'].split('|')
endtime__castdate__range = [parse_datetime(endtimerange[0]).strftime(defaultDatetimeFormat),
parse_datetime(endtimerange[1]).strftime(defaultDatetimeFormat)]
if 'date_to' in request.session['requestParams']:
enddate = parse_datetime(request.session['requestParams']['date_to'])
if 'earlierthan' in request.session['requestParams']:
enddate = timezone.now() - timedelta(hours=float(request.session['requestParams']['earlierthan']))
# enddate = enddate.strftime(defaultDatetimeFormat)
if 'earlierthandays' in request.session['requestParams']:
enddate = timezone.now() - timedelta(hours=float(request.session['requestParams']['earlierthandays']) * 24)
# enddate = enddate.strftime(defaultDatetimeFormat)
if enddate == None:
enddate = timezone.now() # .strftime(defaultDatetimeFormat)
request.session['noenddate'] = True
else:
request.session['noenddate'] = False
if request.path.startswith('/running'):
query = {}
else:
if not endtime__castdate__range:
query = {
'modificationtime__castdate__range': [startdate.strftime(defaultDatetimeFormat), enddate.strftime(defaultDatetimeFormat)]}
else:
query = {
'endtime__castdate__range': [endtime__castdate__range[0], endtime__castdate__range[1]]}
request.session['TFIRST'] = startdate # startdate[:18]
request.session['TLAST'] = enddate # enddate[:18]
request.session['PLOW'] = 1000000
request.session['PHIGH'] = -1000000
### Add any extensions to the query determined from the URL
#query['vo'] = 'atlas'
#for vo in ['atlas', 'core']:
# if 'HTTP_HOST' in request.META and request.META['HTTP_HOST'].startswith(vo):
# query['vo'] = vo
for param in request.session['requestParams']:
if param in ('hours', 'days'): continue
elif param == 'cloud' and request.session['requestParams'][param] == 'All':
continue
elif param == 'workinggroup':
if request.session['requestParams'][param] and not is_wildcards(request.session['requestParams'][param]):
query[param] = request.session['requestParams'][param]
elif param == 'harvesterinstance' or param == 'harvesterid':
val = request.session['requestParams'][param]
if val == 'Not specified':
extraQueryString += " AND ((schedulerid not like 'harvester%%') or (schedulerid = '') or (schedulerid is null))"
elif val == 'all':
query['schedulerid__startswith'] = 'harvester'
else:
query['schedulerid'] = 'harvester-'+val
elif param == 'schedulerid':
if 'harvester-*' in request.session['requestParams'][param]:
query['schedulerid__startswith'] = 'harvester'
else:
val = request.session['requestParams'][param]
query['schedulerid__startswith'] = val
elif param == 'priorityrange':
mat = re.match(r'([0-9]+):([0-9]+)', request.session['requestParams'][param])
if mat:
plo = int(mat.group(1))
phi = int(mat.group(2))
query['currentpriority__gte'] = plo
query['currentpriority__lte'] = phi
elif param == 'jobsetrange':
mat = re.match(r'([0-9]+):([0-9]+)', request.session['requestParams'][param])
if mat:
plo = int(mat.group(1))
phi = int(mat.group(2))
query['jobsetid__gte'] = plo
query['jobsetid__lte'] = phi
elif param == 'user' or param == 'username' or param == 'produsername':
if querytype == 'job':
query['produsername__icontains'] = request.session['requestParams'][param].strip()
elif param in ('project',) and querytype == 'task':
val = request.session['requestParams'][param]
query['taskname__istartswith'] = val
elif param in ('outputfiletype',) and querytype != 'task':
val = request.session['requestParams'][param]
query['destinationdblock__icontains'] = val
elif param in ('stream',) and querytype == 'task':
val = request.session['requestParams'][param]
query['taskname__icontains'] = val
elif param == 'harvesterid':
val = escape_input(request.session['requestParams'][param])
values = val.split(',')
query['harvesterid__in'] = values
elif param in ('tag',):
val = request.session['requestParams'][param]
query['taskname__endswith'] = val
elif param == 'reqid_from':
val = int(request.session['requestParams'][param])
query['reqid__gte'] = val
elif param == 'reqid_to':
val = int(request.session['requestParams'][param])
query['reqid__lte'] = val
elif param == 'processingtype' and '|' not in request.session['requestParams'][param] and '*' not in request.session['requestParams'][param] and '!' not in request.session['requestParams'][param]:
val = request.session['requestParams'][param]
query['processingtype'] = val
elif param == 'mismatchedcloudsite' and request.session['requestParams'][param] == 'true':
listOfCloudSitesMismatched = cache.get('mismatched-cloud-sites-list')
if (listOfCloudSitesMismatched is None) or (len(listOfCloudSitesMismatched) == 0):
request.session['viewParams'][
'selection'] += " <b>The query can not be processed because list of mismatches is not found. Please visit %s/dash/production/?cloudview=region page and then try again</b>" % \
request.session['hostname']
else:
for count, cloudSitePair in enumerate(listOfCloudSitesMismatched):
extraQueryString += 'AND ( ( (cloud=\'%s\') and (computingsite=\'%s\') ) ' % (
cloudSitePair[1], cloudSitePair[0])
if (count < (len(listOfCloudSitesMismatched) - 1)):
extraQueryString += ' OR '
extraQueryString += ')'
elif param == 'pilotversion' and request.session['requestParams'][param]:
val = request.session['requestParams'][param]
if val == 'Not specified':
extraQueryString += ' AND ( (pilotid not like \'%%|%%\') or (pilotid is null) )'
else:
query['pilotid__endswith'] = val
elif param == 'durationmin' and request.session['requestParams'][param]:
try:
durationrange = request.session['requestParams'][param].split('-')
except:
continue
if durationrange[0] == '0' and durationrange[1] == '0':
extraQueryString += ' AND ( (endtime is NULL and starttime is null) ) '
else:
extraQueryString += """ AND (
(endtime is not NULL and starttime is not null
and (endtime - starttime) * 24 * 60 > {} and (endtime - starttime) * 24 * 60 < {} )
or
(endtime is NULL and starttime is not null
and (CAST(sys_extract_utc(SYSTIMESTAMP) AS DATE) - starttime) * 24 * 60 > {} and (CAST(sys_extract_utc(SYSTIMESTAMP) AS DATE) - starttime) * 24 * 60 < {} )
) """.format(str(durationrange[0]), str(durationrange[1]), str(durationrange[0]), str(durationrange[1]))
elif param == 'neventsrange' and request.session['requestParams'][param]:
try:
neventsrange = request.session['requestParams'][param].split('-')
except:
continue
if neventsrange and len(neventsrange) == 2:
query['nevents__gte'] = neventsrange[0]
query['nevents__lte'] = neventsrange[1]
elif param == 'errormessage':
errfield_map_dict = {}
for errcode in errorcodelist:
if errcode['name'] != 'transformation':
errfield_map_dict[errcode['error']] = errcode['diag']
for parname in request.session['requestParams']:
if parname in errfield_map_dict.keys():
query[errfield_map_dict[parname]] = request.session['requestParams'][param]
elif param == 'container_name' and request.session['requestParams']['container_name'] == 'all':
extraQueryString += " AND (container_name IS NOT NULL ) "
# remove from wildcard search fields
wildSearchFields.remove('container_name')
# add a new no_container_name xurl to request session
if 'xurls' not in request.session:
request.session['xurls'] = {}
request.session['xurls']['container_name'] = removeParam(extensibleURL(request), 'container_name', mode='extensible')
continue
elif param == 'reqtoken':
data = getCacheData(request, request.session['requestParams']['reqtoken'])
if data is not None:
if 'pandaid' in data:
pid = data['pandaid']
if pid.find(',') >= 0:
pidl = pid.split(',')
query['pandaid__in'] = pidl
else:
query['pandaid'] = int(pid)
elif 'jeditaskid' in data:
tid = data['jeditaskid']
if tid.find(',') >= 0:
tidl = tid.split(',')
query['jeditaskid__in'] = tidl
else:
query['jeditaskid'] = int(tid)
else:
return 'reqtoken', None, None
if querytype == 'task':
if param == 'category':
if request.session['requestParams'][param] == 'group production':
query['workinggroup__icontains'] = 'GP_'
elif request.session['requestParams'][param] == 'production':
query['tasktype'] = 'prod'
query['workinggroup__icontains'] = 'AP_'
elif request.session['requestParams'][param] == 'group analysis':
query['tasktype'] = 'anal'
query['workinggroup__isnull'] = False
extraQueryString += " AND username not in ('artprod', 'atlevind', 'gangarbt') "
elif request.session['requestParams'][param] == 'user analysis':
query['tasktype'] = 'anal'
query['workinggroup__isnull'] = True
extraQueryString += " AND username not in ('artprod', 'atlevind', 'gangarbt') "
elif request.session['requestParams'][param] == 'service':
query['username__in'] = ('artprod', 'atlevind', 'gangarbt')
for field in JediTasks._meta.get_fields():
# for param in requestParams:
if param == field.name:
if request.session['requestParams'][param] == 'Not specified':
extraQueryString += " AND ( {0} is NULL or {0} = '' ) ".format(param)
extraQueryFields.append(param)
continue
if param == 'ramcount':
if 'GB' in request.session['requestParams'][param]:
leftlimit, rightlimit = (request.session['requestParams'][param]).split('-')
rightlimit = rightlimit[:-2]
query['%s__range' % param] = (int(leftlimit) * 1000, int(rightlimit) * 1000 - 1)
else:
query[param] = int(request.session['requestParams'][param])
elif param == 'transpath':
query['%s__endswith' % param] = request.session['requestParams'][param]
elif param == 'tasktype':
ttype = request.session['requestParams'][param]
if ttype.startswith('anal'):
ttype = 'anal'
elif ttype.startswith('prod'):
ttype = 'prod'
query[param] = ttype
elif param == 'jeditaskid':
val = escape_input(request.session['requestParams'][param])
values = val.split('|')
query['jeditaskid__in'] = values
elif param == 'status':
val = escape_input(request.session['requestParams'][param])
if '*' not in val and '|' not in val and '!' not in val:
values = val.split(',')
query['status__in'] = values
elif param == 'superstatus':
val = escape_input(request.session['requestParams'][param])
values = val.split('|')
query['superstatus__in'] = values
elif param == 'reqid':
val = escape_input(request.session['requestParams'][param])
if val.find('|') >= 0:
values = val.split('|')
values = [int(val) for val in values]
query['reqid__in'] = values
else:
query['reqid'] = int(val)
elif param == 'site':
if request.session['requestParams'][param] != 'hpc' and param in extraQueryFields:
query['site__contains'] = request.session['requestParams'][param]
elif param == 'eventservice':
if request.session['requestParams'][param] == 'eventservice' or \
request.session['requestParams'][param] == '1':
query['eventservice'] = 1
else:
query['eventservice'] = 0
else:
if (param not in wildSearchFields):
query[param] = request.session['requestParams'][param]
else:
if param == 'jobtype':
jobtype = request.session['requestParams']['jobtype']
if jobtype.startswith('anal'):
query['prodsourcelabel__in'] = ['panda', 'user', 'rc_alrb', 'rc_test2']
query['transformation__startswith'] = 'http'
elif jobtype.startswith('prod'):
query['prodsourcelabel__in'] = ['managed', 'prod_test', 'ptest', 'rc_alrb', 'rc_test2']
query['transformation__endswith'] = '.py'
elif jobtype == 'groupproduction':
query['prodsourcelabel'] = 'managed'
query['workinggroup__isnull'] = False
elif jobtype == 'eventservice':
query['eventservice'] = 1
elif jobtype == 'esmerge':
query['eventservice'] = 2
elif jobtype == 'test' or jobtype.find('test') >= 0:
query['produsername'] = 'gangarbt'
for field in Jobsactive4._meta.get_fields():
if param == field.name:
if request.session['requestParams'][param] == 'Not specified':
extraQueryString += " AND ( {0} is NULL or {0} = '' ) ".format(param)
extraQueryFields.append(param)
continue
if param == 'minramcount':
if 'GB' in request.session['requestParams'][param]:
leftlimit, rightlimit = (request.session['requestParams'][param]).split('-')
rightlimit = rightlimit[:-2]
query['%s__range' % param] = (int(leftlimit) * 1000, int(rightlimit) * 1000 - 1)
else:
query[param] = int(request.session['requestParams'][param])
elif param == 'specialhandling' and not '*' in request.session['requestParams'][param]:
query['specialhandling__contains'] = request.session['requestParams'][param]
elif param == 'prodsourcelabel':
query['prodsourcelabel'] = request.session['requestParams'][param]
elif param == 'reqid':
val = escape_input(request.session['requestParams'][param])
if val.find('|') >= 0:
values = val.split('|')
values = [int(val) for val in values]
query['reqid__in'] = values
else:
query['reqid'] = int(val)
elif param == 'transformation' or param == 'transpath':
# we cut the transformation path and show only tail
query[param + '__contains'] = request.session['requestParams'][param].replace('*', '')
elif param == 'modificationhost' and request.session['requestParams'][param].find('@') < 0:
paramQuery = request.session['requestParams'][param]
if paramQuery[0] == '*': paramQuery = paramQuery[1:]
if paramQuery[-1] == '*': paramQuery = paramQuery[:-1]
query['%s__contains' % param] = paramQuery
elif param == 'jeditaskid' or param == 'taskid':
val = escape_input(request.session['requestParams'][param])
if '|' in val:
values = val.split('|')
values = [int(val) for val in values]
query[param + '__in'] = values
else:
query[param] = int(val)
elif param == 'pandaid':
try:
pid = request.session['requestParams']['pandaid']
if pid.find(',') >= 0:
pidl = pid.split(',')
query['pandaid__in'] = pidl
else:
query['pandaid'] = int(pid)
except:
query['jobname'] = request.session['requestParams']['pandaid']
elif param == 'jobstatus' and request.session['requestParams'][param] == 'finished' \
and (('mode' in request.session['requestParams'] and request.session['requestParams']['mode'] == 'eventservice') or (
'jobtype' in request.session['requestParams'] and request.session['requestParams'][
'jobtype'] == 'eventservice')):
query['jobstatus__in'] = ('finished', 'cancelled')
elif param == 'jobstatus':
val = escape_input(request.session['requestParams'][param])
values = val.split('|') if '|' in val else val.split(',')
query['jobstatus__in'] = values
elif param == 'eventservice':
if '|' in request.session['requestParams'][param]:
paramsstr = request.session['requestParams'][param]
paramsstr = paramsstr.replace('eventservice', '1')
paramsstr = paramsstr.replace('esmerge', '2')
paramsstr = paramsstr.replace('clone', '3')
paramsstr = paramsstr.replace('cojumbo', '5')
paramsstr = paramsstr.replace('jumbo', '4')
paramvalues = paramsstr.split('|')
try:
paramvalues = [int(p) for p in paramvalues]
except:
paramvalues = []
query['eventservice__in'] = paramvalues
else:
if request.session['requestParams'][param] == 'esmerge' or request.session['requestParams'][
param] == '2':
query['eventservice'] = 2
elif request.session['requestParams'][param] == 'clone' or request.session['requestParams'][
param] == '3':
query['eventservice'] = 3
elif request.session['requestParams'][param] == 'jumbo' or request.session['requestParams'][
param] == '4':
query['eventservice'] = 4
elif request.session['requestParams'][param] == 'cojumbo' or request.session['requestParams'][
param] == '5':
query['eventservice'] = 5
elif request.session['requestParams'][param] == 'eventservice' or \
request.session['requestParams'][param] == '1':
query['eventservice'] = 1
extraQueryString += " AND not specialhandling like \'%%sc:%%\' "
elif request.session['requestParams'][param] == 'not2':
extraQueryString += ' AND (eventservice != 2) '
elif request.session['requestParams'][param] == 'all':
query['eventservice__isnull'] = False
continue
else:
query['eventservice__isnull'] = True
elif param == 'corecount' and request.session['requestParams'][param] == '1':
extraQueryString += ' AND (corecount = 1 or corecount is NULL) '
elif param == 'resourcetype' and request.session['requestParams'][param]:
if '|' in request.session['requestParams'][param]:
rtypes = request.session['requestParams'][param].split('|')
query['resourcetype__in'] = rtypes
else:
query['resourcetype'] = request.session['requestParams'][param]
else:
if (param not in wildSearchFields):
query[param] = request.session['requestParams'][param]
if 'region' in request.session['requestParams']:
region = request.session['requestParams']['region']
pq_clouds = get_pq_clouds()
siteListForRegion = []
for sn, rn in pq_clouds.items():
if rn == region:
siteListForRegion.append(str(sn))
query['computingsite__in'] = siteListForRegion
if opmode in ['analysis', 'production'] and querytype == 'job':
if opmode.startswith('analy'):
query['prodsourcelabel__in'] = ['panda', 'user']
elif opmode.startswith('prod'):
query['prodsourcelabel__in'] = ['managed']
if wildCardExt == False:
return query
try:
extraQueryString += ' AND '
except NameError:
extraQueryString = ''
# wild cards handling
wildSearchFields = (set(wildSearchFields) & set(list(request.session['requestParams'].keys())))
# filter out fields that are already in the query dict
wildSearchFields1 = set()
for currenfField in wildSearchFields:
if not (currenfField.lower() == 'transformation'):
if not ((currenfField.lower() == 'cloud') & (
any(card.lower() == 'all' for card in request.session['requestParams'][currenfField].split('|')))):
if not any(currenfField in key for key, value in query.items()) and currenfField not in extraQueryFields:
wildSearchFields1.add(currenfField)
wildSearchFields = wildSearchFields1
for i_field, field_name in enumerate(wildSearchFields, start=1):
extraQueryString += '('
wildCardsOr = request.session['requestParams'][field_name].split('|')
if not ((field_name.lower() == 'cloud') & (any(card.lower() == 'all' for card in wildCardsOr))):
for i_or, card_or in enumerate(wildCardsOr, start=1):
if ',' in card_or:
extraQueryString += '('
wildCardsAnd = card_or.split(',')
for i_and, card_and in enumerate(wildCardsAnd, start=1):
extraQueryString += preprocess_wild_card_string(card_and, field_name)
if i_and < len(wildCardsAnd):
extraQueryString += ' AND '
extraQueryString += ')'
else:
extraQueryString += preprocess_wild_card_string(card_or, field_name)
if i_or < len(wildCardsOr):
extraQueryString += ' OR '
extraQueryString += ')'
if i_field < len(wildSearchFields):
extraQueryString += ' AND '
if 'jobparam' in request.session['requestParams']:
jobParWildCards = request.session['requestParams']['jobparam'].split('|')
jobParCountCards = len(jobParWildCards)
jobParCurrentCardCount = 1
extraJobParCondition = '('
for card in jobParWildCards:
extraJobParCondition += preprocess_wild_card_string(escape_input(card), 'JOBPARAMETERS')
if (jobParCurrentCardCount < jobParCountCards): extraJobParCondition += ' OR '
jobParCurrentCardCount += 1
extraJobParCondition += ')'
pandaIDs = []
jobParamQuery = {'modificationtime__castdate__range': [
startdate.strftime(defaultDatetimeFormat),
enddate.strftime(defaultDatetimeFormat)]}
jobs = Jobparamstable.objects.filter(**jobParamQuery).extra(where=[extraJobParCondition]).values('pandaid')
for values in jobs:
pandaIDs.append(values['pandaid'])
query['pandaid__in'] = pandaIDs
if extraQueryString.endswith(' AND '):
extraQueryString = extraQueryString[:-5]
if (len(extraQueryString) < 2):
extraQueryString = '1=1'
return (query, extraQueryString, LAST_N_HOURS_MAX)
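# Usage sketch for the triple returned above (the model and parameters are
# assumptions; the same pattern appears throughout this module): the dict feeds
# the ORM filter and the string goes into a raw extra() WHERE clause.
#
#   query, wildCardExtension, LAST_N_HOURS_MAX = setupView(
#       request, hours=24, querytype='job', wildCardExt=True)
#   jobs = Jobsactive4.objects.filter(**query).extra(
#       where=[wildCardExtension]).values()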
# def cleanJobList(request, jobl, mode='nodrop', doAddMeta=False):
# if 'mode' in request.session['requestParams'] and request.session['requestParams']['mode'] == 'drop': mode = 'drop'
#
# if 'fields' in request.session['requestParams']:
# fieldsStr = request.session['requestParams']['fields']
# fields = fieldsStr.split("|")
# if 'metastruct' in fields:
# doAddMeta = True
#
# errorCodes = get_job_error_desc()
# pq_clouds = get_pq_clouds()
#
# if doAddMeta:
# jobs = addJobMetadata(jobl)
# else:
# jobs = jobl
# for job in jobs:
# if is_event_service(job):
# if 'jobmetrics' in job:
# pat = re.compile('.*mode\=([^\s]+).*HPCStatus\=([A-Za-z0-9]+)')
# mat = pat.match(job['jobmetrics'])
# if mat:
# job['jobmode'] = mat.group(1)
# job['substate'] = mat.group(2)
# pat = re.compile('.*coreCount\=([0-9]+)')
# mat = pat.match(job['jobmetrics'])
# if mat:
# job['corecount'] = mat.group(1)
# if 'jobsubstatus' in job and job['jobstatus'] == 'closed' and job['jobsubstatus'] == 'toreassign':
# job['jobstatus'] += ':' + job['jobsubstatus']
# if 'eventservice' in job:
# if is_event_service(job) and job['eventservice'] == 1:
# job['eventservice'] = 'eventservice'
# elif is_event_service(job) and job['eventservice'] == 2:
# job['eventservice'] = 'esmerge'
# elif job['eventservice'] == 3:
# job['eventservice'] = 'clone'
# elif is_event_service(job) and job['eventservice'] == 4:
# job['eventservice'] = 'jumbo'
# elif job['eventservice'] == 5:
# job['eventservice'] = 'cojumbo'
# else:
# job['eventservice'] = 'ordinary'
# if 'destinationdblock' in job and job['destinationdblock']:
# ddbfields = job['destinationdblock'].split('.')
# if len(ddbfields) == 6 and ddbfields[0] != 'hc_test':
# job['outputfiletype'] = ddbfields[4]
# elif len(ddbfields) >= 7:
# job['outputfiletype'] = ddbfields[6]
# # else:
# # job['outputfiletype'] = None
# # print job['destinationdblock'], job['outputfiletype'], job['pandaid']
#
# try:
# job['homecloud'] = pq_clouds[job['computingsite']]
# except:
# job['homecloud'] = None
# if 'produsername' in job and not job['produsername']:
# if ('produserid' in job) and job['produserid']:
# job['produsername'] = job['produserid']
# else:
# job['produsername'] = 'Unknown'
# if job['transformation']: job['transformation'] = job['transformation'].split('/')[-1]
#
# job['errorinfo'] = errorInfo(job, errorCodes=errorCodes)
#
# job['jobinfo'] = ''
# if is_event_service(job):
# if 'taskbuffererrordiag' in job and job['taskbuffererrordiag'] is None:
# job['taskbuffererrordiag'] = ''
# if 'taskbuffererrordiag' in job and len(job['taskbuffererrordiag']) > 0:
# job['jobinfo'] = job['taskbuffererrordiag']
# elif 'specialhandling' in job and job['specialhandling'] == 'esmerge':
# job['jobinfo'] = 'Event service merge job. '
# elif 'eventservice' in job and job['eventservice'] == 'jumbo':
# job['jobinfo'] = 'Jumbo job. '
# else:
# job['jobinfo'] = 'Event service job. '
#
# if is_debug_mode(job):
# job['jobinfo'] += 'Real-time logging is activated for this job.'
#
# job['duration'] = ""
# job['durationsec'] = 0
# # if job['jobstatus'] in ['finished','failed','holding']:
# if 'endtime' in job and 'starttime' in job and job['starttime']:
# starttime = job['starttime']
# if job['endtime']:
# endtime = job['endtime']
# else:
# endtime = timezone.now()
#
# duration = max(endtime - starttime, timedelta(seconds=0))
# ndays = duration.days
# strduration = str(timedelta(seconds=duration.seconds))
# job['duration'] = "%s:%s" % (ndays, strduration)
# job['durationsec'] = ndays * 24 * 3600 + duration.seconds
# job['durationmin'] = round((ndays * 24 * 3600 + duration.seconds)/60)
#
# # durationmin for active jobs = now - starttime, for non-started = 0
# if not 'durationmin' in job:
# if 'starttime' in job and job['starttime'] is not None and 'endtime' in job and job['endtime'] is None:
# endtime = timezone.now()
# starttime = job['starttime']
# duration = max(endtime - starttime, timedelta(seconds=0))
# ndays = duration.days
# job['durationmin'] = round((ndays * 24 * 3600 + duration.seconds)/60)
# else:
# job['durationmin'] = 0
#
# job['waittime'] = ""
# # if job['jobstatus'] in ['running','finished','failed','holding','cancelled','transferring']:
# if 'creationtime' in job and 'starttime' in job and job['creationtime']:
# creationtime = job['creationtime']
# if job['starttime']:
# starttime = job['starttime']
# elif job['jobstatus'] in ('finished', 'failed', 'closed', 'cancelled'):
# starttime = job['modificationtime']
# else:
# starttime = datetime.now()
# wait = starttime - creationtime
# ndays = wait.days
# strwait = str(timedelta(seconds=wait.seconds))
# job['waittime'] = "%s:%s" % (ndays, strwait)
# if 'currentpriority' in job:
# plo = int(job['currentpriority']) - int(job['currentpriority']) % 100
# phi = plo + 99
# job['priorityrange'] = "%d:%d" % (plo, phi)
# if 'jobsetid' in job and job['jobsetid']:
# plo = int(job['jobsetid']) - int(job['jobsetid']) % 100
# phi = plo + 99
# job['jobsetrange'] = "%d:%d" % (plo, phi)
# if 'corecount' in job and job['corecount'] is None:
# job['corecount'] = 1
# if 'maxpss' in job and isinstance(job['maxpss'], int) and (
# 'actualcorecount' in job and isinstance(job['actualcorecount'], int) and job['actualcorecount'] > 0):
# job['maxpssgbpercore'] = round(job['maxpss']/1024./1024./job['actualcorecount'], 2)
#
# if ('cpuconsumptiontime' in job and job['cpuconsumptiontime'] and job['cpuconsumptiontime'] > 0) and (
# 'actualcorecount' in job and job['actualcorecount'] is not None and job['actualcorecount'] > 0) and (
# 'durationsec' in job and job['durationsec'] is not None and job['durationsec'] > 0):
# job['cpuefficiency'] = round(100.0 * job['cpuconsumptiontime'] / job['durationsec'] / job['actualcorecount'], 2)
#
# # drop duplicate jobs
# droplist = []
# job1 = {}
# newjobs = []
# for job in jobs:
# pandaid = job['pandaid']
# dropJob = 0
# if pandaid in job1:
# ## This is a duplicate. Drop it.
# dropJob = 1
# else:
# job1[pandaid] = 1
# if (dropJob == 0):
# newjobs.append(job)
# jobs = newjobs
#
# # find max and min values of priority and modificationtime for current selection of jobs
# global PLOW, PHIGH
# PLOW = 1000000
# PHIGH = -1000000
# for job in jobs:
# if job['modificationtime'] > request.session['TLAST']: request.session['TLAST'] = job['modificationtime']
# if job['modificationtime'] < request.session['TFIRST']: request.session['TFIRST'] = job['modificationtime']
# if job['currentpriority'] > PHIGH: PHIGH = job['currentpriority']
# if job['currentpriority'] < PLOW: PLOW = job['currentpriority']
# jobs = sorted(jobs, key=lambda x: x['modificationtime'], reverse=True)
#
# _logger.debug('job list cleaned')
# return jobs
def mainPage(request):
valid, response = initRequest(request)
if not valid:
return response
setupView(request)
debuginfo = None
if request.session['debug']:
debuginfo = "<h2>Debug info</h2>"
from django.conf import settings
for name in dir(settings):
debuginfo += "%s = %s<br>" % (name, getattr(settings, name))
debuginfo += "<br>******* Environment<br>"
for env in os.environ:
debuginfo += "%s = %s<br>" % (env, os.environ[env])
if not is_json_request(request):
del request.session['TFIRST']
del request.session['TLAST']
data = {
'prefix': getPrefix(request),
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'debuginfo': debuginfo,
'built': datetime.now().strftime("%H:%M:%S"),
}
data.update(getContextVariables(request))
response = render_to_response('core-mainPage.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
else:
return HttpResponse('json', content_type='text/html')
def helpPage(request):
valid, response = initRequest(request)
if not valid: return response
setupView(request)
del request.session['TFIRST']
del request.session['TLAST']
acronyms = {
'panda': 'PanDA',
'art': 'ART',
'api': 'API',
'qa': 'Q&A',
'idds': 'iDDS',
'gs': 'Global Shares',
'wn': 'WN',
}
# find all help templates
template_files = []
for template_dir in (tuple(djangosettings.TEMPLATES[0]['DIRS']) + get_app_template_dirs('templates')):
for dir, dirnames, filenames in os.walk(template_dir):
for filename in filenames:
if filename.endswith('Help.html'):
template_files.append(filename)
template_files = sorted(list(set(template_files)))
# group by object
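    # split CamelCase template file names into words, e.g. 'jobListHelp.html' -> ['job', 'List', 'Help.html']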
camel_case_regex = "(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])"
help_template_dict = {}
for tfn in template_files:
tfn_words = re.split(camel_case_regex, tfn)
tfn_words_humanized = []
for w in tfn_words:
if w.lower() in acronyms:
tfn_words_humanized.append(acronyms[w.lower()])
else:
tfn_words_humanized.append(w.title())
if tfn_words[0] not in help_template_dict:
help_template_dict[tfn_words[0]] = {
'key': tfn_words[0],
'template_names': [],
'anchor': tfn_words[0],
'title': tfn_words_humanized[0],
}
help_template_dict[tfn_words[0]]['template_names'].append({
'name': tfn,
            'title': ' '.join(tfn_words_humanized[:-1]),
'anchor': tfn.replace('.html', '')
})
help_template_list = list(help_template_dict.values())
# move introduction help to the beginning
help_template_list.insert(0, help_template_list.pop(min([i for i, d in enumerate(help_template_list) if d['key'].lower() == 'introduction'])))
if not is_json_request(request):
data = {
'prefix': getPrefix(request),
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'built': datetime.now().strftime("%H:%M:%S"),
'templates': help_template_list,
}
response = render_to_response('help.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
else:
return HttpResponse('json', content_type='text/html')
def jobParamList(request):
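    """
    Return job parameters from Jobparamstable for the comma-separated PanDA IDs given in the 'pandaid' request parameter (JSON requests only).
    """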
idlist = []
if 'pandaid' in request.session['requestParams']:
idstring = request.session['requestParams']['pandaid']
idstringl = idstring.split(',')
for pid in idstringl:
idlist.append(int(pid))
query = {'pandaid__in': idlist}
jobparams = Jobparamstable.objects.filter(**query).values()
if is_json_request(request):
return HttpResponse(json.dumps(jobparams, cls=DateEncoder), content_type='application/json')
else:
return HttpResponse('not supported', content_type='text/html')
@login_customrequired
def jobList(request, mode=None, param=None):
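    """
    List PanDA jobs matching the request parameters: collect jobs from the live and archived job tables,
    optionally drop retries superseded by resubmission, build attribute and error summaries,
    and render jobList(ES).html or return JSON.
    """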
valid, response = initRequest(request)
if not valid:
return response
dkey = digkey(request)
thread = None
# Here we try to get data from cache
data = getCacheEntry(request, "jobList")
if data is not None:
data = json.loads(data)
if 'istestmonitor' in request.session['requestParams'] and request.session['requestParams']['istestmonitor'] == 'yes':
return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='application/json')
data['request'] = request
        if data['eventservice'] is True:
response = render_to_response('jobListES.html', data, content_type='text/html')
else:
response = render_to_response('jobList.html', data, content_type='text/html')
_logger.info('Rendered template with data from cache: {}'.format(time.time() - request.session['req_init_time']))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
if 'dump' in request.session['requestParams'] and request.session['requestParams']['dump'] == 'parameters':
return jobParamList(request)
is_job_meta_required = False
if 'fields' in request.session['requestParams'] and request.session['requestParams']['fields'] and 'metastruct' in request.session['requestParams']['fields']:
is_job_meta_required = True
eventservice = False
if 'jobtype' in request.session['requestParams'] and request.session['requestParams']['jobtype'] == 'eventservice':
eventservice = True
if 'eventservice' in request.session['requestParams'] and (
request.session['requestParams']['eventservice'] == 'eventservice' or request.session['requestParams'][
'eventservice'] == '1' or request.session['requestParams']['eventservice'] == '4' or request.session['requestParams']['eventservice'] == 'jumbo'):
eventservice = True
elif 'eventservice' in request.session['requestParams'] and (
'1' in request.session['requestParams']['eventservice'] or '2' in request.session['requestParams']['eventservice'] or
'4' in request.session['requestParams']['eventservice'] or '5' in request.session['requestParams']['eventservice']):
eventservice = True
elif 'jeditaskid' in request.session['requestParams'] and request.session['requestParams']['jeditaskid']:
try:
jeditaskid = int(request.session['requestParams']['jeditaskid'])
except:
jeditaskid = None
if jeditaskid:
eventservice = is_event_service_task(jeditaskid)
noarchjobs = False
if 'noarchjobs' in request.session['requestParams'] and request.session['requestParams']['noarchjobs'] == '1':
noarchjobs = True
warning = {}
extraquery_files = ' '
if 'fileid' in request.session['requestParams'] or 'ecstate' in request.session['requestParams']:
if 'fileid' in request.session['requestParams'] and request.session['requestParams']['fileid']:
fileid = request.session['requestParams']['fileid']
else:
fileid = None
if 'datasetid' in request.session['requestParams'] and request.session['requestParams']['datasetid']:
datasetid = request.session['requestParams']['datasetid']
else:
datasetid = None
if 'jeditaskid' in request.session['requestParams'] and request.session['requestParams']['jeditaskid']:
jeditaskid = request.session['requestParams']['jeditaskid']
else:
jeditaskid = None
if 'tk' in request.session['requestParams'] and request.session['requestParams']['tk']:
tk = request.session['requestParams']['tk']
del request.session['requestParams']['tk']
else:
tk = None
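        # restrict the selection to jobs that processed the requested file, checking both live and archived file tables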
if jeditaskid and datasetid and fileid:
extraquery_files += """
pandaid in (
(select pandaid from atlas_panda.filestable4
where jeditaskid = {} and datasetid in ( {} ) and fileid = {} )
union all
(select pandaid from atlas_pandaarch.filestable_arch
where jeditaskid = {} and datasetid in ( {} ) and fileid = {} )
) """.format(jeditaskid, datasetid, fileid, jeditaskid, datasetid, fileid)
if 'ecstate' in request.session['requestParams'] and tk and datasetid:
extraquery_files += """
pandaid in (
(select pandaid from atlas_panda.filestable4 where jeditaskid = {} and datasetid in ( {} )
and fileid in (select id from atlas_pandabigmon.TMP_IDS1DEBUG where TRANSACTIONKEY={}) )
union all
(select pandaid from atlas_pandaarch.filestable_arch where jeditaskid = {} and datasetid in ( {} )
and fileid in (select id from atlas_pandabigmon.TMP_IDS1DEBUG where TRANSACTIONKEY={}) )
) """.format(jeditaskid, datasetid, tk, jeditaskid, datasetid, tk)
elif 'jeditaskid' in request.session['requestParams'] and 'datasetid' in request.session['requestParams']:
fileid = None
if 'datasetid' in request.session['requestParams'] and request.session['requestParams']['datasetid']:
datasetid = request.session['requestParams']['datasetid']
else:
datasetid = None
if 'jeditaskid' in request.session['requestParams'] and request.session['requestParams']['jeditaskid']:
jeditaskid = request.session['requestParams']['jeditaskid']
else:
jeditaskid = None
if datasetid and jeditaskid:
extraquery_files += """
pandaid in (
(select pandaid from atlas_panda.filestable4 where jeditaskid = {} and datasetid = {} )
union all
(select pandaid from atlas_pandaarch.filestable_arch where jeditaskid = {} and datasetid = {})
) """.format(jeditaskid, datasetid, jeditaskid, datasetid)
else:
fileid = None
extraquery_tasks = ' '
if 'taskname' in request.session['requestParams'] and 'username' in request.session['requestParams']:
taskname = request.session['requestParams']['taskname']
taskusername = request.session['requestParams']['username']
if taskname.find('*') != -1:
taskname = taskname.replace('*', '%%')
if taskusername.find('*') != -1:
taskusername = taskusername.replace('*', '%%')
extraquery_tasks += """
jeditaskid in (
select jeditaskid from atlas_panda.jedi_tasks where taskname like '{}' and username like '{}'
) """.format(taskname, taskusername)
_logger.debug('Specific params processing: {}'.format(time.time() - request.session['req_init_time']))
query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, wildCardExt=True)
_logger.debug('Setup view: {}'.format(time.time() - request.session['req_init_time']))
if len(extraquery_files) > 1:
wildCardExtension += ' AND ' + extraquery_files
if len(extraquery_tasks) > 1:
wildCardExtension += ' AND ' + extraquery_tasks
if query == 'reqtoken' and wildCardExtension is None and LAST_N_HOURS_MAX is None:
data = {
'desc': 'Request token is not found or data is outdated. Please reload the original page.',
}
return render_to_response('message.html', data, content_type='text/html')
jobs = []
if is_json_request(request):
values = [f.name for f in Jobsactive4._meta.get_fields()]
else:
values = [
'corecount', 'jobsubstatus', 'produsername', 'cloud', 'computingsite', 'cpuconsumptiontime', 'jobstatus',
'transformation', 'prodsourcelabel', 'specialhandling', 'vo', 'modificationtime', 'pandaid', 'atlasrelease',
'jobsetid', 'processingtype', 'workinggroup', 'jeditaskid', 'taskid', 'currentpriority', 'creationtime',
'starttime', 'endtime', 'brokerageerrorcode', 'brokerageerrordiag', 'ddmerrorcode', 'ddmerrordiag',
'exeerrorcode', 'exeerrordiag', 'jobdispatchererrorcode', 'jobdispatchererrordiag', 'piloterrorcode',
'piloterrordiag', 'superrorcode', 'superrordiag', 'taskbuffererrorcode', 'taskbuffererrordiag',
'transexitcode', 'destinationse', 'homepackage', 'inputfileproject', 'inputfiletype', 'attemptnr',
'maxattempt', 'jobname', 'computingelement', 'proddblock', 'destinationdblock', 'reqid', 'minramcount',
'statechangetime', 'nevents', 'jobmetrics',
'noutputdatafiles', 'parentid', 'actualcorecount', 'schedulerid', 'pilotid', 'commandtopilot',
'cmtconfig', 'maxpss']
if not eventservice:
values.extend(['avgvmem', 'maxvmem', 'maxrss'])
if DEPLOYMENT != "POSTGRES":
values.append('nucleus')
values.append('eventservice')
values.append('gshare')
values.append('resourcetype')
values.append('container_name')
totalJobs = 0
showTop = 0
if 'limit' in request.session['requestParams']:
request.session['JOB_LIMIT'] = int(request.session['requestParams']['limit'])
JOB_LIMIT = request.session['JOB_LIMIT']
job_final_states = ['finished', 'failed', 'cancelled', 'closed', 'merging']
harvesterjobstatus = ''
from core.harvester.views import getHarvesterJobs, getCeHarvesterJobs
if 'jobstatus' in request.session['requestParams']:
harvesterjobstatus = request.session['requestParams']['jobstatus']
if 'transferringnotupdated' in request.session['requestParams']:
jobs = stateNotUpdated(request, state='transferring', values=values, wildCardExtension=wildCardExtension)
elif 'statenotupdated' in request.session['requestParams']:
jobs = stateNotUpdated(request, values=values, wildCardExtension=wildCardExtension)
elif 'harvesterinstance' in request.session['requestParams'] and 'workerid' in request.session['requestParams']:
jobs = getHarvesterJobs(request,
instance=request.session['requestParams']['harvesterinstance'],
workerid=request.session['requestParams']['workerid'],
jobstatus=harvesterjobstatus,
fields=values)
elif 'harvesterid' in request.session['requestParams'] and 'workerid' in request.session['requestParams']:
jobs = getHarvesterJobs(request,
instance=request.session['requestParams']['harvesterid'],
workerid=request.session['requestParams']['workerid'],
jobstatus=harvesterjobstatus,
fields=values)
elif ('harvesterinstance' not in request.session['requestParams'] and 'harvesterid' not in request.session['requestParams']) and 'workerid' in request.session['requestParams']:
jobs = getHarvesterJobs(request,
workerid=request.session['requestParams']['workerid'],
jobstatus=harvesterjobstatus,
fields=values)
elif 'harvesterce' in request.session['requestParams']:
jobs = getCeHarvesterJobs(request, computingelment=request.session['requestParams']['harvesterce'])
else:
# exclude time from query for DB tables with active jobs
etquery = copy.deepcopy(query)
if ('modificationtime__castdate__range' in etquery and len(set(['date_to', 'hours']).intersection(request.session['requestParams'].keys())) == 0) or (
'jobstatus' in request.session['requestParams'] and is_job_active(request.session['requestParams']['jobstatus'])):
del etquery['modificationtime__castdate__range']
            warning['notimelimit'] = "no time window limit was applied to active jobs in this selection"
jobs.extend(Jobsdefined4.objects.filter(**etquery).extra(where=[wildCardExtension])[:JOB_LIMIT].values(*values))
jobs.extend(Jobsactive4.objects.filter(**etquery).extra(where=[wildCardExtension])[:JOB_LIMIT].values(*values))
jobs.extend(Jobswaiting4.objects.filter(**etquery).extra(where=[wildCardExtension])[:JOB_LIMIT].values(*values))
jobs.extend(Jobsarchived4.objects.filter(**query).extra(where=[wildCardExtension])[:JOB_LIMIT].values(*values))
listJobs = [Jobsarchived4, Jobsactive4, Jobswaiting4, Jobsdefined4]
if not noarchjobs:
queryFrozenStates = []
if 'jobstatus' in request.session['requestParams']:
queryFrozenStates = list(set(request.session['requestParams']['jobstatus'].split('|')).intersection(job_final_states))
# hard limit is set to 20K
if 'jobstatus' not in request.session['requestParams'] or len(queryFrozenStates) > 0:
if 'limit' not in request.session['requestParams']:
if 'jeditaskid' not in request.session['requestParams']:
request.session['JOB_LIMIT'] = 20000
JOB_LIMIT = 20000
showTop = 1
else:
request.session['JOB_LIMIT'] = 200000
JOB_LIMIT = 200000
else:
request.session['JOB_LIMIT'] = int(request.session['requestParams']['limit'])
JOB_LIMIT = int(request.session['requestParams']['limit'])
if 'modificationtime__castdate__range' in query and (
(datetime.now() - datetime.strptime(query['modificationtime__castdate__range'][0], defaultDatetimeFormat)).days > 2 or
(datetime.now() - datetime.strptime(query['modificationtime__castdate__range'][1], defaultDatetimeFormat)).days > 2):
if 'jeditaskid' in request.session['requestParams'] or (is_json_request(request) and (
'fulllist' in request.session['requestParams'] and request.session['requestParams']['fulllist'] == 'true')):
del query['modificationtime__castdate__range']
archJobs = Jobsarchived.objects.filter(**query).extra(where=[wildCardExtension])[:JOB_LIMIT].values(*values)
listJobs.append(Jobsarchived)
totalJobs = len(archJobs)
jobs.extend(archJobs)
if not is_json_request(request):
thread = Thread(target=totalCount, args=(listJobs, query, wildCardExtension, dkey))
thread.start()
else:
thread = None
_logger.info('Got jobs: {}'.format(time.time() - request.session['req_init_time']))
# If the list is for a particular JEDI task, filter out the jobs superseded by retries
taskids = {}
for job in jobs:
if 'jeditaskid' in job:
taskids[job['jeditaskid']] = 1
# if ES -> nodrop by default
dropmode = True
if 'mode' in request.session['requestParams'] and request.session['requestParams']['mode'] == 'drop':
dropmode = True
if ('mode' in request.session['requestParams'] and request.session['requestParams']['mode'] == 'nodrop') or eventservice:
dropmode = False
isReturnDroppedPMerge = False
if 'processingtype' in request.session['requestParams'] and request.session['requestParams']['processingtype'] == 'pmerge':
isReturnDroppedPMerge = True
droplist = []
droppedPmerge = set()
cntStatus = []
if dropmode and (len(taskids) == 1):
jobs, droplist, droppedPmerge = drop_job_retries(jobs, list(taskids.keys())[0], is_return_dropped_jobs= isReturnDroppedPMerge)
    _logger.debug('Done dropping if it was requested: {}'.format(time.time() - request.session['req_init_time']))
# get attempts of file if fileid in request params
files_attempts_dict = {}
files_attempts = []
if fileid:
if fileid and jeditaskid and datasetid:
fquery = {}
fquery['pandaid__in'] = [job['pandaid'] for job in jobs if len(jobs) > 0]
fquery['fileid'] = fileid
files_attempts.extend(Filestable4.objects.filter(**fquery).values('pandaid', 'attemptnr'))
files_attempts.extend(FilestableArch.objects.filter(**fquery).values('pandaid', 'attemptnr'))
if len(files_attempts) > 0:
files_attempts_dict = dict(zip([f['pandaid'] for f in files_attempts], [ff['attemptnr'] for ff in files_attempts]))
jfquery = {'jeditaskid': jeditaskid, 'datasetid': datasetid, 'fileid': fileid}
jedi_file = JediDatasetContents.objects.filter(**jfquery).values('attemptnr', 'maxattempt', 'failedattempt', 'maxfailure')
if jedi_file and len(jedi_file) > 0:
jedi_file = jedi_file[0]
if len(files_attempts_dict) > 0:
for job in jobs:
if job['pandaid'] in files_attempts_dict:
job['fileattemptnr'] = files_attempts_dict[job['pandaid']]
else:
job['fileattemptnr'] = None
if jedi_file and 'maxattempt' in jedi_file:
job['filemaxattempts'] = jedi_file['maxattempt']
_logger.debug('Got file attempts: {}'.format(time.time() - request.session['req_init_time']))
jobs = clean_job_list(request, jobs, do_add_metadata=is_job_meta_required, do_add_errorinfo=True)
_logger.debug('Cleaned job list: {}'.format(time.time() - request.session['req_init_time']))
jobs = reconstruct_job_consumers(jobs)
_logger.debug('Reconstructed consumers: {}'.format(time.time() - request.session['req_init_time']))
njobs = len(jobs)
jobtype = ''
if 'jobtype' in request.session['requestParams']:
jobtype = request.session['requestParams']['jobtype']
elif '/analysis' in request.path:
jobtype = 'analysis'
elif '/production' in request.path:
jobtype = 'production'
if 'display_limit' in request.session['requestParams']:
if int(request.session['requestParams']['display_limit']) > njobs:
display_limit = njobs
else:
display_limit = int(request.session['requestParams']['display_limit'])
url_nolimit = removeParam(request.get_full_path(), 'display_limit')
else:
display_limit = 1000
url_nolimit = request.get_full_path()
njobsmax = display_limit
if 'sortby' in request.session['requestParams']:
sortby = request.session['requestParams']['sortby']
        if sortby == 'create-ascending':
            jobs = sorted(jobs, key=lambda x: x['creationtime'] if x['creationtime'] is not None else datetime(1900, 1, 1))
        if sortby == 'create-descending':
            jobs = sorted(jobs, key=lambda x: x['creationtime'] if x['creationtime'] is not None else datetime(1900, 1, 1), reverse=True)
        if sortby == 'time-ascending':
            jobs = sorted(jobs, key=lambda x: x['modificationtime'] if x['modificationtime'] is not None else datetime(1900, 1, 1))
        if sortby == 'time-descending':
            jobs = sorted(jobs, key=lambda x: x['modificationtime'] if x['modificationtime'] is not None else datetime(1900, 1, 1), reverse=True)
        if sortby == 'statetime':
            jobs = sorted(jobs, key=lambda x: x['statechangetime'] if x['statechangetime'] is not None else datetime(1900, 1, 1), reverse=True)
        elif sortby == 'priority':
            jobs = sorted(jobs, key=lambda x: x['currentpriority'] if x['currentpriority'] is not None else 0, reverse=True)
elif sortby == 'attemptnr':
jobs = sorted(jobs, key=lambda x: x['attemptnr'], reverse=True)
elif sortby == 'duration-ascending':
jobs = sorted(jobs, key=lambda x: x['durationsec'])
elif sortby == 'duration-descending':
jobs = sorted(jobs, key=lambda x: x['durationsec'], reverse=True)
elif sortby == 'duration':
jobs = sorted(jobs, key=lambda x: x['durationsec'])
elif sortby == 'PandaID':
jobs = sorted(jobs, key=lambda x: x['pandaid'], reverse=True)
elif fileid:
sortby = "fileattemptnr-descending"
jobs = sorted(jobs, key=lambda x: x['fileattemptnr'], reverse=True)
elif 'computingsite' in request.session['requestParams']:
sortby = 'time-descending'
jobs = sorted(jobs, key=lambda x: x['modificationtime'] if x['modificationtime'] is not None else datetime(1900, 1, 1), reverse=True)
else:
sortby = "attemptnr-descending,pandaid-descending"
jobs = sorted(jobs, key=lambda x: [-x['attemptnr'],-x['pandaid']])
_logger.debug('Sorted joblist: {}'.format(time.time() - request.session['req_init_time']))
taskname = ''
if 'jeditaskid' in request.session['requestParams'] and '|' not in request.session['requestParams']['jeditaskid']:
taskname = getTaskName('jeditaskid', request.session['requestParams']['jeditaskid'])
if 'taskid' in request.session['requestParams'] and '|' not in request.session['requestParams']['taskid']:
taskname = getTaskName('jeditaskid', request.session['requestParams']['taskid'])
if 'produsername' in request.session['requestParams']:
user = request.session['requestParams']['produsername']
elif 'user' in request.session['requestParams']:
user = request.session['requestParams']['user']
else:
user = None
_logger.debug('Got task and user names: {}'.format(time.time() - request.session['req_init_time']))
# show warning or not
if njobs <= request.session['JOB_LIMIT']:
showwarn = 0
else:
showwarn = 1
# Sort in order to see the most important tasks
sumd, esjobdict = job_summary_dict(request, jobs, standard_fields+['corecount', 'noutputdatafiles', 'actualcorecount', 'schedulerid', 'pilotversion', 'computingelement', 'container_name', 'nevents'])
if sumd:
for item in sumd:
if item['field'] == 'jeditaskid':
item['list'] = sorted(item['list'], key=lambda k: k['kvalue'], reverse=True)
_logger.debug('Built standard params attributes summary: {}'.format(time.time() - request.session['req_init_time']))
if 'jeditaskid' in request.session['requestParams']:
if len(jobs) > 0:
for job in jobs:
if 'maxvmem' in job:
                    if isinstance(job['maxvmem'], int) and job['maxvmem'] > 0:
                        job['maxvmemmb'] = "%0.2f" % (job['maxvmem'] / 1000.)
                    if 'avgvmem' in job and isinstance(job['avgvmem'], int) and job['avgvmem'] > 0:
                        job['avgvmemmb'] = "%0.2f" % (job['avgvmem'] / 1000.)
if 'maxpss' in job:
if type(job['maxpss']) is int and job['maxpss'] > 0:
job['maxpss'] = "%0.2f" % (job['maxpss'] / 1024.)
testjobs = False
if 'prodsourcelabel' in request.session['requestParams'] and request.session['requestParams']['prodsourcelabel'].lower().find('test') >= 0:
testjobs = True
errsByCount, _, _, _, errdSumd, _ = errorSummaryDict(request, jobs, testjobs, output=['errsByCount', 'errdSumd'])
_logger.debug('Built error summary: {}'.format(time.time() - request.session['req_init_time']))
errsByMessage = get_error_message_summary(jobs)
_logger.debug('Built error message summary: {}'.format(time.time() - request.session['req_init_time']))
if not is_json_request(request):
        # Here we get extended data for the list of jobs to be shown
jobsToShow = jobs[:njobsmax]
from core.libs import exlib
try:
jobsToShow = exlib.fileList(jobsToShow)
except Exception as e:
_logger.error(e)
_logger.debug(
'Got file info for list of jobs to be shown: {}'.format(time.time() - request.session['req_init_time']))
        # Getting PQ status for the list of jobs to be shown
pq_dict = get_panda_queues()
for job in jobsToShow:
if job['computingsite'] in pq_dict:
job['computingsitestatus'] = pq_dict[job['computingsite']]['status']
job['computingsitecomment'] = pq_dict[job['computingsite']]['comment']
_logger.debug('Got extra params for sites: {}'.format(time.time() - request.session['req_init_time']))
        # join the thread that counts the total number of jobs in the DB without the row-limit applied
if thread is not None:
try:
thread.join()
jobsTotalCount = sum(tcount[dkey])
_logger.debug(dkey)
_logger.debug(tcount[dkey])
del tcount[dkey]
_logger.debug(tcount)
_logger.info("Total number of jobs in DB: {}".format(jobsTotalCount))
except:
jobsTotalCount = -1
else:
jobsTotalCount = -1
listPar = []
for key, val in request.session['requestParams'].items():
if key not in ('limit', 'display_limit'):
listPar.append(key + '=' + str(val))
if len(listPar) > 0:
urlParametrs = '&'.join(listPar) + '&'
else:
urlParametrs = None
_logger.info(listPar)
del listPar
if math.fabs(njobs - jobsTotalCount) < 1000 or jobsTotalCount == -1:
jobsTotalCount = None
else:
jobsTotalCount = int(math.ceil((jobsTotalCount + 10000) / 10000) * 10000)
_logger.debug('Total jobs count thread finished: {}'.format(time.time() - request.session['req_init_time']))
# datetime type -> str in order to avoid encoding errors on template
datetime_job_param_names = ['creationtime', 'modificationtime', 'starttime', 'statechangetime', 'endtime']
for job in jobsToShow:
for dtp in datetime_job_param_names:
if job[dtp]:
job[dtp] = job[dtp].strftime(defaultDatetimeFormat)
# comparison of objects
isincomparisonlist = False
clist = []
if request.user.is_authenticated and request.user.is_tester:
cquery = {}
cquery['object'] = 'job'
cquery['userid'] = request.user.id
try:
jobsComparisonList = ObjectsComparison.objects.get(**cquery)
except ObjectsComparison.DoesNotExist:
jobsComparisonList = None
if jobsComparisonList:
try:
clist = json.loads(jobsComparisonList.comparisonlist)
newlist = []
for ce in clist:
try:
ceint = int(ce)
newlist.append(ceint)
except:
pass
clist = newlist
except:
clist = []
_logger.debug('Got comparison job list for user: {}'.format(time.time() - request.session['req_init_time']))
# set up google flow diagram
flowstruct = buildGoogleFlowDiagram(request, jobs=jobs)
_logger.debug('Built google flow diagram: {}'.format(time.time() - request.session['req_init_time']))
xurl = extensibleURL(request)
time_locked_url = removeParam(removeParam(xurl, 'date_from', mode='extensible'), 'date_to', mode='extensible') + \
'date_from=' + request.session['TFIRST'].strftime('%Y-%m-%dT%H:%M') + \
'&date_to=' + request.session['TLAST'].strftime('%Y-%m-%dT%H:%M')
nodurminurl = removeParam(xurl, 'durationmin', mode='extensible')
nosorturl = removeParam(xurl, 'sortby', mode='extensible')
nosorturl = removeParam(nosorturl, 'display_limit', mode='extensible')
#xurl = removeParam(nosorturl, 'mode', mode='extensible')
xurl = nosorturl
        # check if there are jobs exceeding the time window and add a warning message
if math.floor((request.session['TLAST'] - request.session['TFIRST']).total_seconds()) > LAST_N_HOURS_MAX * 3600:
            warning['timelimitexceeding'] = """
                Some of the jobs in this listing are outside the default 'last {} hours' time window,
                because this limit is applied to jobs in a final state only. Please explicitly add &hours=N to the URL
                if you want to force applying the time window limit to active jobs as well.""".format(LAST_N_HOURS_MAX)
        _logger.debug('Extra data preparation done: {}'.format(time.time() - request.session['req_init_time']))
data = {
'prefix': getPrefix(request),
'errsByCount': errsByCount,
'errsByMessage': json.dumps(errsByMessage),
'errdSumd': errdSumd,
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'jobList': jobsToShow,
'jobtype': jobtype,
'njobs': njobs,
'user': user,
'sumd': sumd,
'xurl': xurl,
'xurlnopref': xurl[5:],
'droplist': droplist,
'ndrops': len(droplist) if len(droplist) > 0 else (- len(droppedPmerge)),
'tfirst': request.session['TFIRST'].strftime(defaultDatetimeFormat),
'tlast': request.session['TLAST'].strftime(defaultDatetimeFormat),
'plow': request.session['PLOW'],
'phigh': request.session['PHIGH'],
'showwarn': showwarn,
'joblimit': request.session['JOB_LIMIT'],
'limit': JOB_LIMIT,
'totalJobs': totalJobs,
'showTop': showTop,
'url_nolimit': url_nolimit,
'display_limit': display_limit,
'sortby': sortby,
'nosorturl': nosorturl,
'nodurminurl': nodurminurl,
'time_locked_url': time_locked_url,
'taskname': taskname,
'flowstruct': flowstruct,
'eventservice': eventservice,
'jobsTotalCount': jobsTotalCount,
'requestString': urlParametrs,
'built': datetime.now().strftime("%H:%M:%S"),
'clist': clist,
'warning': warning,
}
data.update(getContextVariables(request))
setCacheEntry(request, "jobList", json.dumps(data, cls=DateEncoder), 60 * 20)
_logger.debug('Cache was set: {}'.format(time.time() - request.session['req_init_time']))
if eventservice:
response = render_to_response('jobListES.html', data, content_type='text/html')
else:
response = render_to_response('jobList.html', data, content_type='text/html')
_logger.info('Rendered template: {}'.format(time.time() - request.session['req_init_time']))
request = complete_request(request)
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
else:
del request.session['TFIRST']
del request.session['TLAST']
if 'datasets' in request.session['requestParams']:
for job in jobs:
files = []
pandaid = job['pandaid']
files.extend(JediDatasetContents.objects.filter(jeditaskid=job['jeditaskid'], pandaid=pandaid).values())
ninput = 0
dsquery = Q()
counter = 0
if len(files) > 0:
for f in files:
if f['type'] == 'input': ninput += 1
f['fsizemb'] = "%0.2f" % (f['fsize'] / 1000000.)
f['DSQuery'] = {'jeditaskid': job['jeditaskid'], 'datasetid': f['datasetid']}
dsquery = dsquery | Q(Q(jeditaskid=job['jeditaskid']) & Q(datasetid=f['datasetid']))
counter += 1
if counter == 30:
break
dsets = JediDatasets.objects.filter(dsquery).extra(
select={"dummy1": '/*+ INDEX_RS_ASC(ds JEDI_DATASETS_PK) */ 1 '}).values()
if len(dsets) > 0:
for ds in dsets:
for file in files:
if 'DSQuery' in file and file['DSQuery']['jeditaskid'] == ds['jeditaskid'] and \
file['DSQuery']['datasetid'] == ds['datasetid']:
file['dataset'] = ds['datasetname']
del file['DSQuery']
files.extend(Filestable4.objects.filter(jeditaskid=job['jeditaskid'], pandaid=pandaid).values())
if len(files) == 0:
files.extend(FilestableArch.objects.filter(jeditaskid=job['jeditaskid'], pandaid=pandaid).values())
if len(files) > 0:
for f in files:
if 'creationdate' not in f: f['creationdate'] = f['modificationtime']
if 'fileid' not in f: f['fileid'] = f['row_id']
if 'datasetname' not in f and 'dataset' in f: f['datasetname'] = f['dataset']
if 'modificationtime' in f: f['oldfiletable'] = 1
if 'destinationdblock' in f and f['destinationdblock'] is not None:
f['destinationdblock_vis'] = f['destinationdblock'].split('_')[-1]
files = sorted(files, key=lambda x: x['type'])
nfiles = len(files)
logfile = {}
for file in files:
if file['type'] == 'log':
logfile['lfn'] = file['lfn']
logfile['guid'] = file['guid']
if 'destinationse' in file:
logfile['site'] = file['destinationse']
else:
logfilerec = Filestable4.objects.filter(pandaid=pandaid, lfn=logfile['lfn']).values()
if len(logfilerec) == 0:
logfilerec = FilestableArch.objects.filter(pandaid=pandaid, lfn=logfile['lfn']).values()
if len(logfilerec) > 0:
logfile['site'] = logfilerec[0]['destinationse']
logfile['guid'] = logfilerec[0]['guid']
logfile['scope'] = file['scope']
file['fsize'] = int(file['fsize'] / 1000000)
job['datasets'] = files
_logger.info('Got dataset and file info if requested: {}'.format(time.time() - request.session['req_init_time']))
if 'fields' in request.session['requestParams'] and len(jobs) > 0:
fields = request.session['requestParams']['fields'].split(',')
fields = (set(fields) & set(jobs[0].keys()))
if 'pandaid' not in fields:
                fields.add('pandaid')
for job in jobs:
for field in list(job.keys()):
if field in fields:
pass
else:
del job[field]
data = {
"selectionsummary": sumd,
"jobs": jobs,
"errsByCount": errsByCount,
}
# cache json response for particular usage (HC test monitor for RU)
if 'istestmonitor' in request.session['requestParams'] and request.session['requestParams']['istestmonitor'] == 'yes':
setCacheEntry(request, "jobList", json.dumps(data, cls=DateEncoder), 60 * 10)
response = HttpResponse(json.dumps(data, cls=DateEncoder), content_type='application/json')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
@never_cache
def descendentjoberrsinfo(request):
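    """
    Collect error info of jobs related to the given pandaid/jeditaskid through the retry history and render jobDescentErrors.html.
    """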
valid, response = initRequest(request)
if not valid: return response
data = {}
job_pandaid = job_jeditaskid = -1
if 'pandaid' in request.session['requestParams']:
job_pandaid = int(request.session['requestParams']['pandaid'])
if 'jeditaskid' in request.session['requestParams']:
job_jeditaskid = int(request.session['requestParams']['jeditaskid'])
if (job_pandaid == -1) or (job_jeditaskid == -1):
data = {"error": "no pandaid or jeditaskid supplied"}
del request.session['TFIRST']
del request.session['TLAST']
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='application/json')
query = setupView(request, hours=365 * 24)
jobs = []
jobs.extend(Jobsdefined4.objects.filter(**query).values())
jobs.extend(Jobsactive4.objects.filter(**query).values())
jobs.extend(Jobswaiting4.objects.filter(**query).values())
jobs.extend(Jobsarchived4.objects.filter(**query).values())
if len(jobs) == 0:
jobs.extend(Jobsarchived.objects.filter(**query).values())
if len(jobs) == 0:
del request.session['TFIRST']
del request.session['TLAST']
data = {"error": "job not found"}
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='application/json')
job = jobs[0]
countOfInvocations = []
if not is_event_service(job):
retryquery = {}
retryquery['jeditaskid'] = job['jeditaskid']
retryquery['oldpandaid'] = job['pandaid']
retries = JediJobRetryHistory.objects.filter(**retryquery).order_by('newpandaid').reverse().values()
pretries = getSequentialRetries(job['pandaid'], job['jeditaskid'], countOfInvocations)
else:
retryquery = {}
retryquery['jeditaskid'] = job['jeditaskid']
retryquery['oldpandaid'] = job['jobsetid']
retryquery['relationtype'] = 'jobset_retry'
retries = JediJobRetryHistory.objects.filter(**retryquery).order_by('newpandaid').reverse().values()
pretries = getSequentialRetries_ES(job['pandaid'], job['jobsetid'], job['jeditaskid'], countOfInvocations)
query = {'jeditaskid': job_jeditaskid}
jobslist = []
for retry in pretries:
jobslist.append(retry['oldpandaid'])
for retry in retries:
jobslist.append(retry['oldpandaid'])
query['pandaid__in'] = jobslist
jobs = []
jobs.extend(Jobsdefined4.objects.filter(**query).values())
jobs.extend(Jobsactive4.objects.filter(**query).values())
jobs.extend(Jobswaiting4.objects.filter(**query).values())
jobs.extend(Jobsarchived4.objects.filter(**query).values())
jobs.extend(Jobsarchived.objects.filter(**query).values())
jobs = clean_job_list(request, jobs, do_add_metadata=False, do_add_errorinfo=True)
errors = {}
for job in jobs:
errors[job['pandaid']] = job['errorinfo']
response = render_to_response('jobDescentErrors.html', {'errors': errors}, content_type='text/html')
request = complete_request(request)
return response
def eventsInfo(request, mode=None, param=None):
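    """
    Return per-status event counts for the JEDI task given in the 'jeditaskid' GET parameter as JSON.
    """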
if not 'jeditaskid' in request.GET:
data = {}
return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='application/json')
jeditaskid = request.GET['jeditaskid']
cur = connection.cursor()
    cur.execute(
        "select sum(decode(c.startevent,NULL,c.nevents,endevent-startevent+1)) nevents, c.status "
        "from atlas_panda.jedi_datasets d, atlas_panda.jedi_dataset_contents c "
        "where d.jeditaskid=c.jeditaskid and d.datasetid=c.datasetid and d.jeditaskid=%s "
        "and d.type in ('input','pseudo_input') and d.masterid is null group by c.status",
        [jeditaskid])
events = cur.fetchall()
cur.close()
data = {}
for ev in events:
data[ev[1]] = ev[0]
data['jeditaskid'] = jeditaskid
return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='application/json')
@login_customrequired
@csrf_exempt
def jobInfo(request, pandaid=None, batchid=None, p2=None, p3=None, p4=None):
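    """
    Show detailed information for a single PanDA job identified by pandaid, batchid or jobname:
    job attributes, associated files, log locations, event ranges for event service jobs, harvester info, etc.
    """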
valid, response = initRequest(request)
if not valid:
return response
# Here we try to get cached data
data = getCacheEntry(request, "jobInfo")
# data = None
if data is not None:
data = json.loads(data)
data['request'] = request
if data['eventservice'] is True:
response = render_to_response('jobInfoES.html', data, content_type='text/html')
else:
response = render_to_response('jobInfo.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
eventservice = False
query = setupView(request, hours=365 * 24)
jobid = ''
if 'creator' in request.session['requestParams']:
## Find the job that created the specified file.
fquery = {}
fquery['lfn'] = request.session['requestParams']['creator']
fquery['type'] = 'output'
fileq = []
fileq.extend(Filestable4.objects.filter(**fquery).values('pandaid', 'type', 'status'))
if len(fileq) > 0:
try:
pandaid = next(filei['pandaid'] for filei in fileq if filei['status'] != 'failed')
except:
pandaid = None
if not pandaid or len(fileq) == 0:
fileq.extend(FilestableArch.objects.filter(**fquery).values('pandaid', 'type', 'status'))
if fileq and len(fileq) > 0:
try:
pandaid = next(filei['pandaid'] for filei in fileq if filei['status'] != 'failed')
except:
pandaid = None
if pandaid:
jobid = pandaid
try:
query['pandaid'] = int(pandaid)
except:
query['jobname'] = pandaid
if batchid:
jobid = batchid
query['batchid'] = batchid
if 'pandaid' in request.session['requestParams']:
try:
pandaid = int(request.session['requestParams']['pandaid'])
except ValueError:
pandaid = 0
jobid = pandaid
query['pandaid'] = pandaid
elif 'batchid' in request.session['requestParams']:
batchid = request.session['requestParams']['batchid']
jobid = "'" + batchid + "'"
query['batchid'] = batchid
elif 'jobname' in request.session['requestParams']:
jobid = request.session['requestParams']['jobname']
query['jobname'] = jobid
jobs = []
if pandaid or batchid:
startdate = timezone.now() - timedelta(hours=LAST_N_HOURS_MAX)
jobs.extend(Jobsdefined4.objects.filter(**query).values())
jobs.extend(Jobsactive4.objects.filter(**query).values())
jobs.extend(Jobswaiting4.objects.filter(**query).values())
jobs.extend(Jobsarchived4.objects.filter(**query).values())
if len(jobs) == 0:
try:
del query['modificationtime__castdate__range']
except:
pass
jobs.extend(Jobsarchived.objects.filter(**query).values())
jobs = clean_job_list(request, jobs, do_add_metadata=True, do_add_errorinfo=True)
if len(jobs) == 0:
data = {
'prefix': getPrefix(request),
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'pandaid': pandaid,
'job': None,
'jobid': jobid,
}
response = render_to_response('jobInfo.html', data, content_type='text/html')
request = complete_request(request)
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
job = {}
colnames = []
columns = []
harvesterInfo = {}
rucioUserName = []
if 'produserid' in jobs[0]:
if 'prodsourcelabel' in jobs[0] and jobs[0]['prodsourcelabel'] == 'user':
dn = jobs[0]['produserid']
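            # strip a trailing numeric '/CN=<n>' component from the DN before looking up the Rucio account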
try:
CNs = dn.split("/CN=")
if len(CNs) > 1:
int(CNs[-1])
dn = dn[:-(len(CNs[-1])+4)]
except ValueError:
pass
rw = ruciowrapper()
rucioUserName = rw.getRucioAccountByDN(dn)
if len(rucioUserName) > 1:
rucio_username_unique = {}
for un in rucioUserName:
if isinstance(un, dict):
if 'rucio_account' in un and un['rucio_account']:
rucio_username_unique[un['rucio_account']] = 1
elif isinstance(un, str):
rucio_username_unique[un] = 1
rucioUserName = list(rucio_username_unique.keys())
else:
rucioUserName = [jobs[0]['produserid']]
job = {}
try:
job = jobs[0]
except IndexError:
_logger.info('No job found for: {}'.format(jobid))
tquery = {}
tquery['jeditaskid'] = job['jeditaskid']
tquery['storagetoken__isnull'] = False
storagetoken = JediDatasets.objects.filter(**tquery).values('storagetoken')
if storagetoken:
job['destinationse'] = storagetoken[0]['storagetoken']
pandaid = job['pandaid'] if 'pandaid' in job else -1
colnames = job.keys()
colnames = sorted(colnames)
produsername = ''
for k in colnames:
if is_timestamp(k):
try:
val = job[k].strftime(defaultDatetimeFormat)
except:
val = job[k]
else:
val = job[k]
        if job[k] is None:
val = ''
continue
pair = {'name': k, 'value': val}
columns.append(pair)
if k == 'produsername':
produsername = job[k]
# get Harvester info
if 'core.harvester' in djangosettings.INSTALLED_APPS:
from core.harvester.utils import isHarvesterJob
job['harvesterInfo'] = isHarvesterJob(job['pandaid'])
if 'harvesterInfo' in job and job['harvesterInfo'] and len(job['harvesterInfo']) > 0:
job['harvesterInfo'] = job['harvesterInfo'][0]
else:
job['harvesterInfo'] = {}
try:
# Check for logfile extracts
logs = Logstable.objects.filter(pandaid=pandaid)
if logs:
logextract = logs[0].log1
else:
logextract = None
except:
traceback.print_exc(file=sys.stderr)
logextract = None
files = []
fileids = []
typeFiles = {}
fileSummary = ''
inputFilesSize = 0
panda_queues = get_panda_queues()
computeSvsAtlasS = get_pq_atlas_sites()
if 'nofiles' not in request.session['requestParams']:
        # Get job files: look in Filestable4 first, then fall back to the archive table
_logger.info("Pulling file info")
files.extend(Filestable4.objects.filter(pandaid=pandaid).order_by('type').values())
if len(files) == 0:
files.extend(FilestableArch.objects.filter(pandaid=pandaid).order_by('type').values())
ninput = 0
noutput = 0
npseudo_input = 0
if len(files) > 0:
dquery = {}
dquery['datasetid__in'] = [f['datasetid'] for f in files]
dsets = JediDatasets.objects.filter(**dquery).values('datasetid', 'datasetname')
datasets_dict = {ds['datasetid']: ds['datasetname'] for ds in dsets}
for f in files:
f['destination'] = ' '
if f['type'] == 'input':
ninput += 1
inputFilesSize += f['fsize'] / 1048576.
if f['type'] in typeFiles:
typeFiles[f['type']] += 1
else:
typeFiles[f['type']] = 1
if f['type'] == 'output':
noutput += 1
if len(jobs[0]['jobmetrics']) > 0:
for s in jobs[0]['jobmetrics'].split(' '):
if 'logBucketID' in s:
logBucketID = int(s.split('=')[1])
if logBucketID in [45, 41, 105, 106, 42, 61, 103, 2, 82, 101, 117, 115]: # Bucket Codes for S3 destination
f['destination'] = 'S3'
if f['type'] == 'pseudo_input': npseudo_input += 1
f['fsizemb'] = round(convert_bytes(f['fsize'], output_unit='MB'), 2)
if f['datasetid'] in datasets_dict:
f['datasetname'] = datasets_dict[f['datasetid']]
if f['scope'] + ":" in f['datasetname']:
f['ruciodatasetname'] = f['datasetname'].split(":")[1]
else:
f['ruciodatasetname'] = f['datasetname']
if job['computingsite'] in panda_queues:
                    if job['computingsite'] in ('CERN-P1',):
f['ddmsite'] = panda_queues[job['computingsite']]['gocname']
else:
f['ddmsite'] = computeSvsAtlasS.get(job['computingsite'], "")
if 'dst' in f['destinationdblocktoken']:
parced = f['destinationdblocktoken'].split("_")
f['ddmsite'] = parced[0][4:]
f['dsttoken'] = parced[1]
files = [x for x in files if x['destination'] != 'S3']
if len(typeFiles) > 0:
inputFilesSize = "%0.2f" % inputFilesSize
for i in typeFiles:
fileSummary += str(i) + ': ' + str(typeFiles[i])
if i == 'input':
fileSummary += ', size: ' + inputFilesSize + ' (MB)'
fileSummary += '; '
fileSummary = fileSummary[:-2]
if len(files) > 0:
for f in files:
if 'creationdate' not in f: f['creationdate'] = f['modificationtime']
if 'fileid' not in f: f['fileid'] = f['row_id']
if 'datasetname' not in f:
if f['scope']+":" in f['dataset']:
f['datasetname'] = f['dataset']
f['ruciodatasetname'] = f['dataset'].split(":")[1]
else:
f['datasetname'] = f['dataset']
f['ruciodatasetname'] = f['dataset']
if 'modificationtime' in f: f['oldfiletable'] = 1
if 'destinationdblock' in f and f['destinationdblock'] is not None:
f['destinationdblock_vis'] = f['destinationdblock'].split('_')[-1]
fileids.append(f['fileid'])
dcquery = {}
dcquery['pandaid'] = pandaid
dcquery['fileid__in'] = fileids
dcfiles = JediDatasetContents.objects.filter(**dcquery).values()
dcfilesDict = {}
if len(dcfiles) > 0:
for dcf in dcfiles:
dcfilesDict[dcf['fileid']] = dcf
files = sorted(files, key=lambda x: x['type'])
nfiles = len(files)
inputfiles = []
logfile = {}
for file in files:
if file['type'] == 'log':
logfile['lfn'] = file['lfn']
logfile['guid'] = file['guid']
if 'destinationse' in file:
logfile['site'] = file['destinationse']
else:
logfilerec = Filestable4.objects.filter(pandaid=pandaid, lfn=logfile['lfn']).values()
if len(logfilerec) == 0:
logfilerec = FilestableArch.objects.filter(pandaid=pandaid, lfn=logfile['lfn']).values()
if len(logfilerec) > 0:
logfile['site'] = logfilerec[0]['destinationse']
logfile['guid'] = logfilerec[0]['guid']
logfile['scope'] = file['scope']
logfile['fileid'] = file['fileid']
file['fsize'] = int(file['fsize'])
if file['type'] == 'input':
file['attemptnr'] = dcfilesDict[file['fileid']]['attemptnr'] if file['fileid'] in dcfilesDict else file['attemptnr']
file['maxattempt'] = dcfilesDict[file['fileid']]['maxattempt'] if file['fileid'] in dcfilesDict else None
inputfiles.append({'jeditaskid': file['jeditaskid'], 'datasetid': file['datasetid'], 'fileid': file['fileid']})
if 'pilotid' in job and job['pilotid'] and job['pilotid'].startswith('http') and '{' not in job['pilotid']:
stdout = job['pilotid'].split('|')[0]
if stdout.endswith('pilotlog.txt'):
stdlog = stdout.replace('pilotlog.txt', 'payload.stdout')
stderr = stdout.replace('pilotlog.txt', 'payload.stderr')
stdjdl = None
else:
stderr = stdout.replace('.out', '.err')
stdlog = stdout.replace('.out', '.log')
stdjdl = stdout.replace('.out', '.jdl')
elif len(job['harvesterInfo']) > 0 and 'batchlog' in job['harvesterInfo'] and job['harvesterInfo']['batchlog']:
stdlog = job['harvesterInfo']['batchlog']
stderr = stdlog.replace('.log', '.err')
stdout = stdlog.replace('.log', '.out')
stdjdl = stdlog.replace('.log', '.jdl')
else:
stdout = stderr = stdlog = stdjdl = None
# Check for object store based log
oslogpath = None
pq_object_store_paths = get_pq_object_store_path()
if 'computingsite' in job and job['computingsite'] in pq_object_store_paths:
ospath = pq_object_store_paths[job['computingsite']]
if 'lfn' in logfile:
if ospath.endswith('/'):
oslogpath = ospath + logfile['lfn']
else:
oslogpath = ospath + '/' + logfile['lfn']
# Check for debug info
debugmode = is_debug_mode(job)
debugstdout = None
if debugmode:
if 'showdebug' in request.session['requestParams']:
debugstdoutrec = Jobsdebug.objects.filter(pandaid=pandaid).values()
if len(debugstdoutrec) > 0:
if 'stdout' in debugstdoutrec[0]:
debugstdout = debugstdoutrec[0]['stdout']
# Get job parameters
_logger.info("getting job parameters")
jobparamrec = Jobparamstable.objects.filter(pandaid=pandaid)
jobparams = None
if len(jobparamrec) > 0:
jobparams = jobparamrec[0].jobparameters
# else:
# jobparamrec = JobparamstableArch.objects.filter(pandaid=pandaid)
# if len(jobparamrec) > 0:
# jobparams = jobparamrec[0].jobparameters
esjobstr = ''
evtable = []
if is_event_service(job):
# for ES jobs, prepare the event table
esjobdict = {}
for s in eventservicestatelist:
esjobdict[s] = 0
evalues = 'fileid', 'datasetid', 'def_min_eventid', 'def_max_eventid', 'processed_upto_eventid', 'status', 'job_processid', 'attemptnr', 'eventoffset'
evtable.extend(JediEvents.objects.filter(pandaid=job['pandaid']).order_by('-def_min_eventid').values(*evalues))
for evrange in evtable:
evrange['status'] = eventservicestatelist[evrange['status']]
esjobdict[evrange['status']] += evrange['def_max_eventid'] - evrange['def_min_eventid'] + 1
evrange['attemptnr'] = 10 - evrange['attemptnr']
esjobstr = ''
for s in esjobdict:
if esjobdict[s] > 0:
esjobstr += " {} ({}) ".format(s, esjobdict[s])
else:
evtable = []
# jobset info
jobsetinfo = {}
if ('jobset' in request.session['requestParams'] or is_event_service(job)) and 'jobsetid' in job and job['jobsetid'] > 0:
jobs = []
jsquery = {
'jobsetid': job['jobsetid'],
'produsername': job['produsername'],
}
jvalues = ['pandaid', 'prodsourcelabel', 'processingtype', 'transformation', 'eventservice', 'jobstatus']
jobs.extend(Jobsdefined4.objects.filter(**jsquery).values(*jvalues))
jobs.extend(Jobsactive4.objects.filter(**jsquery).values(*jvalues))
jobs.extend(Jobswaiting4.objects.filter(**jsquery).values(*jvalues))
jobs.extend(Jobsarchived4.objects.filter(**jsquery).values(*jvalues))
jobs.extend(Jobsarchived.objects.filter(**jsquery).values(*jvalues))
jobs = add_job_category(jobs)
job_summary_list = job_states_count_by_param(jobs, param='category')
for row in job_summary_list:
jobsetinfo[row['value']] = sum([jss['count'] for jss in row['job_state_counts']])
# For CORE, pick up parameters from jobparams
if VOMODE == 'core' or ('vo' in job and job['vo'] == 'core'):
coreData = {}
if jobparams:
coreParams = re.match(
'.*PIPELINE_TASK\=([a-zA-Z0-9]+).*PIPELINE_PROCESSINSTANCE\=([0-9]+).*PIPELINE_STREAM\=([0-9\.]+)',
jobparams)
if coreParams:
coreData['pipelinetask'] = coreParams.group(1)
coreData['processinstance'] = coreParams.group(2)
coreData['pipelinestream'] = coreParams.group(3)
else:
coreData = None
if 'jobstatus' in job and (job['jobstatus'] == 'failed' or job['jobstatus'] == 'holding'):
errorinfo = getErrorDescription(job)
if len(errorinfo) > 0:
job['errorinfo'] = errorinfo
if 'transformation' in job and job['transformation'] is not None and job['transformation'].startswith('http'):
job['transformation'] = "<a href='%s'>%s</a>" % (job['transformation'], job['transformation'].split('/')[-1])
if 'metastruct' in job:
job['metadata'] = json.dumps(job['metastruct'], sort_keys=True, indent=4, separators=(',', ': '))
if job['creationtime']:
creationtime = job['creationtime']
now = datetime.now()
tdelta = now - creationtime
job['days_since_creation'] = int(tdelta.days) + 1
isincomparisonlist = False
clist = []
if request.user.is_authenticated and request.user.is_tester:
cquery = {}
cquery['object'] = 'job'
cquery['userid'] = request.user.id
try:
jobsComparisonList = ObjectsComparison.objects.get(**cquery)
except ObjectsComparison.DoesNotExist:
jobsComparisonList = None
if jobsComparisonList:
try:
clist = json.loads(jobsComparisonList.comparisonlist)
newlist = []
for ce in clist:
try:
ceint = int(ce)
newlist.append(ceint)
except:
pass
clist = newlist
except:
clist = []
if job['pandaid'] in clist:
isincomparisonlist = True
    # if it is an ART test, get the test name
art_test = []
if 'core.art' in djangosettings.INSTALLED_APPS and DEPLOYMENT == 'ORACLE_ATLAS':
try:
from core.art.modelsART import ARTTests
except ImportError:
_logger.exception('Failed to import ARTTests model')
artqueue = {'pandaid': pandaid}
art_test.extend(ARTTests.objects.filter(**artqueue).values())
# datetime type -> str in order to avoid encoding errors on template
datetime_job_param_names = ['creationtime', 'modificationtime', 'starttime', 'statechangetime', 'endtime']
datetime_file_param_names = ['creationdate', 'modificationtime']
if job:
for dtp in datetime_job_param_names:
if job[dtp]:
job[dtp] = job[dtp].strftime(defaultDatetimeFormat)
for f in files:
for fp, fpv in f.items():
if fp in datetime_file_param_names and fpv is not None:
f[fp] = f[fp].strftime(defaultDatetimeFormat)
if fpv is None:
f[fp] = ''
prmon_logs = {}
    if PRMON_LOGS_DIRECTIO_LOCATION and job.get('jobstatus') in ('finished', 'failed'):
        prmon_base_url = PRMON_LOGS_DIRECTIO_LOCATION.format(queue_name=job.get('computingsite'), panda_id=pandaid)
        prmon_logs['prmon_summary'] = prmon_base_url + '/memory_monitor_summary.json'
        prmon_logs['prmon_details'] = prmon_base_url + '/memory_monitor_output.txt'
if not is_json_request(request):
del request.session['TFIRST']
del request.session['TLAST']
data = {
'prefix': getPrefix(request),
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'pandaid': pandaid,
'job': job,
'columns': columns,
'arttest': art_test,
'files': files,
'nfiles': nfiles,
'logfile': logfile,
'oslogpath': oslogpath,
'stdout': stdout,
'stderr': stderr,
'stdlog': stdlog,
'stdjdl': stdjdl,
'jobparams': jobparams,
'jobid': jobid,
'coreData': coreData,
'logextract': logextract,
'eventservice': is_event_service(job),
'evtable': evtable[:1000],
'debugmode': debugmode,
'debugstdout': debugstdout,
'jobsetinfo': jobsetinfo,
'esjobstr': esjobstr,
'fileSummary': fileSummary,
'built': datetime.now().strftime("%H:%M:%S"),
'produsername': produsername,
'isincomparisonlist': isincomparisonlist,
'clist': clist,
'inputfiles': inputfiles,
'rucioUserName': rucioUserName,
'prmon_logs': prmon_logs
}
data.update(getContextVariables(request))
setCacheEntry(request, "jobInfo", json.dumps(data, cls=DateEncoder), 60 * 20)
if is_event_service(job):
response = render_to_response('jobInfoES.html', data, content_type='text/html')
else:
response = render_to_response('jobInfo.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
elif is_json_request(request):
del request.session['TFIRST']
del request.session['TLAST']
dsfiles = []
if len(evtable) > 0:
fileids = {}
for evrange in evtable:
fileids[int(evrange['fileid'])] = {}
flist = []
for f in fileids:
flist.append(f)
dsfiles.extend(JediDatasetContents.objects.filter(fileid__in=flist).values())
data = {
'files': files,
'job': job,
'dsfiles': dsfiles,
}
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='application/json')
else:
del request.session['TFIRST']
del request.session['TLAST']
return HttpResponse('not understood', content_type='text/html')
@never_cache
def get_job_relationships(request, pandaid=-1):
"""
Getting job relationships in both directions: downstream (further retries); upstream (past retries).
"""
valid, response = initRequest(request)
if not valid:
return response
direction = ''
if 'direction' in request.session['requestParams'] and request.session['requestParams']['direction']:
direction = request.session['requestParams']['direction']
job = {}
jobs = []
jquery = {
'pandaid': pandaid,
}
jvalues = ['pandaid', 'jeditaskid', 'jobsetid', 'specialhandling', 'eventservice']
jobs.extend(Jobsdefined4.objects.filter(**jquery).values(*jvalues))
jobs.extend(Jobsactive4.objects.filter(**jquery).values(*jvalues))
jobs.extend(Jobswaiting4.objects.filter(**jquery).values(*jvalues))
jobs.extend(Jobsarchived4.objects.filter(**jquery).values(*jvalues))
if len(jobs) == 0:
jobs.extend(Jobsarchived.objects.filter(**jquery).values(*jvalues))
try:
job = jobs[0]
except IndexError:
_logger.exception('No job found with pandaid: {}'.format(pandaid))
message = ''
job_relationships = []
countOfInvocations = []
# look for job retries
if 'jeditaskid' in job and job['jeditaskid'] and job['jeditaskid'] > 0:
if direction == 'downstream':
retries = []
if not is_event_service(job):
retryquery = {
'jeditaskid': job['jeditaskid'],
'oldpandaid': job['pandaid'],
}
job_relationships.extend(JediJobRetryHistory.objects.filter(**retryquery).order_by('newpandaid').reverse().values())
else:
job_relationships = getSequentialRetries_ESupstream(job['pandaid'], job['jobsetid'], job['jeditaskid'], countOfInvocations)
elif direction == 'upstream':
if not is_event_service(job):
job_relationships = getSequentialRetries(job['pandaid'], job['jeditaskid'], countOfInvocations)
else:
job_relationships = getSequentialRetries_ES(job['pandaid'], job['jobsetid'], job['jeditaskid'], countOfInvocations)
else:
            message = "Wrong direction provided, it should be 'downstream' or 'upstream'."
else:
job_relationships = None
countOfInvocations = len(countOfInvocations)
data = {
'retries': job_relationships,
'direction': direction,
'message': message,
'countOfInvocations': countOfInvocations,
}
response = render_to_response('jobRelationships.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=-1)
return response
@login_customrequired
def userList(request):
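    """
    List users with recent activity, either from the Users table ('database' view) or built dynamically from recent analysis jobs.
    """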
valid, response = initRequest(request)
if not valid:
return response
# Here we try to get cached data
data = getCacheEntry(request, "userList")
# data = None
if data is not None:
data = json.loads(data)
data['request'] = request
response = render_to_response('userList.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
nhours = 90 * 24
setupView(request, hours=nhours, limit=-99)
if VOMODE == 'atlas':
view = 'database'
else:
view = 'dynamic'
if 'view' in request.session['requestParams']:
view = request.session['requestParams']['view']
sumd = []
jobsumd = []
userdb = []
userdbl = []
userstats = {}
if view == 'database':
startdate = timezone.now() - timedelta(hours=nhours)
startdate = startdate.strftime(defaultDatetimeFormat)
query = {'lastmod__gte': startdate}
userdb.extend(Users.objects.filter(**query).values())
anajobs = 0
n1000 = 0
n10k = 0
nrecent3 = 0
nrecent7 = 0
nrecent30 = 0
nrecent90 = 0
## Move to a list of dicts and adjust CPU unit
for u in userdb:
u['latestjob'] = u['lastmod']
udict = {}
udict['name'] = u['name']
udict['njobsa'] = u['njobsa'] if u['njobsa'] is not None else 0
udict['cpua1'] = round(u['cpua1'] / 3600.) if u['cpua1'] is not None else 0
udict['cpua7'] = round(u['cpua7'] / 3600.) if u['cpua7'] is not None else 0
udict['cpup1'] = round(u['cpup1'] / 3600.) if u['cpup1'] is not None else 0
udict['cpup7'] = round(u['cpup7'] / 3600.) if u['cpup7'] is not None else 0
if u['latestjob']:
udict['latestjob'] = u['latestjob'].strftime(defaultDatetimeFormat)
udict['lastmod'] = u['lastmod'].strftime(defaultDatetimeFormat)
userdbl.append(udict)
if u['njobsa'] is not None:
if u['njobsa'] > 0:
anajobs += u['njobsa']
if u['njobsa'] >= 1000:
n1000 += 1
if u['njobsa'] >= 10000:
n10k += 1
if u['latestjob'] is not None:
latest = timezone.now() - u['latestjob']
if latest.days < 4:
nrecent3 += 1
if latest.days < 8:
nrecent7 += 1
if latest.days < 31:
nrecent30 += 1
if latest.days < 91:
nrecent90 += 1
userstats['anajobs'] = anajobs
userstats['n1000'] = n1000
userstats['n10k'] = n10k
userstats['nrecent3'] = nrecent3
userstats['nrecent7'] = nrecent7
userstats['nrecent30'] = nrecent30
userstats['nrecent90'] = nrecent90
else:
if VOMODE == 'atlas':
nhours = 12
else:
nhours = 7 * 24
query = setupView(request, hours=nhours, limit=999999)
# looking into user analysis jobs only
query['prodsourcelabel'] = 'user'
# dynamically assemble user summary info
values = ('eventservice', 'produsername', 'cloud', 'computingsite', 'cpuconsumptiontime', 'jobstatus',
'transformation', 'prodsourcelabel', 'specialhandling', 'vo', 'pandaid',
'starttime', 'endtime', 'modificationtime',
'atlasrelease', 'processingtype', 'workinggroup', 'currentpriority', 'container_name', 'cmtconfig')
jobs = []
jobs.extend(Jobsdefined4.objects.filter(**query).values(*values))
jobs.extend(Jobsactive4.objects.filter(**query).values(*values))
jobs.extend(Jobswaiting4.objects.filter(**query).values(*values))
jobs.extend(Jobsarchived4.objects.filter(**query).values(*values))
jobs = clean_job_list(request, jobs, do_add_metadata=False, do_add_errorinfo=False)
sumd = user_summary_dict(jobs)
for user in sumd:
if user['dict']['latest']:
user['dict']['latest'] = user['dict']['latest'].strftime(defaultDatetimeFormat)
sumparams = ['jobstatus', 'prodsourcelabel', 'specialhandling', 'transformation', 'processingtype',
'workinggroup', 'priorityrange']
if VOMODE == 'atlas':
sumparams.append('atlasrelease')
else:
sumparams.append('vo')
jobsumd = job_summary_dict(request, jobs, sumparams)[0]
if not is_json_request(request):
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'xurl': extensibleURL(request),
'url': request.path,
'sumd': sumd,
'jobsumd': jobsumd,
'userdb': userdbl,
'userstats': userstats,
'tfirst': request.session['TFIRST'].strftime(defaultDatetimeFormat),
'tlast': request.session['TLAST'].strftime(defaultDatetimeFormat),
'plow': request.session['PLOW'],
'phigh': request.session['PHIGH'],
'built': datetime.now().strftime("%H:%M:%S"),
}
data.update(getContextVariables(request))
setCacheEntry(request, "userList", json.dumps(data, cls=DateEncoder), 60 * 20)
response = render_to_response('userList.html', data, content_type='text/html')
request = complete_request(request)
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
else:
request = complete_request(request)
return HttpResponse(json.dumps(sumd), content_type='application/json')
@login_customrequired
def userInfo(request, user=''):
valid, response = initRequest(request)
if not valid:
return response
fullname = ''
login = ''
userQueryTask = None
userQueryJobs = None
if user == '':
if 'user' in request.session['requestParams']: user = request.session['requestParams']['user']
if 'produsername' in request.session['requestParams']: user = request.session['requestParams']['produsername']
# No user parameter specified: serve the personal page of the authenticated user
if user == '':
if request.user.is_authenticated:
login = user = request.user.username
fullname = request.user.first_name.replace('\'', '') + ' ' + request.user.last_name
userQueryTask = Q(username=login) | Q(username__startswith=fullname)
userQueryJobs = Q(produsername=login) | Q(produsername__startswith=fullname)
if 'days' in request.session['requestParams']:
days = int(request.session['requestParams']['days'])
else:
days = 7
if 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby']:
sortby = request.session['requestParams']['sortby']
else:
sortby = None
requestParams = {}
for param in request.session['requestParams']:
requestParams[escape_input(param.strip())] = escape_input(request.session['requestParams'][param.strip()].strip())
request.session['requestParams'] = requestParams
# Tasks owned by the user
query = setupView(request, hours=days*24, limit=999999, querytype='task')
if userQueryTask is None:
query['username__icontains'] = user.strip()
tasks = JediTasks.objects.filter(**query).values()
else:
tasks = JediTasks.objects.filter(**query).filter(userQueryTask).values()
_logger.info('Got {} tasks: {}'.format(len(tasks), time.time() - request.session['req_init_time']))
tasks = cleanTaskList(tasks, sortby=sortby, add_datasets_info=True)
_logger.info('Cleaned tasks and loading datasets info: {}'.format(time.time() - request.session['req_init_time']))
# consumed cpu hours stats for a user
if len(tasks) > 0:
panda_user_name = list(set([t['username'] for t in tasks]))[0]
else:
panda_user_name = fullname if fullname != '' else user.strip()
userstats = get_panda_user_stats(panda_user_name)
_logger.info('Got user statistics: {}'.format(time.time() - request.session['req_init_time']))
# getting most relevant links based on visit statistics
links = {}
if request.user.is_authenticated and (
'user' not in request.session['requestParams'] and 'produsername' not in request.session['requestParams']):
userids = BPUser.objects.filter(email=request.user.email).values('id')
userid = userids[0]['id']
fields = {
'job': copy.deepcopy(standard_fields),
'task': copy.deepcopy(standard_taskfields),
'site': copy.deepcopy(standard_sitefields),
}
links = get_relevant_links(userid, fields)
# new user dashboard
if 'view' in request.session['requestParams'] and request.session['requestParams']['view'] == 'dash':
if query and 'modificationtime__castdate__range' in query:
request.session['timerange'] = query['modificationtime__castdate__range']
plots = prepare_user_dash_plots(tasks)
# put list of tasks to cache for further usage
tk_taskids = random.randrange(100000000)
setCacheEntry(request, tk_taskids, json.dumps(tasks, cls=DateTimeEncoder), 60 * 30, isData=True)
metrics_total = {}
if userstats:
metrics_total['cpua7'] = userstats['cpua7'] if 'cpua7' in userstats else 0
metrics_total['cpup7'] = userstats['cpup7'] if 'cpup7' in userstats else 0
metrics_total = humanize_metrics(metrics_total)
if is_json_request(request):
pass
else:
timestamp_vars = ['modificationtime', 'statechangetime', 'starttime', 'creationdate', 'resquetime',
'endtime', 'lockedtime', 'frozentime', 'ttcpredictiondate', 'ttcrequested']
for task in tasks:
for tp in task:
if tp in timestamp_vars and task[tp] is not None:
task[tp] = task[tp].strftime(defaultDatetimeFormat)
if task[tp] is None:
task[tp] = ''
if task[tp] is True:
task[tp] = 'true'
if task[tp] is False:
task[tp] = 'false'
xurl = extensibleURL(request)
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'timerange': request.session['timerange'],
'built': datetime.now().strftime("%H:%M:%S"),
'tk': tk_taskids,
'xurl': xurl,
'user': user,
'links': links,
'ntasks': len(tasks),
'plots': plots,
'metrics': metrics_total,
'userstats': userstats,
}
response = render_to_response('userDash.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
else:
if 'display_limit_tasks' not in request.session['requestParams']:
display_limit_tasks = 100
else:
display_limit_tasks = int(request.session['requestParams']['display_limit_tasks'])
ntasksmax = display_limit_tasks
url_nolimit_tasks = removeParam(extensibleURL(request), 'display_limit_tasks', mode='extensible') + "display_limit_tasks=" + str(len(tasks))
tasks = getTaskScoutingInfo(tasks, ntasksmax)
_logger.info('Tasks scouting info loaded: {}'.format(time.time() - request.session['req_init_time']))
ntasks = len(tasks)
tasksumd = task_summary_dict(request, tasks)
_logger.info('Tasks summary generated: {}'.format(time.time() - request.session['req_init_time']))
# Jobs
limit = 5000
query, extra_query_str, LAST_N_HOURS_MAX = setupView(request, hours=72, limit=limit, querytype='job', wildCardExt=True)
jobs = []
values = 'eventservice', 'produsername', 'cloud', 'computingsite', 'cpuconsumptiontime', 'jobstatus', 'transformation', 'prodsourcelabel', 'specialhandling', 'vo', 'modificationtime', 'pandaid', 'atlasrelease', 'jobsetid', 'processingtype', 'workinggroup', 'jeditaskid', 'taskid', 'currentpriority', 'creationtime', 'starttime', 'endtime', 'brokerageerrorcode', 'brokerageerrordiag', 'ddmerrorcode', 'ddmerrordiag', 'exeerrorcode', 'exeerrordiag', 'jobdispatchererrorcode', 'jobdispatchererrordiag', 'piloterrorcode', 'piloterrordiag', 'superrorcode', 'superrordiag', 'taskbuffererrorcode', 'taskbuffererrordiag', 'transexitcode', 'homepackage', 'inputfileproject', 'inputfiletype', 'attemptnr', 'jobname', 'proddblock', 'destinationdblock', 'container_name', 'cmtconfig'
if userQueryJobs is None:
query['produsername__icontains'] = user.strip()
jobs.extend(Jobsdefined4.objects.filter(**query).extra(where=[extra_query_str])[:request.session['JOB_LIMIT']].values(*values))
jobs.extend(Jobsactive4.objects.filter(**query).extra(where=[extra_query_str])[:request.session['JOB_LIMIT']].values(*values))
jobs.extend(Jobswaiting4.objects.filter(**query).extra(where=[extra_query_str])[:request.session['JOB_LIMIT']].values(*values))
jobs.extend(Jobsarchived4.objects.filter(**query).extra(where=[extra_query_str])[:request.session['JOB_LIMIT']].values(*values))
if len(jobs) == 0 or (len(jobs) < limit and LAST_N_HOURS_MAX > 72):
jobs.extend(Jobsarchived.objects.filter(**query).extra(where=[extra_query_str])[:request.session['JOB_LIMIT']].values(*values))
else:
jobs.extend(Jobsdefined4.objects.filter(**query).filter(userQueryJobs).extra(where=[extra_query_str])[:request.session['JOB_LIMIT']].values(*values))
jobs.extend(Jobsactive4.objects.filter(**query).filter(userQueryJobs).extra(where=[extra_query_str])[:request.session['JOB_LIMIT']].values(*values))
jobs.extend(Jobswaiting4.objects.filter(**query).filter(userQueryJobs).extra(where=[extra_query_str])[:request.session['JOB_LIMIT']].values(*values))
jobs.extend(Jobsarchived4.objects.filter(**query).filter(userQueryJobs).extra(where=[extra_query_str])[:request.session['JOB_LIMIT']].values(*values))
# Fall back to the long-term archive. The OR condition is split into separate filters to force Oracle to use an indexed search.
if len(jobs) == 0 or (len(jobs) < limit and LAST_N_HOURS_MAX > 72):
query['produsername__startswith'] = user.strip() #.filter(userQueryJobs)
archjobs = []
# Two separate filter() calls, again to force Oracle to use an indexed search
archjobs.extend(Jobsarchived.objects.filter(**query).filter(Q(produsername=user.strip())).extra(where=[extra_query_str])[:request.session['JOB_LIMIT']].values(*values))
if len(archjobs) > 0:
jobs = jobs+archjobs
elif len(fullname) > 0:
#del query['produsername']
query['produsername__startswith'] = fullname
jobs.extend(Jobsarchived.objects.filter(**query).extra(where=[extra_query_str])[:request.session['JOB_LIMIT']].values(*values))
jobs = clean_job_list(request, jobs, do_add_metadata=False, do_add_errorinfo=True)
# Divide up jobs by jobset and summarize
jobsets = {}
for job in jobs:
if 'jobsetid' not in job or job['jobsetid'] is None: continue
if job['jobsetid'] not in jobsets:
jobsets[job['jobsetid']] = {}
jobsets[job['jobsetid']]['jobsetid'] = job['jobsetid']
jobsets[job['jobsetid']]['jobs'] = []
jobsets[job['jobsetid']]['jobs'].append(job)
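# Second pass over the jobsets: per-jobset state summary, job count,
# modification-time window and priority range for display.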
for jobset in jobsets:
jobsets[jobset]['sum'] = jobStateSummary(jobsets[jobset]['jobs'])
jobsets[jobset]['njobs'] = len(jobsets[jobset]['jobs'])
tfirst = timezone.now()
tlast = timezone.now() - timedelta(hours=2400)
plow = 1000000
phigh = -1000000
for job in jobsets[jobset]['jobs']:
if job['modificationtime'] > tlast: tlast = job['modificationtime']
if job['modificationtime'] < tfirst: tfirst = job['modificationtime']
if job['currentpriority'] > phigh: phigh = job['currentpriority']
if job['currentpriority'] < plow: plow = job['currentpriority']
jobsets[jobset]['tfirst'] = tfirst.strftime(defaultDatetimeFormat)
jobsets[jobset]['tlast'] = tlast.strftime(defaultDatetimeFormat)
jobsets[jobset]['plow'] = plow
jobsets[jobset]['phigh'] = phigh
jobsetl = []
jsk = jobsets.keys()
jsk = sorted(jsk, reverse=True)
for jobset in jsk:
jobsetl.append(jobsets[jobset])
njobsmax = len(jobs)
if 'display_limit_jobs' in request.session['requestParams'] and int(
request.session['requestParams']['display_limit_jobs']) < len(jobs):
display_limit_jobs = int(request.session['requestParams']['display_limit_jobs'])
else:
display_limit_jobs = 100
njobsmax = display_limit_jobs
url_nolimit_jobs = removeParam(extensibleURL(request), 'display_limit_jobs', mode='extensible') + 'display_limit_jobs=' + str(len(jobs))
sumd = user_summary_dict(jobs)
if not is_json_request(request):
flist = ['jobstatus', 'prodsourcelabel', 'processingtype', 'specialhandling', 'transformation', 'jobsetid',
'jeditaskid', 'computingsite', 'cloud', 'workinggroup', 'homepackage', 'inputfileproject',
'inputfiletype', 'attemptnr', 'priorityrange', 'jobsetrange']
if VOMODE != 'atlas':
flist.append('vo')
else:
flist.append('atlasrelease')
jobsumd, esjobssumd = job_summary_dict(request, jobs, flist)
njobsetmax = 100
xurl = extensibleURL(request)
nosorturl = removeParam(xurl, 'sortby', mode='extensible')
timestamp_vars = ['modificationtime', 'statechangetime', 'starttime', 'creationdate', 'resquetime',
'endtime', 'lockedtime', 'frozentime', 'ttcpredictiondate']
for task in tasks:
for tp in task:
if tp in timestamp_vars and task[tp] is not None:
task[tp] = task[tp].strftime(defaultDatetimeFormat)
if task[tp] is None:
task[tp] = ''
timestamp_vars = ['modificationtime', 'creationtime']
for job in jobs:
for tsv in timestamp_vars:
if tsv in job and job[tsv]:
job[tsv] = job[tsv].strftime(defaultDatetimeFormat)
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'xurl': xurl,
'nosorturl': nosorturl,
'user': panda_user_name,
'sumd': sumd,
'jobsumd': jobsumd,
'jobList': jobs[:njobsmax],
'njobs': len(jobs),
'display_limit_jobs': display_limit_jobs,
'url_nolimit_jobs': url_nolimit_jobs,
'query': query,
'userstats': userstats,
'tfirst': request.session['TFIRST'].strftime(defaultDatetimeFormat),
'tlast': request.session['TLAST'].strftime(defaultDatetimeFormat),
'plow': request.session['PLOW'],
'phigh': request.session['PHIGH'],
'jobsets': jobsetl[:njobsetmax - 1],
'njobsetmax': njobsetmax,
'njobsets': len(jobsetl),
'url_nolimit_tasks': url_nolimit_tasks,
'display_limit_tasks': display_limit_tasks,
'tasks': tasks[:ntasksmax],
'ntasks': ntasks,
'tasksumd': tasksumd,
'built': datetime.now().strftime("%H:%M:%S"),
'links': links,
}
data.update(getContextVariables(request))
response = render_to_response('userInfo.html', data, content_type='text/html')
request = complete_request(request)
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
else:
request = complete_request(request)
resp = sumd
return HttpResponse(json.dumps(resp, default=datetime_handler), content_type='application/json')
def userDashApi(request, agg=None):
"""
:param agg: str: type of aggregation to return
:return: JSON
"""
valid, response = initRequest(request)
if not valid:
return response
AVAILABLE_AGGS = ['initial', 'cons_plots', 'overall_errors']
data = {'msg': '', 'data': {}}
if agg is None or agg not in AVAILABLE_AGGS:
data['msg'] += 'ERROR! Invalid agg passed.'
return HttpResponse(json.dumps(data, default=datetime_handler), content_type='application/json')
tk = None
if 'tk' in request.session['requestParams'] and request.session['requestParams']['tk']:
tk = int(request.session['requestParams']['tk'])
else:
data['msg'] += 'ERROR! Invalid transaction key passed. Please try to reload the page.'
return HttpResponse(json.dumps(data, default=datetime_handler), content_type='application/json')
# getting top errors by task and metrics for labels
if agg == 'initial':
# get taskids from cache
tasks_str = getCacheEntry(request, tk, isData=True)
if tasks_str is not None:
tasks = json.loads(tasks_str)
else:
tasks = []
_logger.info('Got {} tasks from cache: {}'.format(len(tasks), time.time() - request.session['req_init_time']))
# jobs summary
jquery = {
'jobstatus__in': ['finished', 'failed', ],
'jeditaskid__in': [t['jeditaskid'] for t in tasks if 'jeditaskid' in t]
}
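# Only finished/failed jobs belonging to the cached tasks are selected;
# err_fields limits the fetched columns to what the error summaries below need.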
err_fields = [
'brokerageerrorcode', 'brokerageerrordiag', 'ddmerrorcode', 'ddmerrordiag', 'exeerrorcode', 'exeerrordiag',
'jobdispatchererrorcode', 'jobdispatchererrordiag', 'piloterrorcode', 'piloterrordiag',
'superrorcode', 'superrordiag', 'taskbuffererrorcode', 'taskbuffererrordiag', 'transexitcode',
'produsername'
]
jobs = get_job_list(jquery, values=err_fields)
_logger.info('Got jobs: {}'.format(time.time() - request.session['req_init_time']))
errs_by_code, _, _, errs_by_task, _, _ = errorSummaryDict(request, jobs, False, flist=[], sortby='count')
errs_by_task_dict = {}
for err in errs_by_task:
if err['name'] not in errs_by_task_dict:
errs_by_task_dict[err['name']] = err
_logger.info('Got error summaries: {}'.format(time.time() - request.session['req_init_time']))
metrics = calc_jobs_metrics(jobs, group_by='jeditaskid')
_logger.info('Calculated jobs metrics: {}'.format(time.time() - request.session['req_init_time']))
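# Attach the per-task job metrics to each task row and, for tasks that are not done,
# build links to up to two of their top job errors.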
for t in tasks:
for metric in metrics:
if t['jeditaskid'] in metrics[metric]['group_by']:
t['job_' + metric] = metrics[metric]['group_by'][t['jeditaskid']]
else:
t['job_' + metric] = ''
if t['jeditaskid'] in errs_by_task_dict and t['superstatus'] != 'done':
link_jobs_base = '/jobs/?mode=nodrop&jeditaskid={}&'.format(t['jeditaskid'])
t['top_errors'] = '<br>'.join(
['<a href="{}{}={}">{}</a> [{}] "{}"'.format(
link_jobs_base, err['codename'], err['codeval'], err['count'], err['error'], err['diag']
) for err in errs_by_task_dict[t['jeditaskid']]['errorlist']][:2])
else:
t['top_errors'] = -1
_logger.info('Jobs metrics added to tasks: {}'.format(time.time() - request.session['req_init_time']))
# prepare relevant metrics to show
metrics_total = {m: v['total'] for m, v in metrics.items() if 'total' in v}
metrics_total = humanize_metrics(metrics_total)
data['data']['metrics'] = metrics_total
data['data']['tasks_metrics'] = tasks
# prepare data for datatable
task_list_table_headers = [
'jeditaskid', 'attemptnr', 'tasktype', 'taskname', 'nfiles', 'nfilesfinished', 'nfilesfailed', 'pctfinished',
'superstatus', 'status', 'age',
'job_queuetime', 'job_walltime', 'job_maxpss_per_actualcorecount', 'job_efficiency', 'job_attemptnr',
'errordialog', 'job_failed', 'top_errors',
]
tasks_to_show = []
for t in tasks:
tmp_list = []
for h in task_list_table_headers:
if h in t:
tmp_list.append(t[h])
else:
tmp_list.append("-")
tasks_to_show.append(tmp_list)
data['data']['tasks_metrics'] = tasks_to_show
return HttpResponse(json.dumps(data, default=datetime_handler), content_type='application/json')
@login_customrequired
def siteList(request):
valid, response = initRequest(request)
if not valid: return response
# Here we try to get cached data
data = getCacheEntry(request, "siteList")
if data is not None:
data = json.loads(data)
data['request'] = request
response = render_to_response('siteList.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
for param in request.session['requestParams']:
request.session['requestParams'][param] = escape_input(request.session['requestParams'][param])
setupView(request, opmode='notime')
query = {}
### Add any extensions to the query determined from the URL
if VOMODE == 'core': query['siteid__contains'] = 'CORE'
prod = False
extraParCondition = '1=1'
for param in request.session['requestParams']:
if param == 'category' and request.session['requestParams'][param] == 'multicloud':
query['multicloud__isnull'] = False
if param == 'category' and request.session['requestParams'][param] == 'analysis':
query['siteid__contains'] = 'ANALY'
if param == 'category' and request.session['requestParams'][param] == 'test':
query['siteid__icontains'] = 'test'
if param == 'category' and request.session['requestParams'][param] == 'production':
prod = True
if param == 'catchall':
wildCards = request.session['requestParams'][param].split('|')
countCards = len(wildCards)
currentCardCount = 1
extraParCondition = '('
for card in wildCards:
extraParCondition += preprocess_wild_card_string(escape_input(card), 'catchall')
if (currentCardCount < countCards): extraParCondition += ' OR '
currentCardCount += 1
extraParCondition += ')'
for field in Schedconfig._meta.get_fields():
if param == field.name and not (param == 'catchall'):
query[param] = escape_input(request.session['requestParams'][param])
siteres = Schedconfig.objects.filter(**query).exclude(cloud='CMS').extra(where=[extraParCondition]).values()
mcpres = Schedconfig.objects.filter(status='online').exclude(cloud='CMS').exclude(siteid__icontains='test').values(
'siteid', 'multicloud', 'cloud').order_by('siteid')
sites = []
for site in siteres:
if 'category' in request.session['requestParams'] and request.session['requestParams'][
'category'] == 'multicloud':
if (site['multicloud'] == 'None') or (not re.match('[A-Z]+', site['multicloud'])): continue
sites.append(site)
if 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'maxmemory':
sites = sorted(sites, key=lambda x: -x['maxmemory'])
elif 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'maxtime':
sites = sorted(sites, key=lambda x: -x['maxtime'])
elif 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'gocname':
sites = sorted(sites, key=lambda x: x['gocname'])
else:
sites = sorted(sites, key=lambda x: x['siteid'])
if prod:
newsites = []
for site in sites:
if site['siteid'].find('ANALY') >= 0:
pass
elif site['siteid'].lower().find('test') >= 0:
pass
else:
newsites.append(site)
sites = newsites
for site in sites:
if site['maxtime'] and (site['maxtime'] > 0): site['maxtime'] = "%.1f" % (float(site['maxtime']) / 3600.)
site['space'] = "%d" % (site['space'] / 1000.)
if VOMODE == 'atlas' and (
len(request.session['requestParams']) == 0 or 'cloud' in request.session['requestParams']):
clouds = Cloudconfig.objects.filter().exclude(name='CMS').exclude(name='OSG').values()
clouds = sorted(clouds, key=lambda x: x['name'])
mcpsites = {}
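# For each cloud: mark whether it should be displayed, pick up the space info
# from its tier-1 site, and assemble the list of home and multicloud (mcp) sites.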
for cloud in clouds:
cloud['display'] = True
if 'cloud' in request.session['requestParams'] and request.session['requestParams']['cloud'] != cloud[
'name']: cloud['display'] = False
mcpsites[cloud['name']] = []
for site in sites:
if site['siteid'] == cloud['tier1']:
cloud['space'] = site['space']
cloud['tspace'] = site['tspace'].strftime("%m-%d %H:%M")
for site in mcpres:
mcpclouds = site['multicloud'].split(',')
if cloud['name'] in mcpclouds or cloud['name'] == site['cloud']:
sited = {}
sited['name'] = site['siteid']
sited['cloud'] = site['cloud']
if site['cloud'] == cloud['name']:
sited['type'] = 'home'
else:
sited['type'] = 'mcp'
mcpsites[cloud['name']].append(sited)
cloud['mcpsites'] = ''
for s in mcpsites[cloud['name']]:
if s['type'] == 'home':
cloud['mcpsites'] += "<b>%s</b> " % s['name']
else:
cloud['mcpsites'] += "%s " % s['name']
if cloud['modtime']:
cloud['modtime'] = cloud['modtime'].strftime("%m-%d %H:%M")
else:
clouds = None
xurl = extensibleURL(request)
nosorturl = removeParam(xurl, 'sortby', mode='extensible')
if not is_json_request(request):
sumd = site_summary_dict(sites, VOMODE=VOMODE)
del request.session['TFIRST']
del request.session['TLAST']
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'sites': sites,
'clouds': clouds,
'sumd': sumd,
'xurl': xurl,
'nosorturl': nosorturl,
'built': datetime.now().strftime("%H:%M:%S"),
}
if 'cloud' in request.session['requestParams']: data['mcpsites'] = mcpsites[
request.session['requestParams']['cloud']]
# data.update(getContextVariables(request))
##self monitor
setCacheEntry(request, "siteList", json.dumps(data, cls=DateEncoder), 60 * 20)
response = render_to_response('siteList.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
else:
del request.session['TFIRST']
del request.session['TLAST']
resp = sites
return HttpResponse(json.dumps(resp, cls=DateEncoder), content_type='application/json')
@login_customrequired
def siteInfo(request, site=''):
valid, response = initRequest(request)
if not valid:
return response
if site == '' and 'site' in request.session['requestParams']:
site = request.session['requestParams']['site']
setupView(request)
query = {'siteid__iexact': site}
sites = Schedconfig.objects.filter(**query)
colnames = []
try:
siterec = sites[0]
colnames = siterec.get_all_fields()
if sites[0].lastmod:
sites[0].lastmod = sites[0].lastmod.strftime(defaultDatetimeFormat)
except IndexError:
siterec = None
if len(sites) > 1:
for queue in sites:
if queue.lastmod:
queue.lastmod = queue.lastmod.strftime(defaultDatetimeFormat)
# get data from new schedconfig_json table
panda_queue = []
pqquery = {'pandaqueue': site}
panda_queues = SchedconfigJson.objects.filter(**pqquery).values()
panda_queue_type = None
if len(panda_queues) > 0:
panda_queue_dict = json.loads(panda_queues[0]['data'])
panda_queue_type = panda_queue_dict['type']
for par, val in panda_queue_dict.items():
val = ', '.join([str(subpar) + ' = ' + str(subval) for subpar, subval in val.items()]) if isinstance(val, dict) else val
panda_queue.append({'param': par, 'value': val})
panda_queue = sorted(panda_queue, key=lambda x: x['param'])
HPC = False
njobhours = 12
try:
if siterec.catchall.find('HPC') >= 0:
HPC = True
njobhours = 48
except AttributeError:
pass
panda_resource = get_panda_resource(siterec)
if not is_json_request(request):
attrs = []
if siterec:
attrs.append({'name': 'GOC name', 'value': siterec.gocname})
if HPC: attrs.append(
{'name': 'HPC', 'value': 'This is a High Performance Computing (HPC) supercomputer queue'})
if siterec.catchall and siterec.catchall.find('log_to_objectstore') >= 0:
attrs.append({'name': 'Object store logs', 'value': 'Logging to object store is enabled'})
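# siterec.objectstore appears to be a '|'-separated string: the first field is the
# object store location, subsequent fields are 'type^path' pairs.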
if siterec.objectstore and len(siterec.objectstore) > 0:
fields = siterec.objectstore.split('|')
nfields = len(fields)
for nf in range(0, len(fields)):
if nf == 0:
attrs.append({'name': 'Object store location', 'value': fields[0]})
else:
fields2 = fields[nf].split('^')
if len(fields2) > 1:
ostype = fields2[0]
ospath = fields2[1]
attrs.append({'name': 'Object store %s path' % ostype, 'value': ospath})
if siterec.nickname != site:
attrs.append({'name': 'Queue (nickname)', 'value': siterec.nickname})
if len(sites) > 1:
attrs.append({'name': 'Total queues for this site', 'value': len(sites)})
attrs.append({'name': 'Status', 'value': siterec.status})
if siterec.comment_field and len(siterec.comment_field) > 0:
attrs.append({'name': 'Comment', 'value': siterec.comment_field})
attrs.append({'name': 'Cloud', 'value': siterec.cloud})
if siterec.multicloud and len(siterec.multicloud) > 0:
attrs.append({'name': 'Multicloud', 'value': siterec.multicloud})
attrs.append({'name': 'Tier', 'value': siterec.tier})
attrs.append({'name': 'DDM endpoint', 'value': siterec.ddm})
attrs.append({'name': 'Max rss', 'value': "%.1f GB" % (float(siterec.maxrss) / 1000.)})
attrs.append({'name': 'Min rss', 'value': "%.1f GB" % (float(siterec.minrss) / 1000.)})
if siterec.maxtime > 0:
attrs.append({'name': 'Maximum time', 'value': "%.1f hours" % (float(siterec.maxtime) / 3600.)})
attrs.append({'name': 'Space', 'value': "%d TB as of %s" % ((float(siterec.space) / 1000.), siterec.tspace.strftime('%m-%d %H:%M'))})
attrs.append({'name': 'Last modified', 'value': "%s" % (siterec.lastmod.strftime('%Y-%m-%d %H:%M'))})
# get calculated metrics
try:
metrics = get_pq_metrics(siterec.nickname)
except Exception as ex:
metrics = {}
_logger.exception('Failed to get metrics for {}\n {}'.format(siterec.nickname, ex))
if len(metrics) > 0:
for pq, m_dict in metrics.items():
for m in m_dict:
colnames.append({'label': m, 'name': m, 'value': m_dict[m]})
del request.session['TFIRST']
del request.session['TLAST']
data = {
'request': request,
'viewParams': request.session['viewParams'],
'site': siterec,
'panda_resource': panda_resource,
'queues': sites,
'colnames': colnames,
'attrs': attrs,
'name': site,
'pq_type': panda_queue_type,
'njobhours': njobhours,
'built': datetime.now().strftime("%H:%M:%S"),
'pandaqueue': panda_queue,
}
data.update(getContextVariables(request))
response = render_to_response('siteInfo.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
else:
del request.session['TFIRST']
del request.session['TLAST']
return HttpResponse(json.dumps(panda_queue), content_type='application/json')
@login_customrequired
def wnInfo(request, site, wnname='all'):
""" Give worker node level breakdown of site activity. Spot hot nodes, error prone nodes. """
valid, response = initRequest(request)
if not valid:
return response
jobs_url = '?computingsite={}&mode=nodrop'.format(site)
if 'hours' in request.session['requestParams']:
hours = int(request.session['requestParams']['hours'])
elif 'days' in request.session['requestParams']:
hours = 24*int(request.session['requestParams']['days'])
elif 'date_from' in request.session['requestParams'] and 'date_to' in request.session['requestParams']:
hours = 0
else:
hours = 12
jobs_url += '&hours={}'.format(hours)
exclude_params = ['timestamp', 'wnname', ]
for p, v in request.session['requestParams'].items():
if p not in exclude_params:
jobs_url += '&{}={}'.format(p, v)
panda_queues = get_panda_queues()
if site and site not in panda_queues:
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'alert': {'title': 'This site does not exist!',
'message': 'There is no {} registered in the system, please check spelling.'.format(site)},
'built': datetime.now().strftime("%H:%M:%S"),
}
response = render_to_response('wnInfo.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
# Here we try to get cached data
data = getCacheEntry(request, "wnInfo")
if data is not None:
data = json.loads(data)
data['request'] = request
response = render_to_response('wnInfo.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
errthreshold = 15
wnname_rgx = None
if 'wnname' in request.session['requestParams'] and request.session['requestParams']['wnname']:
wnname_rgx = request.session['requestParams']['wnname']
query = setupView(request, hours=hours, limit=999999)
if wnname != 'all':
query['modificationhost__endswith'] = wnname
elif wnname_rgx is not None:
query['modificationhost__contains'] = wnname_rgx.replace('*', '')
query['computingsite'] = site
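# wn_summary aggregates job state counts per worker-node slot and returns the data
# for the failed/finished-jobs-per-WN plots rendered below.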
fullsummary, plots_data = wn_summary(wnname, query)
if 'sortby' in request.session['requestParams']:
if request.session['requestParams']['sortby'] in sitestatelist:
fullsummary = sorted(fullsummary, key=lambda x: x['states'][request.session['requestParams']['sortby']] if not isinstance(x['states'][request.session['requestParams']['sortby']], dict) else x['states'][request.session['requestParams']['sortby']]['count'],
reverse=True)
elif request.session['requestParams']['sortby'] == 'pctfail':
fullsummary = sorted(fullsummary, key=lambda x: x['pctfail'], reverse=True)
# Remove the 'None' worker node from the failed-jobs plot if present, and add a warning banner
warning = {}
if 'None' in plots_data['failed']:
warning['message'] = '%i failed jobs are excluded from the "Failed jobs per WN slot" plot because their modificationhost is None.' % (plots_data['failed']['None'])
try:
del plots_data['failed']['None']
except KeyError:
pass
wnPlotFailedL = sorted([[k, v] for k, v in plots_data['failed'].items()], key=lambda x: x[0])
kys = plots_data['finished'].keys()
kys = sorted(kys)
wnPlotFinishedL = []
for k in kys:
wnPlotFinishedL.append([k, plots_data['finished'][k]])
if not is_json_request(request):
xurl = extensibleURL(request)
del request.session['TFIRST']
del request.session['TLAST']
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'url': request.path,
'xurl': xurl,
'jurl': jobs_url,
'site': site,
'wnname': wnname,
'user': None,
'summary': fullsummary,
'wnPlotFailed': wnPlotFailedL,
'wnPlotFinished': wnPlotFinishedL,
'hours': hours,
'errthreshold': errthreshold,
'warning': warning,
'built': datetime.now().strftime("%H:%M:%S"),
}
response = render_to_response('wnInfo.html', data, content_type='text/html')
setCacheEntry(request, "wnInfo", json.dumps(data, cls=DateEncoder), 60 * 20)
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
else:
del request.session['TFIRST']
del request.session['TLAST']
data = {
'url': request.path,
'site': site,
'wnname': wnname,
'user': None,
'summary': fullsummary,
'wnPlotFailed': wnPlotFailedL,
'wnPlotFinished': wnPlotFinishedL,
'hours': hours,
'errthreshold': errthreshold,
'built': datetime.now().strftime("%H:%M:%S"),
}
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='application/json')
# https://github.com/PanDAWMS/panda-jedi/blob/master/pandajedi/jedicore/JediCoreUtils.py
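# Effective file size scales the physical size by the fraction of events a job
# actually processes, falls back to a 1 MB dummy size for pseudo or empty inputs,
# and returns the result in MB.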
def getEffectiveFileSize(fsize, startEvent, endEvent, nEvents):
inMB = 1024 * 1024
if fsize in [None, 0]:
# use dummy size for pseudo input
effectiveFsize = inMB
elif nEvents is not None and startEvent is not None and endEvent is not None:
# take event range into account
effectiveFsize = int(float(fsize) * float(endEvent - startEvent + 1) / float(nEvents))
else:
effectiveFsize = fsize
# use dummy size if input is too small
if effectiveFsize == 0:
effectiveFsize = inMB
# in MB
effectiveFsize = float(effectiveFsize) / inMB
# return
return effectiveFsize
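# Illustrative example (made-up numbers): a 100 MB input (fsize = 100 * 1024 * 1024)
# of which a job processes 10 out of 100 events has an effective size of
# getEffectiveFileSize(104857600, 0, 9, 100) == 10.0 MB.

# calculateRWwithPrio_JEDI below estimates the remaining work (RW) per cloud as
# sum(nrem * effective file size * walltime) over the 3-day progress entries,
# plus the number of remaining jobs per cloud; RW is finally divided by 24 * 3600.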
def calculateRWwithPrio_JEDI(query):
# query = {}
retRWMap = {}
retNREMJMap = {}
values = ['jeditaskid', 'datasetid', 'modificationtime', 'cloud', 'nrem', 'walltime', 'fsize', 'startevent',
'endevent', 'nevents']
# TODO: rework this
if 'schedulerid' in query.keys():
del query['schedulerid']
elif 'schedulerid__startswith' in query.keys():
del query['schedulerid__startswith']
progressEntries = []
progressEntries.extend(GetRWWithPrioJedi3DAYS.objects.filter(**query).values(*values))
allCloudsRW = 0
allCloudsNREMJ = 0
if len(progressEntries) > 0:
for progrEntry in progressEntries:
if progrEntry['fsize'] is not None:
effectiveFsize = getEffectiveFileSize(progrEntry['fsize'], progrEntry['startevent'],
progrEntry['endevent'], progrEntry['nevents'])
tmpRW = progrEntry['nrem'] * effectiveFsize * progrEntry['walltime']
if progrEntry['cloud'] not in retRWMap:
retRWMap[progrEntry['cloud']] = 0
retRWMap[progrEntry['cloud']] += tmpRW
allCloudsRW += tmpRW
if progrEntry['cloud'] not in retNREMJMap:
retNREMJMap[progrEntry['cloud']] = 0
retNREMJMap[progrEntry['cloud']] += progrEntry['nrem']
allCloudsNREMJ += progrEntry['nrem']
retRWMap['All'] = allCloudsRW
retNREMJMap['All'] = allCloudsNREMJ
for cloudName, rwValue in retRWMap.items():
retRWMap[cloudName] = int(rwValue / 24 / 3600)
return retRWMap, retNREMJMap
@login_customrequired
def dashboard(request, view='all'):
valid, response = initRequest(request)
if not valid:
return response
# if it is a region or world cloud view, redirect to the new dashboards
cloudview = 'region'
if 'cloudview' in request.session['requestParams']:
cloudview = request.session['requestParams']['cloudview']
if view == 'analysis':
cloudview = 'region'
elif view != 'production' and view != 'all':
cloudview = 'N/A'
if ('version' not in request.session['requestParams'] or request.session['requestParams']['version'] != 'old') \
and view in ('all', 'production', 'analysis') and cloudview in ('region', 'world') \
and 'es' not in request.session['requestParams'] and 'mode' not in request.session['requestParams'] \
and not is_json_request(request):
# do redirect
if cloudview == 'world':
return redirect('/dash/world/')
elif cloudview == 'region':
if view == 'production':
return redirect('/dash/region/?jobtype=prod&splitby=jobtype')
elif view == 'analysis':
return redirect('/dash/region/?jobtype=analy&splitby=jobtype')
elif view == 'all':
return redirect('/dash/region/')
# data = getCacheEntry(request, "dashboard", skipCentralRefresh=True)
data = getCacheEntry(request, "dashboard")
if data is not None:
data = json.loads(data)
data['request'] = request
template = data['template']
response = render_to_response(template, data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
taskdays = 3
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
VOMODE = 'atlas'
else:
VOMODE = ''
if VOMODE != 'atlas':
hours = 24 * taskdays
else:
hours = 12
hoursSinceUpdate = 36
estailtojobslinks = ''
extra = "(1=1)"
if view == 'production':
if 'es' in request.session['requestParams'] and request.session['requestParams']['es'].upper() == 'TRUE':
extra = "(not eventservice is null and eventservice in (1, 5) and not specialhandling like '%%sc:%%')"
estailtojobslinks = '&eventservice=eventservice'
elif 'es' in request.session['requestParams'] and request.session['requestParams']['es'].upper() == 'FALSE':
extra = "(not (not eventservice is null and eventservice in (1, 5) and not specialhandling like '%%sc:%%'))"
elif 'esmerge' in request.session['requestParams'] and request.session['requestParams'][
'esmerge'].upper() == 'TRUE':
extra = "(not eventservice is null and eventservice=2 and not specialhandling like '%%sc:%%')"
estailtojobslinks = '&eventservice=2'
noldtransjobs, transclouds, transrclouds = stateNotUpdated(request, state='transferring',
hoursSinceUpdate=hoursSinceUpdate, count=True, wildCardExtension=extra)
elif view == 'analysis':
hours = 3
noldtransjobs = 0
transclouds = []
transrclouds = []
else:
hours = 12
noldtransjobs = 0
transclouds = []
transrclouds = []
errthreshold = 10
query = setupView(request, hours=hours, limit=999999, opmode=view)
if 'mode' in request.session['requestParams'] and request.session['requestParams']['mode'] == 'task':
return dashTasks(request, hours, view)
if VOMODE != 'atlas':
sortby = 'name'
if 'sortby' in request.session['requestParams']:
sortby = request.session['requestParams']['sortby']
vosummary = vo_summary(query, sortby=sortby)
else:
if view == 'production':
errthreshold = 5
elif view == 'analysis':
errthreshold = 15
else:
errthreshold = 10
vosummary = []
if view == 'production' and (cloudview == 'world' or cloudview == 'cloud'):
# 'cloud' view is the legacy way of distributing jobs; it is kept here only to avoid redirecting
if 'modificationtime__castdate__range' in query:
query = {'modificationtime__castdate__range': query['modificationtime__castdate__range']}
else:
query = {}
values = ['nucleus', 'computingsite', 'jobstatus']
worldJobsSummary = []
estailtojobslinks = ''
if 'days' in request.session['requestParams']:
hours = int(request.session['requestParams']['days'])*24
if 'hours' in request.session['requestParams']:
hours = int(request.session['requestParams']['hours'])
extra = '(1=1)'
if view == 'production':
query['tasktype'] = 'prod'
elif view == 'analysis':
query['tasktype'] = 'anal'
if 'es' in request.session['requestParams'] and request.session['requestParams']['es'].upper() == 'TRUE':
query['es__in'] = [1, 5]
estailtojobslinks = '&eventservice=eventservice|cojumbo'
extra = job_suppression(request)
if 'es' in request.session['requestParams'] and request.session['requestParams']['es'].upper() == 'FALSE':
query['es'] = 0
# This is done for compatibility with /jobs/ results
excludedTimeQuery = copy.deepcopy(query)
jobsarch4statuses = ['finished', 'failed', 'cancelled', 'closed']
if ('modificationtime__castdate__range' in excludedTimeQuery and 'date_to' not in request.session['requestParams']):
del excludedTimeQuery['modificationtime__castdate__range']
worldJobsSummary.extend(CombinedWaitActDefArch4.objects.filter(**excludedTimeQuery).values(*values).extra(where=[extra]).exclude(isarchive=1).annotate(countjobsinstate=Count('jobstatus')).annotate(counteventsinstate=Sum('nevents')))
worldJobsSummary.extend(CombinedWaitActDefArch4.objects.filter(**query).values(*values).extra(where=[extra]).exclude(isarchive=0).annotate(countjobsinstate=Count('jobstatus')).annotate(counteventsinstate=Sum('nevents')))
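# Aggregate the summary rows into nucleus[<nucleus>][<computingsite>][<jobstatus>]
# job counts, with additional 'events<status>' event counts kept for the
# finished/failed/merging states.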
nucleus = {}
statelist1 = statelist
# del statelist1[statelist1.index('jclosed')]
# del statelist1[statelist1.index('pending')]
if len(worldJobsSummary) > 0:
for jobs in worldJobsSummary:
if jobs['nucleus'] in nucleus:
if jobs['computingsite'] in nucleus[jobs['nucleus']]:
nucleus[jobs['nucleus']][jobs['computingsite']][jobs['jobstatus']] += jobs['countjobsinstate']
if (jobs['jobstatus'] in ('finished','failed','merging')):
nucleus[jobs['nucleus']][jobs['computingsite']]['events'+ jobs['jobstatus']] += jobs['counteventsinstate']
else:
nucleus[jobs['nucleus']][jobs['computingsite']] = {}
for state in statelist1:
nucleus[jobs['nucleus']][jobs['computingsite']][state] = 0
if (state in ('finished', 'failed','merging')):
nucleus[jobs['nucleus']][jobs['computingsite']]['events'+ state] = 0
nucleus[jobs['nucleus']][jobs['computingsite']][jobs['jobstatus']] = jobs['countjobsinstate']
if (jobs['jobstatus'] in ('finished', 'failed', 'merging')):
nucleus[jobs['nucleus']][jobs['computingsite']]['events' + jobs['jobstatus']] = jobs['counteventsinstate']
else:
nucleus[jobs['nucleus']] = {}
nucleus[jobs['nucleus']][jobs['computingsite']] = {}
for state in statelist1:
nucleus[jobs['nucleus']][jobs['computingsite']][state] = 0
if (state in ('finished', 'failed', 'merging')):
nucleus[jobs['nucleus']][jobs['computingsite']]['events'+ state] = 0
nucleus[jobs['nucleus']][jobs['computingsite']][jobs['jobstatus']] = jobs['countjobsinstate']
if (jobs['jobstatus'] in ('finished', 'failed', 'merging')):
nucleus[jobs['nucleus']][jobs['computingsite']]['events' + jobs['jobstatus']] = jobs['counteventsinstate']
nucleusSummary = {}
for nucleusInfo in nucleus:
nucleusSummary[nucleusInfo] = {}
for site in nucleus[nucleusInfo]:
for state in nucleus[nucleusInfo][site]:
if state in nucleusSummary[nucleusInfo]:
nucleusSummary[nucleusInfo][state] += nucleus[nucleusInfo][site][state]
else:
nucleusSummary[nucleusInfo][state] = nucleus[nucleusInfo][site][state]
if not is_json_request(request):
xurl = extensibleURL(request)
nosorturl = removeParam(xurl, 'sortby', mode='extensible')
if 'TFIRST' in request.session: del request.session['TFIRST']
if 'TLAST' in request.session: del request.session['TLAST']
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'url': request.path,
'nucleuses': nucleus,
'nucleussummary': nucleusSummary,
'statelist': statelist1,
'xurl': xurl,
'estailtojobslinks':estailtojobslinks,
'nosorturl': nosorturl,
'user': None,
'hours': hours,
'template': 'worldjobs.html',
'built': datetime.now().strftime("%m-%d %H:%M:%S"),
}
# self monitor
response = render_to_response('worldjobs.html', data, content_type='text/html')
setCacheEntry(request, "dashboard", json.dumps(data, cls=DateEncoder), 60 * 30)
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
else:
data = {
'nucleuses': nucleus,
'nucleussummary': nucleusSummary,
'statelist': statelist1,
}
return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='application/json')
elif view == 'objectstore':
mObjectStores, mObjectStoresSummary = objectstore_summary(request, hours=hours)
data = {
'mObjectStoresSummary': mObjectStoresSummary,
'mObjectStores': mObjectStores,
'viewParams': request.session['viewParams'],
'statelist': sitestatelist + ["closed"],
'template': 'dashObjectStore.html',
'built': datetime.now().strftime("%m-%d %H:%M:%S"),
}
response = render_to_response('dashObjectStore.html', data, content_type='text/html')
setCacheEntry(request, "dashboard", json.dumps(data, cls=DateEncoder), 60 * 25)
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
else:
notime = True
if len({'date_to', 'hours'}.intersection(request.session['requestParams'].keys())) > 0:
notime = False
fullsummary = cloud_site_summary(query, extra=extra, view=view, cloudview=cloudview, notime=notime)
cloudTaskSummary = wg_task_summary(request, fieldname='cloud', view=view, taskdays=taskdays)
jobsLeft = {}
rw = {}
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
rwData, nRemJobs = calculateRWwithPrio_JEDI(query)
for cloud in fullsummary:
if cloud['name'] in nRemJobs.keys():
jobsLeft[cloud['name']] = nRemJobs[cloud['name']]
if cloud['name'] in rwData.keys():
rw[cloud['name']] = rwData[cloud['name']]
if not is_json_request(request) or 'keephtml' in request.session['requestParams']:
xurl = extensibleURL(request)
nosorturl = removeParam(xurl, 'sortby', mode='extensible')
del request.session['TFIRST']
del request.session['TLAST']
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'url': request.path,
'xurl': xurl,
'nosorturl': nosorturl,
'user': None,
'summary': fullsummary,
'vosummary': vosummary,
'view': view,
'mode': 'site',
'cloudview': cloudview,
'hours': hours,
'errthreshold': errthreshold,
'cloudTaskSummary': cloudTaskSummary,
'taskstates': taskstatedict,
'taskdays': taskdays,
'estailtojobslinks':estailtojobslinks,
'noldtransjobs': noldtransjobs,
'transclouds': transclouds,
'transrclouds': transrclouds,
'hoursSinceUpdate': hoursSinceUpdate,
'jobsLeft': jobsLeft,
'rw': rw,
'template': 'dashboard.html',
'built': datetime.now().strftime("%H:%M:%S"),
}
# self monitor
response = render_to_response('dashboard.html', data, content_type='text/html')
setCacheEntry(request, "dashboard", json.dumps(data, cls=DateEncoder), 60 * 60)
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
else:
del request.session['TFIRST']
del request.session['TLAST']
data = {
'summary': fullsummary,
'vosummary': vosummary,
'view': view,
'mode': 'site',
'cloudview': cloudview,
'hours': hours,
'errthreshold': errthreshold,
'cloudTaskSummary': cloudTaskSummary,
'taskstates': taskstatedict,
'taskdays': taskdays,
'noldtransjobs': noldtransjobs,
'transclouds': transclouds,
'transrclouds': transrclouds,
'hoursSinceUpdate': hoursSinceUpdate,
'jobsLeft': jobsLeft,
'rw': rw,
'built': datetime.now().strftime("%H:%M:%S"),
}
return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='application/json')
@login_customrequired
def dashRegion(request):
"""
A new job summary dashboard for regions that allows splitting jobs of a Grand Unified Queue
by analy|prod job type and resource type.
Regions column order:
region, status, job type, resource type, Njobstotal, [Njobs by status]
Queues column order:
queue name, type [GU, U, Simple], region, status, job type, resource type, Njobstotal, [Njobs by status]
:param request: request
:return: HTTP response
"""
valid, response = initRequest(request)
if not valid:
return response
if request.path.startswith('/new/dash/'):
return redirect(request.get_full_path().replace('/new/dash/', '/dash/region/'))
# Here we try to get cached data
data = getCacheEntry(request, "JobSummaryRegion")
# data = None
if data is not None:
data = json.loads(data)
data['request'] = request
response = render_to_response('JobSummaryRegion.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
if 'splitby' in request.session['requestParams'] and request.session['requestParams']['splitby']:
split_by = request.session['requestParams']['splitby']
else:
split_by = None
if 'region' in request.session['requestParams'] and request.session['requestParams']['region']:
region = request.session['requestParams']['region']
else:
region = 'all'
if 'jobtype' in request.session['requestParams'] and request.session['requestParams']['jobtype']:
jobtype = request.session['requestParams']['jobtype']
else:
jobtype = 'all'
if 'resourcetype' in request.session['requestParams'] and request.session['requestParams']['resourcetype']:
resourcetype = request.session['requestParams']['resourcetype']
else:
resourcetype = 'all'
jquery, extra_str, hours = setupView(request, limit=9999999, querytype='job', wildCardExt=True)
# add queue related request params to query dict
if 'queuetype' in request.session['requestParams'] and request.session['requestParams']['queuetype']:
jquery['queuetype'] = request.session['requestParams']['queuetype']
if 'queuestatus' in request.session['requestParams'] and request.session['requestParams']['queuestatus']:
jquery['queuestatus'] = request.session['requestParams']['queuestatus']
if 'site' in request.session['requestParams'] and request.session['requestParams']['site'] != 'all':
jquery['queuegocname'] = request.session['requestParams']['site']
# get job summary data
jsr_queues_dict, jsr_regions_dict = get_job_summary_region(jquery,
extra=extra_str,
region=region,
jobtype=jobtype,
resourcetype=resourcetype,
split_by=split_by)
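# get_job_summary_region returns per-queue and per-region job summaries; below they
# are either prettified and returned as JSON or flattened into table rows for the template.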
if is_json_request(request):
extra_info_params = ['links', ]
extra_info = {ep: False for ep in extra_info_params}
if 'extra' in request.session['requestParams'] and 'links' in request.session['requestParams']['extra']:
extra_info['links'] = True
jsr_queues_dict, jsr_regions_dict = prettify_json_output(jsr_queues_dict, jsr_regions_dict, hours=hours, extra=extra_info)
data = {
'regions': jsr_regions_dict,
'queues': jsr_queues_dict,
}
dump = json.dumps(data, cls=DateEncoder)
return HttpResponse(dump, content_type='application/json')
else:
# transform dict to list and filter out rows depending on split by request param
jsr_queues_list, jsr_regions_list = prepare_job_summary_region(jsr_queues_dict, jsr_regions_dict,
split_by=split_by)
# prepare lists of unique values for drop down menus
select_params_dict = {}
select_params_dict['queuetype'] = sorted(list(set([pq[1] for pq in jsr_queues_list])))
select_params_dict['queuestatus'] = sorted(list(set([pq[3] for pq in jsr_queues_list])))
pq_info_basic = get_basic_info_for_pqs([])
unique_sites_dict = {}
for pq in pq_info_basic:
if pq['site'] not in unique_sites_dict:
unique_sites_dict[pq['site']] = pq['region']
select_params_dict['site'] = sorted([{'site': site, 'region': reg} for site, reg in unique_sites_dict.items()],
key=lambda x: x['site'])
select_params_dict['region'] = sorted(list(set([reg for site, reg in unique_sites_dict.items()])))
xurl = request.get_full_path()
if xurl.find('?') > 0:
xurl += '&'
else:
xurl += '?'
# overwrite view selection params
view_params_str = '<b>Manually entered params</b>: '
supported_params = {f.verbose_name: '' for f in PandaJob._meta.get_fields()}
interactive_params = ['hours', 'days', 'date_from', 'date_to', 'timestamp',
'queuetype', 'queuestatus', 'jobtype', 'resourcetype', 'splitby', 'region', 'site']
for pn, pv in request.session['requestParams'].items():
if pn not in interactive_params and pn in supported_params:
view_params_str += '<b>{}=</b>{} '.format(str(pn), str(pv))
request.session['viewParams']['selection'] = view_params_str if not view_params_str.endswith(': ') else ''
request.session['timerange'] = jquery['modificationtime__castdate__range']
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'timerange': request.session['timerange'],
'built': datetime.now().strftime("%H:%M:%S"),
'hours': hours,
'xurl': xurl,
'selectParams': select_params_dict,
'jobstates': statelist,
'regions': jsr_regions_list,
'queues': jsr_queues_list,
'show': 'all',
}
response = render_to_response('JobSummaryRegion.html', data, content_type='text/html')
setCacheEntry(request, "JobSummaryRegion", json.dumps(data, cls=DateEncoder), 60 * 20)
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
@login_customrequired
def dashNucleus(request):
valid, response = initRequest(request)
if not valid:
return response
# Here we try to get cached data
data = getCacheEntry(request, "JobSummaryNucleus")
# data = None
if data is not None:
data = json.loads(data)
data['request'] = request
response = render_to_response('JobSummaryNucleus.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
if 'hours' in request.session['requestParams'] and request.session['requestParams']['hours']:
hours = int(request.session['requestParams']['hours'])
else:
hours = 12
query, extra, nhours = setupView(request, hours=hours, limit=999999, wildCardExt=True)
# get summary data
jsn_nucleus_dict, jsn_satellite_dict = get_job_summary_nucleus(
query,
extra=extra,
job_states_order=copy.deepcopy(statelist),
hs06s=True
)
get_world_hs06_summary(query, extra=extra)
if is_json_request(request):
data = {
'nucleuses': jsn_satellite_dict,
'nucleussummary': jsn_nucleus_dict,
'statelist': copy.deepcopy(statelist),
'built': datetime.now().strftime(defaultDatetimeFormat),
}
return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='application/json')
else:
# convert dict -> list
jsn_nucleus_list, jsn_satellite_list = prepare_job_summary_nucleus(
jsn_nucleus_dict,
jsn_satellite_dict,
job_states_order=copy.deepcopy(statelist)
)
xurl = request.get_full_path()
if xurl.find('?') > 0:
xurl += '&'
else:
xurl += '?'
# overwrite view selection params
view_params_str = '<b>Params</b>: '
supported_params = {f.verbose_name: '' for f in PandaJob._meta.get_fields()}
for pn, pv in request.session['requestParams'].items():
if pn in supported_params:
view_params_str += '<b>{}=</b>{} '.format(str(pn), str(pv))
request.session['viewParams']['selection'] = view_params_str if not view_params_str.endswith(': ') else ''
request.session['timerange'] = query['modificationtime__castdate__range']
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'timerange': request.session['timerange'],
'built': datetime.now().strftime("%H:%M:%S"),
'jobstates': statelist,
'show': 'all',
'hours': hours,
'xurl': xurl,
'nuclei': jsn_nucleus_list,
'satellites': jsn_satellite_list,
}
response = render_to_response('JobSummaryNucleus.html', data, content_type='text/html')
setCacheEntry(request, "JobSummaryNucleus", json.dumps(data, cls=DateEncoder), 60 * 20)
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
@login_customrequired
def dashES(request):
"""
A new ES job summary dashboard
:param request: request
:return: HTTP response
"""
valid, response = initRequest(request)
if not valid:
return response
# Here we try to get cached data
data = getCacheEntry(request, "JobSummaryRegion")
# data = None
if data is not None:
data = json.loads(data)
data['request'] = request
response = render_to_response('EventService.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
if 'splitby' in request.session['requestParams'] and request.session['requestParams']['splitby']:
split_by = request.session['requestParams']['splitby']
else:
split_by = None
if 'hours' in request.session['requestParams'] and request.session['requestParams']['hours']:
hours = int(request.session['requestParams']['hours'])
else:
hours = 12
jquery, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=hours, limit=9999999, querytype='job', wildCardExt=True)
# add queue related request params to pqquery dict
pqquery = dict()
if 'queuetype' in request.session['requestParams'] and request.session['requestParams']['queuetype']:
pqquery['queuetype'] = request.session['requestParams']['queuetype']
if 'queuestatus' in request.session['requestParams'] and request.session['requestParams']['queuestatus']:
pqquery['queuestatus'] = request.session['requestParams']['queuestatus']
# get job summary data
jsr_queues_dict, jsr_regions_dict = get_es_job_summary_region(jquery, extra=wildCardExtension, pqquery=pqquery)
if is_json_request(request):
extra_info_params = ['links', ]
extra_info = {ep: False for ep in extra_info_params}
if 'extra' in request.session['requestParams'] and 'links' in request.session['requestParams']['extra']:
extra_info['links'] = True
jsr_queues_dict, jsr_regions_dict = prettify_json_output(jsr_queues_dict, jsr_regions_dict, hours=hours, extra=extra_info)
data = {
'regions': jsr_regions_dict,
'queues': jsr_queues_dict,
}
dump = json.dumps(data, cls=DateEncoder)
return HttpResponse(dump, content_type='application/json')
else:
# transform dict to list and filter out rows depending on split by request param
jsr_queues_list, jsr_regions_list = prepare_es_job_summary_region(jsr_queues_dict, jsr_regions_dict,
split_by=split_by)
# prepare lists of unique values for drop down menus
select_params_dict = {}
select_params_dict['region'] = sorted(list(set([r[0] for r in jsr_regions_list])))
select_params_dict['queuetype'] = sorted(list(set([pq[1] for pq in jsr_queues_list])))
select_params_dict['queuestatus'] = sorted(list(set([pq[3] for pq in jsr_queues_list])))
xurl = request.get_full_path()
if xurl.find('?') > 0:
xurl += '&'
else:
xurl += '?'
# overwrite view selection params
view_params_str = '<b>Manually entered params</b>: '
supported_params = {f.verbose_name: '' for f in PandaJob._meta.get_fields()}
interactive_params = ['hours', 'days', 'date_from', 'date_to', 'timestamp',
'queuetype', 'queuestatus', 'jobtype', 'resourcetype', 'splitby', 'region']
for pn, pv in request.session['requestParams'].items():
if pn not in interactive_params and pn in supported_params:
view_params_str += '<b>{}=</b>{} '.format(str(pn), str(pv))
request.session['viewParams']['selection'] = view_params_str if not view_params_str.endswith(': ') else ''
request.session['timerange'] = jquery['modificationtime__castdate__range']
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'timerange': request.session['timerange'],
'built': datetime.now().strftime("%H:%M:%S"),
'hours': hours,
'xurl': xurl,
'selectParams': select_params_dict,
'jobstates': statelist,
'regions': jsr_regions_list,
'queues': jsr_queues_list,
'show': 'all',
}
response = render_to_response('EventService.html', data, content_type='text/html')
setCacheEntry(request, "EventService", json.dumps(data, cls=DateEncoder), 60 * 20)
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
@login_customrequired
def dashAnalysis(request):
return dashboard(request, view='analysis')
@login_customrequired
def dashProduction(request):
return dashboard(request, view='production')
@login_customrequired
def dashObjectStore(request):
return dashboard(request, view='objectstore')
def dashTasks(request, hours, view='production'):
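    """
    Tasks dashboard for the given view ('production' or 'analysis'): builds a cloud/region task summary
    and returns the rendered dashboard page, or a JSON dict with jobsLeft and remaining weighted events.
    """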
valid, response = initRequest(request)
if not valid: return response
if view == 'production':
errthreshold = 5
else:
errthreshold = 15
if 'days' in request.session['requestParams']:
taskdays = int(request.session['requestParams']['days'])
else:
taskdays = 7
hours = taskdays * 24
query = setupView(request, hours=hours, limit=999999, opmode=view, querytype='task')
cloudTaskSummary = wg_task_summary(request, fieldname='cloud', view=view, taskdays=taskdays)
# taskJobSummary = dashTaskSummary(request, hours, view) not particularly informative
taskJobSummary = []
if 'display_limit' in request.session['requestParams']:
try:
display_limit = int(request.session['requestParams']['display_limit'])
except:
display_limit = 300
else:
display_limit = 300
cloudview = 'cloud'
if 'cloudview' in request.session['requestParams']:
cloudview = request.session['requestParams']['cloudview']
if view == 'analysis':
cloudview = 'region'
elif view != 'production':
cloudview = 'N/A'
fullsummary = cloud_site_summary(query, view=view, cloudview=cloudview)
jobsLeft = {}
rw = {}
rwData, nRemJobs = calculateRWwithPrio_JEDI(query)
for cloud in fullsummary:
if cloud['name'] in nRemJobs.keys():
jobsLeft[cloud['name']] = nRemJobs[cloud['name']]
if cloud['name'] in rwData.keys():
rw[cloud['name']] = rwData[cloud['name']]
if not is_json_request(request):
xurl = extensibleURL(request)
nosorturl = removeParam(xurl, 'sortby', mode='extensible')
del request.session['TFIRST']
del request.session['TLAST']
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'url': request.path,
'xurl': xurl,
'nosorturl': nosorturl,
'user': None,
'view': view,
'mode': 'task',
'hours': hours,
'errthreshold': errthreshold,
'cloudTaskSummary': cloudTaskSummary,
'taskstates': taskstatedict,
'taskdays': taskdays,
'taskJobSummary': taskJobSummary[:display_limit],
'display_limit': display_limit,
'jobsLeft': jobsLeft,
'estailtojobslinks': '',
'rw': rw,
'template': 'dashboard.html',
'built': datetime.now().strftime("%H:%M:%S"),
}
setCacheEntry(request, "dashboard", json.dumps(data, cls=DateEncoder), 60 * 60)
response = render_to_response('dashboard.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
else:
del request.session['TFIRST']
del request.session['TLAST']
remainingEvents = RemainedEventsPerCloud3dayswind.objects.values('cloud', 'nrem')
remainingEventsSet = {}
for remev in remainingEvents:
remainingEventsSet[remev['cloud']] = remev['nrem']
data = {
'jobsLeft': jobsLeft,
'remainingWeightedEvents': remainingEventsSet,
}
return HttpResponse(json.dumps(data), content_type='application/json')
def taskESExtendedInfo(request):
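    """Return a plain HTML string with per-status event counts, e.g. " finished(123) ", for a given jeditaskid."""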
if 'jeditaskid' in request.GET:
jeditaskid = int(request.GET['jeditaskid'])
else:
return HttpResponse("Not jeditaskid supplied", content_type='text/html')
eventsdict=[]
equery = {'jeditaskid': jeditaskid}
eventsdict.extend(
JediEvents.objects.filter(**equery).values('status').annotate(count=Count('status')).order_by('status'))
for state in eventsdict: state['statusname'] = eventservicestatelist[state['status']]
estaskstr = ''
for s in eventsdict:
estaskstr += " %s(%s) " % (s['statusname'], s['count'])
return HttpResponse(estaskstr, content_type='text/html')
@login_customrequired
def getCSRFToken(request):
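    """Return a page with a CSRF token for authenticated users; unauthenticated requests get a JSON error."""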
c = {}
user = request.user
if user.is_authenticated:
c.update(csrf(request))
return render_to_response("csrftoken.html", c)
else:
resp = {"detail": "User not authenticated. Please login to bigpanda"}
dump = json.dumps(resp, cls=DateEncoder)
response = HttpResponse(dump, content_type='application/json')
return response
@login_customrequired
@csrf_exempt
def taskList(request):
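    """
    Task list page and JSON endpoint. Supports hashtag filtering, tape staging info, event service mode,
    caching of the rendered data, and an asynchronous total count of matching tasks.
    """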
valid, response = initRequest(request)
if not valid: return response
thread = None
dkey = digkey(request)
# Here we try to get cached data
data = getCacheEntry(request, "taskList")
#data = None
if data is not None:
data = json.loads(data)
data['request'] = request
if data['eventservice'] == True:
response = render_to_response('taskListES.html', data, content_type='text/html')
else:
response = render_to_response('taskList.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
if 'limit' in request.session['requestParams']:
limit = int(request.session['requestParams']['limit'])
else:
limit = 5000
if 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'pctfailed':
limit = 50000
if 'tasktype' in request.session['requestParams'] and request.session['requestParams']['tasktype'].startswith(
'anal'):
hours = 3 * 24
else:
hours = 7 * 24
sortby = "jeditaskid-desc"
if 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby']:
sortby = request.session['requestParams']['sortby']
eventservice = False
if 'eventservice' in request.session['requestParams'] and (
request.session['requestParams']['eventservice'] == 'eventservice' or request.session['requestParams'][
'eventservice'] == '1'):
eventservice = True
hours = 7 * 24
extraquery = ''
if 'hashtag' in request.session['requestParams']:
hashtagsrt = request.session['requestParams']['hashtag']
if ',' in hashtagsrt:
hashtaglistquery = ''.join("'" + ht + "' ," for ht in hashtagsrt.split(','))
elif '|' in hashtagsrt:
hashtaglistquery = ''.join("'" + ht + "' ," for ht in hashtagsrt.split('|'))
else:
hashtaglistquery = "'" + request.session['requestParams']['hashtag'] + "'"
hashtaglistquery = hashtaglistquery[:-1] if hashtaglistquery[-1] == ',' else hashtaglistquery
extraquery = """JEDITASKID IN ( SELECT HTT.TASKID FROM ATLAS_DEFT.T_HASHTAG H, ATLAS_DEFT.T_HT_TO_TASK HTT WHERE JEDITASKID = HTT.TASKID AND H.HT_ID = HTT.HT_ID AND H.HASHTAG IN ( %s ))""" % (hashtaglistquery)
if 'tape' in request.session['requestParams']:
extraquery = """JEDITASKID IN (SELECT TASKID FROM ATLAS_DEFT.t_production_task where PRIMARY_INPUT in (select DATASET FROM ATLAS_DEFT.T_DATASET_STAGING) )"""
query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=hours, limit=9999999, querytype='task', wildCardExt=True)
tmpTableName = get_tmp_table_name()
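    # if an explicit jeditaskid list is too long for a single IN clause, the ids are copied into a
    # temporary table and referenced by a TRANSACTIONKEY in a sub-select instead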
if 'jeditaskid__in' in query:
taskl = query['jeditaskid__in']
if len(taskl) > DB_N_MAX_IN_QUERY:
transactionKey = insert_to_temp_table(taskl)
selectTail = """jeditaskid in (SELECT tmp.id FROM %s tmp where TRANSACTIONKEY=%i)""" % (tmpTableName, transactionKey)
extraquery = selectTail if len(extraquery) == 0 else extraquery + ' AND ' + selectTail
del query['jeditaskid__in']
if 'modificationtime__castdate__range' in query:
del query['modificationtime__castdate__range']
if len(extraquery) > 0:
if len(wildCardExtension) > 0:
wildCardExtension += ' AND ( ' + extraquery + ' )'
else:
wildCardExtension = extraquery
listTasks = []
if 'statenotupdated' in request.session['requestParams']:
tasks = taskNotUpdated(request, query, wildCardExtension)
else:
#wildCardExtension = "(((UPPER(taskname) LIKE UPPER('%%.%%')) AND (UPPER(taskname) LIKE UPPER('%%mc%%')) AND (UPPER(taskname) LIKE UPPER('%%.CAOD_HIGG5D1.%%')) AND (UPPER(taskname) LIKE UPPER('%%.32-07-8/'))))"
tasks = JediTasksOrdered.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values()
listTasks.append(JediTasksOrdered)
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and (
            'json' not in request.session['requestParams'])):
thread = Thread(target=totalCount, args=(listTasks, query, wildCardExtension, dkey))
thread.start()
else:
thread = None
# Getting hashtags for task selection
taskl = []
for task in tasks:
taskl.append(task['jeditaskid'])
error_codes_analyser = TasksErrorCodesAnalyser()
error_codes_analyser.schedule_preprocessing(tasks)
transactionKey = insert_to_temp_table(taskl)
# For tasks plots
setCacheEntry(request, transactionKey, taskl, 60 * 20, isData=True)
if DEPLOYMENT == 'ORACLE_ATLAS':
new_cur = connection.cursor()
new_cur.execute(
"""
select htt.TASKID,
LISTAGG(h.hashtag, ',') within GROUP (order by htt.taskid) as hashtags
from ATLAS_DEFT.T_HASHTAG h, ATLAS_DEFT.T_HT_TO_TASK htt , %s tmp
where TRANSACTIONKEY=%i and h.ht_id = htt.ht_id and tmp.id = htt.taskid
GROUP BY htt.TASKID
""" % (tmpTableName, transactionKey)
)
taskhashtags = dictfetchall(new_cur)
else:
taskhashtags = []
datasetstage = []
if 'tape' in request.session['requestParams']:
stagesource = ''
if 'stagesource' in request.session['requestParams'] and request.session['requestParams']['stagesource']!='Unknown':
stagesource = " and t1.SOURCE_RSE='" + request.session['requestParams']['stagesource'].strip().replace("'","''")+"\'"
elif 'stagesource' in request.session['requestParams'] and request.session['requestParams']['stagesource']=='Unknown':
stagesource = ' and t1.SOURCE_RSE is null'
new_cur.execute(
"""
SELECT t1.DATASET, t1.STATUS, t1.STAGED_FILES, t1.START_TIME, t1.END_TIME, t1.RSE, t1.TOTAL_FILES,
t1.UPDATE_TIME, t1.SOURCE_RSE, t2.TASKID FROM ATLAS_DEFT.T_DATASET_STAGING t1
INNER join ATLAS_DEFT.T_ACTION_STAGING t2 on t1.DATASET_STAGING_ID=t2.DATASET_STAGING_ID %s and taskid in (SELECT tmp.id FROM %s tmp where TRANSACTIONKEY=%i)
""" % (stagesource, tmpTableName, transactionKey)
)
datasetstage = dictfetchall(new_cur)
taskslistfiltered = set()
for datasetstageitem in datasetstage:
taskslistfiltered.add(datasetstageitem['TASKID'])
if datasetstageitem['START_TIME']:
datasetstageitem['START_TIME'] = datasetstageitem['START_TIME'].strftime(defaultDatetimeFormat)
else:
datasetstageitem['START_TIME'] = ''
if datasetstageitem['END_TIME']:
datasetstageitem['END_TIME'] = datasetstageitem['END_TIME'].strftime(defaultDatetimeFormat)
else:
datasetstageitem['END_TIME'] = ''
if not datasetstageitem['SOURCE_RSE']:
datasetstageitem['SOURCE_RSE'] = 'Unknown'
if datasetstageitem['UPDATE_TIME']:
datasetstageitem['UPDATE_TIME'] = datasetstageitem['UPDATE_TIME'].strftime(defaultDatetimeFormat)
else:
datasetstageitem['UPDATE_TIME'] = ''
if 'stagesource' in request.session['requestParams']:
newtasks = []
newtaskl = []
for task in tasks:
if task['jeditaskid'] in taskslistfiltered:
newtaskl.append(task['jeditaskid'])
newtasks.append(task)
tasks = newtasks
taskl = newtaskl
eventInfoDict = {}
if eventservice:
#we get here events data
tquery = {}
tasksEventInfo = GetEventsForTask.objects.filter(**tquery).extra(
where=["JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey)]).values('jeditaskid', 'totevrem', 'totev')
        # we copy the results out because raw and ORM queries are intermixed; with the next new_cur.execute tasksEventInfo clears
for tasksEventInfoItem in tasksEventInfo:
listItem = {}
listItem["jeditaskid"] = tasksEventInfoItem["jeditaskid"]
listItem["totevrem"] = tasksEventInfoItem["totevrem"]
listItem["totev"] = tasksEventInfoItem["totev"]
eventInfoDict[tasksEventInfoItem["jeditaskid"]] = listItem
taskids = {}
for taskid in taskhashtags:
taskids[taskid['TASKID']] = taskid['HASHTAGS']
    # filter tasks if several hashtags are combined with the 'AND' operator in the query
if 'hashtagsrt' in locals() and ',' in hashtagsrt:
thashtags = hashtagsrt.split(',')
newtasks = []
for task in tasks:
if task['jeditaskid'] in taskids.keys():
if all(ht+',' in taskids[task['jeditaskid']]+',' for ht in thashtags):
newtasks.append(task)
tasks = newtasks
hashtags = []
for task in tasks:
# Forming hashtag list for summary attribute table
if task['jeditaskid'] in taskids.keys():
task['hashtag'] = taskids[task['jeditaskid']]
for hashtag in taskids[task['jeditaskid']].split(','):
if hashtag not in hashtags:
hashtags.append(hashtag)
if eventservice:
            # adding event data
if task['jeditaskid'] in eventInfoDict.keys():
task['eventsData'] = eventInfoDict[task['jeditaskid']]
if len(hashtags) > 0:
hashtags = sorted(hashtags, key=lambda h: h.lower())
tasks = cleanTaskList(tasks, sortby=sortby, add_datasets_info=True)
ntasks = len(tasks)
nmax = ntasks
# if 'display_limit' in request.session['requestParams']:
# and int(request.session['requestParams']['display_limit']) < nmax:
# display_limit = int(request.session['requestParams']['display_limit'])
# nmax = display_limit
# url_nolimit = removeParam(request.get_full_path(), 'display_limit')
# else:
# display_limit = 300
# nmax = display_limit
# url_nolimit = request.get_full_path()
if 'display_limit' not in request.session['requestParams']:
display_limit = 100
nmax = display_limit
url_nolimit = request.get_full_path() + "&display_limit=" + str(nmax)
else:
display_limit = int(request.session['requestParams']['display_limit'])
nmax = display_limit
url_nolimit = request.get_full_path() + "&display_limit=" + str(nmax)
# from django.db import connection
# print 'SQL query:', connection.queries
tasks = getTaskScoutingInfo(tasks, nmax)
if 'tape' in request.session['requestParams'] and len(datasetstage)>0:
datasetRSEsHash = {}
for dataset in datasetstage:
datasetRSEsHash[dataset['TASKID']] = dataset['SOURCE_RSE']
for task in tasks:
task['stagesource'] = datasetRSEsHash.get(task['jeditaskid'], 'Unknown')
## For event service, pull the jobs and event ranges
doESCalc = False
if eventservice and doESCalc:
taskl = []
for task in tasks:
taskl.append(task['jeditaskid'])
jquery = {}
jquery['jeditaskid__in'] = taskl
jobs = []
jobs.extend(Jobsactive4.objects.filter(**jquery).values('pandaid', 'jeditaskid'))
jobs.extend(Jobsarchived4.objects.filter(**jquery).values('pandaid', 'jeditaskid'))
taskdict = {}
for job in jobs:
taskdict[job['pandaid']] = job['jeditaskid']
estaskdict = {}
esjobs = []
for job in jobs:
esjobs.append(job['pandaid'])
random.seed()
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
else:
tmpTableName = "TMP_IDS1"
tk_es_jobs = random.randrange(1000000)
# connection.enter_transaction_management()
new_cur = connection.cursor()
executionData = []
for id in esjobs:
executionData.append((id, tk_es_jobs))
query = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
new_cur.executemany(query, executionData)
# connection.commit()
new_cur.execute(
"""
SELECT /*+ dynamic_sampling(TMP_IDS1 0) cardinality(TMP_IDS1 10) INDEX_RS_ASC(ev JEDI_EVENTS_PANDAID_STATUS_IDX) NO_INDEX_FFS(ev JEDI_EVENTS_PK) NO_INDEX_SS(ev JEDI_EVENTS_PK) */ PANDAID,STATUS FROM ATLAS_PANDA.JEDI_EVENTS ev, %s WHERE TRANSACTIONKEY=%i AND PANDAID = ID
""" % (tmpTableName, tk_es_jobs)
)
evtable = dictfetchall(new_cur)
for ev in evtable:
taskid = taskdict[ev['PANDAID']]
if taskid not in estaskdict:
estaskdict[taskid] = {}
for s in eventservicestatelist:
estaskdict[taskid][s] = 0
evstat = eventservicestatelist[ev['STATUS']]
estaskdict[taskid][evstat] += 1
for task in tasks:
taskid = task['jeditaskid']
if taskid in estaskdict:
estaskstr = ''
for s in estaskdict[taskid]:
if estaskdict[taskid][s] > 0:
estaskstr += " %s(%s) " % (s, estaskdict[taskid][s])
task['estaskstr'] = estaskstr
## set up google flow diagram
flowstruct = buildGoogleFlowDiagram(request, tasks=tasks)
xurl = extensibleURL(request)
nosorturl = removeParam(xurl, 'sortby', mode='extensible')
nohashtagurl = removeParam(xurl, 'hashtag', mode='extensible')
noerrordialogurl = removeParam(xurl, 'hashtag', mode='errordialog')
    if thread is not None:
try:
thread.join()
tasksTotalCount = sum(tcount[dkey])
print (dkey)
print (tcount[dkey])
del tcount[dkey]
print (tcount)
print (tasksTotalCount)
except:
tasksTotalCount = -1
else: tasksTotalCount = -1
listPar = []
for key, val in request.session['requestParams'].items():
if (key != 'limit' and key != 'display_limit'):
listPar.append(key + '=' + str(val))
if len(listPar) > 0:
urlParametrs = '&'.join(listPar) + '&'
else:
urlParametrs = None
print (listPar)
del listPar
if (math.fabs(ntasks - tasksTotalCount) < 1000 or tasksTotalCount == -1):
tasksTotalCount = None
else:
tasksTotalCount = int(math.ceil((tasksTotalCount + 10000) / 10000) * 10000)
tasksToShow = tasks[:nmax]
for task in tasksToShow:
if task['creationdate']:
task['creationdate'] = task['creationdate'].strftime(defaultDatetimeFormat)
if task['modificationtime']:
task['modificationtime'] = task['modificationtime'].strftime(defaultDatetimeFormat)
if task['starttime']:
task['starttime'] = task['starttime'].strftime(defaultDatetimeFormat)
if task['statechangetime']:
task['statechangetime'] = task['statechangetime'].strftime(defaultDatetimeFormat)
if task['ttcrequested']:
task['ttcrequested'] = task['ttcrequested'].strftime(defaultDatetimeFormat)
error_summary_table = error_codes_analyser.get_errors_table()
error_summary_table = json.dumps(error_summary_table, cls=DateEncoder)
if is_json_request(request):
# Add datasets info to the json dump
tasks = get_datasets_for_tasklist(tasks)
# getting jobs metadata if it is requested in URL [ATLASPANDA-492]
if 'extra' in request.session['requestParams'] and 'metastruct' in request.session['requestParams']['extra']:
jeditaskids = list(set([task['jeditaskid'] for task in tasks]))
MAX_N_TASKS = 100 # protection against DB overloading
if len(jeditaskids) <= MAX_N_TASKS:
job_pids = []
jobQuery = {
'jobstatus__in': ['finished', 'failed', 'transferring', 'merging', 'cancelled', 'closed', 'holding'],
'jeditaskid__in': jeditaskids
}
job_pids.extend(Jobsarchived4.objects.filter(**jobQuery).values('pandaid', 'jeditaskid', 'jobstatus', 'creationtime'))
job_pids.extend(Jobsarchived.objects.filter(**jobQuery).values('pandaid', 'jeditaskid', 'jobstatus', 'creationtime'))
if len(job_pids) > 0:
jobs = addJobMetadata(job_pids)
taskMetadata = {}
for job in jobs:
if not job['jeditaskid'] in taskMetadata:
taskMetadata[job['jeditaskid']] = {}
if 'metastruct' in job:
taskMetadata[job['jeditaskid']][job['pandaid']] = job['metastruct']
for task in tasks:
if task['jeditaskid'] in taskMetadata:
task['jobs_metadata'] = taskMetadata[task['jeditaskid']]
if 'extra' in request.session['requestParams'] and 'jobstatecount' in request.session['requestParams']['extra']:
js_count_bytask_dict = get_job_state_summary_for_tasklist(tasks)
for task in tasks:
if task['jeditaskid'] in js_count_bytask_dict:
task['job_state_count'] = js_count_bytask_dict[task['jeditaskid']]
else:
task['job_state_count'] = {}
dump = json.dumps(tasks, cls=DateEncoder)
del request.session
return JsonResponse(tasks, encoder=DateEncoder, safe=False)
else:
sumd = task_summary_dict(request, tasks, copy.deepcopy(standard_taskfields) +
['stagesource'] if 'tape' in request.session['requestParams'] else copy.deepcopy(standard_taskfields))
del request.session['TFIRST']
del request.session['TLAST']
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'tasks': tasksToShow,
'datasetstage': json.dumps(datasetstage, cls=DateEncoder),
'ntasks': ntasks,
'sumd': sumd,
'hashtags': hashtags,
'xurl': xurl,
'nosorturl': nosorturl,
'nohashtagurl': nohashtagurl,
'noerrordialogurl':noerrordialogurl,
'url_nolimit': url_nolimit,
'display_limit': nmax,
'flowstruct': flowstruct,
'eventservice': eventservice,
'requestString': urlParametrs,
'tasksTotalCount': tasksTotalCount,
'built': datetime.now().strftime("%H:%M:%S"),
'idtasks': transactionKey,
'error_summary_table': error_summary_table
}
setCacheEntry(request, "taskList", json.dumps(data, cls=DateEncoder), 60 * 20)
if eventservice:
response = render_to_response('taskListES.html', data, content_type='text/html')
else:
response = render_to_response('taskList.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
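# A minimal client-side sketch of how the JSON flavour of the taskList view above could be consumed.
# It is illustrative only and not wired to any URL route; the default base_url, the '/tasks/' path
# and the query parameters are assumptions for illustration rather than guaranteed endpoints.
def _example_fetch_tasklist_json(base_url='https://bigpanda.cern.ch'):
    import json
    from urllib.request import Request, urlopen
    # ask for JSON either via the Accept header or the json request parameter handled by is_json_request()
    req = Request(base_url + '/tasks/?days=1&json=1', headers={'Accept': 'application/json'})
    with urlopen(req, timeout=60) as resp:
        return json.loads(resp.read().decode('utf-8'))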
@never_cache
def killtasks(request):
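    """
    Forward a finish (action=0) or abort (action=1) request for a task to ProdSys on behalf of the
    CERN-authenticated user and return the ProdSys response as JSON.
    """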
valid, response = initRequest(request)
if not valid:
return response
taskid = -1
action = -1
if 'task' in request.session['requestParams']:
taskid = int(request.session['requestParams']['task'])
if 'action' in request.session['requestParams']:
action = int(request.session['requestParams']['action'])
prodsysHost = None
prodsysToken = None
prodsysUrl = None
username = None
fullname = None
if 'prodsysHost' in PRODSYS:
prodsysHost = PRODSYS['prodsysHost']
if 'prodsysToken' in PRODSYS:
prodsysToken = PRODSYS['prodsysToken']
if action == 0:
prodsysUrl = '/prodtask/task_action_ext/finish/'
elif action == 1:
prodsysUrl = '/prodtask/task_action_ext/abort/'
else:
resp = {"detail": "Action is not recognized"}
dump = json.dumps(resp, cls=DateEncoder)
response = HttpResponse(dump, content_type='application/json')
return response
cern_auth_provider = None
user = request.user
# TODO
# temporary while both old and new CERN auth supported
if user.is_authenticated and user.social_auth is not None:
if len(user.social_auth.filter(provider='cernauth2')) > 0:
cern_auth_provider = 'cernauth2'
elif len(user.social_auth.filter(provider='cernoidc')) > 0:
cern_auth_provider = 'cernoidc'
if cern_auth_provider and user.social_auth.get(provider=cern_auth_provider).extra_data is not None and (
'username' in user.social_auth.get(provider=cern_auth_provider).extra_data):
username = user.social_auth.get(provider=cern_auth_provider).extra_data['username']
fullname = user.social_auth.get(provider=cern_auth_provider).extra_data['name']
else:
resp = {"detail": "User not authenticated. Please login to BigPanDAmon with CERN"}
dump = json.dumps(resp, cls=DateEncoder)
response = HttpResponse(dump, content_type='application/json')
return response
if action == 1:
postdata = {"username": username, "task": taskid, "userfullname": fullname}
else:
postdata = {"username": username, "task": taskid, "parameters": [1], "userfullname": fullname}
headers = {
'Content-Type':'application/json',
'Accept': 'application/json',
'Authorization': 'Token ' + prodsysToken
}
conn = urllib3.HTTPSConnectionPool(prodsysHost, timeout=100)
resp = None
# if request.session['IS_TESTER']:
resp = conn.urlopen('POST', prodsysUrl, body=json.dumps(postdata, cls=DateEncoder), headers=headers, retries=1, assert_same_host=False)
# else:
# resp = {"detail": "You are not allowed to test. Sorry"}
# dump = json.dumps(resp, cls=DateEncoder)
# response = HttpResponse(dump, mimetype='text/plain')
# return response
if resp and len(resp.data) > 0:
try:
resp = json.loads(resp.data)
if resp['result'] == "FAILED":
resp['detail'] = 'Result:' + resp['result'] + ' with reason:' + resp['exception']
elif resp['result'] == "OK":
                resp['detail'] = 'Action performed successfully, details: ' + resp['details']
except:
resp = {"detail": "prodsys responce could not be parced"}
else:
resp = {"detail": "Error with sending request to prodsys"}
dump = json.dumps(resp, cls=DateEncoder)
response = HttpResponse(dump, content_type='application/json')
return response
def getTaskScoutingInfo(tasks, nmax):
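    """
    Enrich the first nmax tasks with total/remaining event counts and flags for failed scouting
    and for critical/non-critical scouting failures; returns the full tasks list.
    """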
taskslToBeDisplayed = tasks[:nmax]
tasksIdToBeDisplayed = [task['jeditaskid'] for task in taskslToBeDisplayed]
tquery = {}
tmpTableName = get_tmp_table_name()
transactionKey = random.randrange(1000000)
new_cur = connection.cursor()
if DEPLOYMENT == "POSTGRES":
create_temporary_table(new_cur, tmpTableName)
executionData = []
for id in tasksIdToBeDisplayed:
executionData.append((id, transactionKey))
query = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
new_cur.executemany(query, executionData)
tasksEventInfo = GetEventsForTask.objects.filter(**tquery).extra(
where=["JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey)]).values('jeditaskid', 'totevrem', 'totev')
    # we copy the results out because raw and ORM queries are intermixed; with the next new_cur.execute tasksEventInfo clears
tasksEventInfoList = []
for tasksEventInfoItem in tasksEventInfo:
listItem = {}
listItem["jeditaskid"] = tasksEventInfoItem["jeditaskid"]
listItem["totevrem"] = tasksEventInfoItem["totevrem"]
listItem["totev"] = tasksEventInfoItem["totev"]
tasksEventInfoList.append(listItem)
tasksEventInfoList.reverse()
failedInScouting = JediDatasets.objects.filter(**tquery).extra(where=["NFILESFAILED > NFILESTOBEUSED AND JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey) ]).values('jeditaskid')
taskStatuses = dict((task['jeditaskid'], task['status']) for task in tasks)
failedInScouting = [item['jeditaskid'] for item in failedInScouting if
(taskStatuses[item['jeditaskid']] in ('failed', 'broken'))]
# scoutingHasCritFailures
tquery['nfilesfailed__gt'] = 0
scoutingHasCritFailures = JediDatasets.objects.filter(**tquery).extra(
where=["JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey)]).values('jeditaskid')
scoutingHasCritFailures = [item['jeditaskid'] for item in scoutingHasCritFailures if
(taskStatuses[item['jeditaskid']] in ('scouting'))]
transactionKey = random.randrange(1000000)
executionData = []
for id in scoutingHasCritFailures:
executionData.append((id, transactionKey))
query = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
new_cur.executemany(query, executionData)
tquery = {}
tquery['nfilesfailed'] = 0
scoutingHasNonCritFailures = JediDatasets.objects.filter(**tquery).extra(
where=["JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey)]).values('jeditaskid')
scoutingHasNonCritFailures = [item['jeditaskid'] for item in scoutingHasNonCritFailures if (
taskStatuses[item['jeditaskid']] == 'scouting' and item['jeditaskid'] not in scoutingHasCritFailures)]
transactionKey = random.randrange(1000000)
executionData = []
for id in scoutingHasNonCritFailures:
executionData.append((id, transactionKey))
query = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
new_cur.executemany(query, executionData)
tquery = {}
tquery['relationtype'] = 'retry'
scoutingHasNonCritFailures = JediJobRetryHistory.objects.filter(**tquery).extra(
where=["JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey)]).values('jeditaskid')
scoutingHasNonCritFailures = [item['jeditaskid'] for item in scoutingHasNonCritFailures]
for task in taskslToBeDisplayed:
correspondendEventInfo = []
if tasksEventInfoList and len(tasksEventInfoList) > 0:
correspondendEventInfo = [item for item in tasksEventInfoList if item["jeditaskid"]==task['jeditaskid']] #filter(lambda n: n.get('jeditaskid') == task['jeditaskid'], tasksEventInfo)
if len(correspondendEventInfo) > 0:
task['totevrem'] = int(correspondendEventInfo[0]['totevrem'])
task['totev'] = correspondendEventInfo[0]['totev']
else:
task['totevrem'] = 0
task['totev'] = 0
if (task['jeditaskid'] in failedInScouting):
task['failedscouting'] = True
if (task['jeditaskid'] in scoutingHasCritFailures):
task['scoutinghascritfailures'] = True
if (task['jeditaskid'] in scoutingHasNonCritFailures):
task['scoutinghasnoncritfailures'] = True
return tasks
def getErrorSummaryForEvents(request):
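    """
    Build a per-error-code summary of failed events for a task and render it as HTML.
    In 'drop' mode, jobs are restricted via a temporary table of pandaids (tk keeps only the listed
    jobs, tkdj excludes the listed dropped jobs); 'nodrop' mode aggregates over all jobs.
    """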
valid, response = initRequest(request)
if not valid: return response
data = {}
eventsErrors = []
print ('getting error summary for events')
if 'jeditaskid' in request.session['requestParams']:
jeditaskid = int(request.session['requestParams']['jeditaskid'])
else:
data = {"error": "no jeditaskid supplied"}
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), status=404, content_type='application/json')
if 'mode' in request.session['requestParams']:
mode = request.session['requestParams']['mode']
else:
mode = 'drop'
transactionKey = None
if 'tk' in request.session['requestParams'] and request.session['requestParams']['tk']:
try:
transactionKey = int(request.session['requestParams']['tk'])
transactionKey = transactionKey if transactionKey > 0 else None
except:
_logger.debug('Transaction key is not integer, pass it as None')
transactionKeyDJ = None
if 'tkdj' in request.session['requestParams'] and request.session['requestParams']['tkdj']:
try:
transactionKeyDJ = int(request.session['requestParams']['tkdj'])
transactionKeyDJ = transactionKeyDJ if transactionKeyDJ > 0 else None
except:
_logger.debug('Transaction key DJ is not integer, pass it as None')
equery = {}
equery['jeditaskid']=jeditaskid
equery['error_code__isnull'] = False
if mode == 'drop':
eventsErrors = []
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1DEBUG"
else:
tmpTableName = "TMP_IDS1DEBUG"
new_cur = connection.cursor()
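        # both queries below aggregate JEDI events per error_code: the number of affected events
        # (sum of DEF_MAX_EVENTID - DEF_MIN_EVENTID + 1), the number of error records and jobs,
        # plus up to 10 sample pandaids per error code collected with LISTAGG over a row_number() window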
if transactionKey:
eequery = """
select error_code,
sum(neventsinjob) as nevents,
sum(nerrorsinjob) as nerrors,
count(pandaid) as njobs,
LISTAGG(case when aff <= 10 then pandaid end,',' ) WITHIN group (order by error_code, aff) as pandaidlist
from (
select pandaid, error_code, neventsinjob, nerrorsinjob,
row_number() over (partition by error_code ORDER BY neventsinjob desc) as aff
from (
(select pandaid, error_code,
sum(DEF_MAX_EVENTID-DEF_MIN_EVENTID+1) as neventsinjob,
count(*) as nerrorsinjob
from ATLAS_PANDA.Jedi_events
where jeditaskid={} and ERROR_CODE is not null
group by error_code, pandaid ) e
join
(select ID from {} where TRANSACTIONKEY={} ) j
on e.pandaid = j.ID))
group by error_code""".format(jeditaskid, tmpTableName, transactionKey)
elif transactionKeyDJ:
eequery = """
select error_code,
sum(neventsinjob) as nevents,
sum(nerrorsinjob) as nerrors,
count(pandaid) as njobs,
LISTAGG(case when aff <= 10 then pandaid end,',' ) WITHIN group (order by error_code, aff) as pandaidlist
from (
select pandaid, error_code, neventsinjob, nerrorsinjob,
row_number() over (partition by error_code ORDER BY neventsinjob desc) as aff
from (
(select pandaid, error_code,
sum(DEF_MAX_EVENTID-DEF_MIN_EVENTID+1) as neventsinjob,
count(*) as nerrorsinjob
from ATLAS_PANDA.Jedi_events
where jeditaskid={} and ERROR_CODE is not null
and pandaid not in ( select ID from {} where TRANSACTIONKEY={} )
group by error_code, pandaid ) e
))
group by error_code""".format(jeditaskid, tmpTableName, transactionKeyDJ)
else:
data = {"error": "no failed events found"}
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='application/json')
new_cur.execute(eequery)
eventsErrorsUP = dictfetchall(new_cur)
elif mode == 'nodrop':
# eventsErrors = JediEvents.objects.filter(**equery).values('error_code').annotate(njobs=Count('pandaid',distinct=True),nevents=Sum('def_max_eventid', field='def_max_eventid-def_min_eventid+1'))
new_cur = connection.cursor()
new_cur.execute(
"""select error_code, sum(neventsinjob) as nevents, sum(nerrorsinjob) as nerrors , count(pandaid) as njobs,
LISTAGG(case when aff <= 10 then pandaid end,',' ) WITHIN group (order by error_code, aff) as pandaidlist
from (select pandaid, error_code,
sum(DEF_MAX_EVENTID-DEF_MIN_EVENTID+1) as neventsinjob,
count(*) as nerrorsinjob,
row_number() over (partition by error_code ORDER BY sum(DEF_MAX_EVENTID-DEF_MIN_EVENTID+1) desc) as aff
from ATLAS_PANDA.Jedi_events
where jeditaskid=%i and ERROR_CODE is not null
group by error_code, pandaid)
group by error_code
""" % (jeditaskid)
)
eventsErrorsUP = dictfetchall(new_cur)
else:
data = {"error": "wrong mode specified"}
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='application/json')
for error in eventsErrorsUP:
line = dict()
for key, value in error.items():
line[key.lower()] = value
eventsErrors.append(line)
error_codes = get_job_error_desc()
for eventserror in eventsErrors:
try:
eventserror['error_code'] = int(eventserror['error_code'])
if eventserror['error_code'] in error_codes['piloterrorcode'].keys():
eventserror['error_description'] = error_codes['piloterrorcode'][eventserror['error_code']]
else:
eventserror['error_description'] = ''
except:
eventserror['error_description'] = ''
if eventserror['pandaidlist'] and len(eventserror['pandaidlist']) > 0:
eventserror['pandaidlist'] = eventserror['pandaidlist'].split(',')
data = {'errors': eventsErrors}
response = render_to_response('eventsErrorSummary.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
def getBrokerageLog(request):
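    """Fetch recent prod_brokerage records from Pandalog for a given taskid/jeditaskid within a time window."""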
iquery = {}
iquery['type'] = 'prod_brokerage'
iquery['name'] = 'panda.mon.jedi'
if 'taskid' in request.session['requestParams']:
iquery['message__startswith'] = request.session['requestParams']['taskid']
if 'jeditaskid' in request.session['requestParams']:
iquery['message__icontains'] = "jeditaskid=%s" % request.session['requestParams']['jeditaskid']
if 'hours' not in request.session['requestParams']:
hours = 72
else:
hours = int(request.session['requestParams']['hours'])
startdate = timezone.now() - timedelta(hours=hours)
startdate = startdate.strftime(defaultDatetimeFormat)
enddate = timezone.now().strftime(defaultDatetimeFormat)
iquery['bintime__range'] = [startdate, enddate]
records = Pandalog.objects.filter(**iquery).order_by('bintime').reverse()[:request.session['JOB_LIMIT']].values()
sites = {}
for record in records:
        message = record['message']
print (message)
def taskprofileplot(request):
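    """Return the task progress profile plot for a jeditaskid as a PNG image."""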
jeditaskid = 0
if 'jeditaskid' in request.GET: jeditaskid = int(request.GET['jeditaskid'])
image = None
if jeditaskid != 0:
dp = TaskProgressPlot()
image = dp.get_task_profile(taskid=jeditaskid)
if image is not None:
return HttpResponse(image, content_type="image/png")
else:
return HttpResponse('')
# response = HttpResponse(content_type="image/jpeg")
# red.save(response, "JPEG")
# return response
def taskESprofileplot(request):
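    """Return the event service task progress profile plot for a jeditaskid as a PNG image."""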
jeditaskid = 0
if 'jeditaskid' in request.GET: jeditaskid = int(request.GET['jeditaskid'])
image = None
if jeditaskid != 0:
dp = TaskProgressPlot()
image = dp.get_es_task_profile(taskid=jeditaskid)
if image is not None:
return HttpResponse(image, content_type="image/png")
else:
return HttpResponse('')
# response = HttpResponse(content_type="image/jpeg")
# red.save(response, "JPEG")
# return response
@login_customrequired
def taskProfile(request, jeditaskid=0):
"""A wrapper page for task profile plot"""
valid, response = initRequest(request)
if not valid:
return response
try:
jeditaskid = int(jeditaskid)
except ValueError:
msg = 'Provided jeditaskid: {} is not valid, it must be numerical'.format(jeditaskid)
_logger.exception(msg)
        response = HttpResponse(json.dumps(msg), status=400)
        return response
if jeditaskid > 0:
task_profile = TaskProgressPlot()
task_profile_start = task_profile.get_task_start(taskid=jeditaskid)
if 'starttime' in task_profile_start:
request.session['viewParams']['selection'] = ', started at ' + task_profile_start['starttime'].strftime(defaultDatetimeFormat)
else:
            msg = 'A task with provided jeditaskid={} does not exist'.format(jeditaskid)
            _logger.exception(msg)
            response = HttpResponse(json.dumps(msg), status=400)
            return response
else:
msg = 'Not valid jeditaskid provided: {}'.format(jeditaskid)
_logger.exception(msg)
        response = HttpResponse(json.dumps(msg), status=400)
        return response
data = {
'request': request,
'requestParams': request.session['requestParams'],
'viewParams': request.session['viewParams'],
'jeditaskid': jeditaskid,
}
response = render_to_response('taskProgressMonitor.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
# @login_customrequired
@never_cache
def taskProfileData(request, jeditaskid=0):
"""A view that returns data for task profile plot"""
valid, response = initRequest(request)
if not valid:
return response
try:
jeditaskid = int(jeditaskid)
except ValueError:
msg = 'Provided jeditaskid: {} is not valid, it must be numerical'.format(jeditaskid)
_logger.exception(msg)
        response = HttpResponse(json.dumps(msg), status=400)
        return response
if 'jobtype' in request.session['requestParams'] and request.session['requestParams']['jobtype']:
request_job_types = request.session['requestParams']['jobtype'].split(',')
else:
request_job_types = None
if 'jobstatus' in request.session['requestParams'] and request.session['requestParams']['jobstatus']:
request_job_states = request.session['requestParams']['jobstatus'].split(',')
else:
request_job_states = None
if 'progressunit' in request.session['requestParams'] and request.session['requestParams']['progressunit']:
request_progress_unit = request.session['requestParams']['progressunit']
else:
request_progress_unit = 'jobs'
# get raw profile data
if jeditaskid > 0:
task_profile = TaskProgressPlot()
task_profile_dict = task_profile.get_raw_task_profile_full(taskid=jeditaskid)
else:
msg = 'Not valid jeditaskid provided: {}'.format(jeditaskid)
_logger.exception(msg)
        response = HttpResponse(json.dumps(msg), status=400)
        return response
# filter raw data corresponding to request params
if request_job_types is not None and len(request_job_types) > 0:
for jt, values in task_profile_dict.items():
if jt not in request_job_types:
task_profile_dict[jt] = []
if request_job_states is not None and len(request_job_states) > 0:
for jt, values in task_profile_dict.items():
temp = []
for v in values:
if v['jobstatus'] in request_job_states:
temp.append(v)
task_profile_dict[jt] = temp
# convert raw data to format acceptable by chart.js library
job_time_names = ['end', 'start', 'creation']
job_types = ['build', 'run', 'merge']
job_states = ['finished', 'failed', 'closed', 'cancelled']
colors = {
'creation': {'finished': 'RGBA(162,198,110,1)', 'failed': 'RGBA(255,176,176,1)',
'closed': 'RGBA(214,214,214,1)', 'cancelled': 'RGBA(255,227,177,1)'},
'start': {'finished': 'RGBA(70,181,117,0.8)', 'failed': 'RGBA(235,0,0,0.8)',
'closed': 'RGBA(100,100,100,0.8)', 'cancelled': 'RGBA(255,165,0,0.8)'},
'end': {'finished': 'RGBA(2,115,0,0.8)', 'failed': 'RGBA(137,0,0,0.8)',
'closed': 'RGBA(0,0,0,0.8)', 'cancelled': 'RGBA(157,102,0,0.8)'},
}
markers = {'build': 'triangle', 'run': 'circle', 'merge': 'crossRot'}
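    # marker shape encodes the job type, colour encodes the time kind and job state, and the point
    # radius below shrinks exponentially with the number of jobs so dense tasks stay readable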
order_mpx = {
'creation': 1,
'start': 2,
'end': 3,
'finished': 7,
'failed': 6,
'closed': 5,
'cancelled': 4,
}
order_dict = {}
for jtn in job_time_names:
for js in job_states:
order_dict[jtn+'_'+js] = order_mpx[js] * order_mpx[jtn]
task_profile_data_dict = {}
for jt in job_types:
if len(task_profile_dict[jt]) > 0:
for js in list(set(job_states) & set([r['jobstatus'] for r in task_profile_dict[jt]])):
for jtmn in job_time_names:
task_profile_data_dict['_'.join((jtmn, js, jt))] = {
'name': '_'.join((jtmn, js, jt)),
'label': jtmn.capitalize() + ' time of a ' + js + ' ' + jt + ' job',
'pointRadius': round(1 + 3.0 * math.exp(-0.0004*len(task_profile_dict[jt]))),
'backgroundColor': colors[jtmn][js],
'borderColor': colors[jtmn][js],
'pointStyle': markers[jt],
'data': [],
}
for jt in job_types:
if jt in task_profile_dict:
rdata = task_profile_dict[jt]
for r in rdata:
for jtn in job_time_names:
task_profile_data_dict['_'.join((jtn, r['jobstatus'], jt))]['data'].append({
't': r[jtn].strftime(defaultDatetimeFormat),
'y': r['indx'] if request_progress_unit == 'jobs' else r['nevents'],
'label': r['pandaid'],
})
# deleting point groups if data is empty
group_to_remove = []
for group in task_profile_data_dict:
if len(task_profile_data_dict[group]['data']) == 0:
group_to_remove.append(group)
for group in group_to_remove:
try:
del task_profile_data_dict[group]
except:
_logger.info('failed to remove key from dict')
# dict -> list
task_profile_data = [v for k, v in task_profile_data_dict.items()]
data = {'plotData': task_profile_data, 'error': ''}
return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='application/json')
@login_customrequired
def userProfile(request, username=""):
"""A wrapper page for task profile plot"""
valid, response = initRequest(request)
if not valid:
return response
try:
username = str(username)
except ValueError:
msg = 'Provided username: {} is not valid, it must be string'.format(username)
_logger.exception(msg)
response = HttpResponse(json.dumps(msg), status=400)
return response
if len(username) > 0:
query = setupView(request, hours=24 * 7, querytype='task', wildCardExt=False)
query['username__icontains'] = username.strip()
tasks = JediTasks.objects.filter(**query).values('jeditaskid')
if len(list(tasks)) > 0:
            msg = 'The username exists: {}'.format(username)
        else:
            msg = 'The username does not exist or no tasks were found: {}'.format(username)
response = HttpResponse(json.dumps(msg), status=400)
return response
if query and 'modificationtime__castdate__range' in query:
request.session['timerange'] = query['modificationtime__castdate__range']
else:
msg = 'Not valid username provided: {}'.format(username)
_logger.exception(msg)
response = HttpResponse(json.dumps(msg), status=400)
return response
data = {
'request': request,
'requestParams': request.session['requestParams'],
'viewParams': request.session['viewParams'],
'username': username,
'timerange': request.session['timerange'],
}
response = render_to_response('userProfile.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
@never_cache
def userProfileData(request):
"""A view that returns data for task profile plot"""
valid, response = initRequest(request)
if not valid:
return response
# Here we try to get cached data
data = getCacheEntry(request, "userProfileData", isData=True)
    # data = None
if data is not None:
data = json.loads(data)
return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='application/json')
if 'username' in request.session['requestParams'] and request.session['requestParams']['username']:
username = str(request.session['requestParams']['username'])
else:
        msg = 'No username provided, it must be a non-empty string'
_logger.warning(msg)
response = HttpResponse(json.dumps(msg), status=400)
return response
if 'jobtype' in request.session['requestParams'] and request.session['requestParams']['jobtype']:
request_job_types = request.session['requestParams']['jobtype'].split(',')
else:
request_job_types = None
if 'jobstatus' in request.session['requestParams'] and request.session['requestParams']['jobstatus']:
request_job_states = request.session['requestParams']['jobstatus'].split(',')
if 'active' in request.session['requestParams']['jobstatus']:
# replace active with list of real job states
request.session['requestParams']['jobstatus'] = request.session['requestParams']['jobstatus'].replace(
'active',
','.join(list(set(const.JOB_STATES) - set(const.JOB_STATES_FINAL))))
else:
request_job_states = None
# get raw profile data
if len(username) > 0:
query = setupView(request, hours=24 * 7, querytype='job', wildCardExt=False)
user_Dataprofile = UserProfilePlot(username)
user_Dataprofile_dict = user_Dataprofile.get_raw_data_profile_full(query)
else:
msg = 'Not valid username provided: {}'.format(username)
_logger.exception(msg)
        response = HttpResponse(json.dumps(msg), status=400)
        return response
# filter raw data corresponding to request params
if request_job_types is not None and len(request_job_types) > 0:
for jt, values in user_Dataprofile_dict.items():
if jt not in request_job_types:
user_Dataprofile_dict[jt] = []
if request_job_states is not None and len(request_job_states) > 0:
for jt, values in user_Dataprofile_dict.items():
temp = []
for v in values:
if v['jobstatus'] in request_job_states:
temp.append(v)
user_Dataprofile_dict[jt] = temp
# convert raw data to format acceptable by chart.js library
job_time_names = ['end', 'start', 'creation']
job_types = ['build', 'run', 'merge']
job_states = ['active', 'finished', 'failed', 'closed', 'cancelled']
colors = {
'creation': {'active': 'RGBA(0,169,255,0.75)', 'finished': 'RGBA(162,198,110,0.75)', 'failed': 'RGBA(255,176,176,0.75)',
'closed': 'RGBA(214,214,214,0.75)', 'cancelled': 'RGBA(255,227,177,0.75)'},
'start': {'active': 'RGBA(0,85,183,0.75)', 'finished': 'RGBA(70,181,117,0.8)', 'failed': 'RGBA(235,0,0,0.75)',
'closed': 'RGBA(100,100,100,0.75)', 'cancelled': 'RGBA(255,165,0,0.75)'},
'end': {'active': 'RGBA(0,0,141,0.75)', 'finished': 'RGBA(0,100,0,0.75)', 'failed': 'RGBA(137,0,0,0.75)',
'closed': 'RGBA(0,0,0,0.75)', 'cancelled': 'RGBA(157,102,0,0.75)'},
}
markers = {'build': 'triangle', 'run': 'circle', 'merge': 'crossRot'}
order_mpx = {
'creation': 1,
'start': 2,
'end': 3,
'finished': 4,
'failed': 3,
'closed': 2,
'cancelled': 1,
'active': 5,
}
order_dict = {}
for jtn in job_time_names:
for js in job_states:
order_dict[jtn+'_'+js] = order_mpx[js] * order_mpx[jtn]
user_Dataprofile_data_dict = {}
for jt in job_types:
if len(user_Dataprofile_dict[jt]) > 0:
for js in list(set(job_states) & set([r['jobstatus'] for r in user_Dataprofile_dict[jt]])):
for jtmn in job_time_names:
user_Dataprofile_data_dict['_'.join((jtmn, js, jt))] = {
'name': '_'.join((jtmn, js, jt)),
'label': jtmn.capitalize() + ' time of a ' + js + ' ' + jt + ' job',
'pointRadius': round(1 + 4.0 * math.exp(-0.0004*len(user_Dataprofile_dict[jt]))),
'backgroundColor': colors[jtmn][js],
'borderColor': colors[jtmn][js],
'pointStyle': markers[jt],
'data': [],
}
for jt in job_types:
if jt in user_Dataprofile_dict:
rdata = user_Dataprofile_dict[jt]
for r in rdata:
for jtn in job_time_names:
if jtn in r and r[jtn] is not None:
user_Dataprofile_data_dict['_'.join((jtn, r['jobstatus'], jt))]['data'].append({
't': r[jtn].strftime(defaultDatetimeFormat),
'y': r['indx'],
'label': r['pandaid'],
})
# deleting point groups if data is empty
group_to_remove = []
for group in user_Dataprofile_data_dict:
if len(user_Dataprofile_data_dict[group]['data']) == 0:
group_to_remove.append(group)
for group in group_to_remove:
try:
del user_Dataprofile_data_dict[group]
except:
_logger.info('failed to remove key from dict')
# dict -> list
user_Dataprofile_data = [v for k, v in user_Dataprofile_data_dict.items()]
data = {'plotData': user_Dataprofile_data, 'error': ''}
setCacheEntry(request, "userProfileData", json.dumps(data, cls=DateEncoder), 60 * 30, isData=True)
return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='application/json')
@login_customrequired
def taskInfo(request, jeditaskid=0):
try:
jeditaskid = int(jeditaskid)
except:
        jeditaskid = re.findall(r"\d+", jeditaskid)
jdtstr = ""
for jdt in jeditaskid:
jdtstr = jdtstr+str(jdt)
return redirect('/task/'+jdtstr)
valid, response = initRequest(request)
if not valid:
return response
# return json for dataTables if dt in request params
if 'dt' in request.session['requestParams'] and 'tkiec' in request.session['requestParams']:
tkiec = request.session['requestParams']['tkiec']
data = getCacheEntry(request, tkiec, isData=True)
return HttpResponse(data, content_type='application/json')
    # Here we try to get cached data; we take any cached data that is available
data = getCacheEntry(request, "taskInfo", skipCentralRefresh=True)
# temporarily turn off caching
# data = None
if data is not None:
data = json.loads(data)
if data is not None:
doRefresh = False
# check the build date of cached data, since data structure changed on 2021-03-22 and
# we need to refresh cached data for ended tasks which we store for 1 month
if 'built' in data and data['built'] is not None:
try:
builtDate = datetime.strptime('2021-'+data['built'], defaultDatetimeFormat)
if builtDate < datetime.strptime('2021-03-22 14:00:00', defaultDatetimeFormat):
doRefresh = True
except:
doRefresh = True
# We still want to refresh tasks if request came from central crawler and task not in the frozen state
if (('REMOTE_ADDR' in request.META) and (request.META['REMOTE_ADDR'] in notcachedRemoteAddress) and
data['task'] and data['task']['status'] not in ['broken', 'aborted']):
doRefresh = True
# we check here whether task status didn't changed for both (user or crawler request)
if data['task'] and data['task']['status'] and data['task']['status'] in ['done', 'finished', 'failed']:
if 'jeditaskid' in request.session['requestParams']: jeditaskid = int(
request.session['requestParams']['jeditaskid'])
if jeditaskid != 0:
query = {'jeditaskid': jeditaskid}
values = ['status', 'superstatus', 'modificationtime']
tasks = JediTasks.objects.filter(**query).values(*values)[:1]
if len(tasks) > 0:
task = tasks[0]
if (task['status'] == data['task']['status'] and task['superstatus'] == data['task']['superstatus'] and
task['modificationtime'].strftime(defaultDatetimeFormat) == data['task']['modificationtime']):
doRefresh = False
else:
doRefresh = True
else:
doRefresh = True
# doRefresh = True
if not doRefresh:
data['request'] = request
if data['eventservice']:
if 'version' not in request.session['requestParams'] or (
'version' in request.session['requestParams'] and request.session['requestParams']['version'] != 'old'):
response = render_to_response('taskInfoESNew.html', data, content_type='text/html')
else:
response = render_to_response('taskInfoES.html', data, content_type='text/html')
else:
response = render_to_response('taskInfo.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
if 'taskname' in request.session['requestParams'] and request.session['requestParams']['taskname'].find('*') >= 0:
return taskList(request)
setupView(request, hours=365 * 24, limit=999999999, querytype='task')
tasks = []
warning = {}
if 'jeditaskid' in request.session['requestParams']:
jeditaskid = int(request.session['requestParams']['jeditaskid'])
mode = 'drop'
if 'mode' in request.session['requestParams']:
mode = request.session['requestParams']['mode']
# if no jeditaskid provided, try to find by task name
if jeditaskid < 1:
if 'taskname' in request.session['requestParams']:
querybyname = {'taskname': request.session['requestParams']['taskname']}
tasks.extend(JediTasks.objects.filter(**querybyname).values())
if len(tasks) > 0:
jeditaskid = tasks[0]['jeditaskid']
else:
return redirect('/tasks/')
# getting task info
taskrec = None
query = {'jeditaskid': jeditaskid}
extra = '(1=1)'
tasks.extend(JediTasks.objects.filter(**query).values())
tasks = cleanTaskList(tasks, add_datasets_info=False)
try:
taskrec = tasks[0]
except IndexError:
_logger.exception('No task with jeditaskid={} found'.format(jeditaskid))
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'columns': None,
}
return render_to_response('taskInfo.html', data, content_type='text/html')
eventservice = False
if 'eventservice' in taskrec and (taskrec['eventservice'] == 1 or taskrec['eventservice'] == 'eventservice'):
eventservice = True
mode = 'nodrop'
    # force nodrop mode for tasks older than 3 years, since the job retry data has been cleaned up
    if get_task_timewindow(taskrec, format_out='datetime')[0] <= datetime.now() - timedelta(days=365*3):
        warning['dropmode'] = ("The drop mode is unavailable since the job retry data has been cleaned up. "
                               "The data shown on this page is in nodrop mode.")
        mode = 'nodrop'
        warning['archive'] = "The jobs data has been moved to the archive, so the links to the job pages are unavailable"
# iDDS section
task_type = checkIfIddsTask(taskrec)
idds_info = None
if task_type == 'hpo':
mode = 'nodrop'
idds_info = {'task_type': 'hpo'}
else:
idds_info = {'task_type': 'idds'}
# prepare ordered list of task params
columns = []
for k, val in taskrec.items():
if is_timestamp(k):
try:
val = taskrec[k].strftime(defaultDatetimeFormat)
except:
val = str(taskrec[k])
        if val is None:
            # do not add params with empty value
            continue
pair = {'name': k, 'value': val}
columns.append(pair)
columns = sorted(columns, key=lambda x: x['name'].lower())
# get task params
taskparams = get_task_params(jeditaskid)
_logger.info('Got task info: {}'.format(time.time() - request.session['req_init_time']))
# load logtxt
logtxt = None
if taskrec and taskrec['errordialog']:
mat = re.match('^.*"([^"]+)"', taskrec['errordialog'])
if mat:
errurl = mat.group(1)
cmd = "curl -s -f --compressed '{}'".format(errurl)
logout = subprocess.getoutput(cmd)
if len(logout) > 0:
loglist = (logout.splitlines())[::-1]
logtxt = '\n'.join(loglist)
_logger.info("Loaded error log using '{}': {}".format(cmd, time.time() - request.session['req_init_time']))
# get datasets list and containers
dsets, dsinfo = datasets_for_task(jeditaskid)
if taskrec:
taskrec['dsinfo'] = dsinfo
taskrec['totev'] = dsinfo['neventsTot']
taskrec['totevproc'] = dsinfo['neventsUsedTot']
taskrec['pctfinished'] = (100 * taskrec['totevproc'] / taskrec['totev']) if (taskrec['totev'] > 0) else ''
taskrec['totevhs06'] = round(dsinfo['neventsTot'] * convert_hs06(taskrec['cputime'], taskrec['cputimeunit'])) if (taskrec['cputime'] and taskrec['cputimeunit'] and dsinfo['neventsTot'] > 0) else None
taskrec['totevoutput'] = dsinfo['neventsOutput'] if 'neventsOutput' in dsinfo else 0
# get input and output containers
inctrs = []
outctrs = []
if 'dsForIN' in taskparams and taskparams['dsForIN'] and isinstance(taskparams['dsForIN'], str):
inctrs = [{
'containername': cin,
'nfiles': 0,
'nfilesfinished': 0,
'nfilesfailed': 0, 'pct': 0
} for cin in taskparams['dsForIN'].split(',')]
# fill the list of input containers with progress info
for inc in inctrs:
for ds in dsets:
if ds['containername'] == inc['containername']:
inc['nfiles'] += ds['nfiles'] if ds['nfiles'] else 0
inc['nfilesfinished'] += ds['nfilesfinished'] if ds['nfilesfinished'] else 0
inc['nfilesfailed'] += ds['nfilesfailed'] if ds['nfilesfailed'] else 0
inc['pct'] = math.floor(100.0*inc['nfilesfinished']/inc['nfiles']) if ds['nfiles'] and ds['nfiles'] > 0 else inc['pct']
outctrs.extend(list(set([ds['containername'] for ds in dsets if ds['type'] in ('output', 'log') and ds['containername']])))
# get dataset locality
if DEPLOYMENT == 'ORACLE_ATLAS':
dataset_locality = get_dataset_locality(jeditaskid)
else:
dataset_locality = {}
for ds in dsets:
if jeditaskid in dataset_locality and ds['datasetid'] in dataset_locality[jeditaskid]:
ds['rse'] = ', '.join([item['rse'] for item in dataset_locality[jeditaskid][ds['datasetid']]])
_logger.info("Loading datasets info: {}".format(time.time() - request.session['req_init_time']))
# getBrokerageLog(request)
# get sum of hs06sec grouped by status
# creating a jquery with timewindow
jquery = copy.deepcopy(query)
jquery['modificationtime__castdate__range'] = get_task_timewindow(taskrec, format_out='str')
if DEPLOYMENT != 'POSTGRES':
hs06sSum = get_hs06s_summary_for_task(jquery)
else:
hs06sSum = {}
_logger.info("Loaded sum of hs06sec grouped by status: {}".format(time.time() - request.session['req_init_time']))
eventssummary = []
if eventservice:
# insert dropped jobs to temporary table if drop mode
transactionKeyDJ = -1
if mode == 'drop':
extra, transactionKeyDJ = insert_dropped_jobs_to_tmp_table(query, extra)
_logger.info("Inserting dropped jobs: {}".format(time.time() - request.session['req_init_time']))
_logger.info('tk of dropped jobs: {}'.format(transactionKeyDJ))
# getting events summary for a ES task
taskrec['totevproc_evst'] = 0
equery = copy.deepcopy(query)
# set timerange for better use of partitioned JOBSARCHIVED
equery['creationdate__range'] = get_task_timewindow(taskrec, format_out='str')
eventssummary = event_summary_for_task(mode, equery, tk_dj=transactionKeyDJ)
for entry in eventssummary:
if 'count' in entry and 'totev' in taskrec and taskrec['totev'] > 0:
entry['pct'] = round(entry['count'] * 100. / taskrec['totev'], 2)
else:
entry['pct'] = 0
status = entry.get("statusname", "-")
if status in ['finished', 'done', 'merged']:
taskrec['totevproc_evst'] += entry.get("count", 0)
# update task dict in data with more accurate events data
if taskrec:
taskrec['pcttotevproc_evst'] = round(100. * taskrec['totevproc_evst'] / taskrec['totev'], 2) if taskrec['totev'] > 0 else ''
taskrec['pctfinished'] = round(100. * taskrec['totevproc'] / taskrec['totev'], 2) if taskrec['totev'] > 0 else ''
_logger.info("Events states summary: {}".format(time.time() - request.session['req_init_time']))
# get corecount and normalized corecount values
ccquery = {
'jeditaskid': jeditaskid,
'jobstatus': 'running',
}
accsum = Jobsactive4.objects.filter(**ccquery).aggregate(accsum=Sum('actualcorecount'))
naccsum = Jobsactive4.objects.filter(**ccquery).aggregate(
naccsum=Sum(F('actualcorecount') * F('hs06') / F('corecount') / Value(10), output_field=FloatField()))
taskrec['accsum'] = accsum['accsum'] if 'accsum' in accsum else 0
taskrec['naccsum'] = naccsum['naccsum'] if 'naccsum' in naccsum else 0
_logger.info("Loaded corecount and normalized corecount: {}".format(time.time() - request.session['req_init_time']))
# update taskrec dict
if taskrec:
if 'tasktype' in taskrec and taskrec['tasktype']:
tmcj_list = get_top_memory_consumers(taskrec)
if len(tmcj_list) > 0 and len([True for job in tmcj_list if job['maxrssratio'] >= 1]) > 0:
warning['memoryleaksuspicion'] = {}
warning['memoryleaksuspicion']['message'] = 'Some jobs in this task consumed a lot of memory. '
warning['memoryleaksuspicion']['message'] += 'We suspect there might be memory leaks or some misconfiguration.'
warning['memoryleaksuspicion']['jobs'] = tmcj_list
if task_type is not None and idds_info is not None:
for itn in idds_info:
if isinstance(idds_info[itn], datetime):
idds_info[itn] = idds_info[itn].strftime(defaultDatetimeFormat)
taskrec['idds_info'] = idds_info
if 'ticketsystemtype' in taskrec and taskrec['ticketsystemtype'] == '' and taskparams is not None:
if 'ticketID' in taskparams:
taskrec['ticketid'] = taskparams['ticketID']
if 'ticketSystemType' in taskparams:
taskrec['ticketsystemtype'] = taskparams['ticketSystemType']
if 'creationdate' in taskrec:
taskrec['kibanatimefrom'] = taskrec['creationdate'].strftime("%Y-%m-%dT%H:%M:%SZ")
else:
taskrec['kibanatimefrom'] = None
if taskrec['status'] in ['cancelled', 'failed', 'broken', 'aborted', 'finished', 'done']:
taskrec['kibanatimeto'] = taskrec['modificationtime'].strftime("%Y-%m-%dT%H:%M:%SZ")
else:
taskrec['kibanatimeto'] = datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
if len(set([ds['storagetoken'] for ds in dsets if 'storagetoken' in ds and ds['storagetoken']])) > 0:
taskrec['destination'] = list(set([ds['storagetoken'] for ds in dsets if ds['storagetoken']]))[0]
elif taskrec['cloud'] == 'WORLD':
taskrec['destination'] = taskrec['nucleus']
if hs06sSum:
taskrec['totevprochs06'] = int(hs06sSum['finished']) if 'finished' in hs06sSum else None
taskrec['failedevprochs06'] = int(hs06sSum['failed']) if 'failed' in hs06sSum else None
taskrec['currenttotevhs06'] = int(hs06sSum['total']) if 'total' in hs06sSum else None
taskrec['brokerage'] = 'prod_brokerage' if taskrec['tasktype'] == 'prod' else 'analy_brokerage'
if DEPLOYMENT == 'ORACLE_ATLAS':
taskrec['slice'] = get_prod_slice_by_taskid(jeditaskid) if taskrec['tasktype'] == 'prod' else None
# datetime type -> str in order to avoid encoding errors in template
datetime_task_param_names = ['creationdate', 'modificationtime', 'starttime', 'statechangetime', 'ttcrequested']
datetime_dataset_param_names = ['statechecktime', 'creationtime', 'modificationtime']
if taskrec:
for dtp in datetime_task_param_names:
if taskrec[dtp]:
taskrec[dtp] = taskrec[dtp].strftime(defaultDatetimeFormat)
for dset in dsets:
for dsp, dspv in dset.items():
if dsp in datetime_dataset_param_names and dspv is not None:
dset[dsp] = dset[dsp].strftime(defaultDatetimeFormat)
if dspv is None:
dset[dsp] = ''
try:
del request.session['TFIRST']
del request.session['TLAST']
except:
_logger.exception('Failed to delete TFIRST and TLAST from request session')
if is_json_request(request):
del tasks
del columns
data = {
'task': taskrec,
'taskparams': taskparams,
'datasets': dsets,
}
return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='application/json')
else:
taskparams, jobparams = humanize_task_params(taskparams)
furl = request.get_full_path()
nomodeurl = extensibleURL(request, removeParam(furl, 'mode'))
# decide on data caching time [seconds]
cacheexpiration = 60 * 20 # second/minute * minutes
if taskrec and 'status' in taskrec and taskrec['status'] in const.TASK_STATES_FINAL and (
'dsinfo' in taskrec and 'nfiles' in taskrec['dsinfo'] and isinstance(taskrec['dsinfo']['nfiles'], int) and taskrec['dsinfo']['nfiles'] > 10000):
cacheexpiration = 3600 * 24 * 31 # we store such data a month
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'furl': furl,
'nomodeurl': nomodeurl,
'mode': mode,
'task': taskrec,
'taskparams': taskparams,
'jobparams': jobparams,
'columns': columns,
'jeditaskid': jeditaskid,
'logtxt': logtxt,
'datasets': dsets,
'inctrs': inctrs,
'outctrs': outctrs,
'vomode': VOMODE,
'eventservice': eventservice,
'built': datetime.now().strftime("%m-%d %H:%M:%S"),
'warning': warning,
}
data.update(getContextVariables(request))
if eventservice:
data['eventssummary'] = eventssummary
if 'version' not in request.session['requestParams'] or (
'version' in request.session['requestParams'] and request.session['requestParams']['version'] != 'old'):
# prepare input-centric ES taskInfo
_logger.info("This is input-centric ES taskInfo request")
# get input files state summary
transactionKeyIEC = -1
ifs_summary = []
inputfiles_list, ifs_summary, ifs_tk = input_summary_for_task(taskrec, dsets)
# Putting list of input IDs into the tmp table for connection with jobList
for tk, ids_list in ifs_tk.items():
tk = insert_to_temp_table(ids_list, tk)
# Putting list of inputs to cache separately for dataTables plugin
transactionKeyIEC = random.randrange(100000000)
setCacheEntry(request, transactionKeyIEC, json.dumps(inputfiles_list, cls=DateTimeEncoder), 60 * 30, isData=True)
_logger.info("Inputs states summary: {}".format(time.time() - request.session['req_init_time']))
# get lighted job summary
jobsummarylight, jobsummarylightsplitted = job_summary_for_task_light(taskrec)
_logger.info("Loaded lighted job summary: {}".format(time.time() - request.session['req_init_time']))
data['iecsummary'] = ifs_summary
data['tkiec'] = transactionKeyIEC
data['jobsummarylight'] = jobsummarylight
data['jobsummarylightsplitted'] = jobsummarylightsplitted
data['tkdj'] = transactionKeyDJ
setCacheEntry(request, "taskInfo", json.dumps(data, cls=DateEncoder), cacheexpiration)
response = render_to_response('taskInfoESNew.html', data, content_type='text/html')
else:
_logger.info("This old style ES taskInfo request")
# getting job summary and plots
plotsDict, jobsummary, scouts, metrics = job_summary_for_task(
jquery, '(1=1)',
mode=mode,
task_archive_flag=get_task_time_archive_flag(get_task_timewindow(taskrec, format_out='datetime')))
data['jobsummary'] = jobsummary
data['plotsDict'] = plotsDict
data['jobscoutids'] = scouts
setCacheEntry(request, "taskInfo", json.dumps(data, cls=DateEncoder), cacheexpiration)
response = render_to_response('taskInfoES.html', data, content_type='text/html')
else:
_logger.info("This is ordinary non-ES task")
# getting job summary and plots
plotsDict, jobsummary, scouts, metrics = job_summary_for_task(
jquery, '(1=1)',
mode=mode,
task_archive_flag=get_task_time_archive_flag(get_task_timewindow(taskrec, format_out='datetime')))
data['jobsummary'] = jobsummary
data['plotsDict'] = plotsDict
data['jobscoutids'] = scouts
data['task'].update(metrics)
setCacheEntry(request, "taskInfo", json.dumps(data, cls=DateEncoder), cacheexpiration)
response = render_to_response('taskInfo.html', data, content_type='text/html')
_logger.info('Rendered template: {}'.format(time.time() - request.session['req_init_time']))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
@login_customrequired
def taskInfoNew(request, jeditaskid=0):
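"""
Event service specific task info page. Non-ES tasks are redirected to /task/<jeditaskid>.
Serves cached data if the task state has not changed, otherwise collects datasets,
event and input state summaries, a light job summary and hs06/corecount metrics for the task.
"""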
try:
jeditaskid = int(jeditaskid)
except:
jeditaskid = re.findall(r"\d+", jeditaskid)
jdtstr = "".join(str(jdt) for jdt in jeditaskid)
return redirect('/task/'+jdtstr)
valid, response = initRequest(request)
if not valid:
return response
if 'jeditaskid' in request.session['requestParams']:
jeditaskid = int(request.session['requestParams']['jeditaskid'])
if jeditaskid < 0:
return redirect('/tasks/')
if 'taskname' in request.session['requestParams'] and request.session['requestParams']['taskname'].find('*') >= 0:
return taskList(request)
# return json for dataTables if dt in request params
if 'dt' in request.session['requestParams'] and 'tkiec' in request.session['requestParams']:
tkiec = request.session['requestParams']['tkiec']
data = getCacheEntry(request, tkiec, isData=True)
return HttpResponse(data, content_type='application/json')
# Here we try to get cached data; we take any cached data that is available
data = getCacheEntry(request, "taskInfoNew", skipCentralRefresh=True)
# data = None
if data is not None:
data = json.loads(data)
# Temporary protection
if 'built' in data:
### TODO: this should be fixed properly
try:
builtDate = datetime.strptime('2021-'+data['built'], defaultDatetimeFormat)
if builtDate < datetime.strptime('2020-09-28 11:00:00', defaultDatetimeFormat):
data = None
setCacheEntry(request, "taskInfoNew", json.dumps(data, cls=DateEncoder), 1)
except:
pass
doRefresh = False
# We still want to refresh tasks if request came from central crawler and task not in the frozen state
if (data is not None and ('REMOTE_ADDR' in request.META) and (request.META['REMOTE_ADDR'] in notcachedRemoteAddress) and
data['task'] and data['task']['status'] not in ['broken', 'aborted']):
doRefresh = True
# check here whether the task status has changed since the data was cached (for both user and crawler requests)
if data is not None and data['task'] and data['task']['status'] and data['task']['status'] in ['done', 'finished', 'failed']:
if 'jeditaskid' in request.session['requestParams']:
jeditaskid = int(request.session['requestParams']['jeditaskid'])
if jeditaskid != 0:
query = {'jeditaskid': jeditaskid}
values = ['status', 'superstatus', 'modificationtime']
tasks = JediTasks.objects.filter(**query).values(*values)[:1]
if len(tasks) > 0:
task = tasks[0]
if (task['status'] == data['task']['status'] and task['superstatus'] == data['task']['superstatus'] and
task['modificationtime'].strftime(defaultDatetimeFormat) == data['task']['modificationtime']):
doRefresh = False
else:
doRefresh = True
else:
doRefresh = True
# temp turning on refresh of all tasks to rewrite cache
# doRefresh = True
if not doRefresh:
data['request'] = request
if data['eventservice']:
response = render_to_response('taskInfoESNew.html', data, content_type='text/html')
else:
response = render_to_response('taskInfo.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
setupView(request, hours=365 * 24, limit=999999999, querytype='task')
mode = 'nodrop'
if 'mode' in request.session['requestParams'] and request.session['requestParams']['mode'] == 'drop':
mode = 'drop'
eventservice = False
warning = {}
taskrec = None
query = {'jeditaskid': jeditaskid}
extra = '(1=1)'
tasks = JediTasks.objects.filter(**query).values()
try:
taskrec = tasks[0]
except IndexError:
_logger.exception('No task with jeditaskid={} found'.format(jeditaskid))
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'columns': None,
}
return render_to_response('taskInfoESNew.html', data, content_type='text/html')
_logger.info('Got task info: {}'.format(time.time() - request.session['req_init_time']))
if 'eventservice' in taskrec and taskrec['eventservice'] == 1:
eventservice = True
if not eventservice:
return redirect('/task/' + str(jeditaskid))
# prepare ordered list of task params
columns = []
for k, val in taskrec.items():
if is_timestamp(k):
try:
val = taskrec[k].strftime(defaultDatetimeFormat)
except:
val = str(taskrec[k])
if val is None:
# do not add params with empty value
continue
pair = {'name': k, 'value': val}
columns.append(pair)
columns = sorted(columns, key=lambda x: x['name'].lower())
# get task params
taskparams = get_task_params(jeditaskid)
# insert dropped jobs to temporary table if drop mode
transactionKeyDJ = -1
if mode == 'drop':
extra, transactionKeyDJ = insert_dropped_jobs_to_tmp_table(query, extra)
_logger.info("Inserting dropped jobs: {}".format(time.time() - request.session['req_init_time']))
_logger.info('tk of dropped jobs: {}'.format(transactionKeyDJ))
# get datasets info
dsets, dsinfo = datasets_for_task(jeditaskid)
if taskrec:
taskrec['dsinfo'] = dsinfo
taskrec['totev'] = dsinfo['neventsTot']
taskrec['totevproc'] = dsinfo['neventsUsedTot']
taskrec['totevhs06'] = dsinfo['neventsTot'] * taskrec['cputime'] if (taskrec['cputime'] is not None and dsinfo['neventsTot'] > 0) else None
taskrec['totevoutput'] = dsinfo['neventsOutput'] if 'neventsOutput' in dsinfo else 0
# get input and output containers
inctrs = []
outctrs = []
if taskparams and 'dsForIN' in taskparams:
inctrs = [taskparams['dsForIN'], ]
outctrs.extend(list(set([ds['containername'] for ds in dsets if ds['type'] in ('output', 'log') and ds['containername']])))
# get dataset locality
dataset_locality = get_dataset_locality(jeditaskid)
for ds in dsets:
if jeditaskid in dataset_locality and ds['datasetid'] in dataset_locality[jeditaskid]:
ds['rse'] = ', '.join([item['rse'] for item in dataset_locality[jeditaskid][ds['datasetid']]])
_logger.info("Loading datasets info: {}".format(time.time() - request.session['req_init_time']))
# get event state summary
neventsProcTot = 0
event_summary_list = []
if eventservice:
event_summary_list = event_summary_for_task(mode, query, tk_dj=transactionKeyDJ)
for entry in event_summary_list:
entry['pct'] = round(entry['count'] * 100. / taskrec['totev'], 2) if 'totev' in taskrec and taskrec['totev'] > 0 and 'count' in entry else 0
status = entry.get("statusname", "-")
if status in ['finished', 'done', 'merged']:
neventsProcTot += entry.get("count", 0)
_logger.info("Events states summary: {}".format(time.time() - request.session['req_init_time']))
if taskrec:
taskrec['totevproc_evst'] = neventsProcTot
taskrec['pcttotevproc_evst'] = round(100. * neventsProcTot / taskrec['totev'],2) if (taskrec['totev'] > 0) else ''
taskrec['pctfinished'] = round(100 * taskrec['totevproc'] / taskrec['totev'],2) if (taskrec['totev'] > 0) else ''
# get input files state summary
transactionKeyIEC = -1
ifs_summary = []
if eventservice:
# getting inputs states summary
inputfiles_list, ifs_summary, ifs_tk = input_summary_for_task(taskrec, dsets)
# Putting list of input IDs into the tmp table for connection with jobList
for tk, ids_list in ifs_tk.items():
tk = insert_to_temp_table(ids_list, tk)
# Putting list of inputs to cache separately for dataTables plugin
transactionKeyIEC = random.randrange(100000000)
setCacheEntry(request, transactionKeyIEC, json.dumps(inputfiles_list, cls=DateTimeEncoder), 60 * 30, isData=True)
_logger.info("Inputs states summary: {}".format(time.time() - request.session['req_init_time']))
# get lighted job summary
jobsummarylight, jobsummarylightsplitted = job_summary_for_task_light(taskrec)
_logger.info("Loaded lighted job summary: {}".format(time.time() - request.session['req_init_time']))
# get sum of hs06sec grouped by status
# creating a jquery with timewindow
jquery = copy.deepcopy(query)
jquery['modificationtime__castdate__range'] = get_task_timewindow(taskrec, format_out='str')
hs06sSum = get_hs06s_summary_for_task(jquery)
_logger.info("Loaded sum of hs06sec grouped by status: {}".format(time.time() - request.session['req_init_time']))
# get corecount and normalized corecount values
ccquery = {}
ccquery['jeditaskid'] = jeditaskid
ccquery['jobstatus'] = 'running'
accsum = Jobsactive4.objects.filter(**ccquery).aggregate(accsum=Sum('actualcorecount'))
naccsum = Jobsactive4.objects.filter(**ccquery).aggregate(naccsum=Sum(F('actualcorecount')*F('hs06')/F('corecount')/Value(10), output_field=FloatField()))
_logger.info("Loaded corecount and normalized corecount: {}".format(time.time() - request.session['req_init_time']))
# load logtxt
logtxt = None
if taskrec and taskrec['errordialog']:
mat = re.match('^.*"([^"]+)"', taskrec['errordialog'])
if mat:
errurl = mat.group(1)
cmd = "curl -s -f --compressed '{}'".format(errurl)
logout = subprocess.getoutput(cmd)
if len(logout) > 0:
loglist = (logout.splitlines())[::-1]
logtxt = '\n'.join(loglist)
_logger.info("Loaded error log using '{}': {}".format(cmd, time.time() - request.session['req_init_time']))
# update taskrec dict
if taskrec:
if 'ticketsystemtype' in taskrec and taskrec['ticketsystemtype'] == '' and taskparams is not None:
if 'ticketID' in taskparams:
taskrec['ticketid'] = taskparams['ticketID']
if 'ticketSystemType' in taskparams:
taskrec['ticketsystemtype'] = taskparams['ticketSystemType']
if 'creationdate' in taskrec:
taskrec['kibanatimefrom'] = taskrec['creationdate'].strftime("%Y-%m-%dT%H:%M:%SZ")
else:
taskrec['kibanatimefrom'] = None
if taskrec['status'] in ['cancelled', 'failed', 'broken', 'aborted', 'finished', 'done']:
taskrec['kibanatimeto'] = taskrec['modificationtime'].strftime("%Y-%m-%dT%H:%M:%SZ")
else:
taskrec['kibanatimeto'] = datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
if len(set([ds['storagetoken'] for ds in dsets if 'storagetoken' in ds and ds['storagetoken']])) > 0:
taskrec['destination'] = list(set([ds['storagetoken'] for ds in dsets if ds['storagetoken']]))[0]
elif taskrec['cloud'] == 'WORLD':
taskrec['destination'] = taskrec['nucleus']
if hs06sSum:
taskrec['totevprochs06'] = int(hs06sSum['finished']) if 'finished' in hs06sSum else None
taskrec['failedevprochs06'] = int(hs06sSum['failed']) if 'failed' in hs06sSum else None
taskrec['currenttotevhs06'] = int(hs06sSum['total']) if 'total' in hs06sSum else None
taskrec['brokerage'] = 'prod_brokerage' if taskrec['tasktype'] == 'prod' else 'analy_brokerage'
taskrec['accsum'] = accsum['accsum'] if 'accsum' in accsum else 0
taskrec['naccsum'] = naccsum['naccsum'] if 'naccsum' in naccsum else 0
taskrec['slice'] = get_prod_slice_by_taskid(jeditaskid)
# datetime type -> str in order to avoid encoding errors in template
datetime_task_param_names = ['creationdate', 'modificationtime', 'starttime', 'statechangetime', 'ttcrequested']
datetime_dataset_param_names = ['statechecktime', 'creationtime', 'modificationtime']
if taskrec:
for dtp in datetime_task_param_names:
if taskrec[dtp]:
taskrec[dtp] = taskrec[dtp].strftime(defaultDatetimeFormat)
for dset in dsets:
for dsp, dspv in dset.items():
if dsp in datetime_dataset_param_names and dspv is not None:
dset[dsp] = dset[dsp].strftime(defaultDatetimeFormat)
if dspv is None:
dset[dsp] = ''
try:
del request.session['TFIRST']
del request.session['TLAST']
except:
_logger.exception('Failed to delete TFIRST and TLAST from request session')
if is_json_request(request):
del tasks
del columns
data = {
'task': taskrec,
'taskparams': taskparams,
'datasets': dsets,
}
return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='application/json')
else:
taskparams, jobparams = humanize_task_params(taskparams)
furl = request.get_full_path()
nomodeurl = extensibleURL(request, removeParam(furl, 'mode'))
data = {
'furl': furl,
'nomodeurl': nomodeurl,
'mode': mode,
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'task': taskrec,
'taskparams': taskparams,
'jobparams': jobparams,
'columns': columns,
'jobsummarylight': jobsummarylight,
'jobsummarylightsplitted': jobsummarylightsplitted,
'eventssummary': event_summary_list,
'jeditaskid': jeditaskid,
'logtxt': logtxt,
'datasets': dsets,
'inctrs': inctrs,
'outctrs': outctrs,
'vomode': VOMODE,
'eventservice': eventservice,
'tkdj': transactionKeyDJ,
'tkiec': transactionKeyIEC,
'iecsummary': ifs_summary,
'built': datetime.now().strftime("%m-%d %H:%M:%S"),
'warning': warning,
}
data.update(getContextVariables(request))
cacheexpiration = 60*20 # second/minute * minutes
if taskrec and 'status' in taskrec:
totaljobs = 0
for state in jobsummarylight:
totaljobs += state['count']
if taskrec['status'] in ['broken', 'aborted', 'done', 'finished', 'failed'] and totaljobs > 5000:
cacheexpiration = 3600*24*31 # we store such data a month
setCacheEntry(request, "taskInfoNew", json.dumps(data, cls=DateEncoder), cacheexpiration)
if eventservice:
response = render_to_response('taskInfoESNew.html', data, content_type='text/html')
else:
response = render_to_response('taskInfo.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
def getEventsDetails(request, mode='drop', jeditaskid=0):
"""
A view for ES task Info page to get events details in different states
"""
valid, response = initRequest(request)
if not valid: return response
tmpTableName = 'ATLAS_PANDABIGMON.TMP_IDS1DEBUG'
if 'jeditaskid' in request.session['requestParams'] and request.session['requestParams']['jeditaskid']:
jeditaskid = request.session['requestParams']['jeditaskid']
try:
jeditaskid = int(jeditaskid)
except:
return HttpResponse(status=404)
extrastr = ''
if mode == 'drop':
if 'tkd' in request.session['requestParams'] and request.session['requestParams']['tkd']:
transactionKey = request.session['requestParams']['tkd']
extrastr += " AND pandaid not in ( select id from {0} where TRANSACTIONKEY = {1})".format(tmpTableName,
transactionKey)
else:
return HttpResponse(status=404)
sqlRequest = """
select /*+ INDEX_RS_ASC(e JEDI_EVENTS_PK) NO_INDEX_FFS(e JEDI_EVENTS_PK) */
j.computingsite, j.COMPUTINGELEMENT,e.objstore_id,e.status,count(e.status) as nevents
from atlas_panda.jedi_events e
join
(select computingsite, computingelement,pandaid from ATLAS_PANDA.JOBSARCHIVED4 where jeditaskid={} {}
UNION
select computingsite, computingelement,pandaid from ATLAS_PANDAARCH.JOBSARCHIVED where jeditaskid={} {}
) j
on (e.jeditaskid={} and e.pandaid=j.pandaid)
group by j.computingsite, j.COMPUTINGELEMENT, e.objstore_id, e.status""".format(jeditaskid, extrastr, jeditaskid, extrastr, jeditaskid)
cur = connection.cursor()
cur.execute(sqlRequest)
ossummary = cur.fetchall()
cur.close()
ossummarynames = ['computingsite', 'computingelement', 'objectstoreid', 'statusindex', 'nevents']
objectStoreDict = [dict(zip(ossummarynames, row)) for row in ossummary]
for row in objectStoreDict: row['statusname'] = eventservicestatelist[row['statusindex']]
return HttpResponse(json.dumps(objectStoreDict, cls=DateEncoder), content_type='application/json')
def taskchain(request):
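"""
Render the task chain page for a given jeditaskid, built from the output of the
ATLAS_PANDABIGMON.GETTASKSCHAIN_TEST DB function.
"""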
valid, response = initRequest(request)
jeditaskid = -1
if 'jeditaskid' in request.session['requestParams']:
jeditaskid = int(request.session['requestParams']['jeditaskid'])
if jeditaskid == -1:
data = {"error": "no jeditaskid supplied"}
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='application/json')
new_cur = connection.cursor()
taskChainSQL = "SELECT * FROM table(ATLAS_PANDABIGMON.GETTASKSCHAIN_TEST(%i))" % jeditaskid
new_cur.execute(taskChainSQL)
taskChain = new_cur.fetchall()
results = ["".join(map(str, r)) for r in taskChain]
ts = "".join(results)
data = {
'viewParams': request.session['viewParams'],
'taskChain': ts,
'jeditaskid': jeditaskid
}
response = render_to_response('taskchain.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
def ganttTaskChain(request):
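"""
Render a Gantt-style task chain for a given jeditaskid using a query
executed against the DEFT DB connection.
"""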
from django.db import connections
valid, response = initRequest(request)
jeditaskid = -1
if 'jeditaskid' in request.session['requestParams']:
jeditaskid = int(request.session['requestParams']['jeditaskid'])
if jeditaskid == -1:
data = {"error": "no jeditaskid supplied"}
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='application/json')
new_cur = connections["deft_adcr"].cursor()
sql_request_str = chainsql.query.replace('%i', str(jeditaskid))
new_cur.execute(sql_request_str)
results = new_cur.fetchall()
results_list = ["".join(map(str, r)) for r in results]
results_str = results_list[0].replace("\n", "")
substr_end = results_str.index(">")
data = {
'viewParams': request.session['viewParams'],
'ganttTaskChain': results_str[substr_end+1:],
'jeditaskid': jeditaskid,
'request': request,
}
response = render_to_response('ganttTaskChain.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
def getJobSummaryForTask(request, jeditaskid=-1):
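"""
Return one of the per-task summaries (job summary, scouts or plots) for a given jeditaskid.
The full set of summaries is computed in one go and stored in the cache.
"""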
valid, response = initRequest(request)
if not valid:
return response
try:
jeditaskid = int(jeditaskid)
except:
return HttpResponse(status=404)
if jeditaskid == -1:
return HttpResponse(status=404)
# possible values of infotype are jobsummary, plots, scouts. The requested type is returned, the others are put in cache.
if 'infotype' in request.session['requestParams'] and request.session['requestParams']['infotype']:
infotype = request.session['requestParams']['infotype']
else:
return HttpResponse(status=404)
if 'es' in request.session['requestParams'] and request.session['requestParams']['es'] == 'True':
es = True
else:
es = False
if 'mode' in request.session['requestParams'] and request.session['requestParams']['mode'] == 'drop':
mode = 'drop'
else:
mode = 'nodrop'
data = getCacheEntry(request, "jobSummaryForTask"+str(jeditaskid)+mode, isData=True)
data = None
if data is not None:
data = json.loads(data)
data['request'] = request
if infotype == 'jobsummary':
response = render_to_response('jobSummaryForTask.html', data, content_type='text/html')
elif infotype == 'scouts':
response = render_to_response('scoutsForTask.html', data, content_type='text/html')
elif infotype == 'plots':
response = HttpResponse(json.dumps(data['plotsDict'], cls=DateEncoder), content_type='application/json')
else:
response = HttpResponse(status=404)
return response
extra = '(1=1)'
query = {
'jeditaskid': jeditaskid,
}
if mode == 'drop':
start = time.time()
extra, transactionKeyDJ = insert_dropped_jobs_to_tmp_table(query, extra)
end = time.time()
_logger.info("Inserting dropped jobs: {} sec".format(end - start))
_logger.debug('tk of dropped jobs: {}'.format(transactionKeyDJ))
# pass mode='nodrop' as we already took dropping into account in extra query str
plotsDict, jobsummary, jobScoutIDs, metrics = job_summary_for_task(query, extra=extra, mode='nodrop')
alldata = {
'jeditaskid': jeditaskid,
'request': request,
'jobsummary': jobsummary,
'jobScoutIDs': jobScoutIDs,
'plotsDict': plotsDict,
}
setCacheEntry(request, 'jobSummaryForTask'+str(jeditaskid)+mode, json.dumps(alldata, cls=DateEncoder), 60 * 10, isData=True)
if infotype == 'jobsummary':
data = {
'jeditaskid': jeditaskid,
'mode': mode,
'jobsummary': jobsummary,
}
response = render_to_response('jobSummaryForTask.html', data, content_type='text/html')
elif infotype == 'scouts':
data = {
'jeditaskid': jeditaskid,
'jobscoutids': jobScoutIDs,
}
response = render_to_response('scoutsForTask.html', data, content_type='text/html')
elif infotype == 'plots':
response = HttpResponse(json.dumps(plotsDict, cls=DateEncoder), content_type='application/json')
else:
response = HttpResponse(status=204)
if response:
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
def jobStateSummary(jobs):
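"""
Count jobs per job status for a list of job dicts, using the global statelist.
"""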
global statelist
statecount = {}
for state in statelist:
statecount[state] = 0
for job in jobs:
statecount[job['jobstatus']] += 1
return statecount
def taskFlowDiagram(request, jeditaskid=-1):
"""
Prepare data for task flow chart
:param request:
:param jeditaskid:
:return:
"""
data = {'data': ''}
try:
jeditaskid = int(jeditaskid)
except:
jeditaskid = 0
_logger.exception('jeditaskid={} must be int'.format(jeditaskid))
if jeditaskid < 0:
data['error'] = 'No jeditaskid provided'
elif jeditaskid == 0:
data['error'] = 'Not valid jeditaskid provided'
else:
data['data'] = get_task_flow_data(jeditaskid)
response = HttpResponse(json.dumps(data, cls=DateEncoder), content_type='application/json')
return response
def getTaskName(tasktype, taskid):
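"""
Resolve a task name by its jeditaskid; returns an empty string if the task is not found
or if tasktype is 'taskid'.
"""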
taskname = ''
if tasktype == 'taskid':
taskname = ''
elif tasktype == 'jeditaskid' and taskid and taskid != 'None':
tasks = JediTasks.objects.filter(jeditaskid=taskid).values('taskname')
if len(tasks) > 0:
taskname = tasks[0]['taskname']
return taskname
tcount = {}
lock = Lock()
def totalCount(panJobList, query, wildCardExtension, dkey):
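"""
Thread worker: count jobs matching the query in each of the given job model classes
and store the per-table counts in the global tcount dict under the request digest key.
"""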
print ('Thread started')
lock.acquire()
try:
tcount.setdefault(dkey,[])
for panJob in panJobList:
wildCardExtension = wildCardExtension.replace('%20', ' ')
wildCardExtension = wildCardExtension.replace('%2520', ' ')
wildCardExtension = wildCardExtension.replace('%252540', '@')
wildCardExtension = wildCardExtension.replace('%2540', '@')
wildCardExtension = wildCardExtension.replace('+', ' ')
wildCardExtension = wildCardExtension.replace('%', ' ')
tcount[dkey].append(panJob.objects.filter(**query).extra(where=[wildCardExtension]).count())
finally:
lock.release()
print ('Thread finished')
def digkey(rq):
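"""
Build a digest key for the request from the session key and query time;
used to match the results of the background counting thread.
"""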
sk = rq.session.session_key
qt = rq.session['qtime']
if sk is None:
sk = random.randrange(1000000)
hashkey = hashlib.sha256((str(sk) + ' ' + qt).encode('utf-8'))
return hashkey.hexdigest()
@login_customrequired
def errorSummary(request):
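"""
Error summary page: collects failed/holding jobs for the selected time window and builds
error summaries by count, site, user and task, together with state summaries and plots.
"""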
start_time = time.time()
valid, response = initRequest(request)
thread = None
dkey = digkey(request)
if not valid:
return response
_logger.info('Initialized request: {}'.format(time.time() - request.session['req_init_time']))
# Here we try to get cached data
data = getCacheEntry(request, "errorSummary")
if data is not None:
_logger.info('Got cached data: {}'.format(time.time() - request.session['req_init_time']))
data = json.loads(data)
data['request'] = request
# Filtering data due to user settings
# if 'ADFS_LOGIN' in request.session and request.session['ADFS_LOGIN'] and 'IS_TESTER' in request.session and request.session['IS_TESTER']:
if request.user.is_authenticated and request.user.is_tester:
data = filterErrorData(request, data)
if data['errHist']:
for entry in data['errHist']:
try:
entry[0] = datetime.strptime(entry[0], "%Y-%m-%dT%H:%M:%S")
except:
pass
_logger.info('Processed cached data: {}'.format(time.time() - request.session['req_init_time']))
response = render_to_response('errorSummary.html', data, content_type='text/html')
_logger.info('Rendered template from cached data: {}'.format(time.time() - request.session['req_init_time']))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
testjobs = False
if 'prodsourcelabel' in request.session['requestParams'] and request.session['requestParams']['prodsourcelabel'].lower().find('test') >= 0:
testjobs = True
jobtype = ''
if 'jobtype' in request.session['requestParams']:
jobtype = request.session['requestParams']['jobtype']
elif '/analysis' in request.path:
jobtype = 'analysis'
elif '/production' in request.path:
jobtype = 'production'
elif testjobs:
jobtype = 'rc_test'
if jobtype == '':
hours = 3
limit = 100000
elif jobtype.startswith('anal'):
hours = 6
limit = 100000
elif 'JOB_LIMIT' in request.session:
hours = 6
limit = request.session['JOB_LIMIT']
else:
hours = 12
limit = 100000
if 'hours' in request.session['requestParams']:
hours = int(request.session['requestParams']['hours'])
if 'limit' in request.session['requestParams']:
limit = int(request.session['requestParams']['limit'])
if 'display_limit' in request.session['requestParams']:
display_limit = int(request.session['requestParams']['display_limit'])
else:
display_limit = 9999999
xurlsubst = extensibleURL(request)
xurlsubstNoSite = xurlsubst
# Preprocess request to cover all sites for cloud to view jobs assigned to the World
if ('cloud' in request.session['requestParams']) and ('computingsite' not in request.session['requestParams']) and (
request.session['requestParams']['cloud'] != 'WORLD') and (
'|' not in request.session['requestParams']['cloud']):
cloud = request.session['requestParams']['cloud']
del request.session['requestParams']['cloud']
panda_queues = get_panda_queues()
sites = set([site['site'] for site in panda_queues.values() if site['cloud'] == cloud])
siteStr = ""
for site in sites:
siteStr += "|" + site
siteStr = siteStr[1:]
request.session['requestParams']['computingsite'] = siteStr
# this substitution is necessary to propagate the update into the xurl
updatedRequest = ""
updatedRequestNoSite = ""
for param in request.session['requestParams']:
updatedRequest += '&' + param + '=' + request.session['requestParams'][param]
if param != 'computingsite':
updatedRequestNoSite += '&' + param + '=' + request.session['requestParams'][param]
updatedRequest = updatedRequest[1:]
updatedRequestNoSite = updatedRequestNoSite[1:]
xurlsubst = '/errors/?' + updatedRequest + '&'
xurlsubstNoSite = '/errors/?' + updatedRequestNoSite + '&'
_logger.info('Processed specific params: {}'.format(time.time() - request.session['req_init_time']))
query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=hours, limit=limit, wildCardExt=True)
_logger.info('Finished set up view: {}'.format(time.time() - request.session['req_init_time']))
if not testjobs and 'jobstatus' not in request.session['requestParams']:
query['jobstatus__in'] = ['failed', 'holding']
jobs = []
values = (
'eventservice', 'produsername', 'produserid', 'pandaid', 'cloud', 'computingsite', 'cpuconsumptiontime',
'jobstatus', 'transformation', 'prodsourcelabel', 'specialhandling', 'vo', 'modificationtime',
'atlasrelease', 'jobsetid', 'processingtype', 'workinggroup', 'jeditaskid', 'taskid', 'starttime',
'endtime', 'brokerageerrorcode', 'brokerageerrordiag', 'ddmerrorcode', 'ddmerrordiag', 'exeerrorcode',
'exeerrordiag', 'jobdispatchererrorcode', 'jobdispatchererrordiag', 'piloterrorcode', 'piloterrordiag',
'superrorcode', 'superrordiag', 'taskbuffererrorcode', 'taskbuffererrordiag', 'transexitcode',
'destinationse', 'currentpriority', 'computingelement', 'gshare', 'reqid', 'actualcorecount', 'computingelement'
)
if testjobs:
jobs.extend(
Jobsdefined4.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values(*values))
jobs.extend(
Jobswaiting4.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values(*values))
listJobs = Jobsactive4, Jobsarchived4, Jobsdefined4, Jobswaiting4
jobs.extend(
Jobsactive4.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values(*values))
jobs.extend(
Jobsarchived4.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values(*values))
if (((datetime.now() - datetime.strptime(query['modificationtime__castdate__range'][0], "%Y-%m-%d %H:%M:%S")).days > 1) or \
((datetime.now() - datetime.strptime(query['modificationtime__castdate__range'][1],
"%Y-%m-%d %H:%M:%S")).days > 1)):
jobs.extend(
Jobsarchived.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values(
*values))
listJobs = Jobsactive4, Jobsarchived4, Jobsdefined4, Jobswaiting4, Jobsarchived
if not is_json_request(request):
thread = Thread(target=totalCount, args=(listJobs, query, wildCardExtension, dkey))
thread.start()
else:
thread = None
_logger.info('Got jobs: {}'.format(time.time() - request.session['req_init_time']))
jobs = clean_job_list(request, jobs, do_add_metadata=False, do_add_errorinfo=True)
_logger.info('Cleaned jobs list: {}'.format(time.time() - request.session['req_init_time']))
error_message_summary = get_error_message_summary(jobs)
_logger.info('Prepared new error message summary: {}'.format(time.time() - request.session['req_init_time']))
njobs = len(jobs)
# Build the error summary.
errsByCount, errsBySite, errsByUser, errsByTask, sumd, errHist = errorSummaryDict(request, jobs, testjobs, errHist=True)
_logger.info('Error summary built: {}'.format(time.time() - request.session['req_init_time']))
# Build the state summary and add state info to site error summary
notime = False # behave as it used to before introducing notime for dashboards. Pull only 12hrs.
statesummary = cloud_site_summary(query, extra=wildCardExtension, view=jobtype, cloudview='region', notime=notime)
sitestates = {}
savestates = ['finished', 'failed', 'cancelled', 'holding', ]
for cloud in statesummary:
for site in cloud['sites']:
sitename = cloud['sites'][site]['name']
sitestates[sitename] = {}
for s in savestates:
sitestates[sitename][s] = cloud['sites'][site]['states'][s]['count']
sitestates[sitename]['pctfail'] = cloud['sites'][site]['pctfail']
for site in errsBySite:
sitename = site['name']
if sitename in sitestates:
for s in savestates:
if s in sitestates[sitename]: site[s] = sitestates[sitename][s]
if 'pctfail' in sitestates[sitename]: site['pctfail'] = sitestates[sitename]['pctfail']
_logger.info('Built errors by site summary: {}'.format(time.time() - request.session['req_init_time']))
taskname = ''
if not testjobs:
# Build the task state summary and add task state info to task error summary
taskstatesummary = task_summary(query, limit=limit, view=jobtype)
_logger.info('Prepared data for errors by task summary: {}'.format(time.time() - request.session['req_init_time']))
taskstates = {}
for task in taskstatesummary:
taskid = task['taskid']
taskstates[taskid] = {}
for s in savestates:
taskstates[taskid][s] = task['states'][s]['count']
if 'pctfail' in task:
taskstates[taskid]['pctfail'] = task['pctfail']
for task in errsByTask:
taskid = task['name']
if taskid in taskstates:
for s in savestates:
if s in taskstates[taskid]:
task[s] = taskstates[taskid][s]
if 'pctfail' in taskstates[taskid]:
task['pctfail'] = taskstates[taskid]['pctfail']
if 'jeditaskid' in request.session['requestParams']:
taskname = getTaskName('jeditaskid', request.session['requestParams']['jeditaskid'])
_logger.info('Built errors by task summary: {}'.format(time.time() - request.session['req_init_time']))
if 'sortby' in request.session['requestParams']:
sortby = request.session['requestParams']['sortby']
else:
sortby = 'alpha'
flowstruct = buildGoogleFlowDiagram(request, jobs=jobs)
_logger.info('Built google diagram: {}'.format(time.time() - request.session['req_init_time']))
if thread is not None:
try:
thread.join()
jobsErrorsTotalCount = sum(tcount[dkey])
print(dkey)
print(tcount[dkey])
del tcount[dkey]
print(tcount)
print(jobsErrorsTotalCount)
except:
jobsErrorsTotalCount = -1
else:
jobsErrorsTotalCount = -1
_logger.info('Finished thread counting total number of jobs: {}'.format(time.time() - request.session['req_init_time']))
listPar = []
for key, val in request.session['requestParams'].items():
if key != 'limit' and key != 'display_limit':
listPar.append(key + '=' + str(val))
if len(listPar) > 0:
urlParametrs = '&'.join(listPar) + '&'
else:
urlParametrs = None
print(listPar)
del listPar
if (math.fabs(njobs-jobsErrorsTotalCount)<1000):
jobsErrorsTotalCount = None
else:
jobsErrorsTotalCount = int(math.ceil((jobsErrorsTotalCount+10000)/10000)*10000)
_logger.info('Formed list of params: {}'.format(time.time() - request.session['req_init_time']))
if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and (
'json' not in request.session['requestParams'])):
nosorturl = removeParam(request.get_full_path(), 'sortby')
xurl = extensibleURL(request)
time_locked_url = removeParam(removeParam(xurl, 'date_from', mode='extensible'), 'date_to', mode='extensible') + \
'date_from=' + request.session['TFIRST'].strftime('%Y-%m-%dT%H:%M') + \
'&date_to=' + request.session['TLAST'].strftime('%Y-%m-%dT%H:%M')
jobsurl = xurlsubst.replace('/errors/', '/jobs/')
jobsurlNoSite = xurlsubstNoSite.replace('/errors/', '')
TFIRST = request.session['TFIRST'].strftime(defaultDatetimeFormat)
TLAST = request.session['TLAST'].strftime(defaultDatetimeFormat)
del request.session['TFIRST']
del request.session['TLAST']
_logger.info('Extra data preparation for template: {}'.format(time.time() - request.session['req_init_time']))
data = {
'prefix': getPrefix(request),
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'requestString': urlParametrs,
'jobtype': jobtype,
'njobs': njobs,
'hours': LAST_N_HOURS_MAX,
'limit': limit,
'user': None,
'xurl': xurl,
'xurlsubst': xurlsubst,
'xurlsubstNoSite': xurlsubstNoSite,
'jobsurlNoSite': jobsurlNoSite,
'jobsurl': jobsurl,
'nosorturl': nosorturl,
'time_locked_url': time_locked_url,
'errsByCount': errsByCount,
'errsBySite': errsBySite[:display_limit] if len(errsBySite) > display_limit else errsBySite,
'errsByUser': errsByUser[:display_limit] if len(errsByUser) > display_limit else errsByUser,
'errsByTask': errsByTask[:display_limit] if len(errsByTask) > display_limit else errsByTask,
'sumd': sumd,
'errHist': errHist,
'errsByMessage': json.dumps(error_message_summary),
'tfirst': TFIRST,
'tlast': TLAST,
'sortby': sortby,
'taskname': taskname,
'flowstruct': flowstruct,
'jobsErrorsTotalCount': jobsErrorsTotalCount,
'display_limit': display_limit,
'built': datetime.now().strftime("%H:%M:%S"),
}
data.update(getContextVariables(request))
setCacheEntry(request, "errorSummary", json.dumps(data, cls=DateEncoder), 60 * 20)
_logger.info('Set cache: {}'.format(time.time() - request.session['req_init_time']))
# Filtering data due to user settings
if request.user.is_authenticated and request.user.is_tester:
data = filterErrorData(request, data)
response = render_to_response('errorSummary.html', data, content_type='text/html')
_logger.info('Rendered template: {}'.format(time.time() - request.session['req_init_time']))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
elif 'fields' in request.session['requestParams'] and request.session['requestParams']['fields']:
del request.session['TFIRST']
del request.session['TLAST']
fields = request.session['requestParams']['fields'].split(',')
data = {}
if 'jobSummary' in fields:
data['jobSummary'] = sumd
if 'errsByCount' in fields:
data['errsByCount'] = errsByCount
if 'errsBySite' in fields:
data['errsBySite'] = errsBySite
if 'errsByUser' in fields:
data['errsByUser'] = errsByUser
if 'errsByTask' in fields:
data['errsByTask'] = errsByTask
return HttpResponse(json.dumps(data), content_type='application/json')
else:
del request.session['TFIRST']
del request.session['TLAST']
resp = []
for job in jobs:
resp.append({'pandaid': job['pandaid'], 'status': job['jobstatus'], 'prodsourcelabel': job['prodsourcelabel'],
'produserid': job['produserid']})
return HttpResponse(json.dumps(resp), content_type='application/json')
@login_customrequired
def incidentList(request):
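"""
Incidents page: lists Incidents records for the selected time window with per-parameter
summaries and a half-hour binned time histogram.
"""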
valid, response = initRequest(request)
if not valid: return response
# Here we try to get cached data
data = getCacheEntry(request, "incidents")
if data is not None:
data = json.loads(data)
data['request'] = request
response = render_to_response('incidents.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
if 'days' in request.session['requestParams']:
hours = int(request.session['requestParams']['days']) * 24
else:
if 'hours' not in request.session['requestParams']:
hours = 24 * 3
else:
hours = int(request.session['requestParams']['hours'])
setupView(request, hours=hours, limit=9999999)
pq_clouds = get_pq_clouds()
iquery = {}
cloudQuery = Q()
startdate = timezone.now() - timedelta(hours=hours)
startdate = startdate.strftime(defaultDatetimeFormat)
enddate = timezone.now().strftime(defaultDatetimeFormat)
iquery['at_time__range'] = [startdate, enddate]
if 'site' in request.session['requestParams']:
iquery['description__contains'] = 'queue=%s' % request.session['requestParams']['site']
if 'category' in request.session['requestParams']:
iquery['description__startswith'] = '%s:' % request.session['requestParams']['category']
if 'comment' in request.session['requestParams']:
iquery['description__contains'] = '%s' % request.session['requestParams']['comment']
if 'notifier' in request.session['requestParams']:
iquery['description__contains'] = 'DN=%s' % request.session['requestParams']['notifier']
if 'cloud' in request.session['requestParams']:
sites = [site for site, cloud in pq_clouds.items() if cloud == request.session['requestParams']['cloud']]
for site in sites:
cloudQuery = cloudQuery | Q(description__contains='queue=%s' % site)
incidents = []
incidents.extend(Incidents.objects.filter(**iquery).filter(cloudQuery).order_by('at_time').reverse().values())
sumd = {}
pars = {}
incHist = {}
for inc in incidents:
desc = inc['description']
desc = desc.replace(' ', ' ')
parsmat = re.match(r'^([a-z\s]+):\s+queue=([^\s]+)\s+DN=(.*)\s\s\s*([A-Za-z^ \.0-9]*)$', desc)
tm = inc['at_time']
tm = tm - timedelta(minutes=tm.minute % 30, seconds=tm.second, microseconds=tm.microsecond)
if tm not in incHist: incHist[tm] = 0
incHist[tm] += 1
if parsmat:
pars['category'] = parsmat.group(1)
pars['site'] = parsmat.group(2)
pars['notifier'] = parsmat.group(3)
pars['type'] = inc['typekey']
if pars['site'] in pq_clouds:
pars['cloud'] = pq_clouds[pars['site']]
if parsmat.group(4): pars['comment'] = parsmat.group(4)
else:
parsmat = re.match(r'^([A-Za-z\s]+):.*$', desc)
if parsmat:
pars['category'] = parsmat.group(1)
else:
pars['category'] = desc[:10]
for p in pars:
if p not in sumd:
sumd[p] = {}
sumd[p]['param'] = p
sumd[p]['vals'] = {}
if pars[p] not in sumd[p]['vals']:
sumd[p]['vals'][pars[p]] = {}
sumd[p]['vals'][pars[p]]['name'] = pars[p]
sumd[p]['vals'][pars[p]]['count'] = 0
sumd[p]['vals'][pars[p]]['count'] += 1
## convert incident components to URLs. Easier here than in the template.
if 'site' in pars:
inc['description'] = re.sub(r'queue=[^\s]+', 'queue=<a href="%ssite=%s">%s</a>' % (
extensibleURL(request), pars['site'], pars['site']), inc['description'])
inc['at_time'] = inc['at_time'].strftime(defaultDatetimeFormat)
## convert to ordered lists
suml = []
for p in sumd:
itemd = {}
itemd['param'] = p
iteml = []
kys = sorted(sumd[p]['vals'].keys(), key=lambda y: y.lower())
for ky in kys:
iteml.append({'kname': ky, 'kvalue': sumd[p]['vals'][ky]['count']})
itemd['list'] = iteml
suml.append(itemd)
suml = sorted(suml, key=lambda x: x['param'].lower())
kys = incHist.keys()
kys = sorted(kys)
incHistL = []
for k in kys:
incHistL.append([k, incHist[k]])
del request.session['TFIRST']
del request.session['TLAST']
if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and (
'json' not in request.session['requestParams'])):
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'user': None,
'incidents': incidents,
'sumd': suml,
'incHist': incHistL,
'xurl': extensibleURL(request),
'hours': hours,
'ninc': len(incidents),
'built': datetime.now().strftime("%H:%M:%S"),
}
setCacheEntry(request, "incidents", json.dumps(data, cls=DateEncoder), 60 * 20)
response = render_to_response('incidents.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
else:
clearedInc = []
for inc in incidents:
entry = {}
entry['at_time'] = inc['at_time'].isoformat()
entry['typekey'] = inc['typekey']
entry['description'] = inc['description']
clearedInc.append(entry)
jsonResp = json.dumps(clearedInc)
return HttpResponse(jsonResp, content_type='application/json')
def esatlasPandaLoggerJson(request):
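"""
Return JSON with JEDI log counts aggregated by jediTaskID, log type and log level,
queried from the atlas_jedilogs-* ElasticSearch indices.
"""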
valid, response = initRequest(request)
if not valid:
return response
connection = create_esatlas_connection()
s = Search(using=connection, index='atlas_jedilogs-*')
s.aggs.bucket('jediTaskID', 'terms', field='jediTaskID', size=1000)\
.bucket('type', 'terms', field='fields.type.keyword') \
.bucket('logLevel', 'terms', field='logLevel.keyword')
res = s.execute()
print('query completed')
jdListFinal = []
for agg in res['aggregations']['jediTaskID']['buckets']:
name = agg['key']
for types in agg['type']['buckets']:
type = types['key']
for levelnames in types['logLevel']['buckets']:
jdlist = {}
levelname = levelnames['key']
jdlist['jediTaskID'] = str(name)
jdlist['Type'] = type
jdlist['LevelName'] = levelname
jdlist['Count'] = levelnames['doc_count']
jdListFinal.append(jdlist)
return HttpResponse(json.dumps(jdListFinal), content_type='application/json')
def esatlasPandaLogger(request):
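"""
Render the PanDA/JEDI logger overview built from ElasticSearch aggregations of today's
atlas_pandalogs-* and atlas_jedilogs-* indices, grouped into predefined categories.
"""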
valid, response = initRequest(request)
if not valid:
return response
connection = create_esatlas_connection()
today = time.strftime("%Y.%m.%d")
pandaDesc = {
"panda.log.RetrialModule": ["cat1","Retry module to apply rules on failed jobs"],
"panda.log.Serveraccess": ["cat2","Apache request log"],
"panda.log.Servererror": ["cat2","Apache errors"],
"panda.log.PilotRequests": ["cat2", "Pilot requests"],
"panda.log.Entry": ["cat2","Entry point to the PanDA server"],
"panda.log.UserIF": ["cat2", "User interface"],
"panda.log.DBProxy": ["cat2", "Filtered messages of DB interactions"],
"panda.log.Adder": ["cat3", "Add output files to datasets and trigger output aggregation"],
"panda.log.Finisher": ["cat3", "Finalization procedures for jobs"],
"panda.log.Closer": ["cat3", "Close internal datasets once all associated jobs are done"],
"panda.log.Setupper": ["cat3", "Setup internal datasets for data transfers"],
"panda.log.copyArchive": ["cat3", "Various actions, such as kill and poll based on timeout parameters"],
"panda.log.DynDataDistributer": ["cat3", "PD2P"],
"panda.log.Activator": ["cat3", "Activates jobs based on input transfers"],
"panda.log.datasetManager": ["cat3", "Manages datasets states"],
"panda.log.Watcher": ["cat3", "Watchdog for jobs, e.g. lost heartbeat"],
"panda.log.broker": ["cat4", "Brokerage"],
"panda.log.runRebro": ["cat4", "Identifies jobs to rebroker"],
"panda.log.prioryMassage": ["cat4", "Priority management for user jobs"],
"panda.log.Initializer": ["cat8", "Initializes connections to the DB"],
"panda.log.ConBridge": ["cat5", "DB connections"],
"panda.log.ProcessLimiter": ["cat6", "Limit number of forked processes in PanDA"],
"panda.log.Utils": ["cat8", "Aux functions"],
"panda.log.Notifier": ["cat7", "User email notification agent"],
"panda.log.Panda": ["cat8", "Some messages are redirected here"],
}
pandaCat = ['cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8']
jediDesc = {
"panda.log.AtlasProdTaskBroker": ["cat1","Production task brokerage"],
"panda.log.TaskBroker": ["cat7","Task brokerage factory"],
"panda.log.AtlasProdJobBroker": ["cat1","Production job brokerage"],
"panda.log.AtlasAnalJobBroker": ["cat1", "Analysis job brokerage"],
"panda.log.JobBroker": ["cat7","Job brokerage factory"],
"panda.log.AtlasProdJobThrottler": ["cat2", "Throttles generation of production jobs based on defined limits"],
"panda.log.JobThrottler": ["cat7", "Job throttler factory"],
"panda.log.JobGenerator": ["cat2", "Generates job for a task"],
"panda.log.JobSplitter": ["cat2", "Job splitter, used by the job generator"],
"panda.log.TaskRefiner": ["cat3", "Generates tasks in JEDI from definitions found in DEFT"],
"panda.log.TaskSetupper": ["cat7", "Procedures for task setup. Base class"],
"panda.log.AtlasTaskSetupper": ["cat3", "ATLAS procedures for task setup"],
"panda.log.TaskCommando": ["cat3", "Executes task commands from DEFT"],
"panda.log.PostProcessor": ["cat3", "Post processes tasks"],
"panda.log.Activator": ["cat4", "Activates jobs based on DDM messages"],
"panda.log.Closer": ["cat4", "Closes internal datasets once all associated jobs are done"],
"panda.log.ContentsFeeder": ["cat4", "Feeds file contents for tasks"],
"panda.log.AtlasDDMClient": ["cat4", "DDM client"],
"panda.log.AtlasProdWatchDog": ["cat5", "Production task watchdog"],
"panda.log.AtlasAnalWatchDog": ["cat5", "Analysis task watchdog"],
"panda.log.WatchDog": ["cat5", "Watchdog"],
"panda.log.JediDBProxy": ["cat6", "Filtered JEDI DB interactions"],
"panda.log.TaskBuffer": ["cat7", "PanDA server task buffer"],
"panda.log.JediTaskBuffer": ["cat7", "JEDI task buffer"],
"panda.log.DBProxyPool": ["cat7", "DB connection pool interactions"],
}
jediCat = ['cat1','cat2','cat3','cat4','cat5','cat6','cat7']
indices = ['atlas_pandalogs-', 'atlas_jedilogs-']
panda = {}
jedi = {}
for index in indices:
s = Search(using=connection, index=index + str(today))
s.aggs.bucket('logName', 'terms', field='logName.keyword', size=1000) \
.bucket('type', 'terms', field='fields.type.keyword', size=1000) \
.bucket('logLevel', 'terms', field='logLevel.keyword')
res = s.execute()
if index == "atlas_pandalogs-":
for cat in pandaCat:
panda[cat] = {}
for agg in res['aggregations']['logName']['buckets']:
if agg['key'] not in pandaDesc:
pandaDesc[agg['key']] = [list(panda.keys())[-1], "New log type. No description"]
cat = pandaDesc[agg['key']][0]
name = agg['key']
panda[cat][name] = {}
for types in agg['type']['buckets']:
type = types['key']
panda[cat][name][type] = {}
for levelnames in types['logLevel']['buckets']:
levelname = levelnames['key']
panda[cat][name][type][levelname] = {}
panda[cat][name][type][levelname]['logLevel'] = levelname
panda[cat][name][type][levelname]['lcount'] = str(levelnames['doc_count'])
elif index == "atlas_jedilogs-":
for cat in jediCat:
jedi[cat] = {}
for agg in res['aggregations']['logName']['buckets']:
if agg['key'] not in jediDesc:
jediDesc[agg['key']] = [list(jedi.keys())[-1], "New log type. No description"]
cat = jediDesc[agg['key']][0]
name = agg['key']
jedi[cat][name] = {}
for types in agg['type']['buckets']:
type = types['key']
jedi[cat][name][type] = {}
for levelnames in types['logLevel']['buckets']:
levelname = levelnames['key']
jedi[cat][name][type][levelname] = {}
jedi[cat][name][type][levelname]['logLevel'] = levelname
jedi[cat][name][type][levelname]['lcount'] = str(levelnames['doc_count'])
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'user': None,
'panda': panda,
'pandadesc':pandaDesc,
'jedi': jedi,
'jedidesc':jediDesc,
'time': time.strftime("%Y-%m-%d"),
}
if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and (
'json' not in request.session['requestParams'])):
response = render_to_response('esatlasPandaLogger.html', data, content_type='text/html')
return response
def pandaLogger(request):
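"""
PanDA logger page: summarize Pandalog records by name, type and level for the selected
time window and, when filters are given, list the matching records with a time histogram.
"""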
valid, response = initRequest(request)
if not valid: return response
getrecs = False
iquery = {}
if 'category' in request.session['requestParams']:
iquery['name'] = request.session['requestParams']['category']
getrecs = True
if 'type' in request.session['requestParams']:
val = escape_input(request.session['requestParams']['type'])
iquery['type__in'] = val.split('|')
getrecs = True
if 'level' in request.session['requestParams']:
iquery['levelname'] = request.session['requestParams']['level'].upper()
getrecs = True
if 'taskid' in request.session['requestParams']:
iquery['message__startswith'] = request.session['requestParams']['taskid']
getrecs = True
if 'jeditaskid' in request.session['requestParams']:
iquery['message__icontains'] = "jeditaskid=%s" % request.session['requestParams']['jeditaskid']
getrecs = True
if 'site' in request.session['requestParams']:
iquery['message__icontains'] = "site=%s " % request.session['requestParams']['site']
getrecs = True
if 'pandaid' in request.session['requestParams']:
iquery['pid'] = request.session['requestParams']['pandaid']
getrecs = True
if 'hours' not in request.session['requestParams']:
if getrecs:
hours = 72
else:
hours = 24
else:
hours = int(request.session['requestParams']['hours'])
setupView(request, hours=hours, limit=9999999)
startdate = timezone.now() - timedelta(hours=hours)
startdate = startdate.strftime(defaultDatetimeFormat)
if 'startdate' in request.session['requestParams'] and len(request.session['requestParams']['startdate']) > 1:
startdate = request.session['requestParams']['startdate']
enddate = timezone.now().strftime(defaultDatetimeFormat)
if 'enddate' in request.session['requestParams'] and len(request.session['requestParams']['enddate']) > 1:
enddate = request.session['requestParams']['enddate']
iquery['bintime__range'] = [startdate, enddate]
print (iquery)
counts = Pandalog.objects.filter(**iquery).values('name', 'type', 'levelname').annotate(
Count('levelname')).order_by('name', 'type', 'levelname')
if getrecs:
records = Pandalog.objects.filter(**iquery).order_by('bintime').reverse()[
:request.session['JOB_LIMIT']].values()
## histogram of logs vs. time, for plotting
logHist = {}
for r in records:
r['message'] = r['message'].replace('<', '')
r['message'] = r['message'].replace('>', '')
r['levelname'] = r['levelname'].lower()
tm = r['bintime']
tm = tm - timedelta(minutes=tm.minute % 30, seconds=tm.second, microseconds=tm.microsecond)
if tm not in logHist: logHist[tm] = 0
logHist[tm] += 1
kys = logHist.keys()
kys = sorted(kys)
logHistL = []
for k in kys:
logHistL.append([k, logHist[k]])
else:
records = None
logHistL = None
logs = {}
totcount = 0
for inc in counts:
name = inc['name']
type = inc['type']
level = inc['levelname']
count = inc['levelname__count']
totcount += count
if name not in logs:
logs[name] = {}
logs[name]['name'] = name
logs[name]['count'] = 0
logs[name]['types'] = {}
logs[name]['count'] += count
if type not in logs[name]['types']:
logs[name]['types'][type] = {}
logs[name]['types'][type]['name'] = type
logs[name]['types'][type]['count'] = 0
logs[name]['types'][type]['levels'] = {}
logs[name]['types'][type]['count'] += count
if level not in logs[name]['types'][type]['levels']:
logs[name]['types'][type]['levels'][level] = {}
logs[name]['types'][type]['levels'][level]['name'] = level.lower()
logs[name]['types'][type]['levels'][level]['count'] = 0
logs[name]['types'][type]['levels'][level]['count'] += count
## convert to ordered lists
logl = []
for l in logs:
itemd = {}
itemd['name'] = logs[l]['name']
itemd['types'] = []
for t in logs[l]['types']:
logs[l]['types'][t]['levellist'] = []
for v in logs[l]['types'][t]['levels']:
logs[l]['types'][t]['levellist'].append(logs[l]['types'][t]['levels'][v])
logs[l]['types'][t]['levellist'] = sorted(logs[l]['types'][t]['levellist'], key=lambda x: x['name'])
typed = {}
typed['name'] = logs[l]['types'][t]['name']
itemd['types'].append(logs[l]['types'][t])
itemd['types'] = sorted(itemd['types'], key=lambda x: x['name'])
logl.append(itemd)
logl = sorted(logl, key=lambda x: x['name'])
del request.session['TFIRST']
del request.session['TLAST']
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'user': None,
'logl': logl,
'records': records,
'ninc': totcount,
'logHist': logHistL,
'xurl': extensibleURL(request),
'hours': hours,
'getrecs': getrecs,
'built': datetime.now().strftime("%H:%M:%S"),
}
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and (
'json' not in request.session['requestParams'])):
response = render_to_response('pandaLogger.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
'json' in request.session['requestParams']):
resp = data
return HttpResponse(json.dumps(resp, cls=DateEncoder), content_type='application/json')
# def percentile(N, percent, key=lambda x:x):
# """
# Find the percentile of a list of values.
#
# @parameter N - is a list of values. Note N MUST BE already sorted.
# @parameter percent - a float value from 0.0 to 1.0.
# @parameter key - optional key function to compute value from each element of N.
#
# @return - the percentile of the values
# """
# if not N:
# return None
# k = (len(N)-1) * percent
# f = math.floor(k)
# c = math.ceil(k)
# if f == c:
# return key(N[int(k)])
# d0 = key(N[int(f)]) * (c-k)
# d1 = key(N[int(c)]) * (k-f)
# return d0+d1
def ttc(request):
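    """
    Time-to-completion (TTC) view for a production task: builds an event
    processing profile from GETTASKPROFILE and compares the progress against
    the requested TTC deadline (ttcrequested), rendering the result in ttc.html.
    """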
valid, response = initRequest(request)
if not valid: return response
data = {}
jeditaskid = -1
if 'jeditaskid' in request.session['requestParams']:
jeditaskid = int(request.session['requestParams']['jeditaskid'])
if jeditaskid == -1:
data = {"error": "no jeditaskid supplied"}
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='application/json')
query = {'jeditaskid': jeditaskid}
task = JediTasks.objects.filter(**query).values('jeditaskid', 'taskname', 'workinggroup', 'tasktype',
'processingtype', 'ttcrequested', 'starttime', 'endtime',
'creationdate', 'status')
if len(task) == 0:
data = {"error": ("jeditaskid " + str(jeditaskid) + " does not exist")}
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='application/json')
taskrec = task[0]
    if taskrec['tasktype'] != 'prod' or taskrec['ttcrequested'] is None:
        data = {"error": "TTC for this type of task has not been implemented yet"}
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='application/json')
if taskrec['ttcrequested']:
taskrec['ttc'] = taskrec['ttcrequested']
taskevents = GetEventsForTask.objects.filter(**query).values('jeditaskid', 'totev', 'totevrem')
taskev = None
if len(taskevents) > 0:
taskev = taskevents[0]
cur = connection.cursor()
cur.execute("SELECT * FROM table(ATLAS_PANDABIGMON.GETTASKPROFILE('%s'))" % taskrec['jeditaskid'])
taskprofiled = cur.fetchall()
cur.close()
keys = ['endtime', 'starttime', 'nevents', 'njob']
taskprofile = [{'endtime': taskrec['starttime'], 'starttime': taskrec['starttime'], 'nevents': 0, 'njob': 0}]
taskprofile = taskprofile + [dict(zip(keys, row)) for row in taskprofiled]
maxt = (taskrec['ttc'] - taskrec['starttime']).days * 3600 * 24 + (taskrec['ttc'] - taskrec['starttime']).seconds
neventsSum = 0
for job in taskprofile:
job['ttccoldline'] = 100. - ((job['endtime'] - taskrec['starttime']).days * 3600 * 24 + (
job['endtime'] - taskrec['starttime']).seconds) * 100 / float(maxt)
job['endtime'] = job['endtime'].strftime("%Y-%m-%d %H:%M:%S")
job['ttctime'] = job['endtime']
job['starttime'] = job['starttime'].strftime("%Y-%m-%d %H:%M:%S")
neventsSum += job['nevents']
if taskev and taskev['totev'] > 0:
job['tobedonepct'] = 100. - neventsSum * 100. / taskev['totev']
else:
job['tobedonepct'] = None
    taskprofile.append({'endtime': taskprofile[-1]['endtime'],
                        'starttime': taskprofile[-1]['starttime'],
                        'ttctime': taskrec['ttc'].strftime("%Y-%m-%d %H:%M:%S"),
                        'tobedonepct': taskprofile[-1]['tobedonepct'],
                        'ttccoldline': 0})
progressForBar = []
    if taskev and taskev['totev'] > 0:
taskrec['percentage'] = ((neventsSum) * 100 / taskev['totev'])
else: taskrec['percentage'] = None
if taskrec['percentage'] != None:
taskrec['percentageok'] = taskrec['percentage'] - 5
else: taskrec['percentageok'] = None
    if taskrec['status'] == 'running':
        if datetime.now() < taskrec['ttc']:
            elapsed = (datetime.now() - taskrec['starttime']).days * 24 * 3600 + (
                datetime.now() - taskrec['starttime']).seconds
            planned = (taskrec['ttcrequested'] - taskrec['creationdate']).days * 24 * 3600 + (
                taskrec['ttcrequested'] - taskrec['creationdate']).seconds
            taskrec['ttcbasedpercentage'] = elapsed * 100 / planned
        else:
            taskrec['ttcbasedpercentage'] = 100
        progressForBar = [100, taskrec['percentage'], taskrec['ttcbasedpercentage']]
if taskrec['ttc']:
taskrec['ttc'] = taskrec['ttc'].strftime(defaultDatetimeFormat)
if taskrec['creationdate']:
taskrec['creationdate'] = taskrec['creationdate'].strftime(defaultDatetimeFormat)
if taskrec['starttime']:
taskrec['starttime'] = taskrec['starttime'].strftime(defaultDatetimeFormat)
if taskrec['endtime']:
taskrec['endtime'] = taskrec['endtime'].strftime(defaultDatetimeFormat)
data = {
'request': request,
'task': taskrec,
'progressForBar': progressForBar,
'profile': taskprofile,
'built': datetime.now().strftime("%H:%M:%S"),
}
response = render_to_response('ttc.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
#@cache_page(60 * 20)
@login_customrequired
def workingGroups(request):
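    """
    Working groups summary: per-working-group task and job summaries for the
    recent period, with the rendered data cached for 20 minutes.
    """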
valid, response = initRequest(request)
if not valid:
return response
# Here we try to get cached data
data = getCacheEntry(request, "workingGroups")
if data is not None:
data = json.loads(data)
data['request'] = request
response = render_to_response('workingGroups.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
taskdays = 3
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
VOMODE = 'atlas'
else:
VOMODE = ''
if VOMODE != 'atlas':
days = 30
else:
days = taskdays
errthreshold = 15
hours = days * 24
query = setupView(request, hours=hours, limit=999999)
query['workinggroup__isnull'] = False
# WG task summary
tasksummary = wg_task_summary(request, view='working group', taskdays=taskdays)
# WG job summary
if 'workinggroup' in request.session['requestParams'] and request.session['requestParams']['workinggroup']:
query['workinggroup'] = request.session['requestParams']['workinggroup']
wgsummary = wg_summary(query)
if not is_json_request(request):
xurl = extensibleURL(request)
del request.session['TFIRST']
del request.session['TLAST']
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'url': request.path,
'xurl': xurl,
'user': None,
'wgsummary': wgsummary,
'taskstates': taskstatedict,
'tasksummary': tasksummary,
'hours': hours,
'days': days,
'errthreshold': errthreshold,
'built': datetime.now().strftime("%H:%M:%S"),
}
setCacheEntry(request, "workingGroups", json.dumps(data, cls=DateEncoder), 60 * 20)
response = render_to_response('workingGroups.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
else:
del request.session['TFIRST']
del request.session['TLAST']
resp = []
return HttpResponse(json.dumps(resp), content_type='application/json')
@login_customrequired
def datasetInfo(request):
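    """
    Details of a single dataset selected by datasetname or datasetid, looked up
    in JediDatasets with a fallback to the legacy Datasets table.
    """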
valid, response = initRequest(request)
if not valid: return response
setupView(request, hours=365 * 24, limit=999999999)
query = {}
dsets = []
dsrec = None
colnames = []
columns = []
if 'datasetname' in request.session['requestParams']:
dataset = request.session['requestParams']['datasetname']
query['datasetname'] = request.session['requestParams']['datasetname']
elif 'datasetid' in request.session['requestParams']:
dataset = request.session['requestParams']['datasetid']
query['datasetid'] = request.session['requestParams']['datasetid']
else:
dataset = None
if 'jeditaskid' in request.session['requestParams']:
query['jeditaskid'] = int(request.session['requestParams']['jeditaskid'])
if dataset:
dsets = JediDatasets.objects.filter(**query).values()
if len(dsets) == 0:
startdate = timezone.now() - timedelta(hours=30 * 24)
startdate = startdate.strftime(defaultDatetimeFormat)
enddate = timezone.now().strftime(defaultDatetimeFormat)
query = {'modificationdate__range': [startdate, enddate]}
if 'datasetname' in request.session['requestParams']:
query['name'] = request.session['requestParams']['datasetname']
elif 'datasetid' in request.session['requestParams']:
query['vuid'] = request.session['requestParams']['datasetid']
moredsets = Datasets.objects.filter(**query).values()
if len(moredsets) > 0:
dsets = moredsets
for ds in dsets:
ds['datasetname'] = ds['name']
ds['creationtime'] = ds['creationdate'].strftime(defaultDatetimeFormat)
ds['modificationtime'] = ds['modificationdate'].strftime(defaultDatetimeFormat)
ds['nfiles'] = ds['numberfiles']
ds['datasetid'] = ds['vuid']
if len(dsets) > 0:
dsrec = dsets[0]
dataset = dsrec['datasetname']
colnames = dsrec.keys()
colnames = sorted(colnames)
for k in colnames:
if is_timestamp(k):
try:
val = dsrec[k].strftime(defaultDatetimeFormat)
except:
val = dsrec[k]
else:
val = dsrec[k]
                if dsrec[k] is None:
                    val = ''
                    continue
pair = {'name': k, 'value': val}
columns.append(pair)
del request.session['TFIRST']
del request.session['TLAST']
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and (
'json' not in request.session['requestParams'])):
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'dsrec': dsrec,
'datasetname': dataset,
'columns': columns,
'built': datetime.now().strftime("%H:%M:%S"),
}
data.update(getContextVariables(request))
response = render_to_response('datasetInfo.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
else:
return HttpResponse(json.dumps(dsrec, cls=DateEncoder), content_type='application/json')
@login_customrequired
def datasetList(request):
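    """
    List JEDI datasets selected by datasetname/containername pattern or by
    jeditaskid; for HTML requests a single match redirects to datasetInfo.
    """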
valid, response = initRequest(request)
if not valid:
return response
setupView(request, hours=365 * 24, limit=999999999)
query = {}
extrastr = '(1=1)'
if 'datasetname' in request.session['requestParams']:
query['datasetname__icontains'] = request.session['requestParams']['datasetname'] if ':' not in request.session['requestParams']['datasetname'] else request.session['requestParams']['datasetname'].split(':')[1]
if 'containername' in request.session['requestParams']:
query['datasetname'] = request.session['requestParams']['containername']
if 'jeditaskid' in request.session['requestParams']:
query['jeditaskid'] = int(request.session['requestParams']['jeditaskid'])
dsets = []
if len(query) > 0 or len(extrastr) > 5:
dsets = JediDatasets.objects.filter(**query).extra(where=[extrastr]).values()
dsets = sorted(dsets, key=lambda x: x['datasetname'].lower())
del request.session['TFIRST']
del request.session['TLAST']
if not is_json_request(request):
# redirect to datasetInfo if only one dataset found
if len(dsets) == 1:
return redirect('/datasetInfo/?datasetname=' + dsets[0]['datasetname'])
timestamp_vars = ['modificationtime', 'statechangetime', 'starttime', 'creationdate', 'resquetime',
'endtime', 'lockedtime', 'frozentime', 'creationtime', 'statechecktime']
for ds in dsets:
for p in ds:
if p in timestamp_vars and ds[p] is not None:
ds[p] = ds[p].strftime(defaultDatetimeFormat)
if ds[p] is None:
ds[p] = ''
if ds[p] is True:
ds[p] = 'true'
if ds[p] is False:
ds[p] = 'false'
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'datasets': dsets,
'built': datetime.now().strftime("%H:%M:%S"),
}
data.update(getContextVariables(request))
response = render_to_response('datasetList.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
else:
return HttpResponse(json.dumps(dsets, cls=DateEncoder), content_type='application/json')
@login_customrequired
def fileInfo(request):
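    """
    Details of a file selected by lfn/filename, fileid or guid, looked up in
    JediDatasetContents with a fallback to Filestable4/FilestableArch, shown
    together with all attempts and a daily histogram of occurrences.
    """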
valid, response = initRequest(request)
if not valid:
return response
files = []
frec = None
columns = []
tquery = setupView(request, hours=365 * 24, limit=999999999, wildCardExt=False)
query = {'creationdate__castdate__range': tquery['modificationtime__castdate__range']}
if 'filename' in request.session['requestParams']:
file = request.session['requestParams']['filename']
query['lfn'] = request.session['requestParams']['filename']
elif 'lfn' in request.session['requestParams']:
file = request.session['requestParams']['lfn']
query['lfn'] = request.session['requestParams']['lfn']
elif 'fileid' in request.session['requestParams']:
file = request.session['requestParams']['fileid']
query['fileid'] = request.session['requestParams']['fileid']
elif 'guid' in request.session['requestParams']:
file = request.session['requestParams']['guid']
query['guid'] = request.session['requestParams']['guid']
else:
file = None
if 'pandaid' in request.session['requestParams'] and request.session['requestParams']['pandaid'] != '':
query['pandaid'] = request.session['requestParams']['pandaid']
if 'jeditaskid' in request.session['requestParams'] and request.session['requestParams']['jeditaskid'] != '':
query['jeditaskid'] = request.session['requestParams']['jeditaskid']
if 'scope' in request.session['requestParams']:
query['scope'] = request.session['requestParams']['scope']
if 'datasetid' in request.session['requestParams']:
query['datasetid'] = request.session['requestParams']['datasetid']
if file or ('pandaid' in query and query['pandaid'] is not None) or ('jeditaskid' in query and query['jeditaskid'] is not None):
files = JediDatasetContents.objects.filter(**query).values()
if len(files) == 0:
fquery = {k: v for k, v in query.items() if k != 'creationdate__castdate__range' }
fquery['modificationtime__castdate__range'] = tquery['modificationtime__castdate__range']
morefiles = Filestable4.objects.filter(**fquery).values()
if len(morefiles) == 0:
morefiles = FilestableArch.objects.filter(**fquery).values()
if len(morefiles) > 0:
files = morefiles
for f in files:
f['creationdate'] = f['modificationtime']
f['fileid'] = f['row_id']
f['datasetname'] = f['dataset']
f['oldfiletable'] = 1
if len(files) > 0:
# get dataset names for files
dids = list(set([f['datasetid'] for f in files]))
dquery = {}
extra = ' (1=1) '
if len(dids) < DB_N_MAX_IN_QUERY:
dquery['datasetid__in'] = dids
else:
random.seed()
transactionKey = random.randrange(1000000)
tmpTableName = get_tmp_table_name()
insert_to_temp_table(dids, transactionKey)
extra += 'AND DATASETID in (SELECT ID FROM {} WHERE TRANSACTIONKEY={})'.format(tmpTableName, transactionKey)
datasets = JediDatasets.objects.filter(**dquery).extra(where=[extra]).values('datasetname', 'datasetid')
dataset_names_dict = {}
for d in datasets:
dataset_names_dict[d['datasetid']] = d['datasetname']
for f in files:
f['fsizemb'] = "%0.2f" % (f['fsize'] / 1000000.)
if 'datasetid' in f and f['datasetid'] in dataset_names_dict and dataset_names_dict[f['datasetid']]:
f['datasetname'] = dataset_names_dict[f['datasetid']]
else:
f['datasetname'] = ''
# filter out files if dataset name in request params
if 'datasetname' in request.session['requestParams'] and request.session['requestParams']['datasetname']:
files = [f for f in files if f['datasetname'] == request.session['requestParams']['datasetname']]
files = sorted(files, key=lambda x: x['pandaid'] if x['pandaid'] is not None else False, reverse=True)
frec = files[0]
file = frec['lfn']
colnames = frec.keys()
colnames = sorted(colnames)
for k in colnames:
if is_timestamp(k):
try:
val = frec[k].strftime(defaultDatetimeFormat)
except:
val = frec[k]
else:
val = frec[k]
if frec[k] is None:
val = ''
continue
pair = {'name': k, 'value': val}
columns.append(pair)
for f in files:
f['startevent'] = f['startevent'] + 1 if 'startevent' in f and f['startevent'] is not None else -1
f['endevent'] = f['endevent'] + 1 if 'endevent' in f and f['endevent'] is not None else -1
for p in ('maxattempt', 'attemptnr', 'pandaid'):
f[p] = f[p] if p in f and f[p] is not None else -1
if 'creationdate' in f and f['creationdate'] is not None:
f['creationdate'] = f['creationdate'].strftime(defaultDatetimeFormat)
if not is_json_request(request):
del request.session['TFIRST']
del request.session['TLAST']
        if frec and isinstance(frec.get('creationdate'), datetime):
            frec['creationdate'] = frec['creationdate'].strftime(defaultDatetimeFormat)
files_list = []
plot_data = []
if len(files) > 0:
# filter files params for a table
file_param_names = [
'lfn', 'datasetname', 'jeditaskid', 'pandaid', 'type', 'status', 'procstatus', 'creationdate',
'startevent', 'endevent', 'attemptnr', 'maxattempt'
]
files_list = [{k: v for k, v in f.items() if k in file_param_names} for f in files]
# prepare data for a plot
plot_data = {
'data': [],
'options': {
'timeFormat': '%Y-%m-%d',
'labels': ['Date', 'Number of occurrences, daily']
}
}
df = pd.DataFrame([{'creationdate': f['creationdate'], 'pandaid': f['pandaid']} for f in files_list])
df['creationdate'] = pd.to_datetime(df['creationdate'])
df = df.groupby(pd.Grouper(freq='1D', key='creationdate')).count()
plot_data['data'] = [df.reset_index()['creationdate'].tolist(), df['pandaid'].values.tolist()]
plot_data['data'][0] = [t.strftime('%Y-%m-%d') for t in plot_data['data'][0]]
plot_data['data'][0].insert(0, plot_data['options']['labels'][0])
plot_data['data'][1].insert(0, plot_data['options']['labels'][1])
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'frec': frec,
'files': files_list,
'filename': file,
'columns': columns,
'built': datetime.now().strftime("%H:%M:%S"),
'plotData': plot_data,
}
data.update(getContextVariables(request))
        response = render_to_response('fileInfo.html', data, context_instance=RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
else:
data = {
'frec': frec,
'files': files,
'filename': file,
'columns': columns,
}
return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='application/json')
@login_customrequired
def fileList(request):
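    """
    Files-of-a-dataset page: resolves the dataset by datasetname or datasetid
    and reports total and unique file counts; the file rows themselves are
    served separately by loadFileList.
    """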
valid, response = initRequest(request)
if not valid: return response
setupView(request, hours=365 * 24, limit=999999999)
query = {}
files = []
defaultlimit = 1000
frec = None
colnames = []
columns = []
datasetname = ''
datasetid = 0
    # NB: dataset names are not guaranteed to be unique in this table; the first matching row is used
if 'datasetname' in request.session['requestParams']:
datasetname = request.session['requestParams']['datasetname']
dsets = JediDatasets.objects.filter(datasetname=datasetname).values()
if len(dsets) > 0:
datasetid = dsets[0]['datasetid']
elif 'datasetid' in request.session['requestParams']:
datasetid = request.session['requestParams']['datasetid']
dsets = JediDatasets.objects.filter(datasetid=datasetid).values()
if len(dsets) > 0:
datasetname = dsets[0]['datasetname']
else:
data = {
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
"errormessage": "No datasetid or datasetname was provided",
}
return render_to_response('errorPage.html', data, content_type='text/html')
extraparams = ''
if 'procstatus' in request.session['requestParams'] and request.session['requestParams']['procstatus']:
query['procstatus'] = request.session['requestParams']['procstatus']
extraparams += '&procstatus=' + request.session['requestParams']['procstatus']
dataset = []
nfilestotal = 0
nfilesunique = 0
if int(datasetid) > 0:
query['datasetid'] = datasetid
nfilestotal = JediDatasetContents.objects.filter(**query).count()
nfilesunique = JediDatasetContents.objects.filter(**query).values('lfn').distinct().count()
del request.session['TFIRST']
del request.session['TLAST']
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and (
'json' not in request.session['requestParams'])):
xurl = extensibleURL(request)
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'limit': defaultlimit,
'datasetid': datasetid,
'nfilestotal': nfilestotal,
'nfilesunique': nfilesunique,
'extraparams': extraparams,
'datasetname': datasetname,
'built': datetime.now().strftime("%H:%M:%S"),
}
data.update(getContextVariables(request))
response = render_to_response('fileList.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
else:
return HttpResponse(json.dumps(files), content_type='application/json')
def loadFileList(request, datasetid=-1):
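    """
    Return up to 'limit' files of a dataset as JSON, enriched with file sizes
    and Rucio UI links derived from Filestable4/FilestableArch dblock fields.
    """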
valid, response = initRequest(request)
if not valid: return response
setupView(request, hours=365 * 24, limit=999999999)
query = {}
files = []
limit = 1000
if 'limit' in request.session['requestParams']:
limit = int(request.session['requestParams']['limit'])
if 'procstatus' in request.session['requestParams'] and request.session['requestParams']['procstatus']:
query['procstatus'] = request.session['requestParams']['procstatus']
sortOrder = 'lfn'
if int(datasetid) > 0:
query['datasetid'] = datasetid
files.extend(JediDatasetContents.objects.filter(**query).values().order_by(sortOrder)[:limit])
pandaids = []
for f in files:
pandaids.append(f['pandaid'])
query = {}
extra_str = '(1=1)'
files_ft = []
files_ft_dict = {}
if len(pandaids) > DB_N_MAX_IN_QUERY:
tk = insert_to_temp_table(pandaids)
extra_str = 'pandaid in (select id from {} where transactionkey={} )'.format(get_tmp_table_name(), tk)
else:
query['pandaid__in'] = pandaids
# JEDITASKID, DATASETID, FILEID
files_ft.extend(
Filestable4.objects.filter(**query).extra(where=[extra_str]).values('fileid', 'dispatchdblock', 'scope', 'destinationdblock'))
if len(files_ft) == 0:
files_ft.extend(
FilestableArch.objects.filter(**query).extra(where=[extra_str]).values('fileid', 'dispatchdblock', 'scope', 'destinationdblock'))
if len(files_ft) > 0:
for f in files_ft:
files_ft_dict[f['fileid']] = f
for f in files:
f['fsizemb'] = "%0.2f" % (f['fsize'] / 1000000.)
ruciolink_base = 'https://rucio-ui.cern.ch/did?scope='
f['ruciolink'] = ''
if f['fileid'] in files_ft_dict:
name_param = ''
if len(files_ft_dict[f['fileid']]['dispatchdblock']) > 0:
name_param = 'dispatchdblock'
elif len(files_ft_dict[f['fileid']]['destinationdblock']) > 0:
name_param = 'destinationdblock'
if len(name_param) > 0:
if files_ft_dict[f['fileid']][name_param].startswith(files_ft_dict[f['fileid']]['scope']):
ruciolink_base += files_ft_dict[f['fileid']]['scope']
else:
ruciolink_base += files_ft_dict[f['fileid']][name_param].split('.')[0]
f['ruciolink'] = ruciolink_base + '&name=' + files_ft_dict[f['fileid']][name_param]
f['creationdatecut'] = f['creationdate'].strftime('%Y-%m-%d')
f['creationdate'] = f['creationdate'].strftime(defaultDatetimeFormat)
dump = json.dumps(files, cls=DateEncoder)
return HttpResponse(dump, content_type='application/json')
@login_customrequired
def workQueues(request):
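    """
    List JEDI work queues, filtered by any JediWorkQueue field passed as a
    request parameter; the rendered data is cached for 20 minutes.
    """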
valid, response = initRequest(request)
data = getCacheEntry(request, "workQueues")
if data is not None:
data = json.loads(data)
data['request'] = request
response = render_to_response('workQueues.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
if not valid: return response
setupView(request, hours=180 * 24, limit=9999999)
query = {}
for param in request.session['requestParams']:
for field in JediWorkQueue._meta.get_fields():
if param == field.name:
query[param] = request.session['requestParams'][param]
queues = []
queues.extend(JediWorkQueue.objects.filter(**query).order_by('queue_type', 'queue_order').values())
del request.session['TFIRST']
del request.session['TLAST']
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and (
'json' not in request.session['requestParams'])):
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'queues': queues,
'xurl': extensibleURL(request),
'built': datetime.now().strftime("%H:%M:%S"),
}
response = render_to_response('workQueues.html', data, content_type='text/html')
setCacheEntry(request, "workQueues", json.dumps(data, cls=DateEncoder), 60 * 20)
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
else:
return HttpResponse(json.dumps(queues), content_type='application/json')
def stateNotUpdated(request, state='transferring', hoursSinceUpdate=36, values=standard_fields, count=False,
wildCardExtension='(1=1)'):
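    """
    Select jobs that are stuck in the given state, i.e. whose statechangetime
    is older than hoursSinceUpdate hours. With count=True return the total and
    per-cloud breakdowns; otherwise return the job records.
    """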
valid, response = initRequest(request)
if not valid:
return response
query = setupView(request, opmode='notime', limit=99999999)
pq_clouds = get_pq_clouds()
if 'jobstatus' in request.session['requestParams']:
state = request.session['requestParams']['jobstatus']
if 'transferringnotupdated' in request.session['requestParams']:
hoursSinceUpdate = int(request.session['requestParams']['transferringnotupdated'])
if 'statenotupdated' in request.session['requestParams']:
hoursSinceUpdate = int(request.session['requestParams']['statenotupdated'])
moddate = timezone.now() - timedelta(hours=hoursSinceUpdate)
moddate = moddate.strftime(defaultDatetimeFormat)
mindate = timezone.now() - timedelta(hours=24 * 30)
mindate = mindate.strftime(defaultDatetimeFormat)
query['statechangetime__lte'] = moddate
# query['statechangetime__gte'] = mindate
query['jobstatus'] = state
if count:
jobs = []
jobs.extend(
Jobsactive4.objects.filter(**query).extra(where=[wildCardExtension]).values('cloud', 'computingsite',
'jobstatus').annotate(
Count('jobstatus')))
jobs.extend(
Jobsdefined4.objects.filter(**query).extra(where=[wildCardExtension]).values('cloud', 'computingsite',
'jobstatus').annotate(
Count('jobstatus')))
jobs.extend(
Jobswaiting4.objects.filter(**query).extra(where=[wildCardExtension]).values('cloud', 'computingsite',
'jobstatus').annotate(
Count('jobstatus')))
ncount = 0
perCloud = {}
perRCloud = {}
for cloud in cloudList:
perCloud[cloud] = 0
perRCloud[cloud] = 0
for job in jobs:
site = job['computingsite']
if site in pq_clouds:
cloud = pq_clouds[site]
if not cloud in perCloud:
perCloud[cloud] = 0
perCloud[cloud] += job['jobstatus__count']
cloud = job['cloud']
if not cloud in perRCloud:
perRCloud[cloud] = 0
perRCloud[cloud] += job['jobstatus__count']
ncount += job['jobstatus__count']
perCloudl = []
for c in perCloud:
pcd = {'name': c, 'count': perCloud[c]}
perCloudl.append(pcd)
perCloudl = sorted(perCloudl, key=lambda x: x['name'])
perRCloudl = []
for c in perRCloud:
pcd = {'name': c, 'count': perRCloud[c]}
perRCloudl.append(pcd)
perRCloudl = sorted(perRCloudl, key=lambda x: x['name'])
return ncount, perCloudl, perRCloudl
else:
jobs = []
jobs.extend(Jobsactive4.objects.filter(**query).extra(where=[wildCardExtension]).values(*values))
jobs.extend(Jobsdefined4.objects.filter(**query).extra(where=[wildCardExtension]).values(*values))
jobs.extend(Jobswaiting4.objects.filter(**query).extra(where=[wildCardExtension]).values(*values))
return jobs
def taskNotUpdated(request, query, state='submitted', hoursSinceUpdate=36, values=[], count=False,
wildCardExtension='(1=1)'):
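    """
    Select JEDI tasks stuck in the given status, i.e. whose statechangetime is
    older than hoursSinceUpdate hours. With count=True return the total and
    per-state counts; otherwise return the task records.
    """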
valid, response = initRequest(request)
if not valid: return response
# query = setupView(request, opmode='notime', limit=99999999)
if 'status' in request.session['requestParams']: state = request.session['requestParams']['status']
if 'statenotupdated' in request.session['requestParams']: hoursSinceUpdate = int(
request.session['requestParams']['statenotupdated'])
moddate = timezone.now() - timedelta(hours=hoursSinceUpdate)
moddate = moddate.strftime(defaultDatetimeFormat)
mindate = timezone.now() - timedelta(hours=24 * 30)
mindate = mindate.strftime(defaultDatetimeFormat)
query['statechangetime__lte'] = moddate
# query['statechangetime__gte'] = mindate
query['status'] = state
if count:
tasks = JediTasks.objects.filter(**query).extra(where=[wildCardExtension]).values('name', 'status').annotate(
Count('status'))
statecounts = {}
for s in taskstatelist:
statecounts[s] = {}
statecounts[s]['count'] = 0
statecounts[s]['name'] = s
ncount = 0
for task in tasks:
state = task['status']
statecounts[state]['count'] += task['status__count']
            ncount += task['status__count']
return ncount, statecounts
else:
tasks = JediTasks.objects.filter(**query).extra(where=[wildCardExtension]).values()
return tasks
def buildGoogleFlowDiagram(request, jobs=[], tasks=[]):
## set up google flow diagram
if 'requestParams' not in request.session or 'flow' not in request.session['requestParams']: return None
flowstruct = {}
if len(jobs) > 0:
flowstruct['maxweight'] = len(jobs)
flowrows = buildGoogleJobFlow(jobs)
elif len(tasks) > 0:
flowstruct['maxweight'] = len(tasks)
flowrows = buildGoogleTaskFlow(request, tasks)
else:
return None
flowstruct['columns'] = [['string', 'From'], ['string', 'To'], ['number', 'Weight']]
flowstruct['rows'] = flowrows[:3000]
return flowstruct
def buildGoogleJobFlow(jobs):
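    """
    Build weighted rows for a flow (Sankey) diagram over jobs:
    MCP cloud -> cloud -> site -> error summary -> processing type, merging
    rare sites, errors and processing types into "Other ..." buckets.
    """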
cloudd = {}
mcpcloudd = {}
mcpshownd = {}
errd = {}
errshownd = {}
sited = {}
statd = {}
errcountd = {}
sitecountd = {}
siteshownd = {}
ptyped = {}
ptypecountd = {}
ptypeshownd = {}
for job in jobs:
errinfo = errorInfo(job, nchars=40, mode='string')
jobstatus = job['jobstatus']
for js in ('finished', 'holding', 'merging', 'running', 'cancelled', 'transferring', 'starting'):
if jobstatus == js: errinfo = js
if errinfo not in errcountd: errcountd[errinfo] = 0
errcountd[errinfo] += 1
cloud = job['homecloud']
mcpcloud = job['cloud']
ptype = job['processingtype']
if ptype not in ptypecountd: ptypecountd[ptype] = 0
ptypecountd[ptype] += 1
site = job['computingsite']
if site not in sitecountd: sitecountd[site] = 0
sitecountd[site] += 1
if cloud not in cloudd: cloudd[cloud] = {}
if site not in cloudd[cloud]: cloudd[cloud][site] = 0
cloudd[cloud][site] += 1
if mcpcloud not in mcpcloudd: mcpcloudd[mcpcloud] = {}
if cloud not in mcpcloudd[mcpcloud]: mcpcloudd[mcpcloud][cloud] = 0
mcpcloudd[mcpcloud][cloud] += 1
if jobstatus not in errd: errd[jobstatus] = {}
if errinfo not in errd[jobstatus]: errd[jobstatus][errinfo] = 0
errd[jobstatus][errinfo] += 1
if site not in sited: sited[site] = {}
if errinfo not in sited[site]: sited[site][errinfo] = 0
sited[site][errinfo] += 1
if jobstatus not in statd: statd[jobstatus] = {}
if errinfo not in statd[jobstatus]: statd[jobstatus][errinfo] = 0
statd[jobstatus][errinfo] += 1
if ptype not in ptyped: ptyped[ptype] = {}
if errinfo not in ptyped[ptype]: ptyped[ptype][errinfo] = 0
ptyped[ptype][errinfo] += 1
flowrows = []
for mcpcloud in mcpcloudd:
for cloud in mcpcloudd[mcpcloud]:
n = mcpcloudd[mcpcloud][cloud]
if float(n) / len(jobs) > 0.0:
mcpshownd[mcpcloud] = 1
flowrows.append(["%s MCP" % mcpcloud, cloud, n])
othersited = {}
othersiteErrd = {}
for cloud in cloudd:
if cloud not in mcpshownd: continue
for e in cloudd[cloud]:
n = cloudd[cloud][e]
if float(sitecountd[e]) / len(jobs) > .01:
siteshownd[e] = 1
flowrows.append([cloud, e, n])
else:
flowrows.append([cloud, 'Other sites', n])
othersited[e] = n
# for jobstatus in errd:
# for errinfo in errd[jobstatus]:
# flowrows.append( [ errinfo, jobstatus, errd[jobstatus][errinfo] ] )
for e in errcountd:
if float(errcountd[e]) / len(jobs) > .01:
errshownd[e] = 1
for site in sited:
nother = 0
for e in sited[site]:
n = sited[site][e]
if site in siteshownd:
sitename = site
else:
sitename = "Other sites"
if e in errshownd:
errname = e
else:
errname = 'Other errors'
flowrows.append([sitename, errname, n])
if errname not in othersiteErrd: othersiteErrd[errname] = 0
othersiteErrd[errname] += n
# for e in othersiteErrd:
# if e in errshownd:
# flowrows.append( [ 'Other sites', e, othersiteErrd[e] ] )
for ptype in ptyped:
if float(ptypecountd[ptype]) / len(jobs) > .05:
ptypeshownd[ptype] = 1
ptname = ptype
else:
ptname = "Other processing types"
for e in ptyped[ptype]:
n = ptyped[ptype][e]
if e in errshownd:
flowrows.append([e, ptname, n])
else:
flowrows.append(['Other errors', ptname, n])
return flowrows
def buildGoogleTaskFlow(request, tasks):
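    """
    Build rows (weighted by file counts) for a flow diagram over tasks:
    processing type -> request -> task status -> substatus -> file status ->
    cloud; for analysis tasks the chain starts from the task name instead of
    processing type and request.
    """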
analysis = False
if 'requestParams' in request.session:
analysis = 'tasktype' in request.session['requestParams'] and request.session['requestParams'][
'tasktype'].startswith('anal')
ptyped = {}
reqd = {}
statd = {}
substatd = {}
trfd = {}
filestatd = {}
cloudd = {}
reqsized = {}
reqokd = {}
    ## Count the reqids. Use only the biggest requests (by file count) if there are too many.
for task in tasks:
if not analysis and 'deftreqid' not in task: continue
req = int(task['reqid'])
dsinfo = task['dsinfo']
nfiles = dsinfo['nfiles']
if req not in reqsized: reqsized[req] = 0
reqsized[req] += nfiles
## Veto requests that are all done etc.
if task['superstatus'] != 'done': reqokd[req] = 1
if not analysis:
for req in reqsized:
# de-prioritize requests not specifically OK'd for inclusion
if req not in reqokd: reqsized[req] = 0
nmaxreq = 10
if len(reqsized) > nmaxreq:
reqkeys = reqsized.keys()
reqsortl = sorted(reqkeys, key=reqsized.__getitem__, reverse=True)
reqsortl = reqsortl[:nmaxreq - 1]
else:
reqsortl = reqsized.keys()
for task in tasks:
ptype = task['processingtype']
# if 'jedireqid' not in task: continue
req = int(task['reqid'])
if not analysis and req not in reqsortl: continue
stat = task['superstatus']
substat = task['status']
# trf = task['transpath']
trf = task['taskname']
cloud = task['cloud']
if cloud == '': cloud = 'No cloud assigned'
dsinfo = task['dsinfo']
nfailed = dsinfo['nfilesfailed']
nfinished = dsinfo['nfilesfinished']
nfiles = dsinfo['nfiles']
npending = nfiles - nfailed - nfinished
if ptype not in ptyped: ptyped[ptype] = {}
if req not in ptyped[ptype]: ptyped[ptype][req] = 0
ptyped[ptype][req] += nfiles
if req not in reqd: reqd[req] = {}
if stat not in reqd[req]: reqd[req][stat] = 0
reqd[req][stat] += nfiles
if trf not in trfd: trfd[trf] = {}
if stat not in trfd[trf]: trfd[trf][stat] = 0
trfd[trf][stat] += nfiles
if stat not in statd: statd[stat] = {}
if substat not in statd[stat]: statd[stat][substat] = 0
statd[stat][substat] += nfiles
if substat not in substatd: substatd[substat] = {}
if 'finished' not in substatd[substat]:
for filestat in ('finished', 'failed', 'pending'):
substatd[substat][filestat] = 0
substatd[substat]['finished'] += nfinished
substatd[substat]['failed'] += nfailed
substatd[substat]['pending'] += npending
if cloud not in cloudd: cloudd[cloud] = {}
if 'finished' not in cloudd[cloud]:
for filestat in ('finished', 'failed', 'pending'):
cloudd[cloud][filestat] = 0
cloudd[cloud]['finished'] += nfinished
cloudd[cloud]['failed'] += nfailed
cloudd[cloud]['pending'] += npending
flowrows = []
if analysis:
## Don't include request, task for analysis
for trf in trfd:
for stat in trfd[trf]:
n = trfd[trf][stat]
flowrows.append([trf, 'Task %s' % stat, n])
else:
for ptype in ptyped:
for req in ptyped[ptype]:
n = ptyped[ptype][req]
flowrows.append([ptype, 'Request %s' % req, n])
for req in reqd:
for stat in reqd[req]:
n = reqd[req][stat]
flowrows.append(['Request %s' % req, 'Task %s' % stat, n])
for stat in statd:
for substat in statd[stat]:
n = statd[stat][substat]
flowrows.append(['Task %s' % stat, 'Substatus %s' % substat, n])
for substat in substatd:
for filestat in substatd[substat]:
if filestat not in substatd[substat]: continue
n = substatd[substat][filestat]
flowrows.append(['Substatus %s' % substat, 'File status %s' % filestat, n])
for cloud in cloudd:
for filestat in cloudd[cloud]:
if filestat not in cloudd[cloud]: continue
n = cloudd[cloud][filestat]
flowrows.append(['File status %s' % filestat, cloud, n])
return flowrows
def g4exceptions(request):
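    """
    Aggregate Geant4 exception messages from failed/holding jobs with
    exeerrorcode 68, optionally filtered by an AMI tag extracted from the job
    parameters, and return message frequencies and affected pandaids as JSON.
    """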
valid, response = initRequest(request)
setupView(request, hours=365 * 24, limit=999999999)
if 'hours' in request.session['requestParams']:
hours = int(request.session['requestParams']['hours'])
else:
hours = 3
query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=hours, wildCardExt=True)
query['jobstatus__in'] = ['failed', 'holding']
query['exeerrorcode'] = 68
query['exeerrordiag__icontains'] = 'G4 exception'
values = 'pandaid', 'atlasrelease', 'exeerrorcode', 'exeerrordiag', 'jobstatus', 'transformation'
jobs = []
jobs.extend(
Jobsactive4.objects.filter(**query).extra(where=[wildCardExtension])[:request.session['JOB_LIMIT']].values(
*values))
jobs.extend(
Jobsarchived4.objects.filter(**query).extra(where=[wildCardExtension])[:request.session['JOB_LIMIT']].values(
*values))
if (((datetime.now() - datetime.strptime(query['modificationtime__castdate__range'][0], "%Y-%m-%d %H:%M:%S")).days > 1) or \
((datetime.now() - datetime.strptime(query['modificationtime__castdate__range'][1],
"%Y-%m-%d %H:%M:%S")).days > 1)):
jobs.extend(
Jobsarchived.objects.filter(**query).extra(where=[wildCardExtension])[:request.session['JOB_LIMIT']].values(
*values))
if 'amitag' in request.session['requestParams']:
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
else:
tmpTableName = "TMP_IDS1"
transactionKey = random.randrange(1000000)
# connection.enter_transaction_management()
new_cur = connection.cursor()
for job in jobs:
new_cur.execute("INSERT INTO %s(ID,TRANSACTIONKEY) VALUES (%i,%i)" % (
tmpTableName, job['pandaid'], transactionKey)) # Backend dependable
# connection.commit()
new_cur.execute(
"SELECT JOBPARAMETERS, PANDAID FROM ATLAS_PANDA.JOBPARAMSTABLE WHERE PANDAID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (
tmpTableName, transactionKey))
mrecs = dictfetchall(new_cur)
# connection.commit()
# connection.leave_transaction_management()
jobsToRemove = set()
for rec in mrecs:
acceptJob = True
parameters = rec['JOBPARAMETERS'].read()
tagName = "--AMITag"
startPos = parameters.find(tagName)
if startPos == -1:
acceptJob = False
endPos = parameters.find(" ", startPos)
AMITag = parameters[startPos + len(tagName) + 1:endPos]
if AMITag != request.session['requestParams']['amitag']:
acceptJob = False
            if not acceptJob:
                jobsToRemove.add(rec['PANDAID'])
        jobs = [j for j in jobs if j['pandaid'] not in jobsToRemove]
jobs = addJobMetadata(jobs)
errorFrequency = {}
errorJobs = {}
for job in jobs:
if (job['metastruct']['executor'][0]['logfileReport']['countSummary']['FATAL'] > 0):
message = job['metastruct']['executor'][0]['logfileReport']['details']['FATAL'][0]['message']
exceptMess = message[message.find("G4Exception :") + 14: message.find("issued by :") - 1]
if exceptMess not in errorFrequency:
errorFrequency[exceptMess] = 1
else:
errorFrequency[exceptMess] += 1
if exceptMess not in errorJobs:
errorJobs[exceptMess] = []
errorJobs[exceptMess].append(job['pandaid'])
else:
errorJobs[exceptMess].append(job['pandaid'])
resp = {'errorFrequency': errorFrequency, 'errorJobs': errorJobs}
del request.session['TFIRST']
del request.session['TLAST']
return HttpResponse(json.dumps(resp), content_type='application/json')
def initSelfMonitor(request):
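    """
    Store self-monitoring information about the current request in the session:
    client address, requested URL, referer, user agent, CPU load and memory
    usage of the server.
    """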
import psutil
if 'hostname' in request.session:
server = request.session['hostname']
else: server = '-'
if 'HTTP_X_FORWARDED_FOR' in request.META:
remote = request.META['HTTP_X_FORWARDED_FOR']
else:
remote = request.META['REMOTE_ADDR']
urlProto = request.META['wsgi.url_scheme']
if 'HTTP_X_FORWARDED_PROTO' in request.META:
urlProto = request.META['HTTP_X_FORWARDED_PROTO']
urlProto = str(urlProto) + "://"
try:
urls = urlProto + request.META['SERVER_NAME'] + request.META['REQUEST_URI']
except:
if 'SERVER_PORT' in request.META:
port =':' + request.META['SERVER_PORT']
else: port = ''
if 'PATH_INFO' in request.META:
path = request.META['PATH_INFO']
else: path=''
if 'QUERY_STRING' in request.META and request.META['QUERY_STRING']!="":
qstring= '?'+request.META['QUERY_STRING']
else: qstring =''
urls = urlProto + request.META['SERVER_NAME'] + port + path + qstring
print (urls)
qtime = str(timezone.now())
load = psutil.cpu_percent(interval=1)
mem = psutil.virtual_memory().percent
if 'HTTP_REFERER' in request.META:
refferer = request.META['HTTP_REFERER']
else:
refferer = '-'
if 'HTTP_USER_AGENT' in request.META:
useragent = request.META['HTTP_USER_AGENT']
else:
useragent = '-'
request.session["qtime"] = qtime
request.session["load"] = load
request.session["remote"] = remote
request.session["mem"] = mem
request.session["urls"] = urls
request.session["refferer"] = refferer
request.session["useragent"] = useragent
from PIL import Image
import urllib.request
import io
whitelist = ["triumf.ca", "cern.ch"]
def image(request):
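    """
    Image proxy: fetch an image from a whitelisted domain given via the 'url'
    parameter, convert it to JPEG and cache the result for 10 minutes.
    """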
if ('url' in request.GET):
param = request.build_absolute_uri()
url = param[param.index("=")+1:len(param)]
for urlw in whitelist:
pattern = "^((http[s]?):\/)?\/?([^:\/\s]+"+urlw+")"
urlConfim = re.findall(pattern,url)
if (len(urlConfim)>0):
break
if (len(urlConfim)==0):
return redirect('/static/images/22802286-denied-red-grunge-stamp.png')
try:
data = getCacheEntry(request, "imagewrap")
if data is not None:
data = base64.b64decode(data)
response = HttpResponse(data, content_type='image/jpg')
patch_response_headers(response, cache_timeout=10 * 60)
return response
else:
with urllib.request.urlopen(url) as fd:
image_file = io.BytesIO(fd.read())
im = Image.open(image_file)
rgb_im = im.convert('RGB')
response = HttpResponse(content_type='image/jpg')
rgb_im.save(response, "JPEG")
byte_io = BytesIO()
rgb_im.save(byte_io, 'JPEG')
data = base64.b64encode(byte_io.getvalue())
setCacheEntry(request, "imagewrap", data, 60 * 10)
patch_response_headers(response, cache_timeout=10 * 60)
return response
except Exception as ex:
print(ex)
return redirect('/static/images/404-not-found-site.gif')
else:
return redirect('/static/images/error_z0my4n.png')
def grafana_image(request):
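    """
    Grafana image proxy: like image(), but adds the configured Grafana
    Authorization token to the request before fetching and caching the image.
    """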
if ('url' in request.GET):
param = request.build_absolute_uri()
url = param[param.index("=")+1:len(param)]
for urlw in whitelist:
pattern = "^((http[s]?):\/)?\/?([^:\/\s]+"+urlw+")"
urlConfim = re.findall(pattern,url)
if (len(urlConfim) > 0):
break
if (len(urlConfim) == 0):
return redirect('/static/images/22802286-denied-red-grunge-stamp.png')
try:
data = getCacheEntry(request, "grafanaimagewrap")
if data is not None:
data = base64.b64decode(data)
response = HttpResponse(data, content_type='image/jpg')
patch_response_headers(response, cache_timeout=10 * 60)
return response
if 'Authorization' in GRAFANA:
grafana_token = GRAFANA['Authorization']
import requests
headers = {"Authorization": grafana_token}
r = requests.get(url, headers=headers)
r.raise_for_status()
with io.BytesIO(r.content) as f:
with Image.open(f) as img:
rgb_im = img.convert('RGB')
response = HttpResponse(content_type='image/jpg')
rgb_im.save(response, "JPEG")
byte_io = BytesIO()
rgb_im.save(byte_io, 'JPEG')
data = base64.b64encode(byte_io.getvalue())
setCacheEntry(request, "grafanaimagewrap", data, 60 * 60)
return response
except Exception as ex:
return redirect('/static/images/404-not-found-site.gif')
else:
return redirect('/static/images/error_z0my4n.png')
def handler500(request):
response = render_to_response('500.html', {},
context_instance=RequestContext(request))
response.status_code = 500
return response
def getBadEventsForTask(request):
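    """
    Return, as JSON, events of a task with attemptnr=1 grouped by dataset and
    error code, together with the affected pandaids; in the default 'drop' mode
    only events whose pandaids belong to the task input datasets are counted.
    """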
if 'jeditaskid' in request.GET:
jeditaskid = int(request.GET['jeditaskid'])
else:
return HttpResponse("Not jeditaskid supplied", content_type='text/html')
mode = 'drop'
if 'mode' in request.GET and request.GET['mode'] == 'nodrop':
mode = 'nodrop'
data = []
cursor = connection.cursor()
plsql = """select DATASETID, ERROR_CODE, RTRIM(XMLAGG(XMLELEMENT(E,DEF_MIN_EVENTID,',').EXTRACT('//text()')
ORDER BY DEF_MIN_EVENTID).GetClobVal(),',') as bb,
RTRIM(XMLAGG(XMLELEMENT(E,PANDAID,',').EXTRACT('//text()') ORDER BY PANDAID).GetClobVal(),',') AS PANDAIDS,
count(*) from
atlas_panda.jedi_events where jeditaskid=%d and attemptnr = 1 group by DATASETID, ERROR_CODE """ % jeditaskid
if mode == 'drop':
plsql = """select DATASETID, ERROR_CODE, RTRIM(XMLAGG(XMLELEMENT(E,DEF_MIN_EVENTID,',').EXTRACT('//text()')
ORDER BY DEF_MIN_EVENTID).GetClobVal(),',') as bb,
RTRIM(XMLAGG(XMLELEMENT(E,PANDAID,',').EXTRACT('//text()') ORDER BY PANDAID).GetClobVal(),',') AS PANDAIDS,
count(*) from
atlas_panda.jedi_events where jeditaskid=%d and attemptnr = 1
and PANDAID IN (SELECT PANDAID FROM ATLAS_PANDA.JEDI_DATASET_CONTENTS where jeditaskid=%d and type in ('input', 'pseudo_input'))
group by DATASETID, ERROR_CODE """ % (jeditaskid, jeditaskid)
cursor.execute(plsql)
evtable = cursor.fetchall()
errorCodes = get_job_error_desc()
for row in evtable:
dataitem = {}
dataitem['DATASETID'] = row[0]
dataitem['ERROR_CODE'] = (errorCodes['piloterrorcode'][row[1]] + " (" +str(row[1])+ ")") if row[1] in errorCodes['piloterrorcode'] else row[1]
        dataitem['EVENTS'] = list(set(str(row[2].read()).split(','))) if row[2] is not None else None
        dataitem['PANDAIDS'] = list(set(str(row[3].read()).split(','))) if row[3] is not None else None
if dataitem['EVENTS']: dataitem['EVENTS'] = sorted(dataitem['EVENTS'])
dataitem['COUNT'] = row[4]
data.append(dataitem)
cursor.close()
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='application/json')
def getEventsChunks(request):
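    """
    Return, as JSON, the event chunks (startevent/endevent ranges) of a task's
    input files, annotated with previous attempts reconstructed from the
    jobset_retry relations in JEDI_JOB_RETRY_HISTORY.
    """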
if 'jeditaskid' in request.GET:
jeditaskid = int(request.GET['jeditaskid'])
else:
return HttpResponse("Not jeditaskid supplied", content_type='text/html')
# We reconstruct here jobsets retries
sqlRequest = """SELECT OLDPANDAID, NEWPANDAID, MAX(LEV) as LEV, MIN(PTH) as PTH FROM (
SELECT OLDPANDAID, NEWPANDAID, LEVEL as LEV, CONNECT_BY_ISLEAF as IL, SYS_CONNECT_BY_PATH(OLDPANDAID, ',') PTH FROM (
SELECT OLDPANDAID, NEWPANDAID FROm ATLAS_PANDA.JEDI_JOB_RETRY_HISTORY WHERE JEDITASKID=%s and RELATIONTYPE='jobset_retry')t1 CONNECT BY OLDPANDAID=PRIOR NEWPANDAID
)t2 GROUP BY OLDPANDAID, NEWPANDAID;""" % str(jeditaskid)
cur = connection.cursor()
cur.execute(sqlRequest)
datasetsChunks = cur.fetchall()
cur.close()
jobsetretries = {}
eventsChunks = []
for datasetsChunk in datasetsChunks:
jobsetretries[datasetsChunk[1]] = datasetsChunk[3].split(',')[1:]
eventsChunksValues = 'lfn', 'attemptnr', 'startevent', 'endevent', 'pandaid', 'status', 'jobsetid', 'failedattempt', 'maxfailure', 'maxattempt'
queryChunks = {'jeditaskid': jeditaskid, 'startevent__isnull': False, 'type': 'input'}
eventsChunks.extend(
JediDatasetContents.objects.filter(**queryChunks).order_by('attemptnr').reverse().values(*eventsChunksValues))
for eventsChunk in eventsChunks:
if eventsChunk['jobsetid'] in jobsetretries:
eventsChunk['prevAttempts'] = jobsetretries[eventsChunk['jobsetid']]
eventsChunk['attemptnrDS'] = len(jobsetretries[eventsChunk['jobsetid']])
else:
eventsChunk['prevAttempts'] = []
eventsChunk['attemptnrDS'] = 0
return HttpResponse(json.dumps(eventsChunks, cls=DateTimeEncoder), content_type='application/json')
@never_cache
def getJobStatusLog(request, pandaid = None):
"""
A view to asynchronously load job states changes history
:param request:
:param pandaid:
:return: json contained job states changes history
"""
valid, response = initRequest(request)
if not valid:
return response
try:
pandaid = int(pandaid)
except:
        return HttpResponse(status=404, content_type='text/html')
squery = {}
squery['pandaid'] = pandaid
statusLog = []
statusLog.extend(JobsStatuslog.objects.filter(**squery).order_by('modiftime_extended').values())
mtimeparam = 'modiftime_extended'
if len(statusLog) > 0 and None in set([sl['modiftime_extended'] for sl in statusLog]):
mtimeparam = 'modificationtime'
statusLog = sorted(statusLog, key=lambda x: x[mtimeparam])
if len(statusLog) > 0:
for c, item in enumerate(statusLog):
if c < len(statusLog)-1:
if statusLog[c+1][mtimeparam] is not None and statusLog[c][mtimeparam] is not None:
duration = statusLog[c+1][mtimeparam] - statusLog[c][mtimeparam]
ndays = duration.days
strduration = str(timedelta(seconds=duration.seconds))
statusLog[c]['duration'] = "%s:%s" % (ndays, strduration)
else:
statusLog[c]['duration'] = "---"
else:
statusLog[c]['duration'] = "---"
for sl in statusLog:
sl['modiftime_str'] = sl[mtimeparam].strftime(defaultDatetimeFormat) if sl[mtimeparam] is not None else "---"
if is_json_request(request):
response = JsonResponse(statusLog, safe=False)
else:
response = render_to_response('jobStatusLog.html', {'statusLog': statusLog}, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
@never_cache
def getTaskStatusLog(request, jeditaskid=None):
"""
A view to asynchronously load task states changes history
:param request:
:param jeditaskid:
:return: json contained task states changes history
"""
valid, response = initRequest(request)
if not valid: return response
try:
jeditaskid = int(jeditaskid)
except:
        return HttpResponse(status=404, content_type='text/html')
mtimeparam = 'modificationtime'
squery = {}
squery['jeditaskid'] = jeditaskid
statusLog = []
statusLog.extend(TasksStatusLog.objects.filter(**squery).order_by(mtimeparam).values())
if len(statusLog) > 0:
for c, item in enumerate(statusLog):
if c < len(statusLog)-1:
if statusLog[c+1][mtimeparam] is not None and statusLog[c][mtimeparam] is not None:
duration = statusLog[c+1][mtimeparam] - statusLog[c][mtimeparam]
ndays = duration.days
strduration = str(timedelta(seconds=duration.seconds))
statusLog[c]['duration'] = "%s:%s" % (ndays, strduration)
else:
statusLog[c]['duration'] = "---"
else:
statusLog[c]['duration'] = "---"
for sl in statusLog:
sl['modiftime_str'] = sl[mtimeparam].strftime(defaultDatetimeFormat) if sl[mtimeparam] is not None else "---"
if is_json_request(request):
response = HttpResponse(json.dumps(statusLog, cls=DateEncoder), content_type='application/json')
else:
response = render_to_response('taskStatusLog.html', {'statusLog': statusLog}, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
@never_cache
def getTaskLogs(request, jeditaskid=None):
"""
A view to asynchronously load task logs from ElasticSearch storage
:param request:
:param jeditaskid:
:return: json
"""
valid, response = initRequest(request)
if not valid: return response
try:
jeditaskid = int(jeditaskid)
except:
        return HttpResponse(status=404, content_type='text/html')
tasklogs = get_logs_by_taskid(jeditaskid)
response = HttpResponse(json.dumps(tasklogs, cls=DateEncoder), content_type='application/json')
# if is_json_request(request):
# response = HttpResponse(json.dumps(tasklogs, cls=DateEncoder), content_type='application/json')
# else:
# HttpResponse(status=404, content_type='text/html')
return response
### API ###
def getSites(request):
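    """
    AJAX autocomplete helper: return the siteids matching the 'term' GET
    parameter, excluding CMS sites.
    """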
if request.is_ajax():
try:
q = request.GET.get('term', '')
sites = Schedconfig.objects.filter(siteid__icontains=q).exclude(cloud='CMS').values("siteid")
results = []
for site in sites:
results.append(site['siteid'])
data = json.dumps(results)
except:
data = 'fail'
else:
data = 'fail'
mimetype = 'application/json'
return HttpResponse(data, mimetype)
@never_cache
def get_hc_tests(request):
"""
API for getting list of HammerCloud Tests
:param request:
:return: JSON response
"""
valid, response = initRequest(request)
if not valid:
return response
jobs = []
tests = []
panda_queues = []
pilot_timings_names = ['timegetjob', 'timestagein', 'timepayload', 'timestageout', 'timetotal_setup']
error_fields = [
'brokerageerrorcode', 'brokerageerrordiag',
'ddmerrorcode', 'ddmerrordiag',
'exeerrorcode', 'exeerrordiag',
'jobdispatchererrorcode', 'jobdispatchererrordiag',
'piloterrorcode', 'piloterrordiag',
'superrorcode', 'superrordiag',
'taskbuffererrorcode', 'taskbuffererrordiag',
'transexitcode',
]
fields = [
'pandaid',
'produsername',
'prodsourcelabel',
'processingtype',
'transformation',
'atlasrelease',
'proddblock',
'destinationdblock',
'destinationse',
'homepackage',
'inputfileproject',
'inputfiletype',
'jobname',
'cloud',
'nucleus',
'computingsite',
'computingelement',
'gshare',
'schedulerid',
'pilotid',
'jobstatus',
'creationtime',
'starttime',
'endtime',
'statechangetime',
'modificationtime',
'actualcorecount',
'minramcount',
'maxvmem',
'maxpss',
'maxrss',
'cpuconsumptiontime',
'nevents',
'hs06sec',
'noutputdatafiles',
'resourcetype',
'eventservice',
'transformation',
'modificationhost',
'batchid'
]
jvalues = ['pilottiming',]
jvalues.extend(fields)
jvalues.extend(error_fields)
query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, wildCardExt=True)
query['produsername'] = 'gangarbt'
query['cloud'] = 'RU'
excluded_time_query = copy.deepcopy(query)
if 'modificationtime__castdate__range' in excluded_time_query:
del excluded_time_query['modificationtime__castdate__range']
    # we change the time param from modificationtime to statechangetime
timeparamname = 'statechangetime'
if 'modificationtime__castdate__range' in query:
query[timeparamname + '__castdate__range'] = query['modificationtime__castdate__range']
del query['modificationtime__castdate__range']
is_archive_only = False
is_archive = False
timerange = [parse_datetime(mt) for mt in query[timeparamname + '__castdate__range']]
if timerange[0] < datetime.utcnow()-timedelta(days=4) and timerange[1] < datetime.utcnow()-timedelta(days=4):
is_archive_only = True
if timerange[0] < datetime.utcnow() - timedelta(days=3):
is_archive = True
if not is_archive_only:
jobs.extend(Jobsdefined4.objects.filter(**excluded_time_query).extra(where=[wildCardExtension]).values(*jvalues))
jobs.extend(Jobsactive4.objects.filter(**excluded_time_query).extra(where=[wildCardExtension]).values(*jvalues))
jobs.extend(Jobswaiting4.objects.filter(**excluded_time_query).extra(where=[wildCardExtension]).values(*jvalues))
jobs.extend(Jobsarchived4.objects.filter(**query).extra(where=[wildCardExtension]).values(*jvalues))
if is_archive_only or is_archive:
jobs.extend(Jobsarchived.objects.filter(**query).extra(where=[wildCardExtension]).values(*jvalues))
_logger.info('Got jobs: {}'.format(time.time() - request.session['req_init_time']))
panda_queues_info = get_panda_queues()
_logger.info('Got PQ info: {}'.format(time.time() - request.session['req_init_time']))
# getting input file info for jobs
try:
jobs = get_file_info(jobs, type='input', is_archive=is_archive)
except:
_logger.warning('Failed to get info of input files')
_logger.info('Got input file info for jobs: {}'.format(time.time() - request.session['req_init_time']))
errorCodes = get_job_error_desc()
for job in jobs:
test = {}
test['errorinfo'] = errorInfo(job, errorCodes=errorCodes)
try:
hctestid = job['destinationdblock'].split('.')[2][2:]
except:
hctestid = None
test['hctestid'] = hctestid
try:
pilot_timings = [int(pti) for pti in job['pilottiming'].split('|')]
except:
pilot_timings = [0] * 5
test.update(dict(zip(pilot_timings_names, pilot_timings)))
test['inputfilename'] = job['inputfilename'] if 'inputfilename' in job else None
test['inputfilesizemb'] = round(job['inputfilesize'] / 1000000., 2) if 'inputfilesize' in job and isinstance(job['inputfilesize'], int) else None
wallclocktime = get_job_walltime(job)
queuetime = get_job_queuetime(job)
if wallclocktime is not None:
test['wallclocktime'] = wallclocktime
if wallclocktime > 0:
test['cpuefficiency'] = round(job['cpuconsumptiontime']/test['wallclocktime'], 3)
else:
test['cpuefficiency'] = 0
else:
test['wallclocktime'] = 0
test['cpuefficiency'] = 0
if queuetime is not None:
test['queuetime'] = queuetime
else:
test['queuetime'] = 0
for f in fields:
test[f] = job[f]
if 'computingsite' in job and job['computingsite'] in panda_queues_info:
for f in ('siteid', 'gocname', 'status', 'cloud', 'tier', 'corepower'):
if f in panda_queues_info[job['computingsite']]:
if f == 'gocname':
test['site'] = panda_queues_info[job['computingsite']][f]
else:
test[f] = panda_queues_info[job['computingsite']][f]
tests.append(test)
data = {'tests': tests}
response = HttpResponse(json.dumps(data, cls=DateEncoder), content_type='application/json')
return response
@csrf_exempt
def getPayloadLog(request):
"""
A view to asynchronously load pilot logs from ElasticSearch storage by pandaid or taskid
:param request:
:param id:
:return: json
"""
valid, response = initRequest(request)
if not valid: return response
connection = create_esatlas_connection()
mode = 'pandaid'
log_content = {}
if request.POST and "pandaid" in request.POST:
try:
id = int(request.POST['pandaid'])
start_var = int(request.POST['start'])
length_var = int(request.POST['length'])
draw_var = int(request.POST['draw'])
sort = request.POST['order[0][dir]']
search_string = request.POST['search[value]']
except (KeyError, ValueError):
return HttpResponse(status=404, content_type='text/html')
else:
return HttpResponse(status=404, content_type='text/html')
payloadlog, job_running_flag, total = get_payloadlog(id, connection, start=start_var, length=length_var, mode=mode,
sort=sort, search_string=search_string)
log_content['payloadlog'] = payloadlog
log_content['flag'] = job_running_flag
log_content['recordsTotal'] = total
log_content['recordsFiltered'] = total
log_content['draw'] = draw_var
response = HttpResponse(json.dumps(log_content, cls=DateEncoder), content_type='application/json')
return response
|
__init__.py
|
"""
The ``python_function`` model flavor serves as a default model interface for MLflow Python models.
Any MLflow Python model is expected to be loadable as a ``python_function`` model.
In addition, the ``mlflow.pyfunc`` module defines a generic :ref:`filesystem format
<pyfunc-filesystem-format>` for Python models and provides utilities for saving to and loading from
this format. The format is self-contained in the sense that it includes all necessary information
for anyone to load it and use it. Dependencies are either stored directly with the model or
referenced via a Conda environment.
The ``mlflow.pyfunc`` module also defines utilities for creating custom ``pyfunc`` models
using frameworks and inference logic that may not be natively included in MLflow. See
:ref:`pyfunc-create-custom`.
.. _pyfunc-inference-api:
*************
Inference API
*************
Python function models are loaded as an instance of :py:class:`PyFuncModel
<mlflow.pyfunc.PyFuncModel>`, which is an MLflow wrapper around the model implementation and model
metadata (MLmodel file). You can score the model by calling the :py:func:`predict()
<mlflow.pyfunc.PyFuncModel.predict>` method, which has the following signature::
predict(
model_input: [pandas.DataFrame, numpy.ndarray, scipy.sparse.(csc.csc_matrix | csr.csr_matrix),
List[Any], Dict[str, Any]]
) -> [numpy.ndarray | pandas.(Series | DataFrame) | List]
All PyFunc models accept `pandas.DataFrame` as input; deep-learning PyFunc models additionally
accept tensor inputs, either as ``Dict[str, numpy.ndarray]`` (named tensors) or as
`numpy.ndarray` (unnamed tensors).
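As a brief illustrative sketch (the model URI below is a placeholder), a model is loaded and
scored like this::

    import mlflow.pyfunc
    import pandas as pd

    model = mlflow.pyfunc.load_model("runs:/<mlflow_run_id>/model")
    predictions = model.predict(pd.DataFrame({"x": [1.0, 2.0, 3.0]}))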
.. _pyfunc-filesystem-format:
*****************
Filesystem format
*****************
The Pyfunc format is defined as a directory structure containing all required data, code, and
configuration::
./dst-path/
./MLmodel: configuration
<code>: code packaged with the model (specified in the MLmodel file)
<data>: data packaged with the model (specified in the MLmodel file)
<env>: Conda environment definition (specified in the MLmodel file)
The directory structure may contain additional contents that can be referenced by the ``MLmodel``
configuration.
.. _pyfunc-model-config:
MLModel configuration
#####################
A Python model contains an ``MLmodel`` file in **python_function** format in its root with the
following parameters:
- loader_module [required]:
Python module that can load the model. Expected as module identifier
e.g. ``mlflow.sklearn``, it will be imported using ``importlib.import_module``.
The imported module must contain a function with the following signature::
_load_pyfunc(path: string) -> <pyfunc model implementation>
The path argument is specified by the ``data`` parameter and may refer to a file or
directory. The model implementation is expected to be an object with a
``predict`` method with the following signature::
predict(
model_input: [pandas.DataFrame, numpy.ndarray,
scipy.sparse.(csc.csc_matrix | csr.csr_matrix), List[Any], Dict[str, Any]]
) -> [numpy.ndarray | pandas.(Series | DataFrame) | List]
- code [optional]:
Relative path to a directory containing the code packaged with this model.
All files and directories inside this directory are added to the Python path
prior to importing the model loader.
- data [optional]:
Relative path to a file or directory containing model data.
The path is passed to the model loader.
- env [optional]:
Relative path to an exported Conda environment. If present this environment
should be activated prior to running the model.
- Optionally, any additional parameters necessary for interpreting the serialized model in
``pyfunc`` format.
.. rubric:: Example
::
tree example/sklearn_iris/mlruns/run1/outputs/linear-lr
::
├── MLmodel
├── code
│ ├── sklearn_iris.py
│
├── data
│ └── model.pkl
└── mlflow_env.yml
::
cat example/sklearn_iris/mlruns/run1/outputs/linear-lr/MLmodel
::
python_function:
code: code
data: data/model.pkl
loader_module: mlflow.sklearn
env: mlflow_env.yml
main: sklearn_iris
.. _pyfunc-create-custom:
******************************
Creating custom Pyfunc models
******************************
MLflow's persistence modules provide convenience functions for creating models with the
``pyfunc`` flavor in a variety of machine learning frameworks (scikit-learn, Keras, Pytorch, and
more); however, they do not cover every use case. For example, you may want to create an MLflow
model with the ``pyfunc`` flavor using a framework that MLflow does not natively support.
Alternatively, you may want to build an MLflow model that executes custom logic when evaluating
queries, such as preprocessing and postprocessing routines. Therefore, ``mlflow.pyfunc``
provides utilities for creating ``pyfunc`` models from arbitrary code and model data.
The :meth:`save_model()` and :meth:`log_model()` methods are designed to support multiple workflows
for creating custom ``pyfunc`` models that incorporate custom inference logic and artifacts
that the logic may require.
An `artifact` is a file or directory, such as a serialized model or a CSV. For example, a
serialized TensorFlow graph is an artifact. An MLflow model directory is also an artifact.
.. _pyfunc-create-custom-workflows:
Workflows
#########
:meth:`save_model()` and :meth:`log_model()` support the following workflows:
1. Programmatically defining a new MLflow model, including its attributes and artifacts.
Given a set of artifact URIs, :meth:`save_model()` and :meth:`log_model()` can
automatically download artifacts from their URIs and create an MLflow model directory.
In this case, you must define a Python class which inherits from :class:`~PythonModel`,
defining ``predict()`` and, optionally, ``load_context()``. An instance of this class is
specified via the ``python_model`` parameter; it is automatically serialized and deserialized
as a Python class, including all of its attributes.
2. Interpreting pre-existing data as an MLflow model.
If you already have a directory containing model data, :meth:`save_model()` and
:meth:`log_model()` can import the data as an MLflow model. The ``data_path`` parameter
specifies the local filesystem path to the directory containing model data.
In this case, you must provide a Python module, called a `loader module`. The
loader module defines a ``_load_pyfunc()`` method that performs the following tasks:
- Load data from the specified ``data_path``. For example, this process may include
deserializing pickled Python objects or models or parsing CSV files.
- Construct and return a pyfunc-compatible model wrapper. As in the first
use case, this wrapper must define a ``predict()`` method that is used to evaluate
queries. ``predict()`` must adhere to the :ref:`pyfunc-inference-api`.
The ``loader_module`` parameter specifies the name of your loader module.
For an example loader module implementation, refer to the `loader module
implementation in mlflow.keras <https://github.com/mlflow/mlflow/blob/
74d75109aaf2975f5026104d6125bb30f4e3f744/mlflow/keras.py#L157-L187>`_.
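For illustration, a minimal sketch of the first workflow; the ``AddN`` class and the output
path are examples only, not part of MLflow::

    import mlflow.pyfunc

    class AddN(mlflow.pyfunc.PythonModel):
        def __init__(self, n):
            self.n = n

        def predict(self, context, model_input):
            # model_input arrives as a pandas DataFrame; add ``n`` to every value
            return model_input.apply(lambda column: column + self.n)

    mlflow.pyfunc.save_model(path="add_n_model", python_model=AddN(n=5))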
.. _pyfunc-create-custom-selecting-workflow:
Which workflow is right for my use case?
########################################
We consider the first workflow to be more user-friendly and generally recommend it for the
following reasons:
- It automatically resolves and collects specified model artifacts.
- It automatically serializes and deserializes the ``python_model`` instance and all of
its attributes, reducing the amount of user logic that is required to load the model.
- You can create Models using logic that is defined in the ``__main__`` scope. This allows
custom models to be constructed in interactive environments, such as notebooks and the Python
REPL.
You may prefer the second, lower-level workflow for the following reasons:
- Inference logic is always persisted as code, rather than a Python object. This makes logic
easier to inspect and modify later.
- If you have already collected all of your model data in a single location, the second
workflow allows it to be saved in MLflow format directly, without enumerating constituent
artifacts.
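For illustration, a minimal sketch of the second (loader module) workflow; the module name,
file names, and pickled model are examples only, not part of MLflow::

    # my_loader_module.py
    import pickle

    class _ModelWrapper:
        def __init__(self, model):
            self._model = model

        def predict(self, model_input):
            return self._model.predict(model_input)

    def _load_pyfunc(data_path):
        # ``data_path`` points at the file passed via ``data_path`` in save_model()
        with open(data_path, "rb") as f:
            return _ModelWrapper(pickle.load(f))

The MLflow model directory is then created with::

    mlflow.pyfunc.save_model(
        path="my_model",
        data_path="model.pkl",
        loader_module="my_loader_module",
        code_path=["my_loader_module.py"],
    )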
"""
import importlib
import signal
import sys
import numpy as np
import os
import pandas
import yaml
from copy import deepcopy
import logging
import tempfile
import threading
import collections
import subprocess
from typing import Any, Union, List, Dict, Iterator, Tuple
import mlflow
import mlflow.pyfunc.model
from mlflow.models import Model, ModelSignature, ModelInputExample
from mlflow.models.model import MLMODEL_FILE_NAME
from mlflow.models.utils import _save_example
from mlflow.pyfunc.model import ( # pylint: disable=unused-import
PythonModel,
PythonModelContext,
get_default_conda_env,
)
from mlflow.pyfunc.model import get_default_pip_requirements
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.types import DataType, Schema, TensorSpec
from mlflow.types.utils import clean_tensor_type
from mlflow.utils import PYTHON_VERSION, get_major_minor_py_version
from mlflow.utils.annotations import deprecated
from mlflow.utils.file_utils import _copy_file_or_tree, write_to
from mlflow.utils.model_utils import (
_get_flavor_configuration,
_validate_and_copy_code_paths,
_add_code_from_conf_to_system_path,
)
from mlflow.utils.environment import (
_validate_env_arguments,
_process_pip_requirements,
_process_conda_env,
_CONDA_ENV_FILE_NAME,
_REQUIREMENTS_FILE_NAME,
_CONSTRAINTS_FILE_NAME,
)
from mlflow.utils.docstring_utils import format_docstring, LOG_MODEL_PARAM_DOCS
from mlflow.exceptions import MlflowException
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from mlflow.protos.databricks_pb2 import (
INVALID_PARAMETER_VALUE,
RESOURCE_ALREADY_EXISTS,
RESOURCE_DOES_NOT_EXIST,
)
from scipy.sparse import csc_matrix, csr_matrix
from mlflow.utils.requirements_utils import (
_check_requirement_satisfied,
_parse_requirements,
)
from mlflow.utils import find_free_port
from mlflow.utils.nfs_on_spark import get_nfs_cache_root_dir
FLAVOR_NAME = "python_function"
MAIN = "loader_module"
CODE = "code"
DATA = "data"
ENV = "env"
PY_VERSION = "python_version"
_logger = logging.getLogger(__name__)
PyFuncInput = Union[pandas.DataFrame, np.ndarray, csc_matrix, csr_matrix, List[Any], Dict[str, Any]]
PyFuncOutput = Union[pandas.DataFrame, pandas.Series, np.ndarray, list]
def add_to_model(model, loader_module, data=None, code=None, env=None, **kwargs):
"""
Add a ``pyfunc`` spec to the model configuration.
Defines ``pyfunc`` configuration schema. Caller can use this to create a valid ``pyfunc`` model
flavor out of an existing directory structure. For example, other model flavors can use this to
specify how to use their output as a ``pyfunc``.
NOTE:
All paths are relative to the exported model root directory.
:param model: Existing model.
:param loader_module: The module to be used to load the model.
:param data: Path to the model data.
:param code: Path to the code dependencies.
:param env: Conda environment.
:param kwargs: Additional key-value pairs to include in the ``pyfunc`` flavor specification.
Values must be YAML-serializable.
:return: Updated model configuration.
"""
params = deepcopy(kwargs)
params[MAIN] = loader_module
params[PY_VERSION] = PYTHON_VERSION
if code:
params[CODE] = code
if data:
params[DATA] = data
if env:
params[ENV] = env
return model.add_flavor(FLAVOR_NAME, **params)
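# For illustration only (hypothetical paths): a flavor module that has already serialized its
# model to ``data/model.pkl`` could register a pyfunc entry like
#   add_to_model(mlflow_model, loader_module="mlflow.sklearn", data="data/model.pkl", env="conda.yaml")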
def _load_model_env(path):
"""
Get ENV file string from a model configuration stored in Python Function format.
Returned value is a model-relative path to a Conda Environment file,
or None if none was specified at model save time
"""
return _get_flavor_configuration(model_path=path, flavor_name=FLAVOR_NAME).get(ENV, None)
def _enforce_mlflow_datatype(name, values: pandas.Series, t: DataType):
"""
Enforce the input column type matches the declared in model input schema.
The following type conversions are allowed:
1. object -> string
2. int -> long (upcast)
3. float -> double (upcast)
4. int -> double (safe conversion)
5. np.datetime64[x] -> datetime (any precision)
6. object -> datetime
Any other type mismatch will raise error.
"""
if values.dtype == object and t not in (DataType.binary, DataType.string):
values = values.infer_objects()
if t == DataType.string and values.dtype == object:
# NB: the object can contain any type and we currently cannot cast to pandas Strings
# due to how None is cast
return values
# NB: Comparison of pandas and numpy data type fails when numpy data type is on the left hand
# side of the comparison operator. It works, however, if pandas type is on the left hand side.
# That is because pandas is aware of numpy.
if t.to_pandas() == values.dtype or t.to_numpy() == values.dtype:
# The types are already compatible => conversion is not necessary.
return values
if t == DataType.binary and values.dtype.kind == t.binary.to_numpy().kind:
# NB: bytes in numpy have variable itemsize depending on the length of the longest
# element in the array (column). Since MLflow binary type is length agnostic, we ignore
# itemsize when matching binary columns.
return values
if t == DataType.datetime and values.dtype.kind == t.to_numpy().kind:
# NB: datetime values have variable precision denoted by brackets, e.g. datetime64[ns]
# denotes nanosecond precision. Since MLflow datetime type is precision agnostic, we
# ignore precision when matching datetime columns.
return values
if t == DataType.datetime and values.dtype == object:
# NB: Pyspark date columns get converted to object when converted to a pandas
# DataFrame. To respect the original typing, we convert the column to datetime.
try:
return values.astype(np.datetime64, errors="raise")
except ValueError:
raise MlflowException(
"Failed to convert column {0} from type {1} to {2}.".format(name, values.dtype, t)
)
numpy_type = t.to_numpy()
if values.dtype.kind == numpy_type.kind:
is_upcast = values.dtype.itemsize <= numpy_type.itemsize
elif values.dtype.kind == "u" and numpy_type.kind == "i":
is_upcast = values.dtype.itemsize < numpy_type.itemsize
elif values.dtype.kind in ("i", "u") and numpy_type == np.float64:
# allow (u)int => double conversion
is_upcast = values.dtype.itemsize <= 6
else:
is_upcast = False
if is_upcast:
return values.astype(numpy_type, errors="raise")
else:
# NB: conversion between incompatible types (e.g. floats -> ints or
# double -> float) are not allowed. While supported by pandas and numpy,
# these conversions alter the values significantly.
def all_ints(xs):
return all([pandas.isnull(x) or int(x) == x for x in xs])
hint = ""
if (
values.dtype == np.float64
and numpy_type.kind in ("i", "u")
and values.hasnans
and all_ints(values)
):
hint = (
" Hint: the type mismatch is likely caused by missing values. "
"Integer columns in python can not represent missing values and are therefore "
"encoded as floats. The best way to avoid this problem is to infer the model "
"schema based on a realistic data sample (training dataset) that includes missing "
"values. Alternatively, you can declare integer columns as doubles (float64) "
"whenever these columns may have missing values. See `Handling Integers With "
"Missing Values <https://www.mlflow.org/docs/latest/models.html#"
"handling-integers-with-missing-values>`_ for more details."
)
raise MlflowException(
"Incompatible input types for column {0}. "
"Can not safely convert {1} to {2}.{3}".format(name, values.dtype, numpy_type, hint)
)
def _enforce_tensor_spec(
values: Union[np.ndarray, csc_matrix, csr_matrix], tensor_spec: TensorSpec
):
"""
Enforce the input tensor shape and type matches the provided tensor spec.
"""
expected_shape = tensor_spec.shape
actual_shape = values.shape
actual_type = values.dtype if isinstance(values, np.ndarray) else values.data.dtype
if len(expected_shape) != len(actual_shape):
raise MlflowException(
"Shape of input {0} does not match expected shape {1}.".format(
actual_shape, expected_shape
)
)
for expected, actual in zip(expected_shape, actual_shape):
if expected == -1:
continue
if expected != actual:
raise MlflowException(
"Shape of input {0} does not match expected shape {1}.".format(
actual_shape, expected_shape
)
)
if clean_tensor_type(actual_type) != tensor_spec.type:
raise MlflowException(
"dtype of input {0} does not match expected dtype {1}".format(
values.dtype, tensor_spec.type
)
)
return values
def _enforce_col_schema(pfInput: PyFuncInput, input_schema: Schema):
"""Enforce the input columns conform to the model's column-based signature."""
if input_schema.has_input_names():
input_names = input_schema.input_names()
else:
input_names = pfInput.columns[: len(input_schema.inputs)]
input_types = input_schema.input_types()
new_pfInput = pandas.DataFrame()
for i, x in enumerate(input_names):
new_pfInput[x] = _enforce_mlflow_datatype(x, pfInput[x], input_types[i])
return new_pfInput
def _enforce_tensor_schema(pfInput: PyFuncInput, input_schema: Schema):
"""Enforce the input tensor(s) conforms to the model's tensor-based signature."""
if input_schema.has_input_names():
if isinstance(pfInput, dict):
new_pfInput = dict()
for col_name, tensor_spec in zip(input_schema.input_names(), input_schema.inputs):
if not isinstance(pfInput[col_name], np.ndarray):
raise MlflowException(
"This model contains a tensor-based model signature with input names,"
" which suggests a dictionary input mapping input name to a numpy"
" array, but a dict with value type {0} was found.".format(
type(pfInput[col_name])
)
)
new_pfInput[col_name] = _enforce_tensor_spec(pfInput[col_name], tensor_spec)
elif isinstance(pfInput, pandas.DataFrame):
new_pfInput = dict()
for col_name, tensor_spec in zip(input_schema.input_names(), input_schema.inputs):
new_pfInput[col_name] = _enforce_tensor_spec(
np.array(pfInput[col_name], dtype=tensor_spec.type), tensor_spec
)
else:
raise MlflowException(
"This model contains a tensor-based model signature with input names, which"
" suggests a dictionary input mapping input name to tensor, but an input of"
" type {0} was found.".format(type(pfInput))
)
else:
if isinstance(pfInput, pandas.DataFrame):
new_pfInput = _enforce_tensor_spec(pfInput.to_numpy(), input_schema.inputs[0])
elif isinstance(pfInput, (np.ndarray, csc_matrix, csr_matrix)):
new_pfInput = _enforce_tensor_spec(pfInput, input_schema.inputs[0])
else:
raise MlflowException(
"This model contains a tensor-based model signature with no input names,"
" which suggests a numpy array input, but an input of type {0} was"
" found.".format(type(pfInput))
)
return new_pfInput
def _enforce_schema(pfInput: PyFuncInput, input_schema: Schema):
"""
Enforces that the provided input matches the model's input schema.
For signatures with input names, we check that there are no missing inputs and reorder the
inputs to match the ordering declared in the schema if necessary. Any extra columns are ignored.
For column-based signatures, we make sure the input types either match the types specified in
the schema or can be safely converted to match them.
For tensor-based signatures, we make sure the shape and type of the input match the shape
and type specified in the model's input schema.
"""
if not input_schema.is_tensor_spec():
if isinstance(pfInput, (list, np.ndarray, dict)):
try:
pfInput = pandas.DataFrame(pfInput)
except Exception as e:
raise MlflowException(
"This model contains a column-based signature, which suggests a DataFrame"
" input. There was an error casting the input data to a DataFrame:"
" {0}".format(str(e))
)
if not isinstance(pfInput, pandas.DataFrame):
raise MlflowException(
"Expected input to be DataFrame or list. Found: %s" % type(pfInput).__name__
)
if input_schema.has_input_names():
# make sure there are no missing columns
input_names = input_schema.input_names()
expected_cols = set(input_names)
actual_cols = set()
if len(expected_cols) == 1 and isinstance(pfInput, np.ndarray):
# for schemas with a single column, match input with column
pfInput = {input_names[0]: pfInput}
actual_cols = expected_cols
elif isinstance(pfInput, pandas.DataFrame):
actual_cols = set(pfInput.columns)
elif isinstance(pfInput, dict):
actual_cols = set(pfInput.keys())
missing_cols = expected_cols - actual_cols
extra_cols = actual_cols - expected_cols
# Preserve order from the original columns, since missing/extra columns are likely to
# be in same order.
missing_cols = [c for c in input_names if c in missing_cols]
extra_cols = [c for c in actual_cols if c in extra_cols]
if missing_cols:
raise MlflowException(
"Model is missing inputs {0}."
" Note that there were extra inputs: {1}".format(missing_cols, extra_cols)
)
elif not input_schema.is_tensor_spec():
# The model signature does not specify column names => we can only verify column count.
num_actual_columns = len(pfInput.columns)
if num_actual_columns < len(input_schema.inputs):
raise MlflowException(
"Model inference is missing inputs. The model signature declares "
"{0} inputs but the provided value only has "
"{1} inputs. Note: the inputs were not named in the signature so we can "
"only verify their count.".format(len(input_schema.inputs), num_actual_columns)
)
return (
_enforce_tensor_schema(pfInput, input_schema)
if input_schema.is_tensor_spec()
else _enforce_col_schema(pfInput, input_schema)
)
class PyFuncModel:
"""
MLflow 'python function' model.
Wrapper around model implementation and metadata. This class is not meant to be constructed
directly. Instead, instances of this class are constructed and returned from
:py:func:`load_model() <mlflow.pyfunc.load_model>`.
``model_impl`` can be any Python object that implements the `Pyfunc interface
<https://mlflow.org/docs/latest/python_api/mlflow.pyfunc.html#pyfunc-inference-api>`_, and is
returned by invoking the model's ``loader_module``.
``model_meta`` contains model metadata loaded from the MLmodel file.
"""
def __init__(self, model_meta: Model, model_impl: Any):
if not hasattr(model_impl, "predict"):
raise MlflowException("Model implementation is missing required predict method.")
if not model_meta:
raise MlflowException("Model is missing metadata.")
self._model_meta = model_meta
self._model_impl = model_impl
def predict(self, data: PyFuncInput) -> PyFuncOutput:
"""
Generate model predictions.
If the model contains signature, enforce the input schema first before calling the model
implementation with the sanitized input. If the pyfunc model does not include model schema,
the input is passed to the model implementation as is. See `Model Signature Enforcement
<https://www.mlflow.org/docs/latest/models.html#signature-enforcement>`_ for more details.
:param data: Model input as one of pandas.DataFrame, numpy.ndarray,
scipy.sparse.(csc.csc_matrix | csr.csr_matrix), List[Any], or
Dict[str, numpy.ndarray]
:return: Model predictions as one of pandas.DataFrame, pandas.Series, numpy.ndarray or list.
"""
input_schema = self.metadata.get_input_schema()
if input_schema is not None:
data = _enforce_schema(data, input_schema)
return self._model_impl.predict(data)
@property
def metadata(self):
"""Model metadata."""
if self._model_meta is None:
raise MlflowException("Model is missing metadata.")
return self._model_meta
def __repr__(self):
info = {}
if self._model_meta is not None:
if hasattr(self._model_meta, "run_id") and self._model_meta.run_id is not None:
info["run_id"] = self._model_meta.run_id
if (
hasattr(self._model_meta, "artifact_path")
and self._model_meta.artifact_path is not None
):
info["artifact_path"] = self._model_meta.artifact_path
info["flavor"] = self._model_meta.flavors[FLAVOR_NAME]["loader_module"]
return yaml.safe_dump({"mlflow.pyfunc.loaded_model": info}, default_flow_style=False)
def _warn_dependency_requirement_mismatches(model_path):
"""
Inspects the model's dependencies and prints a warning if the current Python environment
doesn't satisfy them.
"""
req_file_path = os.path.join(model_path, _REQUIREMENTS_FILE_NAME)
if not os.path.exists(req_file_path):
return
try:
mismatch_infos = []
for req in _parse_requirements(req_file_path, is_constraint=False):
req_line = req.req_str
mismatch_info = _check_requirement_satisfied(req_line)
if mismatch_info is not None:
mismatch_infos.append(str(mismatch_info))
if len(mismatch_infos) > 0:
mismatch_str = " - " + "\n - ".join(mismatch_infos)
warning_msg = (
"Detected one or more mismatches between the model's dependencies and the current "
f"Python environment:\n{mismatch_str}"
)
_logger.warning(warning_msg)
except Exception as e:
_logger.warning(
f"Encountered an unexpected error ({repr(e)}) while detecting model dependency "
"mismatches. Set logging level to DEBUG to see the full traceback."
)
_logger.debug("", exc_info=True)
def load_model(
model_uri: str, suppress_warnings: bool = False, dst_path: str = None
) -> PyFuncModel:
"""
Load a model stored in Python function format.
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
- ``mlflow-artifacts:/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:param suppress_warnings: If ``True``, non-fatal warning messages associated with the model
loading process will be suppressed. If ``False``, these warning
messages will be emitted.
:param dst_path: The local filesystem path to which to download the model artifact.
This directory must already exist. If unspecified, a local output
path will be created.
"""
local_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
if not suppress_warnings:
_warn_dependency_requirement_mismatches(local_path)
model_meta = Model.load(os.path.join(local_path, MLMODEL_FILE_NAME))
conf = model_meta.flavors.get(FLAVOR_NAME)
if conf is None:
raise MlflowException(
'Model does not have the "{flavor_name}" flavor'.format(flavor_name=FLAVOR_NAME),
RESOURCE_DOES_NOT_EXIST,
)
model_py_version = conf.get(PY_VERSION)
if not suppress_warnings:
_warn_potentially_incompatible_py_version_if_necessary(model_py_version=model_py_version)
_add_code_from_conf_to_system_path(local_path, conf, code_key=CODE)
data_path = os.path.join(local_path, conf[DATA]) if (DATA in conf) else local_path
model_impl = importlib.import_module(conf[MAIN])._load_pyfunc(data_path)
return PyFuncModel(model_meta=model_meta, model_impl=model_impl)
@deprecated("mlflow.pyfunc.load_model", 1.0)
def load_pyfunc(model_uri, suppress_warnings=False):
"""
Load a model stored in Python function format.
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
- ``mlflow-artifacts:/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:param suppress_warnings: If ``True``, non-fatal warning messages associated with the model
loading process will be suppressed. If ``False``, these warning
messages will be emitted.
"""
return load_model(model_uri, suppress_warnings)
def _warn_potentially_incompatible_py_version_if_necessary(model_py_version=None):
"""
Compares the version of Python that was used to save a given model with the version
of Python that is currently running. If a major or minor version difference is detected,
logs an appropriate warning.
"""
if model_py_version is None:
_logger.warning(
"The specified model does not have a specified Python version. It may be"
" incompatible with the version of Python that is currently running: Python %s",
PYTHON_VERSION,
)
elif get_major_minor_py_version(model_py_version) != get_major_minor_py_version(PYTHON_VERSION):
_logger.warning(
"The version of Python that the model was saved in, `Python %s`, differs"
" from the version of Python that is currently running, `Python %s`,"
" and may be incompatible",
model_py_version,
PYTHON_VERSION,
)
def _get_or_create_model_cache_dir():
nfs_root_dir = get_nfs_cache_root_dir()
if nfs_root_dir is not None:
# On Databricks, '/local_disk0/.ephemeral_nfs' is mounted as an NFS disk;
# the data stored on it is shared with all remote nodes.
root_dir = os.path.join(nfs_root_dir, "models")
os.makedirs(root_dir, exist_ok=True)
tmp_model_dir = tempfile.mkdtemp(dir=root_dir)
# TODO: register deleting tmp_model_dir handler when exit
else:
import atexit
import shutil
tmp_model_dir = tempfile.mkdtemp()
atexit.register(shutil.rmtree, tmp_model_dir, ignore_errors=True)
return tmp_model_dir
_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP = 200
def spark_udf(spark, model_uri, result_type="double", env_manager="local"):
"""
A Spark UDF that can be used to invoke the Python function formatted model.
Parameters passed to the UDF are forwarded to the model as a DataFrame where the column names
are ordinals (0, 1, ...). On some versions of Spark (3.0 and above), it is also possible to
wrap the input in a struct. In that case, the data will be passed as a DataFrame with column
names given by the struct definition (e.g. when invoked as my_udf(struct('x', 'y')), the model
will get the data as a pandas DataFrame with 2 columns 'x' and 'y').
If a model contains a signature, the UDF can be called without specifying column name
arguments. In this case, the UDF will be called with column names from signature, so the
evaluation dataframe's column names must match the model signature's column names.
The predictions are filtered to contain only the columns that can be represented as the
``result_type``. If the ``result_type`` is string or array of strings, all predictions are
converted to string. If the result type is not an array type, the leftmost column with a
matching type is returned.
NOTE: Inputs of type ``pyspark.sql.types.DateType`` are not supported on earlier versions of
Spark (2.4 and below).
.. code-block:: python
:caption: Example
from pyspark.sql.functions import struct
predict = mlflow.pyfunc.spark_udf(spark, "/my/local/model")
df.withColumn("prediction", predict(struct("name", "age"))).show()
:param spark: A SparkSession object.
:param model_uri: The location, in URI format, of the MLflow model with the
:py:mod:`mlflow.pyfunc` flavor. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
- ``mlflow-artifacts:/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:param result_type: the return type of the user-defined function. The value can be either a
``pyspark.sql.types.DataType`` object or a DDL-formatted type string. Only a primitive
type or an array ``pyspark.sql.types.ArrayType`` of primitive type are allowed.
The following classes of result type are supported:
- "int" or ``pyspark.sql.types.IntegerType``: The leftmost integer that can fit in an
``int32`` or an exception if there is none.
- "long" or ``pyspark.sql.types.LongType``: The leftmost long integer that can fit in an
``int64`` or an exception if there is none.
- ``ArrayType(IntegerType|LongType)``: All integer columns that can fit into the requested
size.
- "float" or ``pyspark.sql.types.FloatType``: The leftmost numeric result cast to
``float32`` or an exception if there is none.
- "double" or ``pyspark.sql.types.DoubleType``: The leftmost numeric result cast to
``double`` or an exception if there is none.
- ``ArrayType(FloatType|DoubleType)``: All numeric columns cast to the requested type or
an exception if there are no numeric columns.
- "string" or ``pyspark.sql.types.StringType``: The leftmost column converted to ``string``.
- ``ArrayType(StringType)``: All columns converted to ``string``.
:param env_manager: The environment manager to use in order to create the
software environment for model inference. Default value is ``local``.
The following values are supported:
- ``conda``: (Recommended) Use Conda to restore the software environment
that was used to train the model. Note that environment is only restored
in the context of the PySpark UDF; the software environment outside of
the UDF is unaffected.
- ``local``: Use the current Python environment for model inference, which
may differ from the environment used to train the model and may lead to
errors or invalid predictions.
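For example (the model URI is a placeholder)::

    predict = mlflow.pyfunc.spark_udf(spark, "models:/my_model/1", env_manager="conda")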
:return: Spark UDF that applies the model's ``predict`` method to the data and returns a
type specified by ``result_type``, which by default is a double.
"""
# Scope Spark import to this method so users don't need pyspark to use non-Spark-related
# functionality.
import functools
from mlflow.pyfunc.spark_model_cache import SparkModelCache
from mlflow.utils._spark_utils import _SparkDirectoryDistributor
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import _parse_datatype_string
from pyspark.sql.types import (
ArrayType,
DataType as SparkDataType,
StructType as SparkStructType,
)
from pyspark.sql.types import DoubleType, IntegerType, FloatType, LongType, StringType
from mlflow.models.cli import _get_flavor_backend
if env_manager not in ["local", "conda"]:
raise MlflowException(
f"Illegal env_manager value '{env_manager}'.", error_code=INVALID_PARAMETER_VALUE
)
# Check whether spark is in local or local-cluster mode
# in this case all executors and the driver share the same filesystem
is_spark_in_local_mode = spark.conf.get("spark.master").startswith("local")
# TODO:
# change `should_use_nfs` to be get_nfs_cache_root_dir() is not None
# when NFS optimization added.
should_use_nfs = False
should_use_spark_to_broadcast_file = not (is_spark_in_local_mode or should_use_nfs)
if not isinstance(result_type, SparkDataType):
result_type = _parse_datatype_string(result_type)
elem_type = result_type
if isinstance(elem_type, ArrayType):
elem_type = elem_type.elementType
supported_types = [IntegerType, LongType, FloatType, DoubleType, StringType]
if not any([isinstance(elem_type, x) for x in supported_types]):
raise MlflowException(
message="Invalid result_type '{}'. Result type can only be one of or an array of one "
"of the following types: {}".format(str(elem_type), str(supported_types)),
error_code=INVALID_PARAMETER_VALUE,
)
local_model_path = _download_artifact_from_uri(
artifact_uri=model_uri, output_path=_get_or_create_model_cache_dir()
)
if env_manager == "local":
# Assume the Spark executor Python environment is the same as on the Spark driver side.
_warn_dependency_requirement_mismatches(local_model_path)
_logger.warning(
'Calling `spark_udf()` with `env_manager="local"` does not recreate the same '
"environment that was used during training, which may lead to errors or inaccurate "
'predictions. We recommend specifying `env_manager="conda"`, which automatically '
"recreates the environment that was used to train the model and performs inference "
"in the recreated environment."
)
else:
_logger.info(
"This UDF will use Conda to recreate the model's software environment for inference. "
"This may take extra time during execution."
)
if not sys.platform.startswith("linux"):
# TODO: support killing mlflow server launched in UDF task when spark job canceled
# for non-linux system.
# https://stackoverflow.com/questions/53208/how-do-i-automatically-destroy-child-processes-in-windows
_logger.warning(
"In order to run inference code in restored python environment, PySpark UDF "
"processes spawn MLflow Model servers as child processes. Due to system "
"limitations with handling SIGKILL signals, these MLflow Model server child "
"processes cannot be cleaned up if the Spark Job is canceled."
)
if not should_use_spark_to_broadcast_file:
# Prepare restored environment in driver side if possible.
if env_manager == "conda":
_get_flavor_backend(local_model_path, no_conda=False, install_mlflow=False).prepare_env(
model_uri=local_model_path
)
# Broadcast local model directory to remote worker if needed.
if should_use_spark_to_broadcast_file:
archive_path = SparkModelCache.add_local_model(spark, local_model_path)
model_metadata = Model.load(os.path.join(local_model_path, MLMODEL_FILE_NAME))
def _predict_row_batch(predict_fn, args):
input_schema = model_metadata.get_input_schema()
pdf = None
for x in args:
if type(x) == pandas.DataFrame:
if len(args) != 1:
raise Exception(
"If passing a StructType column, there should be only one "
"input column, but got %d" % len(args)
)
pdf = x
if pdf is None:
args = list(args)
if input_schema is None:
names = [str(i) for i in range(len(args))]
else:
names = input_schema.input_names()
if len(args) > len(names):
args = args[: len(names)]
if len(args) < len(names):
raise MlflowException(
"Model input is missing columns. Expected {0} input columns {1},"
" but the model received only {2} unnamed input columns"
" (Since the columns were passed unnamed they are expected to be in"
" the order specified by the schema).".format(len(names), names, len(args))
)
pdf = pandas.DataFrame(data={names[i]: x for i, x in enumerate(args)}, columns=names)
result = predict_fn(pdf)
if not isinstance(result, pandas.DataFrame):
result = pandas.DataFrame(data=result)
elem_type = result_type.elementType if isinstance(result_type, ArrayType) else result_type
if type(elem_type) == IntegerType:
result = result.select_dtypes(
[np.byte, np.ubyte, np.short, np.ushort, np.int32]
).astype(np.int32)
elif type(elem_type) == LongType:
result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort, int])
elif type(elem_type) == FloatType:
result = result.select_dtypes(include=(np.number,)).astype(np.float32)
elif type(elem_type) == DoubleType:
result = result.select_dtypes(include=(np.number,)).astype(np.float64)
if len(result.columns) == 0:
raise MlflowException(
message="The the model did not produce any values compatible with the requested "
"type '{}'. Consider requesting udf with StringType or "
"Arraytype(StringType).".format(str(elem_type)),
error_code=INVALID_PARAMETER_VALUE,
)
if type(elem_type) == StringType:
result = result.applymap(str)
if type(result_type) == ArrayType:
return pandas.Series(result.to_numpy().tolist())
else:
return result[result.columns[0]]
result_type_hint = (
pandas.DataFrame if isinstance(result_type, SparkStructType) else pandas.Series
)
@pandas_udf(result_type)
def udf(
iterator: Iterator[Tuple[Union[pandas.Series, pandas.DataFrame], ...]]
) -> Iterator[result_type_hint]:
# importing here to prevent circular import
from mlflow.pyfunc.scoring_server.client import ScoringServerClient
# Note: this is a pandas udf function in iteration style, which takes an iterator of
# tuple of pandas.Series and outputs an iterator of pandas.Series.
scoring_server_proc = None
# TODO: Support virtual env.
#
# TODO: For conda/virtualenv restored env cases,
# For each individual python process (driver side), create individual and temporary
# conda env dir / virtualenv env dir and when process exit,
# delete the temporary env dir.
# The reason is
# 1. env dir might be a large size directory and cleaning it when process exit
# help saving disk space.
# 2. We have conda package cache dir and pip cache dir which are shared across all
# python processes which help reducing downloading time.
# 3. Avoid race conditions related issues.
#
# TODO:
# For NFS available case, set conda env dir / virtualenv env dir in sub-directory under
# NFS directory, and in spark driver side prepare restored env once, and then all
# spark UDF tasks running on spark workers can skip re-creating the restored env.
if env_manager == "conda":
server_port = find_free_port()
if should_use_spark_to_broadcast_file:
local_model_path_on_executor = _SparkDirectoryDistributor.get_or_extract(
archive_path
)
# Call "prepare_env" in advance in order to reduce scoring server launch time.
# So that we can use a shorter timeout when call `client.wait_server_ready`,
# otherwise we have to set a long timeout for `client.wait_server_ready` time,
# this prevents spark UDF task failing fast if other exception raised when scoring
# server launching.
_get_flavor_backend(
local_model_path_on_executor, no_conda=False, install_mlflow=False
).prepare_env(model_uri=local_model_path_on_executor)
else:
local_model_path_on_executor = local_model_path
# launch scoring server
# TODO: adjust timeout for server requests handler.
scoring_server_proc = _get_flavor_backend(
local_model_path_on_executor, no_conda=False, workers=1, install_mlflow=False
).serve(
model_uri=local_model_path_on_executor,
port=server_port,
host="127.0.0.1",
enable_mlserver=False,
synchronous=False,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
server_tail_logs = collections.deque(maxlen=_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP)
def server_redirect_log_thread_func(child_stdout):
for line in child_stdout:
if isinstance(line, bytes):
decoded = line.decode()
else:
decoded = line
server_tail_logs.append(decoded)
sys.stdout.write("[model server] " + decoded)
server_redirect_log_thread = threading.Thread(
target=server_redirect_log_thread_func, args=(scoring_server_proc.stdout,)
)
server_redirect_log_thread.daemon = True
server_redirect_log_thread.start()
client = ScoringServerClient("127.0.0.1", server_port)
try:
client.wait_server_ready(timeout=90, scoring_server_proc=scoring_server_proc)
except Exception:
err_msg = "During spark UDF task execution, mlflow model server failed to launch. "
if len(server_tail_logs) == _MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP:
err_msg += (
f"Last {_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP} "
"lines of MLflow model server output:\n"
)
else:
err_msg += "MLflow model server output:\n"
err_msg += "".join(server_tail_logs)
raise MlflowException(err_msg)
def batch_predict_fn(pdf):
return client.invoke(pdf)
elif env_manager == "local":
if should_use_spark_to_broadcast_file:
loaded_model, _ = SparkModelCache.get_or_load(archive_path)
else:
loaded_model = mlflow.pyfunc.load_model(local_model_path)
def batch_predict_fn(pdf):
return loaded_model.predict(pdf)
try:
for input_batch in iterator:
# If the UDF is called with multiple arguments, `input_batch` is a tuple
# composed of several pd.Series/pd.DataFrame objects.
# If the UDF is called with only one argument, `input_batch` is a single
# pd.Series/pd.DataFrame instance.
if isinstance(input_batch, (pandas.Series, pandas.DataFrame)):
# UDF is called with only one argument
row_batch_args = (input_batch,)
else:
row_batch_args = input_batch
yield _predict_row_batch(batch_predict_fn, row_batch_args)
finally:
if scoring_server_proc is not None:
os.kill(scoring_server_proc.pid, signal.SIGTERM)
udf.metadata = model_metadata
@functools.wraps(udf)
def udf_with_default_cols(*args):
if len(args) == 0:
input_schema = model_metadata.get_input_schema()
if input_schema and len(input_schema.inputs) > 0:
if input_schema.has_input_names():
input_names = input_schema.input_names()
return udf(*input_names)
else:
raise MlflowException(
message="Cannot apply udf because no column names specified. The udf "
"expects {} columns with types: {}. Input column names could not be "
"inferred from the model signature (column names not found).".format(
len(input_schema.inputs),
input_schema.inputs,
),
error_code=INVALID_PARAMETER_VALUE,
)
else:
raise MlflowException(
"Attempting to apply udf on zero columns because no column names were "
"specified as arguments or inferred from the model signature.",
error_code=INVALID_PARAMETER_VALUE,
)
else:
return udf(*args)
return udf_with_default_cols
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="scikit-learn"))
def save_model(
path,
loader_module=None,
data_path=None,
code_path=None,
conda_env=None,
mlflow_model=None,
python_model=None,
artifacts=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
pip_requirements=None,
extra_pip_requirements=None,
**kwargs,
):
"""
save_model(path, loader_module=None, data_path=None, code_path=None, conda_env=None,\
mlflow_model=Model(), python_model=None, artifacts=None)
Save a Pyfunc model with custom inference logic and optional data dependencies to a path on the
local filesystem.
For information about the workflows that this method supports, please see :ref:`"workflows for
creating custom pyfunc models" <pyfunc-create-custom-workflows>` and
:ref:`"which workflow is right for my use case?" <pyfunc-create-custom-selecting-workflow>`.
Note that the parameters of the second workflow (``loader_module``, ``data_path``) and the
parameters of the first workflow (``python_model``, ``artifacts``) cannot be
specified together.
:param path: The path to which to save the Python model.
:param loader_module: The name of the Python module that is used to load the model
from ``data_path``. This module must define a method with the prototype
``_load_pyfunc(data_path)``. If not ``None``, this module and its
dependencies must be included in one of the following locations:
- The MLflow library.
- Package(s) listed in the model's Conda environment, specified by
the ``conda_env`` parameter.
- One or more of the files specified by the ``code_path`` parameter.
:param data_path: Path to a file or directory containing model data.
:param code_path: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path before the model is loaded.
:param conda_env: {{ conda_env }}
:param mlflow_model: :py:mod:`mlflow.models.Model` configuration to which to add the
**python_function** flavor.
:param python_model: An instance of a subclass of :class:`~PythonModel`. This class is
serialized using the CloudPickle library. Any dependencies of the class
should be included in one of the following locations:
- The MLflow library.
- Package(s) listed in the model's Conda environment, specified by
the ``conda_env`` parameter.
- One or more of the files specified by the ``code_path`` parameter.
Note: If the class is imported from another module, as opposed to being
defined in the ``__main__`` scope, the defining module should also be
included in one of the listed locations.
:param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs
are resolved to absolute filesystem paths, producing a dictionary of
``<name, absolute_path>`` entries. ``python_model`` can reference these
resolved entries as the ``artifacts`` property of the ``context`` parameter
in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
For example, consider the following ``artifacts`` dictionary::
{
"my_file": "s3://my-bucket/path/to/my/file"
}
In this case, the ``"my_file"`` artifact is downloaded from S3. The
``python_model`` can then refer to ``"my_file"`` as an absolute filesystem
path via ``context.artifacts["my_file"]``.
If ``None``, no artifacts are added to the model.
:param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example can be a Pandas DataFrame where the given
example will be serialized to json using the Pandas split-oriented
format, or a numpy array where the example will be serialized to json
by converting it to a list. Bytes are base64-encoded.
:param pip_requirements: {{ pip_requirements }}
:param extra_pip_requirements: {{ extra_pip_requirements }}
"""
_validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)
mlflow_model = kwargs.pop("model", mlflow_model)
if len(kwargs) > 0:
raise TypeError("save_model() got unexpected keyword arguments: {}".format(kwargs))
if code_path is not None:
if not isinstance(code_path, list):
raise TypeError("Argument code_path should be a list, not {}".format(type(code_path)))
first_argument_set = {
"loader_module": loader_module,
"data_path": data_path,
}
second_argument_set = {
"artifacts": artifacts,
"python_model": python_model,
}
first_argument_set_specified = any([item is not None for item in first_argument_set.values()])
second_argument_set_specified = any([item is not None for item in second_argument_set.values()])
if first_argument_set_specified and second_argument_set_specified:
raise MlflowException(
message=(
"The following sets of parameters cannot be specified together: {first_set_keys}"
" and {second_set_keys}. All parameters in one set must be `None`. Instead, found"
" the following values: {first_set_entries} and {second_set_entries}".format(
first_set_keys=first_argument_set.keys(),
second_set_keys=second_argument_set.keys(),
first_set_entries=first_argument_set,
second_set_entries=second_argument_set,
)
),
error_code=INVALID_PARAMETER_VALUE,
)
elif (loader_module is None) and (python_model is None):
msg = (
"Either `loader_module` or `python_model` must be specified. A `loader_module` "
"should be a python module. A `python_model` should be a subclass of PythonModel"
)
raise MlflowException(message=msg, error_code=INVALID_PARAMETER_VALUE)
if os.path.exists(path):
raise MlflowException(
message="Path '{}' already exists".format(path), error_code=RESOURCE_ALREADY_EXISTS
)
os.makedirs(path)
if mlflow_model is None:
mlflow_model = Model()
if signature is not None:
mlflow_model.signature = signature
if input_example is not None:
_save_example(mlflow_model, input_example, path)
if first_argument_set_specified:
return _save_model_with_loader_module_and_data_path(
path=path,
loader_module=loader_module,
data_path=data_path,
code_paths=code_path,
conda_env=conda_env,
mlflow_model=mlflow_model,
pip_requirements=pip_requirements,
extra_pip_requirements=extra_pip_requirements,
)
elif second_argument_set_specified:
return mlflow.pyfunc.model._save_model_with_class_artifacts_params(
path=path,
python_model=python_model,
artifacts=artifacts,
conda_env=conda_env,
code_paths=code_path,
mlflow_model=mlflow_model,
pip_requirements=pip_requirements,
extra_pip_requirements=extra_pip_requirements,
)
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="scikit-learn"))
def log_model(
artifact_path,
loader_module=None,
data_path=None,
code_path=None,
conda_env=None,
python_model=None,
artifacts=None,
registered_model_name=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
pip_requirements=None,
extra_pip_requirements=None,
):
"""
Log a Pyfunc model with custom inference logic and optional data dependencies as an MLflow
artifact for the current run.
For information about the workflows that this method supports, see :ref:`Workflows for
creating custom pyfunc models <pyfunc-create-custom-workflows>` and
:ref:`Which workflow is right for my use case? <pyfunc-create-custom-selecting-workflow>`.
You cannot specify the parameters of the second workflow (``loader_module``, ``data_path``)
together with the parameters of the first workflow (``python_model``, ``artifacts``).
:param artifact_path: The run-relative artifact path to which to log the Python model.
:param loader_module: The name of the Python module that is used to load the model
from ``data_path``. This module must define a method with the prototype
``_load_pyfunc(data_path)``. If not ``None``, this module and its
dependencies must be included in one of the following locations:
- The MLflow library.
- Package(s) listed in the model's Conda environment, specified by
the ``conda_env`` parameter.
- One or more of the files specified by the ``code_path`` parameter.
:param data_path: Path to a file or directory containing model data.
:param code_path: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path before the model is loaded.
:param conda_env: {{ conda_env }}
:param python_model: An instance of a subclass of :class:`~PythonModel`. This class is
serialized using the CloudPickle library. Any dependencies of the class
should be included in one of the following locations:
- The MLflow library.
- Package(s) listed in the model's Conda environment, specified by
the ``conda_env`` parameter.
- One or more of the files specified by the ``code_path`` parameter.
Note: If the class is imported from another module, as opposed to being
defined in the ``__main__`` scope, the defining module should also be
included in one of the listed locations.
:param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs
are resolved to absolute filesystem paths, producing a dictionary of
``<name, absolute_path>`` entries. ``python_model`` can reference these
resolved entries as the ``artifacts`` property of the ``context`` parameter
in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
For example, consider the following ``artifacts`` dictionary::
{
"my_file": "s3://my-bucket/path/to/my/file"
}
In this case, the ``"my_file"`` artifact is downloaded from S3. The
``python_model`` can then refer to ``"my_file"`` as an absolute filesystem
path via ``context.artifacts["my_file"]``.
If ``None``, no artifacts are added to the model.
:param registered_model_name: This argument may change or be removed in a
future release without warning. If given, create a model
version under ``registered_model_name``, also creating a
registered model if one with the given name does not exist.
:param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example can be a Pandas DataFrame where the given
example will be serialized to json using the Pandas split-oriented
format, or a numpy array where the example will be serialized to json
by converting it to a list. Bytes are base64-encoded.
:param await_registration_for: Number of seconds to wait for the model version to finish
being created and is in ``READY`` status. By default, the function
waits for five minutes. Specify 0 or None to skip waiting.
:param pip_requirements: {{ pip_requirements }}
:param extra_pip_requirements: {{ extra_pip_requirements }}
:return: A :py:class:`ModelInfo <mlflow.models.model.ModelInfo>` instance that contains the
metadata of the logged model.
"""
return Model.log(
artifact_path=artifact_path,
flavor=mlflow.pyfunc,
loader_module=loader_module,
data_path=data_path,
code_path=code_path,
python_model=python_model,
artifacts=artifacts,
conda_env=conda_env,
registered_model_name=registered_model_name,
signature=signature,
input_example=input_example,
await_registration_for=await_registration_for,
pip_requirements=pip_requirements,
extra_pip_requirements=extra_pip_requirements,
)
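# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal, hedged example of the ``python_model`` parameter documented above.
# The class name ``AddN`` and the constant 5 are assumptions for illustration;
# the pattern (subclass PythonModel, implement predict, pass an instance to
# log_model) is the contract the docstring describes.
def _example_log_python_model():
    import mlflow.pyfunc

    class AddN(mlflow.pyfunc.PythonModel):
        def __init__(self, n):
            self.n = n

        def predict(self, context, model_input):
            # model_input is expected to be a pandas DataFrame
            return model_input.apply(lambda column: column + self.n)

    # CloudPickle serializes AddN together with the logged model
    mlflow.pyfunc.log_model(artifact_path="add_n_model", python_model=AddN(n=5))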
def _save_model_with_loader_module_and_data_path(
path,
loader_module,
data_path=None,
code_paths=None,
conda_env=None,
mlflow_model=None,
pip_requirements=None,
extra_pip_requirements=None,
):
"""
Export model as a generic Python function model.
:param path: The path to which to save the Python model.
:param loader_module: The name of the Python module that is used to load the model
from ``data_path``. This module must define a method with the prototype
``_load_pyfunc(data_path)``.
:param data_path: Path to a file or directory containing model data.
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path before the model is loaded.
:param conda_env: Either a dictionary representation of a Conda environment or the path to a
Conda environment yaml file. If provided, this describes the environment
this model should be run in.
:return: Model configuration containing model info.
"""
data = None
if data_path is not None:
model_file = _copy_file_or_tree(src=data_path, dst=path, dst_dir="data")
data = model_file
code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)
if mlflow_model is None:
mlflow_model = Model()
mlflow.pyfunc.add_to_model(
mlflow_model,
loader_module=loader_module,
code=code_dir_subpath,
data=data,
env=_CONDA_ENV_FILE_NAME,
)
mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))
if conda_env is None:
if pip_requirements is None:
default_reqs = get_default_pip_requirements()
# To ensure `_load_pyfunc` can successfully load the model during the dependency
# inference, `mlflow_model.save` must be called beforehand to save an MLmodel file.
inferred_reqs = mlflow.models.infer_pip_requirements(
path,
FLAVOR_NAME,
fallback=default_reqs,
)
default_reqs = sorted(set(inferred_reqs).union(default_reqs))
else:
default_reqs = None
conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
default_reqs,
pip_requirements,
extra_pip_requirements,
)
else:
conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)
with open(os.path.join(path, _CONDA_ENV_FILE_NAME), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
# Save `constraints.txt` if necessary
if pip_constraints:
write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))
# Save `requirements.txt`
write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))
return mlflow_model
loader_template = """
import importlib
import os
import sys
def load_pyfunc():
{update_path}return importlib.import_module('{main}')._load_pyfunc('{data_path}')
"""
|
openface_detector.py
|
import cv2 as cv
import util
import threading
import detect_it_all_bot
import time
from faces_util import detect_faces
from sklearn.svm import SVC
from sklearn.preprocessing import LabelEncoder, Normalizer
import openface_util
import pandas as pd
import numpy as np
from scipy.spatial import distance
class OpenfaceDetector():
def __init__(self,
classifier_path: str,
eye_classifier_path: str,
model_path: str,
labels_path,
reps_path,
cap: util.BufferlessVideoCapture,
color=(255, 0, 0),
minimum_probability: float = 0.5,
cooldown: float = 30,
img_dim: int = 96,
visible: bool = False,
cuda: bool = False,
openface_dir: str = "/usr/local/lib/python3.9/dist-packages/openface"):
self.running = True
self.callback = self.log_callback
self.face_classifier = cv.CascadeClassifier(classifier_path)
self.eye_classifier = cv.CascadeClassifier(eye_classifier_path)
self.net = openface_util.TorchNeuralNet(
openface_dir, model_path, imgDim=img_dim, cuda=cuda)
self.labels_path = labels_path
self.reps_path = reps_path
self.cap = cap
self.color = color
self.minimum_probability = minimum_probability
self.cooldown = cooldown
self.visible = visible
self.users = set()
self.user_cooldown = {}
self.train()
t = threading.Thread(target=self._run_detection)
t.daemon = True
t.start()
def train(self):
self.labels = pd.read_csv(self.labels_path, header=None)[0].to_list()
self.le = LabelEncoder().fit(self.labels)
num_classes = len(self.le.classes_)
self.norm = Normalizer(norm='l2')
print(f"Training for {num_classes} classes.")
t0 = time.time()
self.reps = pd.read_csv(self.reps_path, header=None).values
self.reps = self.norm.transform(self.reps)
labels_num = self.le.transform(self.labels)
self.classifier = SVC(C=1, kernel='linear', probability=True)
self.classifier.fit(self.reps, labels_num)
took = time.time() - t0
print(f"Training took {took}")
def log_callback(self, chat_id, detection_text, frame):
print(f"{chat_id}: {detection_text}")
def stop(self):
self.running = False
self.net.close()
def _run_detection(self):
while self.running:
if len(self.users) > 0 or self.visible:
frame = self.cap.read()
faces = self.detect_in_frame(frame)
if len(faces) > 0:
detection_text = f"{len(faces)} faces detected."
detected = False
for face in faces:
(x, y, w, h) = face["bbox"]
#face_id = face["face_id"]
name = face["name"]
confidence = face["confidence"]
if self.visible:
text = f"{name}: {confidence:.2f}"
cv.rectangle(
frame, (x, y), (x + w, y + h), self.color, 2)
cv.putText(frame, text, (x, y - 5),
cv.FONT_HERSHEY_SIMPLEX, 0.5, self.color, 1)
detected = False  # detected or confidence >= self.minimum_probability
if detected:
for user_id in self.users:
if self.user_cooldown[user_id] < time.time():
self.user_cooldown[user_id] = time.time(
) + self.cooldown
self.callback(user_id, detection_text, frame)
if self.visible:
cv.imshow('openface', frame)
cv.waitKey(1)
def detect_in_frame(self, frame):
recognized_faces = []
faces = detect_faces(frame,
self.face_classifier,
self.eye_classifier,
desired_face_width=96,
desired_face_height=96,)
for face in faces:
rep = self.net.forward(face.mat)
rep = self.norm.transform(rep.reshape(1, -1))
predictions = self.classifier.predict_proba(rep).ravel()
#(distance, name) = self.get_closest(rep)
max_i = np.argmax(predictions)
name = self.le.inverse_transform([max_i])[0]
confidence = predictions[max_i]
recognized_faces.append({
"bbox": face.bbox,
"name": name,
"face_id": max_i,
"confidence": confidence,
})
return recognized_faces
def get_closest(self, frame_rep):
distances = []
for i, rep in enumerate(self.reps):
d = np.dot(frame_rep, rep)
name = self.labels[i]
distances.append((d, name))
distances = sorted(distances, reverse=True, key=lambda v: v[0])
print()
for d in distances:
print(d)
print()
(d, name) = distances[0]
return (d, name)
def describe(self):
return f"""
This is an OpenCV cascade classifier
to detect faces and Openface to
recognize faces.
Just send '/detect' and we are ready.
"""
def detect(self, user_id, args):
self.users.add(user_id)
self.user_cooldown[user_id] = 0
return "Detection in progress."
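# --- Editor's illustrative sketch (not part of the original file) ---
# get_closest() compares embeddings with a plain dot product; this works as a
# cosine similarity only because train() L2-normalizes the representations.
# A tiny self-contained check of that equivalence (the vectors are made up):
def _cosine_similarity_example():
    a = np.array([3.0, 4.0])
    b = np.array([4.0, 3.0])
    a_n = a / np.linalg.norm(a)
    b_n = b / np.linalg.norm(b)
    cosine = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
    assert abs(np.dot(a_n, b_n) - cosine) < 1e-9
    return cosine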
def main():
parser = util.HelperParser(
description='OpenCV cascade classifier and Openface face recognizer.')
parser.add_argument('-k', '--token', required=True,
help='The telegram bot token.')
parser.add_argument('-p', '--password', required=True,
help='The telegram bot client password.')
parser.add_argument('-c', '--classifier', required=True,
help='The classifier to be used.')
parser.add_argument('-e', '--eye_classifier', required=True,
help='The eye classifier to be used.')
parser.add_argument('-t', '--torch_face_model', required=True,
help='The torch network model to be used.')
parser.add_argument('-l', '--labels', required=True,
help='The labels path.')
parser.add_argument('-s', '--representations', required=True,
help='The representations path.')
parser.add_argument('-d', '--cooldown', default=30, type=float,
help='The cooldown after a detection in seconds. Default: 30')
parser.add_argument('-m', '--minimum_probability', default=50, type=float,
help='The minimum probability to accept a face as a match. Default: 50')
parser.add_argument(
'-u', '--url', help='The stream name (can be an url or the device name).')
parser.add_argument('-v', '--visible', action='store_true',
help='Show detect window. Default: False')
args = parser.parse_args()
cap = util.BufferlessVideoCapture(args.url)
if not cap.isOpened():
print("Cannot open stream")
exit()
else:
print("Stream opened")
detector = OpenfaceDetector(
args.classifier,
args.eye_classifier,
args.torch_face_model,
args.labels,
args.representations,
cap,
minimum_probability=args.minimum_probability,
cooldown=args.cooldown,
visible=args.visible,
)
bot = detect_it_all_bot.DetectItAllBot(args.token, args.password, detector)
detector.callback = bot.detection_callback
def stop():
cap.release()
detector.stop()
bot.stop()
killer = util.GracefulKiller()
killer.exit_func = stop
bot.start()
if __name__ == "__main__":
main()
|
tray_log.py
|
# (C) unresolved-external@singu-lair.com released under the MIT license (see LICENSE)
import copy
import datetime
import os
import threading
import time
import win32con
import win32gui
import common
def threaded_function(thread_data, thread_static):
while not thread_data.stop:
update_hwnd = None
with thread_static.lock:
if thread_static.queue:
thread_static.memlog.extend(thread_static.queue)
#for message in thread_static.queue:
# with open('gw2-addon-updater.log', 'a') as logfile:
# logfile.write('{}\n'.format(message))
del thread_static.queue[:]
if len(thread_static.memlog) > thread_static.memmax:
del thread_static.memlog[:(len(thread_static.memlog)-thread_static.memmax)]
update_hwnd = thread_static.hWnd
if update_hwnd is not None:
win32gui.SendMessage(update_hwnd, win32con.WM_COMMAND, common.commands.UPDATE_LOG, 0)
time.sleep(0.01)
class thread_data_type:
stop = False
class thread_static_type:
lock = threading.Lock()
queue = []
memlog = []
memmax = 1000
hWnd = None
class log:
thread = None
thread_data = None
thread_static = thread_static_type()
def start(self):
if self.thread is not None: return
self.thread_data = thread_data_type()
self.thread = threading.Thread(name = 'MemLogThread', target = threaded_function, args = (self.thread_data, self.thread_static))
self.thread.start()
def stop(self):
if self.thread is None: return
if self.thread_data is None: return
self.thread_data.stop = True
self.thread = None
def bind_hwnd(self, hWnd):
with self.thread_static.lock:
self.thread_static.hWnd = hWnd
def log(self, message, info = False):
with self.thread_static.lock:
self.thread_static.queue.append(message)
def log_ln(self, message, info = False):
self.log(message.rstrip('\r\n'))
def log_ts(self, message, info = False):
self.log_ln('{}: {}'.format(datetime.datetime.fromtimestamp(int(time.time())), message))
def extract(self):
with self.thread_static.lock:
log = copy.deepcopy(self.thread_static.memlog)
return log
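# --- Editor's illustrative sketch (not part of the original file) ---
# A minimal, hedged example of driving the log class above: start the
# background flusher, append a few messages, then read the in-memory buffer
# back. The message text is made up; bind_hwnd() is only needed when a window
# should be notified through WM_COMMAND.
def _example_usage():
    memlog = log()
    memlog.start()
    memlog.log_ts('updater started')
    memlog.log_ln('done\r\n')
    time.sleep(0.1)               # give the worker thread a chance to flush the queue
    snapshot = memlog.extract()   # deep copy of the in-memory log
    memlog.stop()
    return snapshot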
|
platform.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018-2021 Accenture Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import concurrent.futures
import os
import sys
import signal
import time
import threading
import uuid
from asyncio import QueueEmpty
from queue import Queue, Empty
from mercury.system.config_util import ConfigReader
from mercury.system.connector import NetworkConnector
from mercury.system.distributed_trace import DistributedTrace
from mercury.system.diskqueue import ElasticQueue
from mercury.system.logger import LoggingService
from mercury.system.models import EventEnvelope, AppException, TraceInfo
from mercury.system.singleton import Singleton
from mercury.system.utility import Utility, FunctionType
from mercury.system.throttle import Throttle
class ServiceQueue:
def __init__(self, loop, executor, queue, route, user_function, total_instances):
self.platform = Platform()
self.util = Utility()
self.log = self.platform.log
queue_dir = self.util.normalize_path(self.platform.work_dir + "/queues/" + self.platform.get_origin())
self.disk_queue = ElasticQueue(queue_dir=queue_dir, queue_id=route)
self._loop = loop
self._executor = executor
self.queue = queue
self.route = route
self.user_function = user_function
self.ready_queue = asyncio.Queue(loop=self._loop)
self.worker_list = dict()
self._peek_worker = None
self._buffering = True
self._interceptor = total_instances == 0
self._singleton = True if total_instances < 1 else False
self._loop.create_task(self.listen(total_instances))
def peek_next_worker(self):
if self._peek_worker is None:
self._peek_worker = self._fetch_next_worker()
return self._peek_worker
def get_next_worker(self):
if self._peek_worker is not None:
result = self._peek_worker
self._peek_worker = None
return result
return self._fetch_next_worker()
def _fetch_next_worker(self):
try:
worker_number = self.ready_queue.get_nowait()
if worker_number:
self.ready_queue.task_done()
return worker_number
except QueueEmpty:
return None
def send_to_worker(self, item):
worker_number = self.get_next_worker()
if worker_number:
wq = self.worker_list[worker_number]
if wq:
wq.put_nowait(item)
else:
self.log.error("Event for " + self.route + " dropped because worker #"+str(worker_number) + "not found")
else:
self.log.error("Event for " + self.route + " dropped because there are no workers available")
async def listen(self, total_instances):
# create concurrent workers and a queue for each worker
total = 1 if self._singleton else total_instances
for i in range(total):
instance_number = i + 1
worker_queue = asyncio.Queue(loop=self._loop)
self.worker_list[instance_number] = worker_queue
WorkerQueue(self._loop, self._executor, self.queue, worker_queue,
self.route, self.user_function, instance_number, self._singleton, self._interceptor)
# populate the ready queue with an initial set of worker numbers
await self.queue.put(instance_number)
route_type = 'PRIVATE' if self.platform.route_is_private(self.route) else 'PUBLIC'
# minimize logging for temporary inbox that starts with the "r" prefix
if self._interceptor and self.util.is_inbox(self.route):
self.log.debug(route_type+' ' + self.route + " with " + str(total) + " instance" +
('s' if total > 1 else '') + " started")
else:
self.log.info(route_type+' ' + self.route + " with " + str(total) + " instance" +
('s' if total > 1 else '')+" started")
# listen for incoming events
while True:
event = await self.queue.get()
self.queue.task_done()
if event is None:
break
else:
if isinstance(event, int):
# ready signal from a worker
await self.ready_queue.put(event)
if self._buffering:
buffered = self.disk_queue.read()
if buffered:
self.send_to_worker(buffered)
else:
# nothing buffered in disk queue
self._buffering = False
self.disk_queue.close()
if isinstance(event, dict):
# it is a data item
if self._buffering:
# Once buffering is started, continue to spool items to disk to guarantee items in order
await self.disk_queue.write(event)
else:
w = self.peek_next_worker()
if w:
# Nothing buffered in disk queue. Find a worker to receive the item.
self.send_to_worker(event)
else:
# start buffering because there are no available workers
self._buffering = True
await self.disk_queue.write(event)
# tell workers to stop
for i in self.worker_list:
wq = self.worker_list[i]
wq.put_nowait(None)
# destroy disk queue
self.disk_queue.destroy()
# minimize logging for temporary inbox that starts with the "r" prefix
if self._interceptor and self.util.is_inbox(self.route):
self.log.debug(self.route + " stopped")
else:
self.log.info(self.route + " stopped")
class WorkerQueue:
DISTRIBUTED_TRACING = "distributed.tracing"
def __init__(self, loop, executor, manager_queue, worker_queue, route, user_function, instance,
singleton, interceptor):
self.platform = Platform()
self.log = self.platform.log
self._loop = loop
self._executor = executor
self.manager_queue = manager_queue
self.worker_queue = worker_queue
self.route = route
# trace all routes except ws.outgoing
self.tracing = route != 'ws.outgoing'
self.user_function = user_function
self.instance = instance
self.singleton = singleton
self.interceptor = interceptor
self._loop.create_task(self.listen())
self.log.debug(route + " #" + str(self.instance) + " started")
async def listen(self):
while True:
event = await self.worker_queue.get()
self.worker_queue.task_done()
if event is None:
break
else:
# Execute the user function in parallel
if self.interceptor:
self._loop.run_in_executor(self._executor, self.handle_event, event, 0)
elif self.singleton:
self._loop.run_in_executor(self._executor, self.handle_event, event, -1)
else:
self._loop.run_in_executor(self._executor, self.handle_event, event, self.instance)
self.log.debug(self.route + " #" + str(self.instance) + " stopped")
def handle_event(self, event, instance):
headers = dict() if 'headers' not in event else event['headers']
body = None if 'body' not in event else event['body']
result = None
error_code = None
error_msg = None
# start distributed tracing if the event contains trace_id and trace_path
if 'trace_id' in event and 'trace_path' in event:
self.platform.start_tracing(self.route, trace_id=event['trace_id'], trace_path=event['trace_path'])
else:
self.platform.start_tracing(self.route)
# execute user function
begin = end = time.time()
try:
if instance == 0:
# service is an interceptor. e.g. inbox for RPC call
self.user_function(EventEnvelope().from_map(event))
elif instance == -1:
# service is a singleton
result = self.user_function(headers, body)
else:
# service with multiple instances
result = self.user_function(headers, body, instance)
end = time.time()
except AppException as e:
error_code = e.get_status()
error_msg = e.get_message()
except ValueError as e:
error_code = 400
error_msg = str(e)
except Exception as e:
error_code = 500
error_msg = str(e)
# execution time is rounded to 3 decimal points
exec_time = round((end - begin) * 1000, 3)
if error_code:
if 'reply_to' in event:
# set exception as result
result = EventEnvelope().set_status(error_code).set_body(error_msg)
else:
self.log.warn(
"Unhandled exception for " + self.route + " - code=" + str(error_code) + ", message=" + error_msg)
#
# interceptor should not send regular response because it will forward the request to another function.
# However, if error_code exists, the system will send the exception response.
# This allows interceptor to simply throw exception to indicate an error case.
#
if 'reply_to' in event and (error_code or not self.interceptor):
reply_to = event['reply_to']
# in case this is an RPC call from within
if reply_to.startswith('->'):
reply_to = reply_to[2:]
response = EventEnvelope().set_to(reply_to)
if not error_code:
response.set_exec_time(exec_time)
if 'extra' in event:
response.set_extra(event['extra'])
if 'cid' in event:
response.set_correlation_id(event['cid'])
if 'trace_id' in event and 'trace_path' in event:
response.set_trace(event['trace_id'], event['trace_path'])
if isinstance(result, EventEnvelope):
for h in result.get_headers():
response.set_header(h, result.get_header(h))
response.set_body(result.get_body())
response.set_status(result.get_status())
else:
response.set_body(result)
try:
self.platform.send_event(response.set_from(self.route))
except Exception as e:
self.log.warn("Event dropped because "+str(e))
# send tracing info to distributed trace logger
trace_info = self.platform.stop_tracing()
if self.tracing and trace_info is not None and isinstance(trace_info, TraceInfo) \
and trace_info.get_id() is not None and trace_info.get_path() is not None:
dt = EventEnvelope().set_to(self.DISTRIBUTED_TRACING).set_body(trace_info.get_annotations())
dt.set_header('origin', self.platform.get_origin())
dt.set_header('id', trace_info.get_id()).set_header('path', trace_info.get_path())
dt.set_header('service', self.route).set_header('start', trace_info.get_start_time())
if 'from' in event:
dt.set_header('from', event['from'])
if not error_code:
dt.set_header('success', 'true')
dt.set_header('exec_time', exec_time)
else:
dt.set_header('success', 'false')
dt.set_header('status', error_code)
dt.set_header('exception', error_msg)
self.platform.send_event(dt)
self._loop.call_soon_threadsafe(self._ack)
def _ack(self):
self.manager_queue.put_nowait(self.instance)
class Inbox:
def __init__(self, platform):
self.begin = time.time()
self.temp_route = 'r.' + (''.join(str(uuid.uuid4()).split('-')))
self.inbox_queue = Queue()
self.platform = platform
self.platform.register(self.temp_route, self.listener, 1, is_private=True)
# inbox is an interceptor service, so its handler must take a single EventEnvelope parameter as below
def listener(self, event: EventEnvelope):
event.set_round_trip((time.time() - self.begin) * 1000)
self.inbox_queue.put(event)
def get_route(self):
return self.temp_route
def get_queue(self):
return self.inbox_queue
def close(self):
self.platform.release(self.temp_route)
@Singleton
class Platform:
SERVICE_QUERY = 'system.service.query'
def __init__(self, work_dir: str = None, log_file: str = None, log_level: str = None, max_threads: int = None,
network_connector: str = None):
if sys.version_info.major < 3:
python_version = str(sys.version_info.major)+"."+str(sys.version_info.minor)
raise RuntimeError("Requires python 3.6 and above. Actual: "+python_version)
self.util = Utility()
self.origin = 'py'+(''.join(str(uuid.uuid4()).split('-')))
self.config = ConfigReader()
my_log_file = log_file if log_file is not None else self.config.get_property('log.filename')
my_log_level = log_level if log_level is not None else self.config.get_property('log.level')
self._max_threads = max_threads if max_threads is not None else self.config.get('max.threads')
self.work_dir = work_dir if work_dir is not None else self.config.get_property('work.directory')
self.log = LoggingService(log_dir=self.util.normalize_path(self.work_dir + "/log"),
log_file=my_log_file, log_level=my_log_level).get_logger()
self._loop = asyncio.new_event_loop()
# DO NOT CHANGE 'distributed.trace.processor' which is an optional user defined trace aggregator
my_tracer = DistributedTrace(self, 'distributed.trace.processor')
my_nc = network_connector if network_connector is not None else self.config.get_property('network.connector')
self._cloud = NetworkConnector(self, my_tracer, self._loop, my_nc, self.origin)
self._function_queues = dict()
self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=self._max_threads)
self.log.info("Concurrent thread pool = "+str(self._max_threads))
#
# Before we figure out how to solve blocking file I/O, we will regulate event output rate.
#
my_test_dir = self.util.normalize_path(self.work_dir + "/test")
if not os.path.exists(my_test_dir):
os.makedirs(my_test_dir)
self._throttle = Throttle(self.util.normalize_path(my_test_dir + "/to_be_deleted"), log=self.log)
self._seq = 0
self.util.cleanup_dir(my_test_dir)
self.log.debug("Estimated processing rate is "+format(self._throttle.get_tps(), ',d') +
" events per second for this computer")
self.running = True
self.stopped = False
# distributed trace sessions
self._traces = {}
# start event loop in a new thread to avoid blocking the main thread
def main_event_loop():
self.log.info("Event system started")
self._loop.run_forever()
self.log.info("Event system stopped")
self._loop.close()
threading.Thread(target=main_event_loop).start()
def get_origin(self):
"""
get the origin ID of this application instance
:return: origin ID
"""
return self.origin
def get_trace_id(self) -> str:
"""
get trace ID for a transaction
:return: trace ID
"""
trace_info = self.get_trace()
return trace_info.get_id() if trace_info is not None else None
def get_trace(self) -> TraceInfo:
"""
get trace info for a transaction
:return:
"""
thread_id = threading.get_ident()
return self._traces[thread_id] if thread_id in self._traces else None
def annotate_trace(self, key: str, value: str):
"""
Annotate a trace at a point of a transaction
:param key: any key
:param value: any value
:return:
"""
trace_info = self.get_trace()
if trace_info is not None and isinstance(trace_info, TraceInfo):
trace_info.annotate(key, value)
def start_tracing(self, route: str, trace_id: str = None, trace_path: str = None):
"""
IMPORTANT: This method is reserved for system use. DO NOT call this from a user application.
:param route: route name
:param trace_id: id
:param trace_path: path such as URI
:return: None
"""
thread_id = threading.get_ident()
self._traces[thread_id] = TraceInfo(route, trace_id, trace_path)
def stop_tracing(self):
"""
IMPORTANT: This method is reserved for system use. DO NOT call this from a user application.
:return: TraceInfo
"""
thread_id = threading.get_ident()
if thread_id in self._traces:
trace_info = self.get_trace()
self._traces.pop(thread_id)
return trace_info
def run_forever(self):
"""
Tell the platform to run in the background until the user presses Ctrl-C or the application is stopped by an admin
:return: None
"""
def graceful_shutdown(signum, frame):
self.log.warn("Control-C detected" if signal.SIGINT == signum else "KILL signal detected")
self.running = False
if threading.current_thread() is threading.main_thread():
signal.signal(signal.SIGTERM, graceful_shutdown)
signal.signal(signal.SIGINT, graceful_shutdown)
# keep the main thread running so CTL-C can be detected
self.log.info("To stop this application, press Control-C")
while self.running:
time.sleep(0.1)
# exit forever loop and ask platform to end event loop
self.stop()
else:
raise ValueError('Unable to register Control-C and KILL signals because this is not the main thread')
def register(self, route: str, user_function: any, total_instances: int, is_private: bool = False) -> None:
"""
Register a user function
:param route: ID of the function
:param user_function: the lambda function given by you
:param total_instances: 1 for singleton or more for concurrency
:param is_private: true if internal function within this application instance
:return:
"""
self.util.validate_service_name(route)
if not isinstance(total_instances, int):
raise ValueError("Expect total_instances to be int, actual: "+str(type(total_instances)))
if total_instances < 1:
raise ValueError("total_instances must be at least 1")
if total_instances > self._max_threads:
raise ValueError("total_instances must not exceed max threads of "+str(self._max_threads))
function_type = self.util.get_function_type(user_function)
if function_type == FunctionType.NOT_SUPPORTED:
raise ValueError("Function signature should be (headers: dict, body: any, instance: int) or " +
"(headers: dict, body: any) or (event: EventEnvelope)")
if route in self._function_queues:
self.log.warn(route+" will be reloaded")
self.release(route)
queue = asyncio.Queue(loop=self._loop)
if function_type == FunctionType.INTERCEPTOR:
self._function_queues[route] = {'queue': queue, 'private': is_private, 'instances': 1}
ServiceQueue(self._loop, self._executor, queue, route, user_function, 0)
elif function_type == FunctionType.REGULAR:
self._function_queues[route] = {'queue': queue, 'private': is_private, 'instances': total_instances}
ServiceQueue(self._loop, self._executor, queue, route, user_function, total_instances)
else:
# function_type == FunctionType.SINGLETON
self._function_queues[route] = {'queue': queue, 'private': is_private, 'instances': 1}
ServiceQueue(self._loop, self._executor, queue, route, user_function, -1)
# advertise the new route to the network
if self._cloud.is_ready() and not is_private:
self._cloud.send_payload({'type': 'add', 'route': route})
def cloud_ready(self):
return self._cloud.is_ready()
def release(self, route: str) -> None:
# this will un-register a route
if not isinstance(route, str):
raise ValueError("Expect route to be str, actual: "+str(type(route)))
if route not in self._function_queues:
raise ValueError("route "+route+" not found")
# advertise the deleted route to the network
if self._cloud.is_ready() and not self.route_is_private(route):
self._cloud.send_payload({'type': 'remove', 'route': route})
self._remove_route(route)
def has_route(self, route: str) -> bool:
if not isinstance(route, str):
raise ValueError("Expect route to be str, actual: "+str(type(route)))
return route in self._function_queues
def get_routes(self, options: str = 'all'):
result = list()
if 'public' == options:
for route in self._function_queues:
if not self.route_is_private(route):
result.append(route)
return result
elif 'private' == options:
for route in self._function_queues:
if self.route_is_private(route):
result.append(route)
return result
elif 'all' == options:
return list(self._function_queues.keys())
else:
return result
def route_is_private(self, route: str) -> bool:
config = self._function_queues[route]
if config and 'private' in config:
return config['private']
else:
return False
def route_instances(self, route: str) -> int:
config = self._function_queues[route]
if config and 'instances' in config:
return config['instances']
else:
return 0
def parallel_request(self, events: list, timeout_seconds: float):
timeout_value = self.util.get_float(timeout_seconds)
if timeout_value <= 0:
raise ValueError("timeout value in seconds must be positive number")
if not isinstance(events, list):
raise ValueError("events must be a list of EventEnvelope")
if len(events) == 0:
raise ValueError("event list is empty")
if len(events) == 1:
result = list()
result.append(self.request(events[0], timeout_value))
return result
for evt in events:
if not isinstance(evt, EventEnvelope):
raise ValueError("events must be a list of EventEnvelope")
# retrieve distributed tracing info if any
trace_info = self.get_trace()
# emulate RPC
inbox = Inbox(self)
temp_route = inbox.get_route()
inbox_queue = inbox.get_queue()
try:
for evt in events:
# restore distributed tracing info from current thread
if trace_info:
if trace_info.get_route() is not None and evt.get_from() is None:
evt.set_from(trace_info.get_route())
if trace_info.get_id() is not None and trace_info.get_path() is not None:
evt.set_trace(trace_info.get_id(), trace_info.get_path())
route = evt.get_to()
evt.set_reply_to(temp_route, me=True)
if route in self._function_queues:
self._loop.call_soon_threadsafe(self._send, route, evt.to_map())
else:
if self._cloud.is_connected():
self._cloud.send_payload({'type': 'event', 'event': evt.to_map()})
else:
raise ValueError("route " + route + " not found")
total_requests = len(events)
result_list = list()
while True:
try:
# wait until all response events are delivered to the inbox
result_list.append(inbox_queue.get(True, timeout_value))
if len(result_list) == len(events):
return result_list
except Empty:
raise TimeoutError('Requests timeout for '+str(round(timeout_value, 3))+" seconds. Expect: " +
str(total_requests) + " responses, actual: " + str(len(result_list)))
finally:
inbox.close()
def request(self, event: EventEnvelope, timeout_seconds: float):
timeout_value = self.util.get_float(timeout_seconds)
if timeout_value <= 0:
raise ValueError("timeout value in seconds must be positive number")
if not isinstance(event, EventEnvelope):
raise ValueError("event object must be an EventEnvelope")
# restore distributed tracing info from current thread
trace_info = self.get_trace()
if trace_info:
if trace_info.get_route() is not None and event.get_from() is None:
event.set_from(trace_info.get_route())
if trace_info.get_id() is not None and trace_info.get_path() is not None:
event.set_trace(trace_info.get_id(), trace_info.get_path())
# emulate RPC
inbox = Inbox(self)
temp_route = inbox.get_route()
inbox_queue = inbox.get_queue()
try:
route = event.get_to()
event.set_reply_to(temp_route, me=True)
if route in self._function_queues:
self._loop.call_soon_threadsafe(self._send, route, event.to_map())
else:
if self._cloud.is_connected():
self._cloud.send_payload({'type': 'event', 'event': event.to_map()})
else:
raise ValueError("route " + route + " not found")
# wait until response event is delivered to the inbox
return inbox_queue.get(True, timeout_value)
except Empty:
raise TimeoutError('Route '+event.get_to()+' timeout for '+str(round(timeout_value, 3))+" seconds")
finally:
inbox.close()
def send_event(self, event: EventEnvelope, broadcast=False) -> None:
if not isinstance(event, EventEnvelope):
raise ValueError("event object must be an EventEnvelope class")
# restore distributed tracing info from current thread
trace_info = self.get_trace()
if trace_info:
if trace_info.get_route() is not None and event.get_from() is None:
event.set_from(trace_info.get_route())
if trace_info.get_id() is not None and trace_info.get_path() is not None:
event.set_trace(trace_info.get_id(), trace_info.get_path())
# regulate rate for best performance
self._seq += 1
self._throttle.regulate_rate(self._seq)
route = event.get_to()
if broadcast:
event.set_broadcast(True)
reply_to = event.get_reply_to()
if reply_to:
target = reply_to[2:] if reply_to.startswith('->') else reply_to
if route == target:
raise ValueError("route and reply_to must not be the same")
if route in self._function_queues:
if event.is_broadcast() and self._cloud.is_connected():
self._cloud.send_payload({'type': 'event', 'event': event.to_map()})
else:
self._loop.call_soon_threadsafe(self._send, route, event.to_map())
else:
if self._cloud.is_connected():
self._cloud.send_payload({'type': 'event', 'event': event.to_map()})
else:
raise ValueError("route "+route+" not found")
def exists(self, routes: any):
if isinstance(routes, str):
single_route = routes
if self.has_route(single_route):
return True
if self.cloud_ready():
event = EventEnvelope()
event.set_to(self.SERVICE_QUERY).set_header('type', 'find').set_header('route', single_route)
result = self.request(event, 8.0)
if isinstance(result, EventEnvelope):
if result.get_body() is not None:
return result.get_body()
if isinstance(routes, list):
if len(routes) > 0:
remote_routes = list()
for r in routes:
if not self.has_route(r):
remote_routes.append(r)
if len(remote_routes) == 0:
return True
if self.cloud_ready():
# tell service query to use the route list in body
event = EventEnvelope()
event.set_to(self.SERVICE_QUERY).set_header('type', 'find')
event.set_header('route', '*').set_body(routes)
result = self.request(event, 8.0)
if isinstance(result, EventEnvelope) and result.get_body() is not None:
return result.get_body()
return False
def _remove_route(self, route):
if route in self._function_queues:
self._send(route, None)
self._function_queues.pop(route)
def _send(self, route, event):
if route in self._function_queues:
config = self._function_queues[route]
if 'queue' in config:
config['queue'].put_nowait(event)
def connect_to_cloud(self):
self._loop.run_in_executor(self._executor, self._cloud.start_connection)
def stop(self):
#
# to allow user application to invoke the "stop" method from a registered service,
# the system must start a new thread so that the service can finish first.
#
if not self.stopped:
self.log.info('Bye')
# guarantee this stop function to execute only once
self.stopped = True
# exit the run_forever loop if any
self.running = False
# in case the calling function has just sent an event asynchronously
time.sleep(0.5)
threading.Thread(target=self._bye).start()
def _bye(self):
def stopping():
route_list = []
for route in self.get_routes():
route_list.append(route)
for route in route_list:
self._remove_route(route)
self._loop.create_task(full_stop())
async def full_stop():
# give time for registered services to stop
await asyncio.sleep(1.0)
queue_dir = self.util.normalize_path(self.work_dir + "/queues/" + self.get_origin())
self.util.cleanup_dir(queue_dir)
self._loop.stop()
self._cloud.close_connection(1000, 'bye', stop_engine=True)
self._loop.call_soon_threadsafe(stopping)
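# --- Editor's illustrative sketch (not part of the original file) ---
# A hedged, minimal example of the register/request flow documented above:
# register a "regular" service with the (headers, body, instance) signature,
# then call it through the RPC emulation in request(). The route name
# 'hello.world' and the payload are assumptions for illustration.
def _example_echo_service():
    platform = Platform()

    def echo(headers: dict, body: any, instance: int):
        # the return value becomes the body of the response event
        return body

    platform.register('hello.world', echo, 10)
    response = platform.request(EventEnvelope().set_to('hello.world').set_body('hi'), 5.0)
    return response.get_body() if isinstance(response, EventEnvelope) else response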
|
Hiwin_RT605_ArmCommand_Socket_20190627164143.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
pos_feedback_times = 0
mode_feedback_times = 0
msg_feedback = 1
#Receive commands from the strategy side and forward them to the control computer over a socket
import socket
##Multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
Socket = 0
data = '0' #initial value of the data to transmit
Arm_feedback = 1 #assume the arm is busy
NAME = 'socket_server'
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # end of generator; an explicit StopIteration would become a RuntimeError under PEP 479 (Python 3.7+)
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
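# --- Editor's illustrative sketch (not part of the original file) ---
# How the switch/case recipe above is used: iterate once over switch(value),
# test candidates with case(...), and use a bare case() as the default branch.
# The candidate values mirror those used in Socket_command below.
def _example_switch_usage(value):
    for case in switch(value):
        if case(Taskcmd.Action_Type.PtoP):
            return 'point-to-point move'
        if case(Taskcmd.Action_Type.Line):
            return 'linear move'
        if case():  # default: matches when no arguments are given
            return 'other action'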
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self, ArmState, SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(False,False)
def point_data(x,y,z,pitch,roll,yaw): ##receive pose data sent from the strategy side
pos.x = x
pos.y = y
pos.z = z
pos.pitch = pitch
pos.roll = roll
pos.yaw = yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ##receive arm-mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = action
socket_cmd.grip = grip
socket_cmd.ra = ra
socket_cmd.setvel = setvel
socket_cmd.setboth = setboth
arm_mode_flag = True
Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ##receive arm speed-mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = speedmode
# def Grip_Mode(req): ##receive gripper action data sent from the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_talker(): ##create the Server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = 1
# rospy.loginfo(state)
pub.publish(state)
rate.sleep()
# a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
# s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
# b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
#rospy.spin() ## spin one
##------------server side end-------
##----------socket packet transmission--------------##
##---------------socket transmission of arm commands-----------------
def Socket_command():
global arm_mode_flag,speed_mode_flag,point_data_flag
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
#-------Set arm speed--------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
#-------Set arm delay time--------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
#-------Set arm rapid & safe mode--------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
socket_cmd.action = 5 ##switch back to the initial mode state
Socket.send(data.encode('utf-8')) #send over the socket; the str is encoded to bytes for transmission
# Socket_sent_flag = True
# socket_client_sent_flag(Socket_sent_flag)
##-----------socket client--------
def socket_client():
global Socket,Arm_feedback,data,Socket_sent_flag
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(Socket.recv(1024))
while 1:
feedback_str = Socket.recv(1024)
#the arm side reports the arm state
if str(feedback_str[2]) == '48':# F: the arm is Ready and can accept the next motion command
state_feedback.ArmState = False
# Arm_feedback = 0
# socket_client_arm_state(Arm_feedback)
#print("isbusy false")
if str(feedback_str[2]) == '49':# T: the arm is busy and cannot execute the next motion command
state_feedback.ArmState = True
# Arm_feedback = 1
# socket_client_arm_state(Arm_feedback)
#print("isbusy true")
if str(feedback_str[2]) == '54':# 6: the strategy has finished
state_feedback.ArmState = 6
# Arm_feedback = 6
# socket_client_arm_state(Arm_feedback)
print("shutdown")
#check the send flag
if str(feedback_str[4]) == '48':#feedback 0: false
state_feedback.SentFlag = False
# Socket_sent_flag = False
# socket_client_sent_flag(Socket_sent_flag)
if str(feedback_str[4]) == '49':#feedback 1: true
state_feedback.SentFlag = True
# Socket_sent_flag = True
# socket_client_sent_flag(Socket_sent_flag)
##---------------socket 傳輸手臂命令 end-----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
break
rospy.on_shutdown(myhook)
Socket.close()
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 5 ##switch to the initial mode state
t = threading.Thread(target=thread_test)
t.start() # start the socket client thread
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
|
story.py
|
import web
import disk
from bs4 import BeautifulSoup
from urllib.parse import urlparse, urljoin
import itertools
import threading
import re
import datetime
import dateutil.parser
import math
import statistics
def words_of_entries(entries):
words = 0
for p in entries:
words += len(p.get_text().split())
return words
class PageInfo:
def __init__(self, date, words, next):
self.date = date
self.words = words
self.next = next
class StoryInfo:
def __init__(self, name, url, color, contentclass, dateclass, validationclass, validationtext, validationinvert, nextlinkclass, nextlinktext, contentblockbegin, contentblockend, domains, zerolength, finished, overridestart, posterityonly):
self.name = name
self.url = url
self.color = '#' + color
self.contentclass = contentclass
self.dateclass = dateclass
self.validationclass = validationclass
self.validationtext = validationtext
self.validationinvert = validationinvert
self.nextlinkclass = nextlinkclass
self.nextlinktext = nextlinktext
self.contentblockbegin = contentblockbegin
self.contentblockend = contentblockend
self.domains = domains
self.zerolength = zerolength
self.finished = finished
self.overridestart = overridestart
self.posterityonly = posterityonly
self.data = None
def words_total(self):
return sum(page.words for page in self.data.pages)
def contentblock_crop(self, blocks):
if self.contentblockend != 0:
return blocks[self.contentblockbegin:-self.contentblockend]
elif self.contentblockbegin != 0:
return blocks[self.contentblockbegin:]
else:
return blocks
def statstart(self):
return dateutil.parser.parse(self.overridestart) if self.overridestart is not None else self.data.pages[0].date
def words_per_week(self, weeks_to_average):
return self.smoothed_worker(weeks_to_average, sum, True)
def words_per_post(self, weeks_to_average):
return self.smoothed_worker(weeks_to_average, self.meanornull, False)
def meanornull(self, input):
data = list(input)
if len(data) > 0:
return statistics.mean(data)
else:
return 0 # this is wrong, for now
def posts_per_week(self, weeks_to_average):
return self.smoothed_worker(weeks_to_average, lambda data: sum(1 if words > 0 else 0 for words in data), True)
def smoothed_worker(self, weeks_to_average, func, per_week):
week_length = 7
average_size = week_length * weeks_to_average
start = self.statstart()
results = []
for center in [start + datetime.timedelta(days = x) for x in range(0, (self.data.pages[-1].date - start).days)]:
rstart = center - datetime.timedelta(days = average_size / 2)
rend = center + datetime.timedelta(days = average_size / 2)
rstartweeks = math.floor((center - max(rstart, self.statstart())).days / 7)
rendweeks = math.floor((min(rend, self.data.pages[-1].date) - center).days / 7)
rstart = center - datetime.timedelta(days = rstartweeks * 7)
rend = center + datetime.timedelta(days = rendweeks * 7)
if per_week:
divisor = (rend - rstart).days / 7
else:
divisor = 1
results += [(center, func(page.words for page in self.data.pages if page.date > rstart and page.date <= rend) / divisor)]
return results
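# --- Editor's illustrative sketch (not part of the original file) ---
# The idea behind smoothed_worker above, reduced to plain (date, words) pairs:
# for every day, take a window of whole weeks on either side of it and
# aggregate the word counts that fall inside (here: words per week). The
# sample data and two-week window are made up; the real method also aligns the
# window to week boundaries and clamps it to the story's start and end dates.
def _example_words_per_week():
    posts = [(datetime.datetime(2020, 1, 1) + datetime.timedelta(days=7 * i), 1000 + 100 * i)
             for i in range(10)]
    start, end = posts[0][0], posts[-1][0]
    results = []
    for center in (start + datetime.timedelta(days=x) for x in range((end - start).days)):
        window_start = center - datetime.timedelta(days=7)
        window_end = center + datetime.timedelta(days=7)
        weeks = max((min(window_end, end) - max(window_start, start)).days / 7, 1)
        words = sum(w for d, w in posts if window_start < d <= window_end)
        results.append((center, words / weeks))
    return results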
class StoryData:
def __init__(self):
self.pages = []
def handle_page(url, story):
page, err = web.simple_get(url)
if page is None:
raise RuntimeError(f'Page {url} failed to download: {err}')
html = BeautifulSoup(page, 'html.parser')
if story.dateclass is not None:
date = dateutil.parser.parse(html.select_one(story.dateclass).get_text())
else:
date = None
words = words_of_entries(story.contentblock_crop(html.select(story.contentclass)))
if words <= 0 and url not in story.zerolength:
raise RuntimeError(f'Zero words detected in chapter {url}; that is never right')
for link in html.select(story.nextlinkclass):
if re.match(story.nextlinktext, link.text.strip()):
if link.has_attr('href'):
next = link['href']
elif link.has_attr('onclick'):
# fanfiction.net buttons
next = re.match("self.location='(.*)'", link['onclick']).group(1)
else:
continue
if urlparse(next).netloc in story.domains:
next = urljoin(url, next)
break
else:
next = None
# it's weirdly common to just link "next" back to the epilogue, so let's catch that
if next == url:
next = None
if story.validationclass != None:
validated = False
for element in html.select(story.validationclass):
validated = validated or re.match(story.validationtext, element.get_text().strip())
if story.validationinvert:
validated = not validated
else:
validated = True
print(f'{url}, {date}: {words}, {next}' + (" (SKIPPED)" if not validated else ""))
return PageInfo(date, words, next), validated
def handle_story(story):
# we can be passed a string, so let's just convert that to a story
if isinstance(story, str):
story = disk.db()[story]
# get rid of the last page, just in case it's changed (we expect this)
if len(story.data.pages) > 0:
with disk.cache_lock():
story.data.pages.pop()
# either use the final next if available, or the base URL
if len(story.data.pages) > 0:
url = story.data.pages[-1].next
else:
url = story.url
while url != None:
page, validated = handle_page(url, story)
url = page.next
if validated:
with disk.cache_lock():
story.data.pages += [page]
disk.save_cache(optional = True)
def handle_stories(allowthreads):
if allowthreads:
threads = []
for id, story in disk.db().items():
threads.append(threading.Thread(target = lambda: handle_story(story)))
threads[-1].start()
for thread in threads:
thread.join()
else:
for id, story in disk.db().items():
handle_story(story)
disk.save_cache()
|
test_forward.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument
"""
Tensorflow testcases
====================
This article is a test script to test tensorflow operator with Relay.
"""
from __future__ import print_function
import threading
import numpy as np
import pytest
try:
import tensorflow.compat.v1 as tf
except ImportError:
import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import graph_util
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops import init_ops
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_functional_ops
from distutils.version import LooseVersion
import tvm
from tvm import te
from tvm import relay
import tvm.relay.testing.tf as tf_testing
from tvm.runtime.vm import VirtualMachine
from packaging import version as package_version
import tvm.testing
#######################################################################
# Generic run functions for TVM & tensorflow
# ------------------------------------------
def convert_to_list(x):
if not isinstance(x, list):
x = [x]
return x
tf_dtypes = {
"float32": tf.float32,
"float16": tf.float16,
"float64": tf.float64,
"int32": tf.int32,
"uint8": tf.uint8,
"int8": tf.int8,
"int16": tf.int16,
"uint16": tf.uint16,
"int64": tf.int64,
}
def vmobj_to_list(o):
if isinstance(o, tvm.nd.NDArray):
return [o.asnumpy()]
elif isinstance(o, tvm.runtime.container.ADT):
result = []
for f in o:
result.extend(vmobj_to_list(f))
return result
elif isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):
if o.constructor.name_hint == "Cons":
tl = vmobj_to_list(o.fields[1])
hd = vmobj_to_list(o.fields[0])
hd.extend(tl)
return hd
elif o.constructor.name_hint == "Nil":
return []
elif "tensor_nil" in o.constructor.name_hint:
return [0]
elif "tensor" in o.constructor.name_hint:
return [o.fields[0].asnumpy()]
else:
raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint)
else:
raise RuntimeError("Unknown object type: %s" % type(o))
def run_tvm_graph(
graph_def,
input_data,
input_node,
num_output=1,
target="llvm",
out_names=None,
opt_level=3,
mode="graph_runtime",
cuda_layout="NCHW",
layout=None,
disabled_pass=None,
ignore_in_shape=False,
serialize=False,
):
""" Generic function to compile on relay and execute on tvm """
input_data = convert_to_list(input_data)
input_node = convert_to_list(input_node)
if target == "cuda":
layout = cuda_layout
target_host = None
if ignore_in_shape:
shape_dict = None
else:
shape_dict = {
e: i.shape if hasattr(i, "shape") else () for e, i in zip(input_node, input_data)
}
mod, params = relay.frontend.from_tensorflow(
graph_def, layout=layout, shape=shape_dict, outputs=out_names
)
ctx = tvm.context(target, 0)
if mode == "debug":
ex = relay.create_executor(mode, mod=mod, ctx=tvm.cpu(), target="llvm")
inputs = []
for param in mod["main"].params:
found = False
for i, n in enumerate(input_node):
if n == param.name_hint:
found = True
inputs.append(tvm.nd.array(input_data[i]))
break
# Interpreter doesn't bind constants, so still need to find in params
if not found:
inputs.append(tvm.nd.array(params[param.name_hint]))
result = ex.evaluate()(*inputs)
return vmobj_to_list(result)
elif mode == "vm":
with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass):
print(mod["main"])
mod = relay.transform.InferType()(mod)
vm_exec = relay.vm.compile(mod, target="llvm", params=params)
if serialize:
code, lib = vm_exec.save()
vm_exec = tvm.runtime.vm.Executable.load_exec(code, lib)
vm = VirtualMachine(vm_exec, tvm.cpu())
inputs = {}
for e, i in zip(input_node, input_data):
inputs[e] = tvm.nd.array(i)
result = vm.invoke("main", **inputs)
return vmobj_to_list(result)
else:
with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass):
graph, lib, params = relay.build(mod, target, target_host, params)
from tvm.contrib import graph_runtime
m = graph_runtime.create(graph, lib, ctx)
# set inputs
for e, i in zip(input_node, input_data):
if e != "":
m.set_input(e, tvm.nd.array(i))
m.set_input(**params)
# execute
m.run()
# get outputs
assert out_names is None or num_output == len(
out_names
), "out_names: {} num_output: {}".format(out_names, num_output)
tvm_output_list = [m.get_output(i).asnumpy() for i in range(num_output)]
return tvm_output_list
def run_tf_graph(sess, input_data, input_node, output_node):
""" Generic function to execute tensorflow """
input_data = convert_to_list(input_data)
input_node = convert_to_list(input_node)
output_node = convert_to_list(output_node)
tensor = [sess.graph.get_tensor_by_name(output_name) for output_name in output_node]
input_dict = {e: input_data[i] for i, e in enumerate(input_node)}
if len(input_node) == 1 and input_node[0] == "":
output_data = sess.run(tensor)
else:
output_data = sess.run(tensor, input_dict)
return output_data
def compare_tf_with_tvm(
in_data,
in_name,
out_name,
init_global_variables=False,
no_gpu=False,
opt_level=3,
mode="graph_runtime",
cuda_layout="NCHW",
add_shapes_to_graph_def=True,
):
"""Generic function to generate and compare tensorflow and TVM output"""
def name_without_num(name):
return name.split(":")[0] if ":" in name else name
out_name = convert_to_list(out_name)
out_node = [name_without_num(name) for name in out_name]
in_data = convert_to_list(in_data)
in_name = convert_to_list(in_name)
in_node = [name_without_num(name) for name in in_name]
with tf.Session() as sess:
if init_global_variables:
sess.run(variables.global_variables_initializer())
final_graph_def = (
tf_testing.AddShapesToGraphDef(sess, out_node)
if add_shapes_to_graph_def
else tf.get_default_graph().as_graph_def()
)
tf_output = run_tf_graph(sess, in_data, in_name, out_name)
for device in ["llvm", "cuda"]:
ctx = tvm.context(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
if no_gpu and device == "cuda":
continue
tvm_output = run_tvm_graph(
final_graph_def,
in_data,
in_node,
target=device,
out_names=out_name,
num_output=len(out_name),
opt_level=opt_level,
mode=mode,
cuda_layout=cuda_layout,
)
# since the names from the tensorflow and relay runs are not exactly the same,
# only the first len(tf_output) outputs will be compared
for i in range(len(tf_output)):
if not isinstance(tf_output[i], np.ndarray):
assert len(tvm_output[i].shape) == 0
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
sess.close()
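# --- Editor's illustrative sketch (not part of the original file) ---
# A minimal end-to-end use of compare_tf_with_tvm, in the same style as the
# pooling and convolution tests below. The op (addition), tensor shapes and
# placeholder names are arbitrary choices for illustration.
def _example_compare_add():
    with tf.Graph().as_default():
        a = array_ops.placeholder(shape=[2, 2], dtype="float32", name="in_a")
        b = array_ops.placeholder(shape=[2, 2], dtype="float32", name="in_b")
        math_ops.add(a, b, name="out")
        x = np.random.uniform(size=(2, 2)).astype("float32")
        y = np.random.uniform(size=(2, 2)).astype("float32")
        compare_tf_with_tvm([x, y], ["in_a:0", "in_b:0"], "out:0")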
def is_gpu_available():
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
gpu_list = [x.name for x in local_device_protos if x.device_type == "GPU"]
if len(gpu_list) > 0:
print("Tensorflow GPU:", gpu_list)
return True
else:
return False
#######################################################################
# Pooling
# -------
def _test_pooling_iteration(input_shape, **kwargs):
""" One iteration of pool operation with given shapes and attributes """
x = -np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
nn_ops.pool(in_data, **kwargs)
if kwargs["pooling_type"] == "MAX":
out_name = "max_pool:0"
else:
out_name = "avg_pool:0"
compare_tf_with_tvm(x, "Placeholder:0", out_name)
def _test_pooling(input_shape, **kwargs):
_test_pooling_iteration(input_shape, **kwargs)
if is_gpu_available():
if len(input_shape) == 4:
input_shape = [input_shape[ii] for ii in (0, 3, 1, 2)]
kwargs["data_format"] = "NCHW"
_test_pooling_iteration(input_shape, **kwargs)
@tvm.testing.uses_gpu
def test_forward_pooling():
""" Pooling """
# TensorFlow only supports NDHWC for max_pool3d on CPU
for pool_type in ["AVG", "MAX"]:
# NDHWC is the default layout for max_pool3d and avg_pool3d in TensorFlow
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[2, 2, 2],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[2, 2, 2],
)
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[1, 1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[1, 1, 1],
)
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[2, 2, 2],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[2, 2, 2],
)
        # test cases for max_pool3d & avg_pool3d with layout NCDHW
        # TensorFlow pool3d doesn't support NCDHW on CPU
if is_gpu_available():
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[1, 1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[1, 1, 1],
data_format="NCDHW",
)
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[2, 2, 2],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[2, 2, 2],
data_format="NCDHW",
)
_test_pooling(
input_shape=[2, 9, 10, 2],
window_shape=[1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 10, 9, 2],
window_shape=[1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 9, 10, 2],
window_shape=[2, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 10, 9, 2],
window_shape=[2, 3],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[2, 1],
)
# Tests involving SpaceToBatchND
_test_pooling(
input_shape=[1, 1, 2, 1],
window_shape=[1, 1],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[1, 2],
)
_test_pooling(
input_shape=[1, 2, 1],
window_shape=[1],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[2],
)
#######################################################################
# Convolution
# -----------
def _test_convolution(
opname,
tensor_in_sizes,
filter_in_sizes,
dilations,
strides,
padding,
data_format,
deconv_output_shape=[],
add_shapes_to_graph_def=True,
):
""" One iteration of convolution with given shapes and attributes """
total_size_1 = np.prod(tensor_in_sizes)
total_size_2 = np.prod(filter_in_sizes)
    # Initialize the input tensor with an array of incrementing numbers starting from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
if data_format == "NHWC":
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
else:
strides = [1, 1] + strides
dilations = [1, 1] + dilations
if opname == "conv":
nn_ops.conv2d(
in_data,
in_filter,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"Conv2D:0",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
elif opname == "conv_transpose":
nn_ops.conv2d_transpose(
in_data,
in_filter,
output_shape=deconv_output_shape,
strides=strides,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"conv2d_transpose:0",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
else:
nn_ops.depthwise_conv2d_native(
in_data,
in_filter,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"DepthwiseConv2dNative:0",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
@tvm.testing.uses_gpu
def test_forward_convolution():
if is_gpu_available():
_test_convolution("conv", [4, 176, 8, 8], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NCHW")
_test_convolution("conv", [4, 19, 17, 17], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NCHW")
_test_convolution("conv", [4, 124, 17, 17], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NCHW")
_test_convolution("conv", [4, 12, 17, 17], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NCHW")
_test_convolution(
"depthwise", [4, 176, 8, 8], [1, 1, 176, 1], [1, 1], [1, 1], "SAME", "NCHW"
)
_test_convolution(
"depthwise", [4, 19, 17, 17], [3, 3, 19, 1], [1, 1], [2, 2], "VALID", "NCHW"
)
_test_convolution(
"depthwise", [4, 124, 17, 17], [1, 1, 124, 1], [1, 1], [1, 1], "SAME", "NCHW"
)
_test_convolution(
"depthwise", [4, 12, 17, 17], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NCHW"
)
_test_convolution(
"depthwise", [4, 12, 17, 17], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NCHW"
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[1, 1, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 176, 8, 8],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[2, 2, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 176, 8, 8],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[2, 2, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NCHW",
[4, 176, 15, 15],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 176, 8, 8],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NCHW",
[4, 176, 15, 15],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NCHW",
[4, 176, 16, 16],
)
_test_convolution(
"conv_transpose",
[4, 19, 8, 8],
[3, 3, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 19, 17, 17],
)
_test_convolution(
"conv_transpose",
[4, 19, 17, 17],
[1, 1, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 124, 17, 17],
)
_test_convolution(
"conv_transpose",
[4, 19, 17, 17],
[3, 3, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 124, 17, 17],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 12, 17, 17],
)
# kernel 2x2, strides (2,2)
_test_convolution(
"conv_transpose",
[4, 19, 8, 8],
[2, 2, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 19, 16, 16],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[2, 2, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 12, 16, 16],
)
# output channel is 1
_test_convolution(
"conv_transpose",
[1, 19, 8, 8],
[1, 1, 1, 19],
[1, 1],
[1, 1],
"VALID",
"NCHW",
[1, 1, 8, 8],
)
_test_convolution("conv", [4, 8, 8, 176], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("conv", [4, 17, 17, 19], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution("conv", [4, 17, 17, 124], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("conv", [4, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution(
"conv",
[4, 17, 17, 12],
[3, 3, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NHWC",
add_shapes_to_graph_def=False,
)
_test_convolution("depthwise", [4, 8, 8, 176], [1, 1, 176, 1], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 19], [3, 3, 19, 1], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 124], [1, 1, 124, 1], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 12], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 12], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution(
"depthwise",
[4, 17, 17, 12],
[3, 3, 12, 2],
[1, 1],
[2, 2],
"VALID",
"NHWC",
add_shapes_to_graph_def=False,
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[1, 1, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[2, 2, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[2, 2, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NHWC",
[4, 15, 15, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NHWC",
[4, 15, 15, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NHWC",
[4, 16, 16, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 19],
[3, 3, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 17, 17, 19],
)
_test_convolution(
"conv_transpose",
[4, 17, 17, 19],
[1, 1, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 17, 17, 124],
)
_test_convolution(
"conv_transpose",
[4, 17, 17, 19],
[3, 3, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 17, 17, 124],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 17, 17, 12],
)
# kernel 2x2, strides (2,2)
_test_convolution(
"conv_transpose",
[4, 8, 8, 19],
[2, 2, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 16, 16, 19],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[2, 2, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 16, 16, 12],
)
# output channel is 1
_test_convolution(
"conv_transpose",
[1, 8, 8, 19],
[1, 1, 1, 19],
[1, 1],
[1, 1],
"VALID",
"NHWC",
[1, 8, 8, 1],
)
# Test without adding shapes to graph def
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[1, 1, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
add_shapes_to_graph_def=False,
)
#######################################################################
# Convolution3D
# -------------
def _test_convolution3d(
opname,
tensor_in_sizes,
filter_in_sizes,
dilations,
strides,
padding,
data_format,
deconv_output_shape=[],
add_shapes_to_graph_def=True,
):
""" One iteration of 3D convolution with given shapes and attributes """
total_size_1 = np.prod(tensor_in_sizes)
total_size_2 = np.prod(filter_in_sizes)
    # Initialize the input tensor with an array of incrementing numbers starting from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
if data_format == "NDHWC":
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
else:
strides = [1, 1] + strides
dilations = [1, 1] + dilations
if opname == "conv":
nn_ops.conv3d(
in_data,
in_filter,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"Conv3D:0",
cuda_layout="NCDHW",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
@tvm.testing.uses_gpu
def test_forward_convolution3d():
if is_gpu_available():
_test_convolution3d(
"conv", [4, 176, 8, 8, 8], [1, 1, 1, 176, 32], [1, 1, 1], [1, 1, 1], "SAME", "NCDHW"
)
_test_convolution3d(
"conv", [4, 19, 17, 17, 17], [3, 3, 3, 19, 19], [1, 1, 1], [2, 2, 2], "VALID", "NCDHW"
)
_test_convolution3d(
"conv", [4, 124, 17, 17, 17], [1, 1, 1, 124, 19], [1, 1, 1], [1, 1, 1], "SAME", "NCDHW"
)
_test_convolution3d(
"conv", [4, 12, 17, 17, 17], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NCDHW"
)
_test_convolution3d(
"conv", [4, 8, 8, 8, 176], [1, 1, 1, 176, 32], [1, 1, 1], [1, 1, 1], "SAME", "NDHWC"
)
_test_convolution3d(
"conv", [4, 17, 17, 17, 19], [3, 3, 3, 19, 19], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC"
)
_test_convolution3d(
"conv", [4, 17, 17, 17, 124], [1, 1, 1, 124, 19], [1, 1, 1], [1, 1, 1], "SAME", "NDHWC"
)
_test_convolution3d(
"conv", [4, 17, 17, 17, 12], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC"
)
# Test without adding shapes to graph def
_test_convolution3d(
"conv",
[4, 17, 17, 17, 12],
[3, 3, 3, 12, 32],
[1, 1, 1],
[2, 2, 2],
"VALID",
"NDHWC",
add_shapes_to_graph_def=False,
)
#######################################################################
# Convolution3D Transpose
# -----------------------
def _test_convolution3d_transpose(
data_shape,
filter_shape,
strides,
padding,
output_shape,
data_format="NCDHW",
add_shapes_to_graph_def=True,
):
""" One iteration of 3D convolution transpose with given shapes and attributes """
dtype = "float32"
data_array = np.random.uniform(size=data_shape).astype(dtype)
filter_array = np.random.uniform(size=filter_shape).astype(dtype)
if data_format == "NDHWC":
strides = [1] + strides + [1]
else:
strides = [1, 1] + strides
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data_shape, dtype=dtype)
in_filter = constant_op.constant(filter_array, shape=filter_shape, dtype=dtype)
nn_ops.conv3d_transpose(
in_data,
in_filter,
output_shape=output_shape,
strides=strides,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
data_array,
"Placeholder:0",
"conv3d_transpose:0",
cuda_layout="NDHWC",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
@tvm.testing.uses_gpu
def test_forward_convolution3d_transpose():
if is_gpu_available():
_test_convolution3d_transpose(
data_shape=[1, 10, 8, 8, 8],
filter_shape=[1, 1, 1, 6, 10],
strides=[1, 1, 1],
padding="VALID",
output_shape=[1, 6, 8, 8, 8],
)
_test_convolution3d_transpose(
data_shape=[4, 9, 8, 8, 8],
filter_shape=[1, 1, 1, 6, 9],
strides=[1, 1, 1],
padding="VALID",
output_shape=[4, 6, 8, 8, 8],
)
_test_convolution3d_transpose(
data_shape=[1, 3, 8, 8, 8],
filter_shape=[1, 1, 1, 6, 3],
strides=[2, 2, 2],
padding="SAME",
output_shape=[1, 6, 15, 15, 15],
)
_test_convolution3d_transpose(
data_shape=[1, 16, 8, 8, 8],
filter_shape=[3, 3, 3, 6, 16],
strides=[3, 3, 3],
padding="VALID",
output_shape=[1, 6, 24, 24, 24],
)
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 10],
filter_shape=[1, 1, 1, 6, 10],
strides=[1, 1, 1],
padding="VALID",
output_shape=[1, 8, 8, 8, 6],
data_format="NDHWC",
)
_test_convolution3d_transpose(
data_shape=[4, 8, 8, 8, 9],
filter_shape=[1, 1, 1, 6, 9],
strides=[1, 1, 1],
padding="VALID",
output_shape=[4, 8, 8, 8, 6],
data_format="NDHWC",
)
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 3],
filter_shape=[1, 1, 1, 6, 3],
strides=[2, 2, 2],
padding="SAME",
output_shape=[1, 15, 15, 15, 6],
data_format="NDHWC",
)
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 16],
filter_shape=[3, 3, 3, 6, 16],
strides=[3, 3, 3],
padding="VALID",
output_shape=[1, 24, 24, 24, 6],
data_format="NDHWC",
)
# Test without adding shapes to graph def
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 16],
filter_shape=[3, 3, 3, 6, 16],
strides=[3, 3, 3],
padding="VALID",
output_shape=[1, 24, 24, 24, 6],
data_format="NDHWC",
add_shapes_to_graph_def=False,
)
#######################################################################
# BiasAdd
# -----------
def _test_biasadd(tensor_in_sizes, data_format):
""" One iteration of biasadd with given shapes and attributes """
total_size_1 = 1
for s in tensor_in_sizes:
total_size_1 *= s
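    # the bias has one element per channel: dim 1 for NCHW, the last dim for NHWC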
tensor_bias_sizes = [tensor_in_sizes[1]] if data_format == "NCHW" else [tensor_in_sizes[3]]
total_size_2 = tensor_bias_sizes[0]
    # Initialize the input tensor with an array of incrementing numbers starting from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
bias_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_bias = constant_op.constant(bias_array, shape=tensor_bias_sizes, dtype="float32")
nn_ops.bias_add(in_data, in_bias, data_format=data_format)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"), "Placeholder:0", "BiasAdd:0"
)
@tvm.testing.uses_gpu
def test_forward_biasadd():
if is_gpu_available():
_test_biasadd([4, 176, 8, 8], "NCHW")
_test_biasadd([1, 100, 1, 1], "NCHW")
_test_biasadd([4, 19, 17, 17], "NCHW")
_test_biasadd([4, 124, 3, 3], "NCHW")
_test_biasadd([4, 8, 8, 176], "NHWC")
_test_biasadd([1, 1, 1, 100], "NHWC")
_test_biasadd([4, 17, 17, 19], "NHWC")
_test_biasadd([4, 3, 3, 124], "NHWC")
def _test_forward_where(input_shape):
with tf.Graph().as_default():
dtype = tf.float32
t = tf.constant(
np.random.choice([0, 1, -2, 3, -1, 0.1, -0.2], size=input_shape).astype(dtype.name)
)
out = tf.where(t)
compare_tf_with_tvm([], [], out.name, mode="debug")
compare_tf_with_tvm([], [], out.name, mode="vm")
def test_forward_argwhere():
_test_forward_where((5,))
_test_forward_where((5, 5))
_test_forward_where((5, 5, 5))
_test_forward_where((5, 5, 5, 5))
_test_forward_where((5, 5, 5, 5, 5))
#######################################################################
# SpaceToBatchND
# --------------
def _test_space_to_batch_nd(input_shape, block_shape, paddings, dtype="int32"):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
with tf.Graph().as_default():
in_data = tf.placeholder(shape=input_shape, dtype=dtype)
out = tf.space_to_batch_nd(in_data, block_shape, paddings)
compare_tf_with_tvm(data, in_data.name, out.name)
def _test_space_to_batch_nd_infer_paddings(input_shape, block_shape, dtype="int32"):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
padding_np = np.array([0, 1]).astype(np.int32).reshape((1, 2))
with tf.Graph().as_default():
in_data = tf.placeholder(shape=input_shape, dtype=dtype)
const1 = tf.constant(padding_np, dtype=tf.int32)
        # make paddings an input to tf.reverse, but not an input to the graph,
        # so it can be extracted with infer_value_simulated
paddings = tf.reverse(const1, axis=[-1])
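        # e.g. padding_np [[0, 1]] reversed along the last axis becomes paddings [[1, 0]]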
out = tf.space_to_batch_nd(in_data, block_shape, paddings)
compare_tf_with_tvm(data, in_data.name, out.name)
def test_forward_space_to_batch_nd():
# test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/space-to-batch-n-d
_test_space_to_batch_nd(input_shape=[1, 2, 2, 1], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(input_shape=[1, 2, 2, 3], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(input_shape=[1, 4, 4, 1], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(
input_shape=[2, 2, 4, 1], block_shape=[2, 2], paddings=[[0, 0], [2, 0]], dtype="int64"
)
# pylint: disable=line-too-long
# https://github.com/tensorflow/tensorflow/blob/24f578/tensorflow/python/kernel_tests/spacetobatch_op_test.py
_test_space_to_batch_nd(input_shape=[2, 3], block_shape=[2], paddings=[[1, 0]], dtype="float32")
_test_space_to_batch_nd(
input_shape=[2, 3, 2], block_shape=[2], paddings=[[1, 0]], dtype="float64"
)
_test_space_to_batch_nd_infer_paddings(input_shape=[2, 3, 2], block_shape=[2])
#######################################################################
# BatchToSpaceND
# --------------
def _test_batch_to_space_nd(input_shape, block_shape, crops, dtype="int32"):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
with tf.Graph().as_default():
in_data = tf.placeholder(shape=input_shape, dtype=dtype)
out = tf.batch_to_space_nd(in_data, block_shape, crops)
compare_tf_with_tvm(data, in_data.name, out.name)
def test_forward_batch_to_space_nd():
# test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/batch-to-space-n-d
_test_batch_to_space_nd(input_shape=[4, 1, 1, 1], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(input_shape=[4, 1, 1, 3], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(input_shape=[4, 2, 2, 1], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(
input_shape=[8, 1, 3, 1], block_shape=[2, 2], crops=[[0, 0], [2, 0]], dtype="int64"
)
# pylint: disable=line-too-long
# https://github.com/tensorflow/tensorflow/blob/24f578/tensorflow/python/kernel_tests/batchtospace_op_test.py
_test_batch_to_space_nd(
input_shape=[18, 2, 1, 2], block_shape=[2, 3], crops=[[1, 1], [0, 0]], dtype="float32"
)
_test_batch_to_space_nd(
input_shape=[20, 5, 8, 7], block_shape=[2, 2], crops=[[1, 1], [1, 1]], dtype="float64"
)
#######################################################################
# Reshape
# -------
def _test_reshape(data, out_shape):
""" One iteration of reshape operation with given data and out shape """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
array_ops.reshape(in_data, out_shape)
compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_with_call():
""" relay.expr.Call as shape """
data = np.zeros((6, 4, 2))
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
out_shape = tf.constant([1, 2, 3], dtype="int32")
out_shape = tf.multiply(out_shape, 2)
array_ops.reshape(in_data, out_shape)
compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_like(data, shape_like):
""" A special case for reshape. """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
in_shape_like = array_ops.placeholder(shape=shape_like.shape, dtype=data.dtype)
out_shape = array_ops.shape(in_shape_like)
array_ops.reshape(in_data, out_shape)
compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_symbolic(data, a_data, b_data):
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
a = array_ops.placeholder(shape=a_data.shape, dtype=a_data.dtype)
b = array_ops.placeholder(shape=b_data.shape, dtype=b_data.dtype)
newshape = tf.add(a, b)
out = array_ops.reshape(in_data, newshape)
for mode in ["debug", "vm"]:
compare_tf_with_tvm(
[data, a_data, b_data], [in_data.name, a.name, b.name], out.name, mode=mode
)
def test_forward_reshape():
_test_reshape(np.arange(6.0), [2, 3])
_test_reshape(np.arange(6), [-1, 2])
_test_reshape(np.arange(6), [3, -1])
_test_reshape(np.arange(6), [-1])
_test_reshape_with_call()
_test_reshape_like(np.zeros((3, 6)), np.zeros((9, 2)))
_test_reshape_symbolic(np.arange(6.0), np.array([2, 0]), np.array([0, 3]))
_test_reshape_symbolic(np.arange(6), np.array([-1, 0]), np.array([0, 2]))
_test_reshape_symbolic(np.arange(6), np.array([3, 0]), np.array([3, -1]))
_test_reshape_symbolic(np.arange(6), np.array([0]), np.array([-1]))
#######################################################################
# DepthToSpace
# ------------
def _test_depthtospace(data, block_size):
""" One iteration of depth_to_space operation with given data and block size """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
array_ops.depth_to_space(in_data, block_size)
compare_tf_with_tvm(data, "Placeholder:0", "DepthToSpace:0")
def test_forward_depthtospace():
_test_depthtospace(np.random.normal(size=[1, 32, 32, 4]), 2)
_test_depthtospace(np.random.normal(size=[1, 16, 8, 32]), 4)
#######################################################################
# SpaceToDepth
# ------------
def _test_spacetodepth(data, block_size):
""" One iteration of space_to_depth operation with given data and block size """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
array_ops.space_to_depth(in_data, block_size)
compare_tf_with_tvm(data, "Placeholder:0", "SpaceToDepth:0")
def test_forward_spacetodepth():
_test_spacetodepth(np.random.normal(size=[1, 32, 32, 4]), 2)
_test_spacetodepth(np.random.normal(size=[1, 16, 8, 32]), 4)
#######################################################################
# Squeeze
# -------
def _test_squeeze(data, squeeze_dims=None):
""" One iteration of squeeze """
if squeeze_dims is None:
squeeze_dims = []
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
if squeeze_dims:
array_ops.squeeze(in_data, squeeze_dims)
else:
array_ops.squeeze(in_data)
compare_tf_with_tvm(data, "Placeholder:0", "Squeeze:0")
def test_forward_squeeze():
""" Squeeze """
# Nothing to squeeze.
_test_squeeze(np.arange(2).reshape((2)))
_test_squeeze(np.arange(6).reshape((2, 3)))
# Squeeze the middle element away.
_test_squeeze(np.arange(4).reshape((2, 1, 2)))
# Squeeze on both ends.
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)))
# Positive squeeze dim index.
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [0])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [2, 4])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [0, 4, 2])
# Negative squeeze dim index.
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-1])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-3, -5])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-3, -5, -1])
#######################################################################
# TensorArray
# -----------
def test_tensor_array_write_read():
def run(dtype_str, infer_shape, element_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
np_data = np.array([[1.0, 2.0], [3.0, 4.0]]).astype(dtype_str)
in_data = [np_data, np_data]
t1 = tf.constant(np_data, dtype=dtype)
t2 = tf.constant(np_data, dtype=dtype)
ta1 = tf.TensorArray(
dtype=dtype, size=2, infer_shape=infer_shape, element_shape=element_shape
)
ta2 = ta1.write(0, t1)
ta3 = ta2.write(1, t2)
out = ta3.read(0)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], "TensorArrayReadV3:0", mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, False, None)
run(dtype, False, tf.TensorShape([None, 2]))
run(dtype, True, None)
def test_tensor_array_scatter():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
if infer_shape:
element_shape = tf.TensorShape([tf.Dimension(None)])
else:
element_shape = None
t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str), dtype=dtype)
indices = tf.constant([2, 1, 0])
ta1 = tf.TensorArray(
dtype=dtype, size=3, infer_shape=infer_shape, element_shape=element_shape
)
ta2 = ta1.scatter(indices, t)
out0 = ta2.read(0)
out1 = ta2.read(1)
out2 = ta2.read(2)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayReadV3:0"], mode="vm")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_1:0"], mode="vm")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_2:0"], mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_gather():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str))
scatter_indices = tf.constant([2, 1, 0])
gather_indices = tf.constant([1, 2])
ta1 = tf.TensorArray(dtype=dtype, size=3, infer_shape=infer_shape)
ta2 = ta1.scatter(scatter_indices, t)
t1 = ta2.gather(gather_indices)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayGatherV3:0"], mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, True)
def test_tensor_array_split():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(
np.array([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]).astype(
dtype_str
),
dtype=dtype,
)
split_length = tf.constant([2, 2, 2, 2], dtype=tf.int32)
ta1 = tf.TensorArray(dtype=dtype, size=4, infer_shape=infer_shape)
ta2 = ta1.split(t, split_length)
out0 = ta2.read(0)
out1 = ta2.read(1)
out2 = ta2.read(2)
out3 = ta2.read(3)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayReadV3:0"], mode="debug")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_1:0"], mode="debug")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_2:0"], mode="debug")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_3:0"], mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_concat():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(
np.array([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]).astype(
dtype_str
),
dtype=dtype,
)
split_length = tf.constant([2, 2, 2, 2], dtype=tf.int32)
ta1 = tf.TensorArray(dtype=dtype, size=4, infer_shape=infer_shape)
ta2 = ta1.split(t, split_length)
t = ta2.concat()
out = tf.identity(t)
compare_tf_with_tvm([], [], ["Identity:0"], mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_size():
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
pytest.skip("Needs fixing for tflite >= 1.15.0")
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
np_data = np.array([[1.0, 2.0], [3.0, 4.0]]).astype(dtype_str)
in_data = [np_data, np_data]
t1 = tf.constant(np_data, dtype=dtype)
t2 = tf.constant(np_data, dtype=dtype)
ta1 = tf.TensorArray(dtype=dtype, size=2, infer_shape=infer_shape)
ta2 = ta1.write(0, t1)
ta3 = ta2.write(1, t2)
out = ta3.size()
g = tf.get_default_graph()
compare_tf_with_tvm([], [], "TensorArraySizeV3:0", mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_stack():
def run(dtype_str, infer_shape):
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
pytest.skip("Needs fixing for tflite >= 1.15.0")
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str))
scatter_indices = tf.constant([2, 1, 0])
ta1 = tf.TensorArray(dtype=dtype, size=3, infer_shape=infer_shape)
ta2 = ta1.scatter(scatter_indices, t)
t1 = ta2.stack()
print(t1)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayStack/TensorArrayGatherV3:0"], mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, True)
def test_tensor_array_unstack():
def run(dtype_str, input_shape, infer_shape):
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
pytest.skip("Needs fixing for tflite >= 1.15.0")
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(np.random.choice([0, 1, 2, 3], size=input_shape).astype(dtype.name))
ta1 = tf.TensorArray(dtype=dtype, infer_shape=infer_shape, size=input_shape[0])
ta2 = ta1.unstack(t)
out0 = ta2.size()
out1 = ta2.read(0)
compare_tf_with_tvm([], [], "TensorArraySizeV3:0", mode="debug")
compare_tf_with_tvm([], [], "TensorArrayReadV3:0", mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, (5,), False)
run(dtype, (5, 5), True)
run(dtype, (5, 5, 5), False)
run(dtype, (5, 5, 5, 5), True)
#######################################################################
# ConcatV2
# --------
def _test_concat_v2(shape1, shape2, dim):
""" One iteration of ConcatV2 """
with tf.Graph().as_default():
dtype = "float32"
in1 = tf.placeholder(shape=shape1, dtype=dtype, name="in1")
in2 = tf.placeholder(shape=shape2, dtype=dtype, name="in2")
array_ops.concat_v2([in1, in2], dim)
np_data1 = np.random.uniform(size=shape1).astype(dtype)
np_data2 = np.random.uniform(size=shape2).astype(dtype)
compare_tf_with_tvm([np_data1, np_data2], ["in1:0", "in2:0"], "ConcatV2:0")
def test_forward_concat_v2():
if tf.__version__ < LooseVersion("1.4.1"):
return
_test_concat_v2([2, 3], [2, 3], 0)
_test_concat_v2([10, 3, 5], [2, 3, 5], 0)
_test_concat_v2([2, 3], [2, 3], 1)
_test_concat_v2([5, 8], [5, 4], 1)
_test_concat_v2([2, 8, 5], [2, 8, 6], -1)
#######################################################################
# Sigmoid
# -------
def _test_sigmoid(data):
""" One iteration of sigmoid """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
sigmoid_out = math_ops.sigmoid(in_data)
compare_tf_with_tvm(data, "Placeholder:0", "Sigmoid:0")
def test_forward_sigmoid():
""" Sigmoid """
_test_sigmoid(np.random.uniform(size=(3, 4, 4, 3)).astype("float32"))
#######################################################################
# Argmin/Argmax
# -------------
def _test_argx(func, data, **kwargs):
with tf.Graph().as_default():
inp = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="c0")
func(inp, name="argx0", **kwargs)
compare_tf_with_tvm(data, "c0:0", "argx0:0")
def test_forward_argminmax():
for output_type in [tf.int64, tf.int32]:
for axis in [None, 0, 1, 2]:
data = np.random.uniform(size=(8, 4, 9)).astype("float32")
_test_argx(tf.argmax, data=data, axis=axis, output_type=output_type)
_test_argx(tf.argmin, data=data, axis=axis, output_type=output_type)
#######################################################################
# Variable
# --------
def _test_variable(data):
""" One iteration of a variable """
tf.reset_default_graph()
with tf.Graph().as_default():
input_op = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
input_tensor = array_ops.reshape(input_op, data.shape)
size = input_tensor.shape.dims[1]
with variable_scope.variable_scope("linear", reuse=None):
w = variable_scope.get_variable("w", shape=[size, size], dtype=input_tensor.dtype)
math_ops.matmul(input_tensor, w)
compare_tf_with_tvm(data, "Placeholder:0", "MatMul:0", init_global_variables=True)
def test_forward_variable():
"""Variable type op test"""
_test_variable(np.random.uniform(size=(32, 100)).astype("float32"))
@tvm.testing.parametrize_targets("llvm", "cuda")
def test_read_variable_op(target, ctx):
""" Read Variable op test """
tf.reset_default_graph()
data = np.random.uniform(size=(32, 100)).astype("float32")
input_tensor = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
size = input_tensor.shape.dims[1]
var_data = np.random.uniform(-5, 5, size=[size, size]).astype(np.float32)
input_var = tf.Variable(var_data, name="var1", use_resource=True)
math_ops.matmul(input_tensor, input_var)
out_name = ["MatMul:0"]
out_node = ["MatMul"]
in_name = ["Placeholder:0"]
in_node = ["Placeholder"]
in_data = [data]
with tf.Session() as sess:
sess.run(variables.global_variables_initializer())
final_graph_def = sess.graph.as_graph_def(add_shapes=True)
tf_output = run_tf_graph(sess, in_data, in_name, out_name)
shape_dict = {e: i.shape for e, i in zip(in_name, in_data)}
with pytest.raises(Exception) as execinfo:
mod, params = relay.frontend.from_tensorflow(
final_graph_def, layout=None, shape=shape_dict, outputs=None
)
assert execinfo.value.args[0].startswith("Graph is not frozen. Provide a frozen graph")
# Now convert the variables to constant and run inference on the converted graph
final_graph_def = tf.graph_util.convert_variables_to_constants(
sess,
sess.graph.as_graph_def(add_shapes=True),
out_node,
)
tvm_output = run_tvm_graph(
final_graph_def,
in_data,
in_node,
target=target,
out_names=out_name,
num_output=len(out_name),
)
for i in range(len(tf_output)):
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-4, rtol=1e-5)
sess.close()
#######################################################################
# MatMul, BatchMatMul, BatchMatMulV2
# ----------------------------------
def _test_matmul(i, j, k, dtype, outer=None):
""" One iteration of matmul """
A_shape_init = [i, j]
B_shape_init = [j, k]
for transpose_a in [False, True]:
for transpose_b in [False, True]:
outer = outer or []
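            # reverse [rows, cols] when transposing so tf.matmul(..., transpose_*=True)
            # still yields an (i, k) result; `outer` prepends optional batch dimensions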
A_shape = outer + (A_shape_init[::-1] if transpose_a else A_shape_init)
B_shape = outer + (B_shape_init[::-1] if transpose_b else B_shape_init)
with tf.Graph().as_default():
A = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
result = tf.matmul(A, B, transpose_a=transpose_a, transpose_b=transpose_b)
A_np = np.random.uniform(high=5.0, size=A_shape).astype(dtype)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm([A_np, B_np], [A.name, B.name], result.name)
def test_forward_matmul():
""" MatMul op test"""
_test_matmul(1, 3, 6, "int32")
_test_matmul(5, 3, 1, "float64")
def _test_batch_matmul(A_shape, B_shape, dtype, adjoint_a=False, adjoint_b=False):
with tf.Graph().as_default():
A = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
result = tf.matmul(A, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name="batchmatmul")
A_np = np.random.uniform(high=5.0, size=A_shape).astype(dtype)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm([A_np, B_np], [A.name, B.name], result.name)
def test_forward_batch_matmul():
""" TF op BatchMatMul, BatchMatMulV2 test"""
_test_batch_matmul((3, 5, 4), (3, 4, 5), "int32")
_test_batch_matmul((3, 5, 4), (3, 4, 5), "float32", True, True)
_test_batch_matmul((3, 5, 4), (3, 5, 4), "int32", True, False)
_test_batch_matmul((3, 5, 4), (3, 5, 4), "float32", False, True)
_test_batch_matmul((2, 3, 4, 5, 6), (2, 3, 4, 6, 5), "int32")
_test_batch_matmul((1, 2, 3, 4, 5, 6), (1, 2, 3, 4, 6, 5), "float32", True, True)
_test_batch_matmul((3, 4, 5, 6), (3, 4, 5, 6), "int32", True, False)
_test_batch_matmul((2, 3, 4, 2, 3, 4, 5, 6), (2, 3, 4, 2, 3, 4, 5, 6), "float32", False, True)
#######################################################################
# SparseTensorDenseMatMul
# ----------------------------------
def _test_sparse_dense_matmul(indices, values, A_inp_shape, B_inp_shape, dtype, flip=False):
""" One iteration of sparse_dense_matmul """
for adjoint_a in [False, True]:
for adjoint_b in [False, True]:
A_shape = A_inp_shape[::-1] if adjoint_a else A_inp_shape
B_shape = B_inp_shape[::-1] if adjoint_b else B_inp_shape
with tf.Graph().as_default():
A_sp = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=A_shape)
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
if flip:
result = tf.sparse.sparse_dense_matmul(
B, A_sp, adjoint_a=adjoint_b, adjoint_b=adjoint_a
)
else:
result = tf.sparse.sparse_dense_matmul(
A_sp, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b
)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm([B_np], [B.name], result.name)
def test_forward_sparse_dense_matmul():
""" sparse_dense_matmul op test"""
    ###################################################################
    #
    # Creating a SparseTensor requires three inputs, as below:
    #   SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    #
    # The sparse tensor above corresponds to the following dense tensor:
    #   [[1, 0, 0, 0]
    #    [0, 0, 2, 0]
    #    [0, 0, 0, 0]]
    #
    # ------------------------------------------------------------------
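    # For example, the first case below uses indices [[0, 0], [1, 2]], values
    # [4.0, 8.0] and dense_shape [3, 4], i.e. the dense matrix
    #   [[4.0, 0.0, 0.0, 0.0]
    #    [0.0, 0.0, 8.0, 0.0]
    #    [0.0, 0.0, 0.0, 0.0]]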
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [4, 3], "float32")
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 3], [3, 3], "float32")
_test_sparse_dense_matmul([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], "float32")
_test_sparse_dense_matmul([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [7, 9], [9, 5], "float32")
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [4, 3], [3, 4], "float32", True)
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 3], [3, 3], "float32", True)
_test_sparse_dense_matmul(
[[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], "float32", True
)
_test_sparse_dense_matmul(
[[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [9, 5], [7, 9], "float32", True
)
#######################################################################
# StridedSlice
# ------------
def _test_stridedslice(
ip_shape,
begin,
end,
stride,
dtype,
begin_mask=0,
end_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
ellipsis_mask=0,
):
""" One iteration of a Stridedslice """
tf.reset_default_graph()
np_data = np.random.uniform(size=ip_shape).astype(dtype)
with tf.Graph().as_default():
if len(ip_shape) == 0:
in_data = tf.constant(np_data, dtype)
else:
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.strided_slice(
in_data,
begin,
end,
stride,
begin_mask=begin_mask,
end_mask=end_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask,
ellipsis_mask=ellipsis_mask,
name="strided_slice",
)
if len(ip_shape) == 0:
compare_tf_with_tvm(None, "", "strided_slice:0")
else:
compare_tf_with_tvm(np_data, "in_data:0", "strided_slice:0")
def test_forward_stridedslice():
"""test StridedSlice"""
_test_stridedslice([], [0], [0], [1], "float32", new_axis_mask=1)
_test_stridedslice([2], [1], [1], [1], "float32", shrink_axis_mask=1)
_test_stridedslice([2, 1], [0], [1], [1], "float32", shrink_axis_mask=1)
_test_stridedslice([2, 3, 4], [0], [1], [1], "float32", shrink_axis_mask=8)
_test_stridedslice([3, 4, 3], [1, -1, 0], [4, -5, 3], [2, -1, 1], "float32")
_test_stridedslice([3, 4, 3], [1, 0], [4, 3], [2, 1], "float32", ellipsis_mask=8)
_test_stridedslice([3, 4, 3], [1, 0], [4, 2], [2, 1], "float32", ellipsis_mask=2)
_test_stridedslice([3, 4, 5, 3], [1, 0], [4, 2], [2, 1], "float32", ellipsis_mask=2)
_test_stridedslice([3, 4, 5, 3], [1, 0, 1], [4, 2, 2], [2, 1, 1], "float32", ellipsis_mask=2)
_test_stridedslice([3, 4, 3], [1, 1, 0], [4, 4, 2], [2, 1, 1], "float32", new_axis_mask=5)
_test_stridedslice(
[3, 4, 3], [1, 1, 1], [4, 4, 1], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=4
)
_test_stridedslice(
[6, 4, 5], [1, 1, 1], [6, 3, 4], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=5
)
_test_stridedslice(
[3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=4, new_axis_mask=2
)
_test_stridedslice(
[3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=3
)
_test_stridedslice(
[3, 4, 3], [1, 1, 0], [4, 4, 1], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=3
)
_test_stridedslice(
[3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=2
)
_test_stridedslice((3, 4), [1, 0], [4, 4], [1, 1], "float32", shrink_axis_mask=2)
_test_stridedslice(
[3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=2, new_axis_mask=2
)
_test_stridedslice(
[3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=1, new_axis_mask=2
)
_test_stridedslice(
[3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=2, new_axis_mask=1
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6], [0, 0], [2, 3], [1, 1], "float32", shrink_axis_mask=5, new_axis_mask=1
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6],
[0, 0, 1, 2, 1],
[2, 3, 4, 5, 3],
[1, 1, 2, 2, 1],
"float32",
shrink_axis_mask=5,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=8,
end_mask=8,
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6],
[0, 0, 1, 2, 1],
[2, 3, 4, 5, 3],
[1, 1, 2, 2, 1],
"float32",
shrink_axis_mask=8,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=5,
end_mask=5,
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6],
[0, 0, 1, 2, 1],
[2, 3, 4, 5, 3],
[1, 1, 2, 2, 1],
"float32",
shrink_axis_mask=16,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=5,
end_mask=5,
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6],
[1, 2, 0, -3],
[4, 5, 3, 3],
[2, 2, 1, 1],
"float32",
shrink_axis_mask=8,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=5,
end_mask=8,
)
_test_stridedslice(
[1, 13, 13, 3, 2],
[0, 0],
[1, 1],
[1, -1],
"float32",
ellipsis_mask=1,
begin_mask=2,
end_mask=2,
)
#######################################################################
# FloorDiv, RealDiv
# -----------------
def _test_forward_divide(ip_shape, dtype):
np_numer = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
np_denomin = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, ip_shape, name="numer")
denominator = tf.placeholder(dtype, ip_shape, name="denomin")
tf.math.divide(numerator, denominator, name="RealDiv")
compare_tf_with_tvm([np_numer, np_denomin], ["numer:0", "denomin:0"], "RealDiv:0")
def _test_forward_floordiv(ip_shape, dtype):
np_numer = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, ip_shape, name="numer")
tf.math.floordiv(numerator, tf.constant(5, dtype=dtype), name="FloorDiv")
compare_tf_with_tvm([np_numer], ["numer:0"], "FloorDiv:0")
def test_forward_divide():
"""test FloorDiv, RealDiv"""
_test_forward_divide((4,), "int32")
_test_forward_divide((4, 3, 7), "float32")
_test_forward_floordiv((4, 3, 7), "float32")
_test_forward_floordiv((4, 3, 7), "int32")
#######################################################################
# FloorMod
# --------
def _test_forward_floormod(in_shape, if_shape, dtype):
np_numer = np.random.uniform(1, 100, size=in_shape).astype(dtype)
np_factor = np.random.uniform(1, 100, size=if_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, in_shape, name="numer")
factor = tf.placeholder(dtype, if_shape, name="factor")
tf.floormod(numerator, factor, name="FloorMod")
compare_tf_with_tvm([np_numer, np_factor], ["numer:0", "factor:0"], "FloorMod:0")
def test_forward_floormod():
"""test FloorMod"""
_test_forward_floormod((10,), (10,), "float32")
_test_forward_floormod((8, 2), (1,), "float32")
_test_forward_floormod((4, 3, 7), (4, 3, 7), "float32")
_test_forward_floormod((4, 3, 7), (4, 3, 7), "int32")
#######################################################################
# TruncateMod
# -----------
def _test_forward_truncatemod(ip_shape, dtype):
np_data_1 = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
np_data_2 = np.random.uniform(1, 10, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data_1 = tf.placeholder(dtype, ip_shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, ip_shape, name="in_data_2")
tf.truncatemod(in_data_1, in_data_2, name="truncatemod")
compare_tf_with_tvm([np_data_1, np_data_2], ["in_data_1:0", "in_data_2:0"], "truncatemod:0")
def test_forward_truncatemod():
"""test TruncateMod"""
_test_forward_truncatemod((4, 3, 7), "int32")
#######################################################################
# Gather, GatherV2
# --------------------------
def _test_gather(ip_shape, indice_shape, indice_value, axis, dtype):
""" One iteration of a GatherV2 """
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
indices = tf.placeholder("int32", indice_shape, name="indices")
out = tf.gather(in_data, indices, axis=axis)
np_data = np.random.uniform(1, 10, size=ip_shape).astype(dtype)
        def _fill_indices(indice_value):
            # build the int32 indices array fed to the "indices" placeholder
            if isinstance(indice_value, int):
                indices = np.array([indice_value], dtype="int32")
            else:
                indices = np.asarray(indice_value, dtype="int32")
            return indices
np_indices = _fill_indices(indice_value)
compare_tf_with_tvm([np_data, np_indices], ["in_data:0", "indices:0"], out.name)
def test_forward_gather():
"""test Gather/GatherV2 layer"""
_test_gather((4,), (1,), 1, 0, "int32")
_test_gather((4,), (1,), 1, 0, "float32")
_test_gather((1, 4), (1,), [0], 0, "int32")
_test_gather((4,), (1, 2, 2), [[[1, 0], [0, 1]]], 0, "float32")
_test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 0, "int32")
_test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 1, "int32")
_test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 0, "float32")
_test_gather((3, 3, 3), (1, 1, 2), [[[1, 0]]], 0, "int32")
_test_gather((3, 3, 3), (1, 1, 2), [[[1, 0]]], 2, "int32")
_test_gather((4, 3, 5, 6), (1, 4), [[2, 1, 0, 0]], 0, "float32")
#######################################################################
# GatherND
# --------------------------
def _test_gather_nd(ip_shape, indice_value, dtype):
"""test operator GatherNd"""
np_data = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.gather_nd(in_data, indices=indice_value, name="gather_nd")
compare_tf_with_tvm([np_data], ["in_data:0"], "gather_nd:0")
def test_forward_gather_nd():
"""test operator GatherNd"""
_test_gather_nd((2, 2), [[0, 0], [1, 1]], "float32")
_test_gather_nd((2, 2, 2), [[1, 0, 0], [0, 0, 0]], "float32")
_test_gather_nd((4,), [1], "float32")
_test_gather_nd((4,), [1], "int32")
_test_gather_nd((1, 4), [0, 3], "int32")
_test_gather_nd((2, 2), [[[1, 0], [0, 1]]], "int32")
_test_gather_nd((2, 2), [[[1, 0], [0, 1]]], "float32")
_test_gather_nd((3, 3, 3), [[[1, 0]]], "int32")
_test_gather_nd((3, 3, 3), [[[1, 0]]], "int32")
_test_gather_nd((4, 3, 5, 6), [[2, 1, 0, 0]], "float32")
_test_gather_nd((3, 3, 3), [[[2, 1]]], "int32")
#######################################################################
# BiasAdd
# -------
def test_forward_bias_add():
"""test Op BiasAdd"""
    def check_bias_add(lh_shape, rh_shape, dtype):
        tf.reset_default_graph()
        lh_data = np.random.uniform(size=lh_shape).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.nn.bias_add(lft_data, rgt_data, name="BiasAdd")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "BiasAdd:0")
check_bias_add((10, 8, 16, 32), (32,), dtype="int32")
check_bias_add((10, 20), (20,), dtype="float32")
#######################################################################
# Split
# -----
def _test_split(in_shape, axis, num_or_size_splits, dtype):
    """ One iteration of a Split """
    np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
num_split = (
len(num_or_size_splits) if isinstance(num_or_size_splits, list) else num_or_size_splits
)
split = tf.split(in_data, num_or_size_splits, axis=axis)
relu = [tf.nn.relu(i) for i in split]
compare_tf_with_tvm([np_data], ["in_data:0"], [n.name for n in relu])
# and now test together with concat
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
splitted = tf.split(in_data, num_or_size_splits, axis=axis)
concat = tf.concat(splitted, axis)
compare_tf_with_tvm([np_data], "in_data:0", concat.name)
def test_forward_split():
"""test split layer"""
# rank 1
_test_split((3,), 0, 1, "float32")
_test_split((3,), 0, 3, "float32")
_test_split((6,), 0, 3, "float32")
# rank 2
_test_split((6, 2), 0, 3, "float32")
_test_split((2, 6), 1, 6, "float32")
# rank 3
_test_split((6, 2, 4), 0, 2, "int32")
_test_split((2, 6, 4), 1, 3, "float32")
_test_split((2, 4, 6), 2, 1, "float32")
# rank 4
_test_split((6, 1, 3, 5), 0, 3, "float32")
_test_split((1, 6, 3, 5), 1, 3, "float32")
_test_split((1, 3, 6, 5), 2, 3, "float32")
_test_split((1, 3, 5, 6), 3, 3, "float32")
# split along negative axis
_test_split((6, 1, 3, 5), -4, 3, "float32")
_test_split((1, 6, 3, 5), -3, 3, "float32")
_test_split((1, 3, 6, 5), -2, 3, "float32")
_test_split((1, 3, 5, 6), -1, 3, "float32")
# size_splits list
_test_split((6,), 0, [1, 2, 3], "int32")
_test_split((3, 6, 4), -2, [1, 4, 1], "float32")
######################################################################
# TopKV2
# ------
def _test_forward_top_k_v2(in_shape, k):
np_data = np.random.uniform(-100, 100, size=in_shape).astype("float32")
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder("float32", in_shape, name="in_data")
tf.math.top_k(in_data, k, name="TopK")
compare_tf_with_tvm([np_data], ["in_data:0"], "TopK:0")
def test_forward_top_k_v2():
_test_forward_top_k_v2((3,), 1)
_test_forward_top_k_v2((3,), 3)
_test_forward_top_k_v2((3, 5, 7), 3)
#######################################################################
# Unstack
# -------
def _test_unstack(ip_shape, axis, dtype):
np_data = np.random.uniform(-5, 5, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
unstack = tf.unstack(in_data, axis=axis)
compare_tf_with_tvm([np_data], ["in_data:0"], [n.name for n in unstack])
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.stack(tf.unstack(in_data, axis=axis), axis=axis)
compare_tf_with_tvm([np_data], ["in_data:0"], "stack:0")
def test_forward_unstack():
"""test unstack layer"""
_test_unstack((6,), 0, "int32")
_test_unstack((2, 6), 1, "float64")
# negative axis
_test_unstack((1, 4), -1, "int32")
_test_unstack((3, 6, 4), -2, "float32")
#######################################################################
# Tile
# ----
def _test_tile(in_shape, multiples, dtype):
np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.tile(in_data, multiples=multiples, name="tile")
compare_tf_with_tvm([np_data], ["in_data:0"], "tile:0")
def test_forward_tile():
"""test Tile"""
_test_tile((2,), (3,), "int32")
_test_tile((2, 2), (2, 3), "float32")
_test_tile((2, 4, 6), (6, 7, 8), "float64")
#######################################################################
# ClipByValue
# -----------
def _test_forward_clip_by_value(ip_shape, clip_value_min, clip_value_max, dtype):
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.clip_by_value(in_data, clip_value_min, clip_value_max, name="ClipByValue")
np_data = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
compare_tf_with_tvm([np_data], ["in_data:0"], "ClipByValue:0")
def test_forward_clip_by_value():
"""test ClipByValue op"""
if tf.__version__ < LooseVersion("1.9"):
_test_forward_clip_by_value((4,), 0.1, 5.0, "float32")
_test_forward_clip_by_value((4, 4), 1, 5, "int32")
#######################################################################
# Multi Input to graph
# --------------------
def test_forward_multi_input():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.int32, shape=[3, 3], name="in1")
in2 = tf.placeholder(tf.int32, shape=[3, 3], name="in2")
in3 = tf.placeholder(tf.int32, shape=[3, 3], name="in3")
in4 = tf.placeholder(tf.int32, shape=[3, 3], name="in4")
out1 = tf.add(in1, in2, name="out1")
out2 = tf.subtract(in3, in4, name="out2")
out = tf.multiply(out1, out2, name="out")
in_data = np.arange(9, dtype="int32").reshape([3, 3])
compare_tf_with_tvm(
[in_data, in_data, in_data, in_data], ["in1:0", "in2:0", "in3:0", "in4:0"], "out:0"
)
#######################################################################
# Multi Output to Graph
# ---------------------
def test_forward_multi_output():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.int32, shape=[3, 3], name="in1")
in2 = tf.placeholder(tf.int32, shape=[3, 3], name="in2")
in3 = tf.placeholder(tf.int32, shape=[3, 3], name="in3")
in4 = tf.placeholder(tf.int32, shape=[3, 3], name="in4")
out1 = tf.add(in1, in2, name="out1")
out2 = tf.subtract(in3, in4, name="out2")
in_data = np.arange(9, dtype="int32").reshape([3, 3])
in_data = [in_data] * 4
in_name = ["in1:0", "in2:0", "in3:0", "in4:0"]
out_name = ["out1:0", "out2:0"]
        # str.strip(":0") strips a set of characters, not a suffix; split on ":" instead
        out_node = [out.split(":")[0] for out in out_name]
        in_node = [inp.split(":")[0] for inp in in_name]
with tf.Session() as sess:
final_graph_def = tf.graph_util.convert_variables_to_constants(
sess,
sess.graph.as_graph_def(add_shapes=True),
out_node,
)
tf_output = run_tf_graph(sess, in_data, in_name, out_name)
tvm_output = run_tvm_graph(
final_graph_def, in_data, in_node, target="llvm", out_names=out_node, num_output=2
)
for i in range(len(tf_output)):
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
#######################################################################
# Resize Bilinear, Nearest_Neighbor
# ---------------------------------
def _test_resize_bilinear(in_shape, to_shape, align_corners):
""" One iteration of resize bilinear """
data = np.random.uniform(size=in_shape).astype("float32")
shape_data = np.array(to_shape).astype("int32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
shape_data = constant_op.constant(
shape_data, shape=shape_data.shape, dtype=shape_data.dtype
)
tf.image.resize_bilinear(in_data, shape_data, align_corners=align_corners)
compare_tf_with_tvm(data, "Placeholder:0", "ResizeBilinear:0")
def _test_resize_bilinear_from_tensor(in_shape, align_corners):
"""One iteration of resize bilinear with non-constant output shape, requires
value inference to get proper output shape."""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(
shape=[in_shape[0], None, None, in_shape[3]], dtype=data.dtype
)
to_shape = tf.shape(in_data)[1:3]
tf.image.resize_bilinear(in_data, to_shape, align_corners=align_corners)
compare_tf_with_tvm(data, "Placeholder:0", "ResizeBilinear:0")
def _test_resize_nearest_neighbor(in_shape, to_shape):
""" One iteration of resize nearest neighbor """
data = np.random.uniform(size=in_shape).astype("float32")
shape_data = np.array(to_shape).astype("int32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
shape_data = constant_op.constant(
shape_data, shape=shape_data.shape, dtype=shape_data.dtype
)
tf.image.resize_nearest_neighbor(in_data, shape_data, name="resize_nearest_neighbor")
compare_tf_with_tvm(data, "Placeholder:0", "resize_nearest_neighbor:0")
def _test_resize_nearest_neighbor_dynamic_shape(in_shape, scale):
""" One iteration of resize nearest neighbor for graph with dynamic input shape """
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=None, dtype=data.dtype)
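        # shape=None leaves rank and size unknown, so the resize target below can
        # only be computed at runtime from the actual input shape.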
# multiply input shape by scale factor
new_shape = tf.shape(in_data)[1:3] * tf.constant(scale, dtype=tf.int32)
tf.image.resize_nearest_neighbor(in_data, new_shape, name="resize_nearest_neighbor")
compare_tf_with_tvm(data, "Placeholder:0", "resize_nearest_neighbor:0")
def test_forward_resize():
""" Resize Bilinear, Nearest_Neighbor """
# TF default layout is NHWC
_test_resize_bilinear((4, 32, 32, 3), [50, 50], False)
_test_resize_bilinear((6, 32, 32, 3), [20, 20], True)
_test_resize_bilinear_from_tensor((4, 32, 32, 3), False)
_test_resize_bilinear_from_tensor((6, 50, 50, 3), True)
_test_resize_nearest_neighbor((6, 32, 32, 3), [20, 20])
_test_resize_nearest_neighbor_dynamic_shape((1, 16, 16, 3), scale=[2, 2])
#######################################################################
# BroadcastTo
# -----------
def _test_broadcast_to(in_shape, to_shape):
""" One iteration of broadcast_to"""
data = np.random.uniform(size=in_shape).astype("float32")
shape_data = np.array(to_shape).astype("int32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
shape_data = constant_op.constant(
shape_data, shape=shape_data.shape, dtype=shape_data.dtype
)
tf.broadcast_to(in_data, shape_data)
compare_tf_with_tvm(data, "Placeholder:0", "BroadcastTo:0", opt_level=0)
def _test_broadcast_to_from_tensor(in_shape):
""" One iteration of broadcast_to with unknown shape at graph build"""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=[None], dtype=data.dtype)
shape_data = tf.multiply(tf.shape(in_data), 32)
tf.broadcast_to(in_data, shape_data)
compare_tf_with_tvm(data, "Placeholder:0", "BroadcastTo:0")
def test_forward_broadcast_to():
""" Resize Bilinear """
_test_broadcast_to((4, 1, 32, 32), [4, 8, 32, 32])
_test_broadcast_to((6, 32, 32, 1), [6, 32, 32, 16])
    _test_broadcast_to_from_tensor((1,))
#######################################################################
# Fill
# ----
def _test_fill(in_shape):
""" Use the fill op to create a tensor of ones with non-constant shape."""
with tf.Graph().as_default():
tf.ones(shape=in_shape, dtype="float32")
compare_tf_with_tvm(in_shape, [], "ones:0", opt_level=1)
def _test_fill_from_tensor(in_shape):
"""Use the fill op to create a tensor of ones with non-constant shape.
Some extra ops need to be added here to prevent the graph from
being fully constant and folded away."""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(
shape=[in_shape[0], in_shape[1], None, None], dtype=data.dtype
)
x = tf.ones(shape=2 * tf.shape(in_data), dtype=data.dtype)
y = tf.math.add(in_data, tf.reduce_mean(x), name="out1")
compare_tf_with_tvm(data, "Placeholder:0", "out1:0")
def _test_fill_symbolic_inputs(in_shape_data, in_value_data, dtype):
with tf.Graph().as_default():
in_shape = tf.placeholder(shape=[in_shape_data.shape[0]], dtype=in_shape_data.dtype)
in_value = tf.placeholder(shape=(), dtype=dtype)
out = tf.fill(in_shape, in_value)
for mode in ["debug", "vm"]:
compare_tf_with_tvm(
[in_shape_data, in_value_data], [in_shape.name, in_value.name], out.name, mode=mode
)
def test_forward_fill():
""" Resize Bilinear """
    _test_fill((32,))
_test_fill((6, 32, 64, 64))
_test_fill_from_tensor((6, 32, 64, 64))
_test_fill_symbolic_inputs(np.array((2,)), np.int32(9), tf.int32)
_test_fill_symbolic_inputs(np.array((2, 3)), 9, tf.int64)
_test_fill_symbolic_inputs(np.array((2, 3, 4)), np.float32(9.0), tf.float32)
#######################################################################
# Crop to bounding box
# --------------------
def _test_crop(in_shape, off_h, off_w, tar_h, tar_w):
""" Crop to bounding box """
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
tf.image.crop_to_bounding_box(in_data, off_h, off_w, tar_h, tar_w)
compare_tf_with_tvm(data, "Placeholder:0", "crop_to_bounding_box/Slice:0")
def test_forward_crop():
""" Crop to bounding box """
_test_crop((1, 224, 224, 3), 20, 20, 120, 120)
#######################################################################
# CropAndResize
# -------------
def _test_forward_crop_and_resize(
img_shape,
boxes,
box_idx,
crop_size,
extrapolation_value=0.0,
method="bilinear",
dtype="float32",
):
image = np.random.uniform(0, 10, size=img_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = array_ops.placeholder(dtype, image.shape, name="in_data")
tf.image.crop_and_resize(
in_data,
boxes=boxes,
box_ind=box_idx,
crop_size=crop_size,
method=method,
extrapolation_value=extrapolation_value,
name="crop_and_resize",
)
compare_tf_with_tvm([image], ["in_data:0"], "crop_and_resize:0")
def test_forward_crop_and_resize():
""" CropAndResize """
_test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3])
_test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.2)
_test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.2, "nearest")
_test_forward_crop_and_resize([1, 11, 11, 3], [[0.3, 0.3, 1, 1]], [0], [21, 21])
_test_forward_crop_and_resize([1, 41, 41, 3], [[0.2, 0.4, 0.8, 0.8]], [0], [21, 11])
_test_forward_crop_and_resize([1, 100, 100, 3], [[0, 0, 0.9, 0.9]], [0], [30, 30])
_test_forward_crop_and_resize([1, 224, 224, 3], [[0.1, 0.2, 1, 1]], [0], [9, 9])
_test_forward_crop_and_resize([1, 249, 249, 3], [[0, 0, 1, 1]], [0], [9, 9])
_test_forward_crop_and_resize([1, 201, 301, 3], [[0.2, 0.3, 0.7, 0.8]], [0], [51, 51])
_test_forward_crop_and_resize(
img_shape=[10, 11, 11, 3],
boxes=[[0, 0, 0.9, 0.9], [0.2, 0.2, 0.8, 0.8]],
box_idx=[0, 1],
crop_size=[5, 5],
)
_test_forward_crop_and_resize(
img_shape=[20, 576, 576, 3],
boxes=[[0, 0, 1, 1], [0, 0, 0.8, 0.8], [0.1, 0.2, 0.9, 1], [0.2, 0, 1, 1]],
box_idx=[1, 0, 2, 3],
crop_size=[24, 24],
extrapolation_value=0.3,
)
_test_forward_crop_and_resize(
img_shape=[20, 229, 229, 3],
boxes=[[0, 0, 0.9, 0.9], [0.3, 0.3, 1, 1], [0.2, 0.1, 0.7, 0.8], [0, 0, 1, 1]],
box_idx=[3, 0, 2, 1],
crop_size=[58, 58],
extrapolation_value=0.2,
method="nearest",
)
#######################################################################
# Non Max Suppression
# -------------------
def _test_forward_nms_v3(
bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
scores = np.random.uniform(size=score_shape).astype(dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
tf.image.non_max_suppression(
boxes=in_data_1,
scores=in_data_2,
max_output_size=in_data_3,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
name="nms",
)
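    # NMS returns a data-dependent number of indices, so only the Relay VM and the
    # debug interpreter (both of which support dynamic output shapes) are exercised.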
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
"nms/NonMaxSuppressionV3:0",
mode="vm",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
"nms/NonMaxSuppressionV3:0",
mode="debug",
)
def _test_forward_nms_v4(
bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
scores = np.random.uniform(size=score_shape).astype(dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
indices_padded, num_valid = tf.image.non_max_suppression_padded(
boxes=in_data_1,
scores=in_data_2,
max_output_size=in_data_3,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
name="nms",
pad_to_max_output_size=True,
)
num_valid = tf.reshape(num_valid, shape=(-1,))
indices_padded = tf.reshape(indices_padded, shape=(-1,))
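    # Keep only the first num_valid of the padded indices; this also exercises a
    # Slice whose size argument is dynamic.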
tf.slice(indices_padded, tf.constant([0]), num_valid, name="SlicedIndices")
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
["nms/NonMaxSuppressionV4:1", "SlicedIndices:0"],
mode="vm",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
["nms/NonMaxSuppressionV4:1", "SlicedIndices:0"],
mode="debug",
)
def _test_forward_nms_v5(
bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
scores = np.random.uniform(size=score_shape).astype(dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
tf.image.non_max_suppression_with_scores(
boxes=in_data_1,
scores=in_data_2,
max_output_size=in_data_3,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
name="nms",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
["nms/NonMaxSuppressionV5:0", "nms/NonMaxSuppressionV5:1"],
mode="vm",
)
def test_forward_nms():
""" NonMaxSuppressionV3,5 """
for _test_forward_nms in [_test_forward_nms_v3, _test_forward_nms_v5]:
_test_forward_nms((5, 4), (5,), 0.7, 0.5, 5)
_test_forward_nms((20, 4), (20,), 0.5, 0.6, 10)
_test_forward_nms((1000, 4), (1000,), 0.3, 0.7, 1000)
_test_forward_nms((2000, 4), (2000,), 0.4, 0.6, 7)
#######################################################################
# LSTM
# ----
def _test_lstm_cell(batch_size, num_hidden, num_layers, forget_bias, dtype):
""" One iteration of a LSTM cell """
tf.reset_default_graph()
input_size = num_hidden
input_data = np.full((batch_size, input_size), 1.0, dtype=dtype)
in_state_c = np.full((batch_size, num_hidden), 0.1, dtype=dtype)
in_state_h = np.full((batch_size, num_hidden), 0.1, dtype=dtype)
def _get_tensorflow_output():
with tf.Session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)
):
m0 = tf.placeholder(dtype, [batch_size, num_hidden], name="m0")
m1 = tf.placeholder(dtype, [batch_size, num_hidden], name="m1")
x = tf.placeholder(shape=(batch_size, input_size), dtype=dtype, name="input")
g, ((out_m0, out_m1)) = tensorflow.contrib.rnn.LSTMBlockCell(
num_hidden, forget_bias=forget_bias
)(x, (m0, m1))
sess.run([variables.global_variables_initializer()])
res = sess.run(
[g, out_m0, out_m1],
{
x.name: np.array([[1.0, 1.0]]),
m0.name: in_state_c,
m1.name: in_state_h,
},
)
graph_def = sess.graph.as_graph_def(add_shapes=True)
final_graph_def = graph_util.convert_variables_to_constants(
sess, graph_def, ["root/lstm_cell/LSTMBlockCell"]
)
return final_graph_def, res
graph_def, tf_out = _get_tensorflow_output()
tvm_output = run_tvm_graph(
graph_def,
[input_data, in_state_c, in_state_h],
["root/input", "root/m0", "root/m1"],
num_output=7,
)
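    # The frozen LSTMBlockCell node has seven outputs: index 6 is the cell output h
    # (TF's g above) and index 1 is the new cell state c (TF's out_m0).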
assert isinstance(tvm_output, list)
tvm.testing.assert_allclose(tf_out[0], tvm_output[6], rtol=1e-3, atol=1e-3)
tvm.testing.assert_allclose(tf_out[1], tvm_output[1], rtol=1e-3, atol=1e-3)
def test_forward_lstm():
"""test LSTM block cell"""
if package_version.parse(tf.VERSION) < package_version.parse("2.0.0"):
# in 2.0, tf.contrib.rnn.LSTMBlockCell is removed
_test_lstm_cell(1, 2, 1, 0.5, "float32")
#######################################################################
# Pack
# ----
def _test_pack(axis, shape, **kwargs):
a = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
b = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
with tf.Graph().as_default():
tf_a = array_ops.placeholder(shape=shape, dtype="float32", name="pl_a")
tf_b = array_ops.placeholder(shape=shape, dtype="float32", name="pl_b")
tf_c = tf.stack([tf_a, tf_b], axis=axis, **kwargs)
assert tf_c.op.op_def.name == "Pack", "tf.stack() is expected to produce 'Pack' operation"
compare_tf_with_tvm([a, b], ["pl_a:0", "pl_b:0"], "stack:0")
def test_forward_pack():
for axis in range(-3, 3):
_test_pack(axis, [3, 2, 1])
for axis in range(-1, 1):
_test_pack(axis, [3])
_test_pack(0, [])
#######################################################################
# Unpack
# ------
def _test_forward_unpack(in_shape, axis, dtype):
"""test operator Unpack"""
np_data = np.random.uniform(-100, 100, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.unstack(in_data, axis=axis, name="Unpack")
compare_tf_with_tvm([np_data], ["in_data:0"], "Unpack:0")
def test_forward_unpack():
_test_forward_unpack((3,), 0, "int32")
_test_forward_unpack((3,), -1, "int16")
_test_forward_unpack((21, 23, 3), 2, "float32")
#######################################################################
# Range
# -----
def test_forward_range():
"""test operator Range"""
for dtype in [tf.int32, tf.int64]:
tf.reset_default_graph()
with tf.Graph().as_default():
tf.range(1, 18, 3, name="range", dtype=dtype)
compare_tf_with_tvm([], [], "range:0")
"""test type assignment for operator Range"""
tf.reset_default_graph()
with tf.Graph().as_default():
tf.range(1, 256 + 1, 1, dtype=tf.float32)
compare_tf_with_tvm([], [], "range:0")
#######################################################################
# Pad
# ---
def _test_pad(input_shape, paddings, mode, **kwargs):
""" One iteration of pad operation with given shape"""
x = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape)
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
pad_values = constant_op.constant(paddings)
pad = tf.pad(in_data, paddings=pad_values, mode=mode, **kwargs)
if mode == "CONSTANT":
if "constant_values" in kwargs:
out_name = "PadV2:0"
else:
out_name = "Pad:0"
else:
out_name = "MirrorPad:0"
compare_tf_with_tvm(x, "Placeholder:0", out_name)
def test_forward_pad():
""" Pad """
_test_pad((2, 3), [[1, 1], [2, 2]], mode="CONSTANT")
_test_pad((2, 3), [[1, 1], [2, 2]], mode="CONSTANT", constant_values=1.0)
_test_pad((2, 3), [[1, 1], [2, 2]], mode="SYMMETRIC")
_test_pad((2, 3), [[1, 1], [2, 2]], mode="REFLECT")
#######################################################################
# Logical operators
# --------------------
def test_logical_and():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
out = tf.logical_and(in1, in2, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0")
def test_logical_or():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
out = tf.logical_or(in1, in2, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0")
def test_logical_xor():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
out = tf.logical_xor(in1, in2, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0")
def test_logical_not():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
out = tf.logical_not(in1, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm(in_data1, "in1:0", "out:0")
def test_forward_logical():
test_logical_and()
test_logical_or()
test_logical_xor()
test_logical_not()
#######################################################################
# Where, Select
# -------------
def test_forward_where():
""" Where: return elements depending on conditions"""
with tf.Graph().as_default():
with tf.Session() as sess:
input1 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input1")
input2 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input2")
mask = input1 > input2
tf.where(mask, input1 + 1, input2 * 2)
            # use int32 to match the placeholder dtypes above
            in_data1 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("int32")
            in_data2 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("int32")
compare_tf_with_tvm([in_data1, in_data2], ["input1:0", "input2:0"], "Select:0")
#######################################################################
# Inception V3
# ------------
def test_forward_inception_v3():
"""test inception V3 model"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"InceptionV3/inception_v3_2016_08_28_frozen-with_shapes.pb"
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(1, 299, 299, 3)).astype("float32")
with tf.Session() as sess:
tf_output = run_tf_graph(sess, data, "input:0", "InceptionV3/Predictions/Reshape_1:0")
tvm_output = run_tvm_graph(graph_def, data, "input")
tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5)
#######################################################################
# Inception V1
# ------------
def test_forward_inception_v1():
"""test inception V1 model"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload("InceptionV1/classify_image_graph_def-with_shapes.pb")
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
# Build an image from random data.
from PIL import Image
from tvm.contrib import utils
img_array = np.random.uniform(size=(1, 600, 600, 3)).astype("uint8")
        img = Image.frombuffer("RGB", (600, 600), img_array.tobytes(), "raw", "RGB", 0, 1)
temp = utils.tempdir()
img_path = temp.relpath("tf-test.jpg")
img.save(img_path)
import os.path
if not tf.gfile.Exists(os.path.join(img_path)):
tf.logging.fatal("File does not exist %s", img_path)
data = tf.gfile.FastGFile(os.path.join(img_path), "rb").read()
temp.remove()
# Extract tensorflow decoded image frame for tvm input
with tf.Session() as sess:
tvm_data = run_tf_graph(sess, data, "DecodeJpeg/contents:0", "DecodeJpeg:0")
with tf.Session() as sess:
tf_output = run_tf_graph(sess, data, "DecodeJpeg/contents:0", "softmax:0")
tvm_output = run_tvm_graph(graph_def, tvm_data, "DecodeJpeg/contents")
tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5)
#######################################################################
# Mobilenet
# ---------
def test_forward_mobilenet():
"""test mobilenet model"""
# MobilenetV2
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz",
"mobilenet_v2_1.4_224_frozen.pb",
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
out_node = "MobilenetV2/Predictions/Reshape_1"
with tf.Session() as sess:
# Add shapes to the graph.
graph_def = tf_testing.AddShapesToGraphDef(sess, out_node)
tf_output = run_tf_graph(sess, data, "input:0", out_node + ":0")
tvm_output = run_tvm_graph(graph_def, data, "input")
tvm.testing.assert_allclose(
np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
)
#######################################################################
# ResnetV2
# --------
@tvm.testing.requires_gpu
def test_forward_resnetv2():
"""test resnet model"""
if is_gpu_available():
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"ResnetV2/resnet-20180601_resnet_v2_imagenet-shapes.pb"
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(128, 224, 224, 3)).astype("float32")
out_node = "ArgMax"
with tf.Session() as sess:
tf_output = run_tf_graph(sess, data, "input_tensor:0", out_node + ":0")
for device in ["llvm", "cuda"]:
ctx = tvm.context(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
tvm_output = run_tvm_graph(
graph_def, data, "input_tensor", len(tf_output), target=device
)
tvm.testing.assert_allclose(
np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
)
#######################################################################
# SSD
# ---
def _test_ssd_impl():
"""Test SSD with backbone MobileNet V1"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"object_detection/ssd_mobilenet_v1_ppn_shared_"
"box_predictor_300x300_coco14_sync_2018_07_03.pb"
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(0.0, 255.0, size=(1, 512, 512, 3)).astype("uint8")
in_node = "image_tensor"
out_node = ["detection_boxes", "detection_scores", "detection_classes"]
with tf.Session() as sess:
tf_output = run_tf_graph(
sess, data, "{}:0".format(in_node), ["{}:0".format(oname) for oname in out_node]
)
# TODO(kevinthesun): enable gpu test when VM heterogeneous execution is ready.
for device in ["llvm"]:
ctx = tvm.context(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
tvm_output = run_tvm_graph(
graph_def,
data,
in_node,
len(out_node),
target=device,
layout="NCHW",
out_names=out_node,
mode="vm",
disabled_pass=["FoldScaleAxis"],
serialize=True,
)
for i in range(len(out_node)):
tvm.testing.assert_allclose(tvm_output[i], tf_output[i], rtol=1e-3, atol=1e-3)
def test_forward_ssd():
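    # Importing the SSD graph triggers deeply recursive Relay passes, so run the
    # test in a worker thread with a 100 MB stack instead of the default one.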
run_thread = threading.Thread(target=_test_ssd_impl, args=())
old_stack_size = threading.stack_size(100 * 1024 * 1024)
run_thread.start()
run_thread.join()
threading.stack_size(old_stack_size)
#######################################################################
# Placeholder
# -----------
def test_forward_placeholder():
"""test a simple pb with Placeholder node in the end of GraphDef"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload("Custom/placeholder.pb")
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
out_node = "mul"
with tf.Session() as sess:
# Add shapes to the graph.
graph_def = tf_testing.AddShapesToGraphDef(sess, out_node)
tf_output = run_tf_graph(sess, data, "Placeholder:0", out_node + ":0")
tvm_output = run_tvm_graph(graph_def, data, "Placeholder")
tvm.testing.assert_allclose(
np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
)
#######################################################################
# PTB
# ---
try:
# Load contrib for running ptb model in tf version before 2.0
import tensorflow.contrib
except:
pass
def test_forward_ptb():
"""test ptb model"""
config = tf_testing.get_config()
num_steps = config.num_steps
num_hidden = config.hidden_size
num_layers = config.num_layers
batch_size = config.batch_size
vocab_size = config.vocab_size
out_sample_shape = (batch_size, vocab_size)
out_state_shape = (batch_size, num_hidden)
# Sample input
inpt = "we have no useful information on"
cnt_sample = 20
def _pretty_print(items, is_char_model, id2word):
if not is_char_model:
return " ".join([id2word[x] for x in items])
else:
return "".join([id2word[x] for x in items]).replace("_", " ")
def _get_tvm_graph_module(graph_def):
        # Cell inputs 'c' and 'h' consist of all the layers' values
shape_dict = {"Model/Placeholder": (batch_size, num_steps)}
mod, params = relay.frontend.from_tensorflow(
graph_def,
shape=shape_dict,
outputs=[
"Model/Softmax:0",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:1",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:6",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:1",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:6",
],
)
target = "llvm"
with tvm.transform.PassContext(opt_level=0):
graph, lib, params = relay.build(mod, target, params=params)
from tvm.contrib import graph_runtime
ctx = tvm.cpu(0)
return params, graph_runtime.create(graph, lib, ctx)
def _do_tvm_sample(model, data, in_states, params, num_samples):
"""Sampled from the model"""
samples = []
state = in_states
sample = None
def _get_sample(data, state):
input_data = np.full((batch_size, num_steps), data, dtype="int32")
model.set_input("Model/Placeholder", tvm.nd.array(input_data.astype("int32")))
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros",
tvm.nd.array(state[0].astype("float32")),
)
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros_1",
tvm.nd.array(state[1].astype("float32")),
)
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros",
tvm.nd.array(state[2].astype("float32")),
)
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros_1",
tvm.nd.array(state[3].astype("float32")),
)
model.set_input(**params)
model.run()
tvm_output = model.get_output(0, tvm.nd.empty(out_sample_shape, "float32")).asnumpy()
state_output = []
for i in range(4):
state_output.append(
model.get_output(i + 1, tvm.nd.empty(out_state_shape, "float32")).asnumpy()
)
sample = tf_testing.pick_from_weight(tvm_output[0])
return sample, state_output
for x in data:
sample, state = _get_sample(x, state)
if sample is not None:
samples.append(sample)
else:
samples.append(0)
k = 1
while k < num_samples:
sample, state = _get_sample(samples[-1], state)
samples.append(sample)
k += 1
return samples, state
with tf.Graph().as_default():
word_to_id, id_to_word, graph_def = tf_testing.get_workload_ptb()
vocab_size = len(word_to_id)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
sess = tf.Session()
# TVM graph module creation
params, m = _get_tvm_graph_module(graph_def)
        # Create 10 predicted statements of 20 words each
cnt_stm = 0
while cnt_stm < 10:
cnt_stm += 1
in_state = [np.full((batch_size, num_hidden), 0, dtype="float32")] * 2 * num_layers
seed_for_sample = inpt.split()
tvm_samples, tvm_state = _do_tvm_sample(
m, [word_to_id[word] for word in seed_for_sample], in_state, params, cnt_sample
)
tvm_sample_str = _pretty_print(tvm_samples, False, id_to_word)
tf_samples, tf_state = tf_testing.do_tf_sample(
sess, [word_to_id[word] for word in seed_for_sample], in_state, cnt_sample
)
tf_sample_str = _pretty_print(tf_samples, False, id_to_word)
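            # Seed the next statement with the words TVM just produced so that both
            # models keep consuming identical input text.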
inpt = tvm_sample_str
tvm.testing.assert_allclose(tf_samples, tvm_samples, rtol=1e-5, atol=1e-5)
assert tvm_sample_str == tf_sample_str
#######################################################################
# LRN (Local Response Normalization)
# ----------------------------------
def _test_lrn(ishape, size, axis, bias, alpha, beta):
""" testing local response normalization """
    lrn_depth_radius = size // 2  # depth_radius is an integer attribute
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype, name="lrn0_data")
nn_ops.local_response_normalization(
in1, name="lrn", depth_radius=lrn_depth_radius, bias=bias, alpha=alpha, beta=beta
)
compare_tf_with_tvm(inp_array, "lrn0_data:0", "lrn:0")
def test_forward_lrn():
_test_lrn((1, 3, 20, 20), 3, 1, 1.0, 1.0, 0.5)
#######################################################################
# l2_normalize
# ------------
def _test_l2_normalize(ishape, eps, axis):
""" testing l2 normalize (uses max, sum, square, sqrt frontend operators)"""
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
nn.l2_normalize(in1, axis=axis, epsilon=eps, name=None, dim=None)
compare_tf_with_tvm(inp_array, "Placeholder:0", "l2_normalize:0")
def test_forward_l2_normalize():
_test_l2_normalize((1, 3, 20, 20), 0.001, (0,))
#######################################################################
# transpose
# ---------
def _test_forward_transpose(ishape, axes=None):
data = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="transpose_data")
if axes is None:
tf.transpose(in1)
else:
tf.transpose(in1, perm=axes)
compare_tf_with_tvm(data, "transpose_data:0", "transpose:0")
def _test_forward_transpose_axes_input(ishape, axes):
data = np.random.uniform(size=ishape).astype(np.float32)
axes_np = np.array(axes).astype(np.int32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="transpose_data")
const1 = tf.constant(axes_np, dtype=tf.int32)
# make axes an input to tf.transpose, but not an input to the graph,
# so it can be extracted with infer_value_simulated
axes = tf.reverse(const1, axis=[-1])
tf.transpose(in1, axes)
compare_tf_with_tvm([data], ["transpose_data:0"], "transpose:0")
def test_forward_transpose():
_test_forward_transpose((2, 3, 4), (1, 2, 0))
_test_forward_transpose((2, 3, 4))
_test_forward_transpose((7, 8, 8, 10))
_test_forward_transpose((2, 3, 4), (1, 2, 0))
_test_forward_transpose((2, 3, 4), (0, 1, 2))
_test_forward_transpose((2, 3, 4, 5), (3, 0, 1, 2))
    _test_forward_transpose_axes_input((2, 3, 4), (1, 2, 0))
    _test_forward_transpose_axes_input((2, 3, 4, 5), (3, 0, 1, 2))
def _test_forward_slice_operation_input(input_value, begin_value, size_value):
input_data = np.array(input_value, dtype=np.float32)
with tf.Graph().as_default():
input_tensor = tf.placeholder(shape=input_data.shape, dtype=input_data.dtype, name="input")
tf.slice(input_tensor, begin_value, size_value, name="slice_output")
compare_tf_with_tvm([input_data], ["input:0"], "slice_output:0")
def test_forward_slice():
_test_forward_slice_operation_input([1, 1], [0], [2])
_test_forward_slice_operation_input([0, 1, 2, 3], [3], [-1])
_test_forward_slice_operation_input(
[[0, 1, 2, 3], [4, 5, 6, 7]], begin_value=[0, 1], size_value=[-1, -1]
)
def test_forward_ceil():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.ceil(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Ceil:0")
def test_forward_floor():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.floor(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Floor:0")
def test_forward_relu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
for mode in ["graph_runtime", "vm"]:
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.relu(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Relu:0", mode=mode)
def test_forward_leaky_relu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
for mode in ["graph_runtime", "vm"]:
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.leaky_relu(in1, alpha=0.4)
compare_tf_with_tvm(inp_array, "Placeholder:0", "LeakyRelu:0", mode=mode)
def test_forward_elu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.elu(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Elu:0")
def test_forward_selu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.selu(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Selu:0")
def test_forward_tanh():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.tanh(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Tanh:0")
#######################################################################
# Softmax
# -------
def test_forward_softmax():
"""test operator Softmax """
def check_softmax(in_shape, axis, dtype):
np_data = np.random.uniform(-100, 100, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.nn.softmax(in_data, axis=axis, name="Softmax")
compare_tf_with_tvm([np_data], ["in_data:0"], "Softmax:0")
check_softmax((2, 3, 5), 2, "float32")
check_softmax((2, 3, 5), -1, "float32")
#######################################################################
# Tensor
# ------
def test_forward_round():
"""test Round"""
np_data = np.random.uniform(-10, 10, size=(5, 7)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7), name="in_data")
tf.round(in_data, name="round")
compare_tf_with_tvm([np_data], ["in_data:0"], "round:0")
def test_forward_abs():
"""test operator Abs"""
np_data = np.random.uniform(1, 100, size=(9, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (9, 11), name="in_data")
tf.math.abs(in_data, name="abs")
compare_tf_with_tvm([np_data], ["in_data:0"], "abs:0")
def _test_forward_zeros_like(in_shape, dtype):
np_data = np.random.uniform(-10, 10, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.zeros_like(in_data, name="zeros_like")
compare_tf_with_tvm([np_data], ["in_data:0"], "zeros_like:0")
def test_forward_zeros_like():
    # zeros_like with these dtypes requires TF 1.2 or newer
    if LooseVersion(tf.__version__) >= LooseVersion("1.2"):
_test_forward_zeros_like((2, 3), "int32")
_test_forward_zeros_like((2, 3, 5), "int8")
_test_forward_zeros_like((2, 3, 5, 7), "uint16")
_test_forward_zeros_like((2, 3, 11), "float32")
_test_forward_zeros_like((2, 3, 11), "float64")
def test_forward_squared_difference():
ishape = (1, 3, 10, 14)
inp_array_a = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
inp_array_b = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array_a.shape, dtype=inp_array_a.dtype, name="in1")
in2 = tf.placeholder(shape=inp_array_b.shape, dtype=inp_array_b.dtype, name="in2")
out = tf.math.squared_difference(in1, in2)
compare_tf_with_tvm([inp_array_a, inp_array_b], [in1.name, in2.name], out.name)
def _test_forward_reverse_v2(in_shape, axis, dtype):
np_data = np.random.uniform(-10, 10, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.reverse(in_data, axis=[axis], name="reverse")
compare_tf_with_tvm([np_data], ["in_data:0"], "reverse:0")
def test_forward_reverse_v2():
"""test ReverseV2"""
_test_forward_reverse_v2((2, 3), 0, "int32")
_test_forward_reverse_v2((2, 3, 5), 2, "float32")
_test_forward_reverse_v2((2, 3, 5, 7), 1, "float32")
_test_forward_reverse_v2((2, 3, 5), -1, "float64")
_test_forward_reverse_v2((2, 3, 5), -3, "float64")
def test_forward_sign():
"""test Sign"""
np_data = np.random.uniform(-10, 10, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.sign(in_data, name="sign")
compare_tf_with_tvm([np_data], ["in_data:0"], "sign:0")
def test_forward_square():
"""test operator Square """
np_data = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
tf.square(in_data, name="square")
compare_tf_with_tvm([np_data], ["in_data:0"], "square:0")
def test_forward_pow_exp():
"""test Pow and Exp """
np_in1 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32)
np_in2 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in1 = tf.placeholder(tf.float32, (5, 7, 11), name="in1")
in2 = tf.placeholder(tf.float32, (5, 7, 11), name="in2")
out1 = tf.pow(in1, in2, name="pow")
out = tf.exp(in1, name="exp")
compare_tf_with_tvm([np_in1, np_in2], ["in1:0", "in2:0"], "pow:0")
compare_tf_with_tvm([np_in1], ["in1:0"], "exp:0")
def test_forward_unary():
def _test_forward_unary(op, a_min=1, a_max=5, dtype=np.float32):
"""test unary operators"""
np_data = np.random.uniform(a_min, a_max, size=(2, 3, 5)).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, (2, 3, 5), name="in_data")
out = op(in_data)
compare_tf_with_tvm([np_data], ["in_data:0"], out.name)
_test_forward_unary(tf.acos, -1, 1)
_test_forward_unary(tf.asin, -1, 1)
_test_forward_unary(tf.atanh, -1, 1)
_test_forward_unary(tf.sinh)
_test_forward_unary(tf.cosh)
_test_forward_unary(tf.acosh)
_test_forward_unary(tf.asinh)
_test_forward_unary(tf.atan)
_test_forward_unary(tf.sin)
_test_forward_unary(tf.cos)
_test_forward_unary(tf.tan)
_test_forward_unary(tf.tanh)
_test_forward_unary(tf.erf)
_test_forward_unary(tf.log)
_test_forward_unary(tf.log1p)
def test_forward_atan2():
"""test operator tan """
tf.disable_eager_execution()
np_data_1 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
np_data_2 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
in_data_1 = tf.placeholder(tf.float32, (2, 3, 5), name="in_data_1")
in_data_2 = tf.placeholder(tf.float32, (2, 3, 5), name="in_data_2")
tf.atan2(in_data_1, in_data_2, name="atan2")
compare_tf_with_tvm([np_data_1, np_data_2], ["in_data_1:0", "in_data_2:0"], "atan2:0")
def test_forward_expm1():
"""test operator expm1 """
def _test_forward_expm1(shape):
tf.disable_eager_execution()
np_data = np.random.uniform(1, 10, size=shape).astype(np.float32)
tf.reset_default_graph()
in_data = tf.placeholder(tf.float32, shape, name="in_data")
tf.expm1(in_data, name="expm1")
compare_tf_with_tvm([np_data], ["in_data:0"], "expm1:0")
_test_forward_expm1([1, 100])
_test_forward_expm1([1, 10, 10])
_test_forward_expm1([2, 5, 2, 5])
def test_forward_softsign():
"""test operator softsign """
def _test_forward_softsign(shape):
tf.disable_eager_execution()
np_data = np.random.uniform(1, 100, size=shape).astype(np.float32)
tf.reset_default_graph()
in_data = tf.placeholder(tf.float32, shape, name="in_data")
tf.nn.softsign(in_data, name="softsign")
compare_tf_with_tvm([np_data], ["in_data:0"], "softsign:0")
_test_forward_softsign([1, 100])
_test_forward_softsign([1, 10, 10])
_test_forward_softsign([2, 5, 2, 5])
def test_forward_rint():
"""test operator rint """
def _test_forward_rint(shape):
tf.disable_eager_execution()
np_data = np.random.uniform(-100, 100, size=shape).astype(np.float32)
tf.reset_default_graph()
in_data = tf.placeholder(tf.float32, shape, name="in_data")
tf.math.rint(in_data, name="rint")
compare_tf_with_tvm([np_data], ["in_data:0"], "rint:0")
_test_forward_rint([100])
_test_forward_rint([1, 100])
_test_forward_rint([1, 10, 10])
_test_forward_rint([2, 5, 2, 5])
def test_forward_negative():
"""test tf operator Neg """
np_data = np.random.uniform(-100, 255, size=(224, 224, 3)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (224, 224, 3), name="in_data")
tf.negative(in_data, name="negative")
compare_tf_with_tvm([np_data], ["in_data:0"], "negative:0")
def test_forward_log_softmax():
"""test operator LogSoftmax"""
np_data = np.random.uniform(1, 100, size=(9, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (9, 11), name="in_data")
tf.math.log_softmax(in_data, name="LogSoftmax")
compare_tf_with_tvm([np_data], ["in_data:0"], "LogSoftmax:0")
def test_forward_softplus():
"""test operator Softplus"""
np_data = np.random.uniform(1, 10, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
tf.nn.softplus(in_data, name="softplus")
compare_tf_with_tvm([np_data], ["in_data:0"], "softplus:0")
def test_forward_rsqrt():
"""test Rsqrt """
np_data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.rsqrt(in_data, name="rsqrt")
compare_tf_with_tvm([np_data], ["in_data:0"], "rsqrt:0")
def test_forward_sqrt():
"""test Sqrt """
np_data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.sqrt(in_data, name="sqrt")
compare_tf_with_tvm([np_data], ["in_data:0"], "sqrt:0")
def _test_forward_right_shift(in_shape, dtype):
"""test operator RightShift"""
lh_data = np.random.randint(1, 3, size=in_shape).astype(dtype)
rh_data = np.random.randint(1, 8, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, in_shape, name="lft_data")
rgt_data = tf.placeholder(dtype, in_shape, name="rgt_data")
tf.bitwise.right_shift(lft_data, rgt_data, name="RightShift")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "RightShift:0")
def test_forward_right_shift():
_test_forward_right_shift((7,), "int32")
_test_forward_right_shift((3, 11), "int16")
def _test_forward_left_shift(in_shape, dtype):
"""test operator LeftShift"""
lh_data = np.random.randint(100, 1000000, size=in_shape).astype(dtype)
rh_data = np.random.randint(1, 3, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, in_shape, name="lft_data")
rgt_data = tf.placeholder(dtype, in_shape, name="rgt_data")
tf.bitwise.left_shift(lft_data, rgt_data, name="LeftShift")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "LeftShift:0")
def test_forward_left_shift():
_test_forward_left_shift((10,), "int32")
_test_forward_left_shift((224, 224, 3), "int16")
#######################################################################
# Mean
# ----
def test_forward_mean():
def check_mean(ishape, **kwargs):
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.keras.backend.mean(in1, **kwargs)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Mean:0", no_gpu=True)
check_mean((10, 8, 16, 32))
check_mean((10, 8, 16, 32), axis=(2, 3))
check_mean((10, 8, 16, 32), axis=(1, 2), keepdims=True)
#######################################################################
# Size
# ----
def test_forward_size():
def check_size(ishape):
np_input = np.random.uniform(size=ishape).astype(np.float32)
# if all dimensions are constant, TF will optimize away size operator into constant
tf_input_shape = list(np_input.shape)
tf_input_shape[0] = None
with tf.Graph().as_default():
input = tf.placeholder(shape=tf_input_shape, dtype=np_input.dtype, name="input")
tf.size(input, name="size")
compare_tf_with_tvm([np_input], ["input:0"], "size:0")
check_size((10, 8, 16, 32))
check_size((10,))
#######################################################################
# All, Any, Max, Min, Prod, variance, std, logsumexp, euclidean_norm
# ------------------------------------------------------------------
def test_forward_reduce():
def _check_op(tf_op, ishape, axis, keepdims, dtype="float32"):
tf.reset_default_graph()
if dtype == "bool":
np_data = np.random.choice([True, False], size=ishape)
else:
np_data = np.random.uniform(size=ishape).astype(dtype)
if tf_op == tf.math.reduce_prod:
axis = 1
np_data = np_data.reshape(1, -1)
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, name="in_data")
            reduce_op = tf_op(in_data, axis=axis, keepdims=keepdims, name="reduce_op")
compare_tf_with_tvm([np_data], ["in_data:0"], reduce_op.name)
def _test_math_op(op, dtypes=["int32", "float32"]):
for dtype in dtypes:
_check_op(op, (3, 10), axis=(-1), keepdims=False, dtype=dtype)
_check_op(op, (8, 16, 32), axis=(-1), keepdims=False, dtype=dtype)
_check_op(op, (1, 8, 8, 3), axis=(2, 3), keepdims=True, dtype=dtype)
_check_op(op, (2, 3, 10, 10), axis=(1, 2), keepdims=True, dtype=dtype)
_test_math_op(tf.math.reduce_all, dtypes=["bool"])
_test_math_op(tf.math.reduce_any, dtypes=["bool"])
_test_math_op(tf.math.reduce_max)
_test_math_op(tf.math.reduce_min)
_test_math_op(tf.math.reduce_prod)
_test_math_op(tf.math.reduce_variance, dtypes=["float32"])
_test_math_op(tf.math.reduce_std, dtypes=["float32"])
_test_math_op(tf.math.reduce_logsumexp, dtypes=["float32"])
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
_test_math_op(tf.math.reduce_euclidean_norm)
#######################################################################
# Relational operators
# --------------------
def _test_forward_rel_op(data, func):
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data[0].shape, dtype=data[0].dtype, name="in1")
in2 = tf.placeholder(shape=data[1].shape, dtype=data[1].dtype, name="in2")
op = func(in1, in2, name="op")
out = tf.cast(op, tf.int32, name="out1")
compare_tf_with_tvm([data[0], data[1]], ["in1:0", "in2:0"], "out1:0")
def test_forward_rel_ops():
t1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
t2 = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]])
_test_forward_rel_op([t1, t2], math_ops.less)
_test_forward_rel_op([t1, t2], math_ops.greater)
_test_forward_rel_op([t1, t2], math_ops.less_equal)
_test_forward_rel_op([t1, t2], math_ops.greater_equal)
_test_forward_rel_op([t1, t2], math_ops.equal)
_test_forward_rel_op([t1, t2], math_ops.not_equal)
#######################################################################
# ExpandDims
# ----------
def _test_forward_expand_dims(data, axis):
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="in1")
out = tf.expand_dims(in1, axis)
compare_tf_with_tvm([data], [in1.name], out.name)
def test_forward_expand_dims():
_test_forward_expand_dims(np.int32(1), 0)
_test_forward_expand_dims(np.array([1]), 0)
_test_forward_expand_dims(np.array([1]), -1)
_test_forward_expand_dims(np.array([[1], [2]]), 0)
_test_forward_expand_dims(np.array([[1], [2]]), 1)
_test_forward_expand_dims(np.array([[1], [2]]), -1)
#######################################################################
# Maximum, Minimum
# ----------------
def test_forward_maximum():
"""test Op Maximum"""
def check_maximum(lh_shape, rh_shape, dtype):
tf.reset_default_graph()
lh_data = np.random.uniform(size=lh_shape).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.math.maximum(lft_data, rgt_data, name="maximum")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "maximum:0")
check_maximum((10, 8, 16, 32), (1,), dtype="int32")
check_maximum((10, 8, 16, 32), (10, 8, 16, 32), dtype="float32")
def test_forward_minimum():
"""test Op Minimum"""
def check_minimum(lh_shape, rh_shape, dtype):
tf.reset_default_graph()
lh_data = np.random.uniform(size=lh_shape).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.math.minimum(lft_data, rgt_data, name="minimum")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "minimum:0")
check_minimum((10, 8, 16, 32), (1,), dtype="int32")
check_minimum((10, 8, 16, 32), (10, 8, 16, 32), dtype="float32")
#######################################################################
# PlaceholderWithDefault
# ----------------------
def test_placeholder():
with tf.Graph().as_default():
in_data1 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32)
var1 = tf.Variable(in_data1, name="in1")
var2 = array_ops.placeholder_with_default(var1, None, name="place1")
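        # place1 falls back to var1 when nothing is fed; the comparison below feeds
        # it explicitly with in_data1.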
in_data2 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32)
place1 = array_ops.placeholder(shape=in_data1.shape, dtype=in_data1.dtype, name="in2")
out1 = tf.math.add(var1, var2, name="out1")
out2 = tf.math.add(out1, place1, name="out2")
compare_tf_with_tvm(
[in_data1, in_data2], ["place1:0", "in2:0"], "out2:0", init_global_variables=True
)
#######################################################################
# OneHot
# ----------------------
def _test_forward_one_hot(indices_shape, depth, on_value, off_value, axis, out_dtype):
inp_array1 = np.random.randint(0, 5, size=indices_shape)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array1.shape, dtype=inp_array1.dtype)
out = tf.one_hot(in1, depth, on_value, off_value, axis, dtype=out_dtype)
compare_tf_with_tvm(inp_array1, in1.name, out.name)
def test_forward_one_hot():
_test_forward_one_hot((3,), 3, 1, 0, -1, "int32")
_test_forward_one_hot((3,), 3, 1.0, 0.0, -1, "float32")
_test_forward_one_hot((2, 2), 5, 2, -2, 0, "int32")
_test_forward_one_hot((2, 2), 5, 0.5, -0.5, 1, "float32")
_test_forward_one_hot((3, 2, 4, 5), 6, 1, 0, 1, "int32")
_test_forward_one_hot((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")
#######################################################################
# AddN
# ----------------------
def _test_forward_add_n(inputs):
tf.reset_default_graph()
with tf.Graph().as_default():
temp = []
for each in inputs:
temp.append(tf.placeholder(shape=each.shape, dtype=each.dtype))
output = tf.add_n(temp)
compare_tf_with_tvm([each for each in inputs], [each.name for each in temp], output.name)
def test_forward_add_n():
x = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
y = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
z = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
m, n, o = x.astype(np.float32), y.astype(np.float32), z.astype(np.float32)
in0 = x
in1 = [x, y]
in2 = (x, y, z)
in3 = m
in4 = [m, n]
in5 = (m, n, o)
_test_forward_add_n(in0)
_test_forward_add_n(in1)
_test_forward_add_n(in2)
_test_forward_add_n(in3)
_test_forward_add_n(in4)
_test_forward_add_n(in5)
#######################################################################
# Sharing params case
# ----------------------
def test_sharing_node():
"""Test the sharing params case."""
np_data = np.random.uniform(size=(2, 2, 2)).astype("float32")
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, shape=(2, 2, 2), name="in_data")
axis = tf.constant([-1], dtype=tf.int32, name="axis")
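        # Both reductions consume the same axis constant, so the importer must handle
        # a node that is shared by multiple consumers.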
mean0 = tf.reduce_mean(in_data, axis=axis, keepdims=False, name="mean0")
mean1 = tf.reduce_mean(in_data, axis=axis, keepdims=False, name="mean1")
out = tf.add(mean0, mean1, name="out")
compare_tf_with_tvm([np_data], ["in_data:0"], "out:0")
#######################################################################
# Unravel Index
# ----------------------
def _test_forward_unravel_index(inputs):
tf.reset_default_graph()
with tf.Graph().as_default():
temp = []
for each in inputs:
temp.append(tf.placeholder(shape=each.shape, dtype=each.dtype))
output = tf.unravel_index(temp[0], temp[1])
compare_tf_with_tvm([each for each in inputs], [each.name for each in temp], output.name)
def _test_forward_unravel_index_scalar(x, y, dtype="int32"):
tf.reset_default_graph()
with tf.Graph().as_default():
indices_1 = constant_op.constant(x, dtype=dtype)
dims_1 = constant_op.constant(y, dtype=dtype)
out_1 = array_ops.unravel_index(indices_1, dims_1)
compare_tf_with_tvm([], [], out_1.name)
def test_forward_unravel_index():
x = np.array([0, 1, 2, 3])
y = np.array([2, 2])
_test_forward_unravel_index([x, y])
x = np.array([0, 1, 2, 5])
y = np.array([2, 3])
_test_forward_unravel_index([x, y])
x = np.array([0, 1, 2, 5])
y = np.array([6])
_test_forward_unravel_index([x, y])
x = np.array([102, 300, 16])
y = np.array([10, 10, 9, 6])
_test_forward_unravel_index([x, y])
x = np.array([100])
y = np.array([10, 10, 9, 6])
_test_forward_unravel_index([x, y])
# Test scalar input
_test_forward_unravel_index_scalar(13, [1, 4, 5, 2])
#######################################################################
# Dilation2d
# ----------------------
def _test_dilation2d(tensor_in_sizes, filter_in_sizes, strides, dilations, padding):
""" One iteration of dilation2d with given shapes and attributes """
total_size_1 = np.prod(tensor_in_sizes)
total_size_2 = np.prod(filter_in_sizes)
# Initializes the input tensor with array containing incrementing
# numbers from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
nn_ops.dilation2d(in_data, in_filter, strides=strides, rates=dilations, padding=padding)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"Dilation2D:0",
no_gpu=True,
)
def test_forward_dilation():
_test_dilation2d([1, 18, 18, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "VALID")
_test_dilation2d([1, 15, 15, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "SAME")
_test_dilation2d([1, 5, 5, 1], [2, 2, 1], [1, 1, 1, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 5, 5, 1], [3, 3, 1], [1, 1, 1, 1], [1, 2, 2, 1], "VALID")
_test_dilation2d([1, 5, 5, 3], [3, 3, 3], [1, 1, 1, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 28, 28, 3], [5, 5, 3], [1, 2, 2, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 224, 224, 10], [8, 8, 10], [1, 1, 1, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 18, 18, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "SAME")
_test_dilation2d([1, 15, 15, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "VALID")
_test_dilation2d([1, 5, 5, 1], [7, 2, 1], [1, 3, 1, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 5, 5, 1], [3, 4, 1], [1, 2, 1, 1], [1, 2, 2, 1], "SAME")
_test_dilation2d([1, 5, 5, 3], [3, 3, 3], [1, 1, 4, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 28, 28, 3], [5, 6, 3], [1, 1, 2, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 224, 224, 10], [8, 8, 10], [1, 3, 1, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 3, 3, 1], [2, 2, 1], [1, 1, 1, 1], [1, 2, 2, 1], "SAME")
_test_dilation2d([1, 3, 3, 1], [2, 2, 1], [1, 1, 1, 1], [1, 1, 2, 1], "VALID")
def _test_identityn(data_np_list):
with tf.Graph().as_default():
data_tensors = []
data_tensors_name = []
for index, data_np in enumerate(data_np_list):
tensor_name = f"data_{index}"
data_tensors_name.append(tensor_name + ":0")
data_tensors.append(
tf.placeholder(shape=data_np.shape, dtype=str(data_np.dtype), name=tensor_name)
)
output = tf.identity_n(data_tensors)
output_names = [out.name for out in output]
compare_tf_with_tvm(
data_np_list,
data_tensors_name,
output_names,
)
@pytest.mark.parametrize(
"data_np_list",
[
(
[
np.array([[1, 1], [0, 3], [0, 1], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4, 5], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
]
),
(
[
np.array([[1, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
np.array([True, False, True]),
]
),
(
[
np.array([]),
np.array([[]]),
]
),
],
)
def test_forward_identityn(data_np_list):
_test_identityn(data_np_list)
#######################################################################
# Sparse To Dense
# ---------------
def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape):
with tf.Graph().as_default():
indices = tf.placeholder(
shape=sparse_indices.shape, dtype=str(sparse_indices.dtype), name="indices"
)
values = tf.placeholder(
shape=sparse_values.shape, dtype=str(sparse_values.dtype), name="values"
)
oshape = tf.constant(output_shape, shape=output_shape.shape, dtype=str(output_shape.dtype))
        if default_value is None:
output = tf.sparse_to_dense(indices, oshape, values)
compare_tf_with_tvm(
[sparse_indices, sparse_values], ["indices:0", "values:0"], output.name
)
else:
dv = tf.placeholder(shape=(), dtype=str(default_value.dtype), name="default_value")
output = tf.sparse_to_dense(indices, oshape, values, dv)
compare_tf_with_tvm(
[sparse_indices, sparse_values, default_value],
["indices:0", "values:0", "default_value:0"],
output.name,
)
def test_forward_sparse_to_dense():
# scalar
_test_sparse_to_dense(
sparse_indices=np.int32(1),
sparse_values=np.int32(3),
default_value=np.int32(0),
output_shape=np.array([5]).astype("int32"),
)
# vector
_test_sparse_to_dense(
sparse_indices=np.array([0, 1, 4]).astype("int32"),
sparse_values=np.array([3, 3, 3]).astype("int32"),
default_value=np.int32(0),
output_shape=np.array([5]).astype("int32"),
)
# vector nXd
_test_sparse_to_dense(
sparse_indices=np.array([[0, 0], [1, 2]]).astype("int32"),
sparse_values=np.array([1, 2]).astype("int32"),
default_value=np.int32(0),
output_shape=np.array([3, 4]).astype("int32"),
)
_test_sparse_to_dense(
sparse_indices=np.array([[0, 0, 0], [1, 2, 3]]).astype("int32"),
sparse_values=np.array([1, 2]).astype("int32"),
default_value=np.int32(4),
output_shape=np.array([2, 3, 4]).astype("int32"),
)
# floats
_test_sparse_to_dense(
sparse_indices=np.array([0, 1, 4]).astype("int32"),
sparse_values=np.array([3.1, 3.1, 3.1]).astype("float32"),
default_value=np.float32(3.5),
output_shape=np.array([5]).astype("int32"),
)
# default value not specified
_test_sparse_to_dense(
sparse_indices=np.array([0, 1, 4]).astype("int32"),
sparse_values=np.array([3.1, 3.1, 3.1]).astype("float32"),
default_value=None,
output_shape=np.array([5]).astype("int32"),
)
#######################################################################
# infinity ops
# ------------
def _verify_infiniteness_ops(tf_op, name):
"""test operator infinity ops"""
# Only float types are allowed in Tensorflow for isfinite and isinf
# float16 is failing on cuda
tf_dtypes = ["float32", "float64"]
for tf_dtype in tf_dtypes:
shape = (8, 8)
data = np.random.uniform(size=shape).astype(tf_dtype)
        data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.inf
data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.nan
tf.reset_default_graph()
in_data = tf.placeholder(tf_dtype, shape, name="in_data")
tf_op(in_data, name=name)
compare_tf_with_tvm([data], ["in_data:0"], "{}:0".format(name))
def test_forward_isinf():
_verify_infiniteness_ops(tf.is_inf, "isinf")
def test_forward_isfinite():
_verify_infiniteness_ops(tf.is_finite, "isfinite")
def test_forward_isnan():
_verify_infiniteness_ops(tf.is_nan, "isnan")
def _test_spop_placeholder_without_shape_info():
with tf.Graph().as_default():
@function.Defun(*[tf.int32] * 2)
def Forward(x, y):
print(x.name)
print(y.name)
b = tf.add(x, y)
return b
pl1 = tf.placeholder(tf.int32, name="pl1")
pl2 = tf.placeholder(tf.int32, name="pl2")
pl3 = tf.placeholder(tf.int32, name="pl3")
data = np.array([[-1, 1], [2, -2]], dtype=np.int32)
data2 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
data3 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
z1 = gen_functional_ops.StatefulPartitionedCall(args=[pl1, pl2], Tout=[tf.int32], f=Forward)
z2 = z1 + pl3
compare_tf_with_tvm(
[data, data2, data3],
["pl1:0", "pl2:0", "pl3:0"],
["StatefulPartitionedCall:0", z2.name],
mode="vm",
init_global_variables=True,
)
def _test_spop_placeholder_with_shape_and_default_value():
with tf.Graph().as_default():
data = np.ones([1], dtype=int).astype(np.int32)
dataVar = tf.Variable(data, shape=data.shape)
pl1 = array_ops.placeholder_with_default(dataVar, shape=data.shape, name="pl1")
tpl = tf.convert_to_tensor(pl1, dtype=tf.int32)
@function.Defun(*[tf.int32])
def pl_with_default(pl):
return tf.expand_dims(tf.multiply(pl, pl), 0)
z = gen_functional_ops.StatefulPartitionedCall(
args=[tpl], Tout=[tf.int32], f=pl_with_default
)
compare_tf_with_tvm(
data, ["pl1:0"], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_placeholder_numpy_arange_feed():
with tf.Graph().as_default():
t1 = tf.placeholder(tf.int32, (3, 3, 3), "t1")
t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
t2 = tf.placeholder(tf.int32, (3, 3, 3), "t2")
t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
@tf.function
def add(x, y):
return tf.add(x, y, "add_t1_t2")
t3 = add(t1, t2)
compare_tf_with_tvm(
[t1_data, t2_data], ["t1:0", "t2:0"], [t3.name], mode="vm", init_global_variables=True
)
def _test_spop_placeholder_numpy_array_feed():
with tf.Graph().as_default():
t1_data = np.array([[-1, 1, 3], [2, -2, 4], [2, -3, 14]], dtype=np.int32)
t2_data = np.array([[-2, 1, 2], [12, -2, 14], [12, -3, 4]], dtype=np.int32)
t1 = tf.placeholder(tf.int32, name="t1")
t2 = tf.placeholder(tf.int32, name="t2")
@tf.function
def add(x, y):
return tf.add(x, y, "add_t1_t2")
t3 = add(t1, t2)
compare_tf_with_tvm(
[t1_data, t2_data], ["t1:0", "t2:0"], [t3.name], mode="vm", init_global_variables=True
)
def _test_spop_function_invocation_basic():
with tf.Graph().as_default():
def fun1(a):
return tf.multiply(a, a)
def fun2(b):
return tf.multiply(b, 10)
@tf.function
def fun3(x, y):
x = fun2(x)
y = fun1(y)
z = tf.add(x, y)
return z
t3 = fun3(tf.constant(10.5), tf.constant(20.4))
compare_tf_with_tvm([], [], [t3.name], mode="vm", init_global_variables=True)
def _test_spop_function_invocation_nested():
with tf.Graph().as_default():
t1 = tf.placeholder(tf.int32, (3, 3, 3), name="t1")
t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
t2 = tf.placeholder(tf.int32, name="t2")
t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
@tf.function
def myfunc(x, y):
return tf.add(x, y, "myfunc")
@tf.function
def myfunc2(x, y):
z = myfunc(x, y)
l = myfunc(z, y)
m = myfunc(l, z)
return tf.add(l, m, "myfunc2")
res1 = myfunc(t1, t2)
res2 = myfunc2(res1, t1)
compare_tf_with_tvm(
[t1_data, t2_data], ["t1:0", "t2:0"], [res2.name], mode="vm", init_global_variables=True
)
def _test_spop_function_invocation_no_autograph():
with tf.Graph().as_default():
@tf.function(autograph=False)
def fun1(a):
return tf.multiply(a, a)
@tf.function(autograph=False)
def fun2(b):
return tf.multiply(b, 10)
@tf.function
def fun3(x, y):
x = fun2(x)
y = fun1(y)
z = tf.add(x, y)
return z
t3 = fun3(tf.constant(10.5), tf.constant(20.4))
compare_tf_with_tvm([], [], [t3.name], mode="vm", init_global_variables=True)
def _test_spop_function_invocation_defun():
with tf.Graph().as_default():
def fun1(a):
return tf.multiply(a, a)
def fun2(b):
return tf.multiply(b, b)
@function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
def fun3(x, y):
x = fun2(x)
y = fun1(y)
z = tf.add(x, y)
return z
op = gen_functional_ops.StatefulPartitionedCall(
args=[tf.constant(10.5), tf.constant(20.4)],
Tout=[dtypes.float32],
f=fun3,
name="SpopFnInvocation",
)
compare_tf_with_tvm([], [], "SpopFnInvocation:0", mode="vm", init_global_variables=True)
def _test_spop_arithmetic():
with tf.Graph().as_default():
@function.Defun(*[dtypes.int32] * 3)
def arithmetic(m, x, c):
z = tf.add(tf.multiply(m, x), c)
return z
m = tf.constant(10)
x = tf.constant(20)
c = tf.constant(2)
spopFn = gen_functional_ops.StatefulPartitionedCall(
args=[m, x, c], Tout=[tf.int32], f=arithmetic
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_control_flow():
with tf.Graph().as_default():
@function.Defun(*[dtypes.float32] * 2)
def Body1(x, y):
with ops.device("/job:localhost/replica:0/task:0/device:CPU:0"):
z = math_ops.multiply(x, y)
i = 0
while i < 10:
i += 1
if i == 5:
continue
z = math_ops.multiply(x, y * i)
return z
op = gen_functional_ops.StatefulPartitionedCall(
args=[constant_op.constant(32.0), constant_op.constant(100.0)],
Tout=[dtypes.float32],
f=Body1,
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_variables():
with tf.Graph().as_default():
const1 = tf.constant(10)
const2 = tf.constant(20)
var1 = tf.Variable(const1, dtype=tf.int32)
var2 = tf.Variable(const2, dtype=tf.int32)
@function.Defun(tf.int32, tf.int32)
def Forward(x, y):
return tf.multiply(x, y)
z = gen_functional_ops.StatefulPartitionedCall(
args=[var1, var2], Tout=[tf.int32], f=Forward
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", init_global_variables=True, mode="vm"
)
def _test_spop_constants():
with tf.Graph().as_default():
@function.Defun(*[dtypes.int32] * 2)
def constantsFn(x, y):
vv = tf.constant([2, 3, 4], name="vv")
z = tf.add(vv + x, y)
return z
a = tf.constant(20000, name="a")
b = tf.constant(40000, name="b")
spopFn = gen_functional_ops.StatefulPartitionedCall(
args=[a, b], Tout=[tf.int32], f=constantsFn
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_stateful():
# This test case is to test that TVM rejects any TF stateful operations
# (including Resource Variables) except StatefulPartitionedCall/PartitionedCall
# (as these two operators can still be used as container graphs to execute
    # "stateless" operations internally).
tf.reset_default_graph()
with tf.Graph().as_default():
@tf.function
def FunctionWithStatefulOp_One(i):
b = tf.random.uniform(shape=[2, 4], maxval=10, dtype=tf.float32, seed=10)
y = tf.multiply(b, i)
return y
@tf.function
def FunctionWithStatefulOp(m, n):
a = tf.random.uniform(shape=[2, 4], maxval=10, dtype=tf.float32, seed=10)
x = tf.multiply(a, m)
y = FunctionWithStatefulOp_One(n)
z = tf.multiply(x, y)
return z
op = FunctionWithStatefulOp(constant_op.constant(1.0), constant_op.constant(2.0))
with pytest.raises(Exception) as execinfo:
compare_tf_with_tvm([], [], [op.name], init_global_variables=True, mode="vm")
assert execinfo.value.args[0].startswith("The following operators are not implemented")
def _test_spop_device_assignment():
# This test case is to test that TVM rejects inconsistent device assignment
# while using StatefulPartitionedCall/PartitionedCall operators which in case of TVM will
# be used as container graphs to internally execute "stateless" operations.
tf.reset_default_graph()
with tf.Graph().as_default():
def fun1(a):
with ops.device("/GPU:0"):
return tf.multiply(a, a)
def fun2(b):
with ops.device("/job:localhost/replica:0/task:0/device:CPU:1"):
return tf.multiply(b, b)
@function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
def fun3(x, y):
with ops.device("/CPU:0"):
x = fun2(x)
with ops.device("/job:localhost/replica:0/task:0/device:CPU:2"):
y = fun1(y)
with ops.device("/job:localhost/replica:0/task:0/device:CPU:3"):
z = tf.add(x, y)
return z
op = gen_functional_ops.StatefulPartitionedCall(
args=[tf.constant(10.5), tf.constant(20.4)], Tout=[dtypes.float32], f=fun3
)
with pytest.raises(Exception) as execinfo:
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
assert execinfo.value.args[0].startswith("Found inconsistent Device assignment")
def _test_spop_resource_variables():
# This test case is to test that TVM rejects any graph containing
# resource variables with StatefulPartitionedOp.
tf.reset_default_graph()
with tf.Graph().as_default():
const1 = tf.constant(10)
const2 = tf.constant(20)
var1 = tf.Variable(const1, dtype=tf.int32, use_resource=True)
var2 = tf.Variable(const2, dtype=tf.int32, use_resource=True)
@tf.function
def resourceVariablesTest(x, y):
return tf.multiply(x, y)
op = resourceVariablesTest(var1, var2)
with pytest.raises(Exception) as execinfo:
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
assert execinfo.value.args[0].startswith("Graph is not frozen." " Provide a frozen graph")
def test_forward_spop():
_test_spop_stateful()
_test_spop_device_assignment()
_test_spop_resource_variables()
# Placeholder test cases
_test_spop_placeholder_without_shape_info()
_test_spop_placeholder_with_shape_and_default_value()
_test_spop_placeholder_numpy_arange_feed()
_test_spop_placeholder_numpy_array_feed()
# Function Invocation test cases
_test_spop_function_invocation_basic()
_test_spop_function_invocation_nested()
_test_spop_function_invocation_no_autograph()
_test_spop_function_invocation_defun()
# Test cases for various other TF constructs
_test_spop_arithmetic()
_test_spop_control_flow()
_test_spop_variables()
_test_spop_constants()
#######################################################################
# Dynamic input shape
# -------------------
def test_forward_dynamic_input_shape():
tf.reset_default_graph()
with tf.Graph().as_default():
data = tf.placeholder(tf.float32, name="data", shape=(None,))
out = data + 1
np_data = np.random.uniform(size=(2,)).astype("float32")
out_name = "add"
with tf.Session() as sess:
graph_def = tf_testing.AddShapesToGraphDef(sess, out_name)
tf_output = run_tf_graph(sess, np_data, "data:0", ["{}:0".format(out_name)])
# TODO(kevinthesun): enable gpu test when VM heterogeneous execution is ready.
for device in ["llvm"]:
ctx = tvm.context(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
tvm_output = run_tvm_graph(
graph_def,
np_data,
["data"],
1,
target=device,
layout="NCHW",
out_names=[out_name],
mode="vm",
ignore_in_shape=True,
)
tvm.testing.assert_allclose(tvm_output[0], tf_output[0], rtol=1e-5, atol=1e-5)
def test_forward_dynamic_rnn_lstmblockcell():
if package_version.parse(tf.VERSION) >= package_version.parse("2.0.0"):
return
total_series_length = 50000
truncated_backprop_length = 15
state_size = 4
echo_step = 3
batch_size = 5
num_layers = 5
def generateData():
x = np.array(np.random.choice(2, total_series_length, p=[0.5, 0.5]))
y = np.roll(x, echo_step)
y[0:echo_step] = 0
x = x.reshape((batch_size, -1)) # The first index changing slowest, subseries as rows
y = y.reshape((batch_size, -1))
return (x, y)
batchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length])
init_state = tf.placeholder(tf.float32, [num_layers, 2, batch_size, state_size])
state_per_layer_list = tf.unstack(init_state, axis=0)
rnn_tuple_state = tuple(
[
tf.nn.rnn_cell.LSTMStateTuple(
state_per_layer_list[idx][0], state_per_layer_list[idx][1]
)
for idx in range(num_layers)
]
)
# Forward passes
def lstm_cell():
return tensorflow.contrib.rnn.LSTMBlockCell(state_size)
cell = tf.nn.rnn_cell.MultiRNNCell(
[lstm_cell() for _ in range(num_layers)], state_is_tuple=True
)
states_series, current_state = tf.nn.dynamic_rnn(
cell, tf.expand_dims(batchX_placeholder, -1), initial_state=rnn_tuple_state
)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
x, y = generateData()
_current_state = np.zeros((num_layers, 2, batch_size, state_size))
start_idx = 0
end_idx = start_idx + truncated_backprop_length
batchX = x[:, start_idx:end_idx]
# Save current state for TVM
current_state_tvm = _current_state
_current_state, _states_series = sess.run(
[current_state, states_series],
feed_dict={batchX_placeholder: batchX, init_state: _current_state},
)
# Organize results and corresponding names
tf_output = [_states_series]
for c in _current_state:
tf_output.append(c.c)
tf_output.append(c.h)
name = [states_series.name.split(":")[0]]
for t in current_state:
name.append(t.c.name.split(":")[0])
name.append(t.h.name.split(":")[0])
graph_def = sess.graph.as_graph_def(add_shapes=True)
final_graph_def = graph_util.convert_variables_to_constants(sess, graph_def, name)
tvm_output = run_tvm_graph(
final_graph_def,
[batchX.astype("float32"), current_state_tvm.astype("float32")],
["Placeholder", "Placeholder_1"],
out_names=name,
num_output=len(name),
mode="vm",
disabled_pass=["FoldScaleAxis"],
)
# Compare result
for i in range(len(tf_output)):
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
pytest.main([__file__])
|
build_imagenet_data.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Copyright 2021 Giovanni Dispoto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
  image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around 2.5 hours on an HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--train_directory', default = '/tmp/',
help = 'Training data directory')
parser.add_argument('--validation_directory', default = '/tmp/',
help = 'Validation data directory')
parser.add_argument('--output_directory', default = '/tmp/',
help = 'Output data directory')
parser.add_argument('--train_shards', default = 1024,
help = 'Number of shards in training TFRecord files.')
parser.add_argument('--validation_shards', default = 128,
help = 'Number of shards in validation TFRecord files.')
parser.add_argument('--num_threads', default = 8, type = int,
help = 'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
parser.add_argument('--labels_file',
default = 'imagenet_lsvrc_2015_synsets.txt',
help = 'Labels file')
# This file contains the mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
parser.add_argument('--imagenet_metadata_file',
default = 'imagenet_metadata.txt',
help = 'ImageNet metadata file')
# This file is the output of process_bounding_boxes.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
parser.add_argument('--bounding_box_file',
default = './imagenet_2012_bounding_boxes.csv',
help = 'Bounding box file')
args = parser.parse_args()
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
bbox: list of bounding boxes; each box is a list of integers
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace.encode('utf-8')),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset.encode('utf-8')),
'image/class/text': _bytes_feature(human.encode('utf-8')),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format.encode('utf-8')),
'image/filename': _bytes_feature(os.path.basename(filename).encode('utf-8')),
'image/encoded': _bytes_feature(image_buffer)}))
return example
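# --- Hedged sketch (not part of the original script) ------------------------
# Records produced by _convert_to_example() above can be read back with the
# TF2 tf.io API. The feature keys mirror the proto fields documented in the
# module docstring; the helper name and the chosen subset of keys are
# illustrative assumptions, not something the conversion pipeline itself uses.
def _example_parse_record(serialized_example):
    """Hypothetical reader for one serialized Example written by this script."""
    features = {
        'image/encoded': tf.io.FixedLenFeature([], tf.string),
        'image/class/label': tf.io.FixedLenFeature([], tf.int64),
        'image/class/synset': tf.io.FixedLenFeature([], tf.string),
        'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32),
    }
    parsed = tf.io.parse_single_example(serialized_example, features)
    image = tf.io.decode_jpeg(parsed['image/encoded'], channels=3)
    return image, parsed['image/class/label']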
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
#self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = "PNG"
#image = tf.image.decode_png(image_data, channels=3)
#self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = "CMYK"
#image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
#self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = "JPEG"
#self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
print("")
    def png_to_jpeg(self, image_data):
        # Decode the PNG and re-encode it as RGB JPEG bytes (TF2 eager mode),
        # so callers keep receiving an encoded JPEG buffer.
        image = tf.image.decode_png(image_data, channels=3)
        return tf.image.encode_jpeg(image, format='rgb', quality=100).numpy()
    def cmyk_to_rgb(self, image_data):
        # Decode the CMYK JPEG and re-encode it as RGB JPEG bytes (TF2 eager mode).
        image = tf.image.decode_jpeg(image_data, channels=0)
        return tf.image.encode_jpeg(image, format='rgb', quality=100).numpy()
    def decode_jpeg(self, image_data):
        image = tf.image.decode_jpeg(image_data, channels=3)
        assert len(image.shape) == 3
        assert image.shape[2] == 3
        return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
image_data = tf.io.gfile.GFile(filename, 'rb').read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique batch index within [0, len(ranges)).
    ranges: list of pairs of integers specifying the range of each batch to
      analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d.tfrecord' % (name, shard, num_shards)
output_file = os.path.join(args.output_directory, output_filename)
writer = tf.io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
args = parser.parse_args()
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
    spacing = np.linspace(0, len(filenames), args.num_threads + 1).astype(int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (args.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
        # Use a distinct name so the module-level `args` namespace isn't shadowed.
        thread_args = (coder, thread_index, ranges, name, filenames,
                       synsets, labels, humans, bboxes, num_shards)
        t = threading.Thread(target=_process_image_files_batch, args=thread_args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [
l.strip() for l in tf.io.gfile.GFile(labels_file, 'r').readlines()
]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.io.gfile.glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
List of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, args.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.io.gfile.GFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
    Note that there might exist multiple bounding box annotations associated
with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
lines = tf.io.gfile.GFile(bounding_box_file, 'r').readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
def main():
assert not args.train_shards % args.num_threads, (
'Please make the args.num_threads commensurate with args.train_shards')
assert not args.validation_shards % args.num_threads, (
'Please make the args.num_threads commensurate with '
'args.validation_shards')
print('Saving results to %s' % args.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(args.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(args.bounding_box_file)
# Run it!
_process_dataset('validation', args.validation_directory,
args.validation_shards, synset_to_human, image_to_bboxes)
_process_dataset('train', args.train_directory, args.train_shards,
synset_to_human, image_to_bboxes)
if __name__ == '__main__':
main()
|
scheduler.py
|
import time
import multiprocessing
from loguru import logger
from proxypool.components.getter import Getter
from proxypool.components.tester import Tester
from proxypool.components.server import app
from proxypool.setting import TESTER_ENABLED, GETTER_ENABLED, SERVER_ENABLED, TESTER_CYCLE, GETTER_CYCLE, API_HOST, \
API_PORT, API_THREADED, LOGGER_ENABLED, LOGGER_FILE, LOGGER_LEVEL, LOGGER_FORMAT, LOGGER_ROTATION, LOGGER_RETENTION
tester_process, getter_process, server_process = None, None, None
class Scheduler:
"""
    Scheduler module.
"""
def run_getter(self, cycle=GETTER_CYCLE):
"""
        Fetch proxies.
"""
getter = Getter()
loop = 0
while True:
logger.debug(f'getter loop {loop} start...')
getter.run()
loop += 1
time.sleep(cycle)
def run_tester(self, cycle=TESTER_CYCLE):
"""
        Test proxies.
"""
tester = Tester()
loop = 0
while True:
logger.debug(f'tester loop {loop} start...')
tester.run()
loop += 1
time.sleep(cycle)
def run_server(self):
"""
        Start the API server.
"""
app.run(host=API_HOST, port=API_PORT, threaded=API_THREADED)
def run(self):
global tester_process, getter_process, server_process
try:
logger.info('starting proxypool...')
if TESTER_ENABLED:
tester_process = multiprocessing.Process(target=self.run_tester)
logger.info(f'starting tester, pid {tester_process.pid}...')
tester_process.start()
if GETTER_ENABLED:
getter_process = multiprocessing.Process(target=self.run_getter)
                logger.info(f'starting getter, pid {getter_process.pid}...')
getter_process.start()
if SERVER_ENABLED:
server_process = multiprocessing.Process(target=self.run_server)
                logger.info(f'starting server, pid {server_process.pid}...')
server_process.start()
if TESTER_ENABLED:
tester_process.join()
if GETTER_ENABLED:
getter_process.join()
if SERVER_ENABLED:
server_process.join()
except KeyboardInterrupt:
logger.info('received keyboard interrupt signal')
tester_process.terminate()
getter_process.terminate()
server_process.terminate()
finally:
if TESTER_ENABLED:
tester_process.join()
if GETTER_ENABLED:
getter_process.join()
if SERVER_ENABLED:
server_process.join()
logger.info(f'tester is {"alive" if tester_process.is_alive() else "dead"}')
logger.info(f'getter is {"alive" if getter_process.is_alive() else "dead"}')
logger.info(f'server is {"alive" if server_process.is_alive() else "dead"}')
            logger.info('proxypool terminated')
if __name__ == '__main__':
if LOGGER_ENABLED:
logger.add(LOGGER_FILE, level=LOGGER_LEVEL, format=LOGGER_FORMAT, retention=LOGGER_RETENTION,
rotation=LOGGER_ROTATION)
scheduler = Scheduler()
scheduler.run()
|
server.py
|
# Code adapted from optional lecture
import socket
import threading
from queue import Queue
from Database.DatabaseManager import *
HOST = "localhost" # put your IP address here if playing on multiple computers
PORT = 50011
BACKLOG = 3
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST,PORT))
server.listen(BACKLOG)
setOffline()
print("Looking for connections!")
def handleClient(client, serverChannel, cID, clientele):
client.setblocking(1)
msg = ""
while True:
try:
msg += client.recv(10).decode("UTF-8")
command = msg.split("\n")
while (len(command) > 1):
readyMsg = command[0]
msg = "\n".join(command[1:])
serverChannel.put(str(cID) + " " + readyMsg)
command = msg.split("\n")
        except:
            # connection closed or errored out; stop handling this client
            return
def serverThread(clientele, serverChannel):
while True:
msg = serverChannel.get(True, None)
msgList = msg.split(" ")
senderID = msgList[0]
instruction = msgList[1]
details = " ".join(msgList[2:])
if (details != ""):
for cID in clientele:
if cID != senderID:
sendMsg = instruction + " " + senderID + " " + details + "\n"
clientele[cID].send(sendMsg.encode())
serverChannel.task_done()
clientele = dict()
playerNum = 0
serverChannel = Queue(100)
threading.Thread(target = serverThread, args = (clientele, serverChannel)).start()
mics = ["1", "2"]
while True:
client, address = server.accept()
# myID is the key to the client in the clientele dictionary
myID = playerNum
myMic = mics[playerNum]
toRemove = []
for cID in clientele:
try:
print (repr(cID), repr(playerNum))
clientele[cID].send(("newPlayer %s\n" % myID).encode())
client.send(("newPlayer %s\n" % cID).encode())
except:
toRemove.append(cID)
continue
for removal in toRemove:
del clientele[removal]
clientele[myID] = client
client.send(("myIDis %s \n" % myID).encode())
client.send(("myMicIs %s \n" % myMic).encode())
    print("connection received from %s with mic %s" % (myID, myMic))
threading.Thread(target = handleClient, args =
(client ,serverChannel, myID, clientele)).start()
playerNum += 1
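# --- Hedged client sketch (not part of the original server) -----------------
# The wire protocol is newline-delimited text: a client sends
# "<instruction> <details>\n"; the server prefixes the sender ID and
# rebroadcasts "<instruction> <senderID> <details>\n" to every other client.
# The function below is illustrative only and is never called by the server.
def _example_client():
    """Hypothetical client for manual testing against HOST/PORT above."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((HOST, PORT))
    greeting = s.recv(1024).decode("UTF-8")  # e.g. "myIDis 0 \nmyMicIs 1 \n"
    print(greeting)
    s.send("chat hello\n".encode())  # other clients receive "chat <myID> hello\n"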
|
__init__.py
|
import redis
import sys
import time
from hoocron_plugin import HoocronHookBase
from threading import Thread
from queue import Queue, Empty
class RedisHook(HoocronHookBase):
def __init__(self):
self.th = None
self.redis_path = None
self.redis_list = None
self.redis = None # Redis connection
self.sleep = 1
self.jobs = list()
self.q = None
self.execute_q = None
def add_argument_group(self, parser):
def_redis_path = 'localhost:6379'
def_redis_list = 'hook'
g = parser.add_argument_group('Redis hook')
g.add_argument('--redis', metavar='JOB', default=list(), action='append')
g.add_argument('--redis-db', metavar='DB', type=int, default=0, help='redis db number')
g.add_argument('--redis-path', metavar='SOCKET', default=def_redis_path, help=f'Path to redis def: {def_redis_path}')
        g.add_argument('--redis-list', metavar='KEY', default=def_redis_list, help=f'name of redis key to call hooks def: {def_redis_list}')
def configure(self, jobs, args):
self.db = args.redis_db
self.redis_path = args.redis_path
self.redis_list = args.redis_list
self.sleep = args.sleep
for name in args.redis:
try:
j = jobs[name]
except KeyError:
print("ERROR: Not found job", name)
sys.exit(1)
self.jobs.append(j)
def empty(self):
return not bool(self.jobs)
def thread(self):
while True:
try:
cmd = self.q.get_nowait()
if cmd == 'stop':
print("redis hook stopped")
return
except Empty:
pass
request = self.redis.lpop(self.redis_list)
if request is None:
time.sleep(self.sleep)
else:
for j in self.jobs:
if j.name == request:
self.execute_q.put((j, 'redis'))
def running(self):
return bool(self.th)
def start(self, execute_q):
if self.jobs:
self.redis = self.get_redis()
self.q = Queue()
self.execute_q = execute_q
self.th = Thread(target = self.thread, args = () )
self.th.start()
print(f"started redis thread, watch list {self.redis_list!r}")
else:
            print("Warning: not starting the redis hook because no jobs are assigned")
def stop(self):
print("stop redis hook")
self.q.put('stop')
def get_redis(self):
if self.redis_path.startswith('/'):
path = self.redis_path
host = None
port = None
print(f"connect to redis over unix socket: {path}")
else:
host, port = self.redis_path.split(':')
path = None
print(f"connect to redis over network: {host}:{port}")
return redis.Redis(
db=self.db,
unix_socket_path=path,
host=host, port=port,
decode_responses=True)
hooks = [ RedisHook() ]
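# --- Hedged usage sketch (not part of the original plugin) ------------------
# RedisHook.thread() LPOPs job names from the configured list and enqueues the
# matching job. With the defaults above (localhost:6379, db 0, list "hook"),
# a job registered via --redis <JOB> can be triggered as shown below; the job
# name "backup" is an illustrative assumption.
def _example_trigger_job(job_name="backup", list_name="hook"):
    """Hypothetical trigger, not used by hoocron itself."""
    r = redis.Redis(host="localhost", port=6379, db=0, decode_responses=True)
    r.rpush(list_name, job_name)  # picked up by RedisHook.thread() via lpop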
|
thread_handler.py
|
from ezeeai.core.runner import Runner
import time
import psutil
from multiprocessing import Process, Queue
from ..utils.sys_ops import find_free_port, change_checkpoints
from ..config import config_reader
import threading
import logging
import subprocess
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)s] (%(threadName)-10s) %(message)s',
)
class ThreadHandler:
def __init__(self):
self._processes = {}
self._ports = {}
self._return_queue = Queue()
def _get_runner(self, all_params_config):
return Runner(all_params_config)
def add_port(self, username, config_file, port):
self._ports[username + '_' + config_file] = port
def get_port(self, username, config_file):
return self._ports[username + '_' + config_file]
def tensor_board_thread(self, config_file, port):
config_path = config_reader.read_config(config_file).all()['checkpoint_dir']
logging.debug('Starting tensor board')
time.sleep(3)
pro = "tensorboard --host=0.0.0.0 --logdir=" + config_path + " --port=" + port
subprocess.call(pro, shell=True)
logging.debug('Exiting tensor board')
def run_tensor_board(self, username, config_file):
if not username + '_' + config_file in self._ports.keys():
try:
port = find_free_port()
self.add_port(username, config_file, port)
name = 'tensorboard-' + str(port)
tboard_thread = threading.Thread(name=name,
target=self.tensor_board_thread, args=(config_file, port))
                tboard_thread.daemon = True
tboard_thread.start()
except ValueError:
logging.error('No free port found.')
def run_thread(self, all_params_config):
runner = self._get_runner(all_params_config)
runner.run()
def predict_thread(self, all_params_config, new_features, all=False):
runner = self._get_runner(all_params_config)
self._return_queue.put(runner.predict(new_features, all))
def predict_test_thread(self, all_params_config, test_file):
runner = self._get_runner(all_params_config)
self._return_queue.put(runner.predict_test(test_file))
def explain_thread(self, all_params_config, explain_params):
runner = self._get_runner(all_params_config)
self._return_queue.put(runner.explain(explain_params))
def pause_threads(self, username):
p = self._processes[username]['process'] if username in self._processes.keys() else None
if not isinstance(p, str) and p:
pid = p.pid
parent = psutil.Process(pid)
for child in parent.children(recursive=True):
child.kill()
parent.kill()
del self._processes[username]
return True
def check_running(self, username):
if username in self._processes.keys():
return self._processes[username]['process'].is_alive(), self._processes[username]['config_file']
return False, None
def run_estimator(self, all_params_config, username, config_file):
r_thread = Process(
target=self.run_thread, args=(all_params_config,), name='run')
r_thread.daemon = True
r_thread.start()
self._processes[username] = {'process': r_thread, 'config_file': config_file}
def predict_estimator(self, all_params_config, features, all=False):
r_thread = Process(target=self.predict_thread, args=(all_params_config, features, all), name='predict')
r_thread.daemon = True
r_thread.start()
final_pred = self._return_queue.get()
r_thread.join()
return final_pred
def predict_test_estimator(self, all_params_config, features):
r_thread = Process(target=self.predict_test_thread, args=(all_params_config, features), name='test')
r_thread.daemon = True
r_thread.start()
final_pred = self._return_queue.get()
r_thread.join()
return final_pred
def explain_estimator(self, all_params_config, explain_params):
r_thread = Process(target=self.explain_thread, args=(all_params_config, explain_params),
name='explain')
r_thread.daemon = True
r_thread.start()
exp = self._return_queue.get()
r_thread.join()
return exp
def handle_request(self, option, all_params_config, username, resume_from, config_file):
if option == 'run':
if resume_from != '':
change_checkpoints(all_params_config, resume_from)
self.run_estimator(all_params_config, username, config_file)
elif option == 'pause':
self.pause_threads(username)
else:
raise ValueError("Invalid option")
|
observer.py
|
import threading
import rospy
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus
class Observer(object):
def __init__(self, name, loop_rate_hz=1):
self._name = name
self._rate = rospy.Rate(loop_rate_hz)
self._seq = 1
self._lock = threading.Lock()
self._thread = threading.Thread(
target=self._run)
self._thread.daemon = True
self._stop_event = threading.Event()
self._pub_diag = rospy.Publisher(
'/diagnostics', DiagnosticArray, queue_size=10)
def __del__(self):
if Observer:
print("{} stopped".format(self._name))
# Every derived class needs to override this
def generate_diagnostics(self):
msg = DiagnosticArray()
return msg
def _run(self):
while not rospy.is_shutdown() and not self._stopped():
diag_msg = DiagnosticArray()
diag_msg.header.stamp = rospy.get_rostime()
status_msgs = self.generate_diagnostics()
diag_msg.status.extend(status_msgs)
self._pub_diag.publish(diag_msg)
self._seq += 1
self._rate.sleep()
def start(self):
print("starting {}...".format(self._name))
self._thread.start()
def stop(self):
self._lock.acquire()
self._stop_event.set()
self._lock.release()
def _stopped(self):
self._lock.acquire()
        isSet = self._stop_event.is_set()
self._lock.release()
return isSet
class ServiceObserver(Observer):
def __init__(self, name, service_name=None, service_type=None, loop_rate_hz=1):
super(ServiceObserver, self).__init__(name, loop_rate_hz)
self.name = service_name
self.type = service_type
self.client = None
try:
self.start_service()
except:
print("{} service not started".format(self.name))
            super(ServiceObserver, self).__del__()
def start_service(self):
try:
rospy.wait_for_service(self.name, timeout=1.0)
self.client = rospy.ServiceProxy(self.name, self.type)
            print("Service '" + self.name +
                  "' added of type " + str(self.type))
except rospy.ServiceException as exc:
print("Service {} is not running: ".format(self.name) + str(exc))
def generate_diagnostics(self):
        try:
            resp = self.client.call()
        except rospy.ServiceException as exc:
            print("Service {} did not process request: ".format(
                self.name) + str(exc))
            return []  # no diagnostics if the service call failed
        status_msg = self.diagnostics_from_response(resp)
        return status_msg
# Every derived class needs to override this
def diagnostics_from_response(self, response):
msg = DiagnosticArray()
return msg
class TopicObserver(Observer):
def __init__(self, name, loop_rate_hz, topics):
super(TopicObserver, self).__init__(name, loop_rate_hz)
self._topics = topics
self._id = ""
self._num_topics = len(topics)
# Every derived class needs to override this
def calculate_attr(self, msgs):
# do calculations
return DiagnosticStatus()
def generate_diagnostics(self):
msgs = []
received_all = True
for topic, topic_type in self._topics:
try:
msgs.append(rospy.wait_for_message(topic, topic_type))
except rospy.ROSException as exc:
print("Topic {} is not found: ".format(topic) + str(exc))
received_all = False
break
status_msgs = list()
status_msg = DiagnosticStatus()
if received_all:
status_msg = self.calculate_attr(msgs)
status_msgs.append(status_msg)
return status_msgs
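# --- Hedged sketch (not part of the original module) ------------------------
# Concrete observers are expected to override calculate_attr(); the subclass
# below is illustrative only (observer name, topic name, message type and
# status wording are all assumptions).
class ExampleHeartbeatObserver(TopicObserver):
    def __init__(self):
        from std_msgs.msg import String  # assumed message type
        super(ExampleHeartbeatObserver, self).__init__(
            "heartbeat_observer", 1, [("/heartbeat", String)])
    def calculate_attr(self, msgs):
        status = DiagnosticStatus()
        status.name = self._name
        status.level = DiagnosticStatus.OK
        status.message = "heartbeat: {}".format(msgs[0].data)
        return status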
|
elasticsearch-stress-test.py
|
#!/usr/bin/env python
#
# Stress test tool for elasticsearch
# Written by Roi Rav-Hon @ Logz.io (roi@logz.io)
#
import signal
import sys
# Using argparse to parse cli arguments
import argparse
# Import threading essentials
from threading import Lock, Thread, Condition, Event
# For randomizing
import string
from random import randint, choice
# To get the time
import time
# For misc
import sys
# For json operations
import json
# Try and import elasticsearch
try:
from opensearchpy import OpenSearch
except:
    print("Could not import opensearchpy..")
    print("Try: pip install opensearch-py")
sys.exit(1)
import urllib3
urllib3.disable_warnings()
# Set a parser object
parser = argparse.ArgumentParser()
# Adds all params
parser.add_argument("--es_address", nargs='+', help="The address of your cluster (no protocol or port)", required=True)
parser.add_argument("--indices", type=int, help="The number of indices to write to for each ip", required=True)
parser.add_argument("--documents", type=int, help="The number different documents to write for each ip", required=True)
parser.add_argument("--clients", type=int, help="The number of clients to write from for each ip", required=True)
parser.add_argument("--seconds", type=int, help="The number of seconds to run for each ip", required=True)
parser.add_argument("--number-of-shards", type=int, default=3, help="Number of shards per index (default 3)")
parser.add_argument("--number-of-replicas", type=int, default=1, help="Number of replicas per index (default 1)")
parser.add_argument("--bulk-size", type=int, default=1000, help="Number of document per request (default 1000)")
parser.add_argument("--max-fields-per-document", type=int, default=100,
help="Max number of fields in each document (default 100)")
parser.add_argument("--max-size-per-field", type=int, default=1000, help="Max content size per field (default 1000)")
parser.add_argument("--no-cleanup", default=False, action='store_true', help="Don't delete the indices upon finish")
parser.add_argument("--stats-frequency", type=int, default=30,
help="Number of seconds to wait between stats prints (default 30)")
parser.add_argument("--not-green", dest="green", action="store_false", help="Script doesn't wait for the cluster to be green")
parser.set_defaults(green=True)
parser.add_argument("--ca-file", dest="cafile", default="", help="Path to your certificate file")
parser.add_argument("--no-verify", default=False, dest="no_verify", action="store_true", help="Do not verify certificate")
parser.add_argument("--username", dest="auth_username", default="", help="HTTP authentication Username")
parser.add_argument("--password", dest="auth_password", default="", help="HTTP authentication Password")
# Parse the arguments
args = parser.parse_args()
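# --- Hedged example invocation (not part of the original script) ------------
# All flags refer to the arguments defined above; the values are illustrative:
#
#   python elasticsearch-stress-test.py \
#       --es_address localhost \
#       --indices 2 --documents 5 --clients 4 --seconds 60 \
#       --bulk-size 500 --username admin --password admin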
# Set variables from argparse output (for readability)
NUMBER_OF_INDICES = args.indices
NUMBER_OF_DOCUMENTS = args.documents
NUMBER_OF_CLIENTS = args.clients
NUMBER_OF_SECONDS = args.seconds
NUMBER_OF_SHARDS = args.number_of_shards
NUMBER_OF_REPLICAS = args.number_of_replicas
BULK_SIZE = args.bulk_size
MAX_FIELDS_PER_DOCUMENT = args.max_fields_per_document
MAX_SIZE_PER_FIELD = args.max_size_per_field
NO_CLEANUP = args.no_cleanup
STATS_FREQUENCY = args.stats_frequency
WAIT_FOR_GREEN = args.green
CA_FILE = args.cafile
VERIFY_CERTS = not args.no_verify
AUTH_USERNAME = args.auth_username
AUTH_PASSWORD = args.auth_password
# timestamp placeholder
STARTED_TIMESTAMP = 0
# Placeholders
success_bulks = 0
failed_bulks = 0
total_size = 0
indices = []
documents = []
documents_templates = []
es = None # Will hold the elasticsearch session
# Thread safe
success_lock = Lock()
fail_lock = Lock()
size_lock = Lock()
shutdown_event = Event()
# Helper functions
def increment_success():
# First, lock
success_lock.acquire()
global success_bulks
try:
# Increment counter
success_bulks += 1
finally: # Just in case
# Release the lock
success_lock.release()
def increment_failure():
# First, lock
fail_lock.acquire()
global failed_bulks
try:
# Increment counter
failed_bulks += 1
finally: # Just in case
# Release the lock
fail_lock.release()
def increment_size(size):
# First, lock
size_lock.acquire()
try:
# Using globals here
global total_size
# Increment counter
total_size += size
finally: # Just in case
# Release the lock
size_lock.release()
def has_timeout(STARTED_TIMESTAMP):
# Match to the timestamp
if (STARTED_TIMESTAMP + NUMBER_OF_SECONDS) > int(time.time()):
return False
return True
# Just to control the minimum value globally (though it's not configurable)
def generate_random_int(max_size):
try:
return randint(1, max_size)
except:
print("Not supporting {0} as valid sizes!".format(max_size))
sys.exit(1)
# Generate a random string with a length of 1 up to the provided max size
def generate_random_string(max_size):
return ''.join(choice(string.ascii_lowercase) for _ in range(generate_random_int(max_size)))
# Create a document template
def generate_document():
temp_doc = {}
# Iterate over the max fields
for _ in range(generate_random_int(MAX_FIELDS_PER_DOCUMENT)):
# Generate a field, with random content
temp_doc[generate_random_string(10)] = generate_random_string(MAX_SIZE_PER_FIELD)
# Return the created document
return temp_doc
def fill_documents(documents_templates):
# Generating 10 random subsets
for _ in range(10):
# Get the global documents
global documents
# Get a temp document
temp_doc = choice(documents_templates)
# Populate the fields
for field in temp_doc:
temp_doc[field] = generate_random_string(MAX_SIZE_PER_FIELD)
documents.append(temp_doc)
def client_worker(es, indices, STARTED_TIMESTAMP):
# Running until timeout
while (not has_timeout(STARTED_TIMESTAMP)) and (not shutdown_event.is_set()):
curr_bulk = ""
# Iterate over the bulk size
for _ in range(BULK_SIZE):
# Generate the bulk operation
curr_bulk += "{0}\n".format(json.dumps({"index": {"_index": choice(indices), "_type": "stresstest"}}))
curr_bulk += "{0}\n".format(json.dumps(choice(documents)))
try:
# Perform the bulk operation
es.bulk(body=curr_bulk)
# Adding to success bulks
increment_success()
# Adding to size (in bytes)
increment_size(sys.getsizeof(str(curr_bulk)))
except:
# Failed. incrementing failure
increment_failure()
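# For reference, each bulk body built above is newline-delimited JSON of the form shown below
# (the index name and field contents are random; this is purely an illustration):
#   {"index": {"_index": "qwertyuiopasdfgh", "_type": "stresstest"}}
#   {"azxcv": "random field content", "qwpo": "more random content"}
#   ... repeated BULK_SIZE times ...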
def generate_clients(es, indices, STARTED_TIMESTAMP):
# Clients placeholder
temp_clients = []
# Iterate over the clients count
for _ in range(NUMBER_OF_CLIENTS):
temp_thread = Thread(target=client_worker, args=[es, indices, STARTED_TIMESTAMP])
temp_thread.daemon = True
# Create a thread and push it to the list
temp_clients.append(temp_thread)
# Return the clients
return temp_clients
def generate_documents():
# Documents placeholder
temp_documents = []
# Iterate over the clients count
for _ in range(NUMBER_OF_DOCUMENTS):
# Create a document and push it to the list
temp_documents.append(generate_document())
# Return the documents
return temp_documents
def generate_indices(es):
# Placeholder
temp_indices = []
# Iterate over the indices count
for _ in range(NUMBER_OF_INDICES):
# Generate the index name
temp_index = generate_random_string(16)
# Push it to the list
temp_indices.append(temp_index)
try:
# And create it in ES with the shard count and replicas
es.indices.create(index=temp_index, body={"settings": {"number_of_shards": NUMBER_OF_SHARDS,
"number_of_replicas": NUMBER_OF_REPLICAS}})
except Exception as e:
print("Could not create index. Is your cluster ok?")
print(e)
sys.exit(1)
# Return the indices
return temp_indices
def cleanup_indices(es, indices):
# Iterate over all indices and delete those
for curr_index in indices:
try:
# Delete the index
es.indices.delete(index=curr_index, ignore=[400, 404])
except:
print("Could not delete index: {0}. Continue anyway..".format(curr_index))
def print_stats(STARTED_TIMESTAMP):
# Calculate elpased time
elapsed_time = (int(time.time()) - STARTED_TIMESTAMP)
# Calculate size in MB
size_mb = total_size / 1024 / 1024
# Protect division by zero
if elapsed_time == 0:
mbs = 0
else:
mbs = size_mb / float(elapsed_time)
# Print stats to the user
print("Elapsed time: {0} seconds".format(elapsed_time))
print("Successful bulks: {0} ({1} documents)".format(success_bulks, (success_bulks * BULK_SIZE)))
print("Failed bulks: {0} ({1} documents)".format(failed_bulks, (failed_bulks * BULK_SIZE)))
print("Indexed approximately {0} MB which is {1:.2f} MB/s".format(size_mb, mbs))
print("")
def print_stats_worker(STARTED_TIMESTAMP):
# Create a conditional lock to be used instead of sleep (prevent dead locks)
lock = Condition()
# Acquire it
lock.acquire()
# Print the stats every STATS_FREQUENCY seconds
while (not has_timeout(STARTED_TIMESTAMP)) and (not shutdown_event.is_set()):
# Wait for timeout
lock.wait(STATS_FREQUENCY)
# To avoid double printing
if not has_timeout(STARTED_TIMESTAMP):
# Print stats
print_stats(STARTED_TIMESTAMP)
def main():
clients = []
all_indices = []
auth = None
context = None
# Set the timestamp
STARTED_TIMESTAMP = int(time.time())
for esaddress in args.es_address:
print("")
print("Starting initialization of {0}".format(esaddress))
try:
# Initiate the elasticsearch session
# We increase the timeout here from the default value (10 seconds)
# to ensure we wait for requests to finish even if the cluster is overwhelmed
# and it takes a bit longer to process one bulk.
if CA_FILE:
                # Build an SSL context from the provided CA file (Python stdlib ssl module)
                context = ssl.create_default_context(cafile=CA_FILE)
if AUTH_USERNAME and AUTH_PASSWORD:
auth = (AUTH_USERNAME, AUTH_PASSWORD)
es = OpenSearch(
esaddress,
http_auth=auth,
verify_certs=VERIFY_CERTS,
ssl_context=context,
timeout=60)
except Exception as e:
print("Could not connect to elasticsearch!")
print(e)
sys.exit(1)
# Generate docs
documents_templates = generate_documents()
fill_documents(documents_templates)
print("Done!")
print("Creating indices.. ")
indices = generate_indices(es)
all_indices.extend(indices)
try:
#wait for cluster to be green if nothing else is set
if WAIT_FOR_GREEN:
es.cluster.health(wait_for_status='green', master_timeout='600s', timeout='600s')
except Exception as e:
print("Cluster timeout....")
print("Cleaning up created indices.. "),
cleanup_indices(es, indices)
continue
print("Generating documents and workers.. ") # Generate the clients
clients.extend(generate_clients(es, indices, STARTED_TIMESTAMP))
print("Done!")
print("Starting the test. Will print stats every {0} seconds.".format(STATS_FREQUENCY))
print("The test would run for {0} seconds, but it might take a bit more "
"because we are waiting for current bulk operation to complete. \n".format(NUMBER_OF_SECONDS))
# Run the clients!
for d in clients:
d.start()
# Create and start the print stats thread
stats_thread = Thread(target=print_stats_worker, args=[STARTED_TIMESTAMP])
stats_thread.daemon = True
stats_thread.start()
for c in clients:
while c.is_alive():
try:
c.join(timeout=0.1)
except KeyboardInterrupt:
print("")
print("Ctrl-c received! Sending kill to threads...")
shutdown_event.set()
                # Set the loop flag to true to enter the loop
                flag = True
                while flag:
                    # Sleep 2 seconds so we don't loop too often
                    time.sleep(2)
                    # Set the loop flag to false. If no other thread is still alive it will stay false
                    flag = False
                    # Loop through the running threads and check whether any is still alive
                    for t in threading.enumerate():
                        # Skip the main thread itself, otherwise this loop would never finish
                        if t is threading.main_thread():
                            continue
                        # If a single thread is still alive, repeat the loop
                        if t.is_alive():
                            flag = True
                print("Cleaning up created indices.. ")
                cleanup_indices(es, all_indices)
print("\nTest is done! Final results:")
print_stats(STARTED_TIMESTAMP)
# Cleanup, unless we are told not to
if not NO_CLEANUP:
print("Cleaning up created indices.. "),
cleanup_indices(es, all_indices)
print("Done!") # # Main runner
try:
main()
except Exception as e:
print("Got unexpected exception. probably a bug, please report it.")
print("")
print(e.message)
print("")
sys.exit(1)
|
treasury_service.py
|
import config
import account_helper
import node_rpc_helper
import os
import json
from bottle import post, request, response, get, route, static_file
from threading import Thread
import requests
def setHeaders():
response.content_type = 'application/json'
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
@route('/treasury/send', method='OPTIONS')
def sendOptions():
setHeaders()
return "OK"
# Example: curl -d "{'pool_account_id': 'FaucetPool', 'pool_account_password': 'some_password', 'dest_account': 'mik_1naij1wkner3gb6j4o1tsf4me3zz8q9t1km9wnm5qzmnycfa44t8tkbq4srs', 'amount': '1', 'unique_id': '1234500017', 'callback': 'http://localhost:8090/treasury/sample-send-callback'}" http://localhost:8090/treasury/send
@route('/treasury/send', method='POST')
def send():
global config
setHeaders()
if config['treasury_service.enabled'] != 'true':
return {"error": "service not enabled"}
postdata = request.body.read().decode('utf8')
#print("postdata ", postdata)
postjson = json.loads(postdata.replace("'", '"'))
#print("postjson ", postjson)
pool_account_id = postjson["pool_account_id"]
pool_account_password = postjson["pool_account_password"]
dest_account = postjson["dest_account"]
amount = postjson["amount"]
unique_id = postjson["unique_id"]
callback = ''
if 'callback' in postjson:
callback = postjson['callback']
#print("dest_account ", dest_account, " amount ", amount, " id ", unique_id)
if (pool_account_id not in config["treasury_service.account"]) or (pool_account_password != config["treasury_service.account"][pool_account_id]["password"]):
return {"error": "source account not found or wrong password"}
src_account = config["treasury_service.account"][pool_account_id]["account"]
src_walletid = config["treasury_service.account"][pool_account_id]["walletid"]
#print("src_account ", src_account, " walletid ", src_walletid)
max_amount = min(500000, float(config["treasury_service.max_amount"]))
min_amount = max(0.000000001, float(config["treasury_service.min_amount"]))
if callback == '':
# no callback, sync
resp = sendIntern(src_account, src_walletid, dest_account, amount, unique_id, max_amount, min_amount)
#print("resp ", resp)
return resp
else:
# callback, do send asynchronously, with callback at the end
sendAsync(src_account, src_walletid, dest_account, amount, unique_id, max_amount, min_amount, callback)
return {
'id': unique_id,
# block_hash is not yet available
'callback': callback
}
def sendIntern(src_account, src_walletid, dest_account, amount, unique_id, max_amount, min_amount):
# exclude send to self
if src_account == dest_account:
return {"error": "Send to self is invalid"}
amountFloat = 0
try:
amountFloat = float(amount)
except:
return {"error": "Invalid amount"}
if (amountFloat > max_amount):
return {"error": "Amount too high (max " + str(max_amount) + ")"}
if (amountFloat < min_amount):
return {"error": "Amount too small (min " + str(min_amount) + ")"}
#if (amountFloat == 1.2345):
# return {"error": "Refused due to test"}
# debug: retrieve balance
src_orig_balance = node_rpc_helper.getAccountBalance(src_account)
print("sendIntern: orig src balance", src_orig_balance)
resp = node_rpc_helper.doSend(src_walletid, src_account, dest_account, amount, unique_id)
print("sendIntern: send complete, amount ", amount, "dest", dest_account)
if 'error' in resp:
return resp
if 'block' not in resp:
return {"error": "no block in response"}
return {
"id": unique_id,
"amount": amount,
"block_hash": resp['block']
}
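# For reference, sendIntern returns JSON-serializable dicts along these lines (values are
# illustrative, taken from the sample curl commands in this file):
#   {"id": "1234500017", "amount": "1", "block_hash": "D70BB005...FECF"}
# or, on failure:
#   {"error": "Amount too high (max 500000.0)"}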
def sendInternWithCallback(src_account, src_walletid, dest_account, amount, unique_id, max_amount, min_amount, callback):
result = sendIntern(src_account, src_walletid, dest_account, amount, unique_id, max_amount, min_amount)
invokeSendCallback(callback, result, unique_id)
def invokeSendCallback(callback, result, id):
# include ID in callback always
if 'id' not in result:
result['id'] = id
print('Invoking send callback', callback, 'with result data', result)
postdata = json.dumps(result)
response = requests.post(callback, data=postdata)
print(response.url, response.text[:200])
def sendAsync(src_account, src_walletid, dest_account, amount, unique_id, max_amount, min_amount, callback):
#print("Doing send in background")
t = Thread(target=sendInternWithCallback, args=(src_account, src_walletid, dest_account, amount, unique_id, max_amount, min_amount, callback))
t.start()
# Sample send callback, used for testing
# Example: curl -d "{'id': '1234500017', 'amount': '3', 'block_hash': 'D70BB005723EF4AE3850861FB8819628CD101EE1F3A4FF40808213EB5B99FECF'}" http://localhost:8090/treasury/sample-send-callback
@route('/treasury/sample-send-callback', method='POST')
def sample_send_callback():
global config
setHeaders()
if config['treasury_service.enabled'] != 'true':
return {"error": "service not enabled"}
postdata = request.body.read().decode('utf8')
#print("postdata ", postdata)
postjson = json.loads(postdata.replace("'", '"'))
#print("postjson ", postjson)
id = ''
if 'id' in postjson:
id = postjson['id']
if 'error' in postjson:
print('Send callback', 'id', id, 'ERROR', postjson['error'])
else:
amount = 0
if 'amount' in postjson:
amount = postjson['amount']
block_hash = ''
if 'block_hash' in postjson:
block_hash = postjson['block_hash']
print('Send callback', 'id', id, 'amount', amount, 'block_hash', block_hash)
config = config.readConfig()
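# For reference, the handlers above expect the config returned by readConfig() to have roughly
# this shape (keys taken from the code above; the values here are illustrative placeholders):
#   {
#     "treasury_service.enabled": "true",
#     "treasury_service.max_amount": "100",
#     "treasury_service.min_amount": "0.000000001",
#     "treasury_service.account": {
#       "FaucetPool": {"password": "some_password", "account": "mik_1naij...", "walletid": "..."}
#     }
#   }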
|
swift_t.py
|
''' Sample Executor for integration with SwiftT.
This follows the model used by `EMEWS <http://www.mcs.anl.gov/~wozniak/papers/Cancer2_2016.pdf>`_
to some extent.
'''
from concurrent.futures import Future
import logging
import uuid
import threading
import queue
import multiprocessing as mp
from ipyparallel.serialize import pack_apply_message, unpack_apply_message
from ipyparallel.serialize import serialize_object, deserialize_object
from parsl.executors.base import ParslExecutor
logger = logging.getLogger(__name__)
BUFFER_THRESHOLD = 1024*1024
ITEM_THRESHOLD = 1024
def runner(incoming_q, outgoing_q):
''' This is a function that mocks the Swift-T side. It listens on the the incoming_q for tasks
and posts returns on the outgoing_q
Args:
- incoming_q (Queue object) : The queue to listen on
- outgoing_q (Queue object) : Queue to post results on
The messages posted on the incoming_q will be of the form :
{
"task_id" : <uuid.uuid4 string>,
"buffer" : serialized buffer containing the fn, args and kwargs
}
If ``None`` is received, the runner will exit.
Response messages should be of the form:
{
"task_id" : <uuid.uuid4 string>,
"result" : serialized buffer containing result
"exception" : serialized exception object
}
On exiting the runner will post ``None`` to the outgoing_q
'''
logger.debug("[RUNNER] Starting")
def execute_task(bufs):
''' Deserialize the buf, and execute the task.
Returns the serialized result/exception
'''
all_names = dir(__builtins__)
user_ns = locals()
user_ns.update( {'__builtins__' : {k : getattr(__builtins__, k) for k in all_names} } )
f, args, kwargs = unpack_apply_message(bufs, user_ns, copy=False)
fname = getattr(f, '__name__', 'f')
prefix = "parsl_"
fname = prefix+"f"
argname = prefix+"args"
kwargname = prefix+"kwargs"
resultname = prefix+"result"
user_ns.update({ fname : f,
argname : args,
kwargname : kwargs,
resultname : resultname })
code = "{0} = {1}(*{2}, **{3})".format(resultname, fname,
argname, kwargname)
try:
print("[RUNNER] Executing : {0}".format(code))
exec(code, user_ns, user_ns)
except Exception as e:
logger.warning("Caught errors but will not handled %s", e)
raise e
else :
#print("Done : {0}".format(locals()))
print("[RUNNER] Result : {0}".format(user_ns.get(resultname)))
return user_ns.get(resultname)
while True :
try:
# Blocking wait on the queue
msg = incoming_q.get(block=True, timeout=10)
#logger.debug("[RUNNER] Got message : %s", msg)
except queue.Empty:
# Handle case where no items were on queue
logger.debug("[RUNNER] got nothing")
except IOError as ioerror:
logger.debug("[RUNNER] broken pipe, error: %s", ioerror)
try:
# Attempt to send a stop notification to the management thread
outgoing_q.put(None)
except Exception :
pass
break
except Exception as e:
logger.debug("[RUNNER] caught unknown exception : %s", e)
else:
# Handle received message
if not msg :
# Empty message is a die request
logger.debug("[RUNNER] Received exit request")
outgoing_q.put(None)
break
else:
# Received a valid message, handle it
logger.debug("[RUNNER] Got a valid task : %s", msg["task_id"])
try:
response_obj = execute_task(msg['buffer'])
response = {"task_id" : msg["task_id"],
"result" : serialize_object(response_obj)}
logger.warning("[RUNNER] Returing result : %s",
deserialize_object(response["result"]) )
except Exception as e:
logger.debug("[RUNNER] Caught task exception")
response = {"task_id" : msg["task_id"],
"exception" : serialize_object(e)}
outgoing_q.put(response)
logger.debug("[RUNNER] Terminating")
class TurbineExecutor(ParslExecutor):
    ''' The Turbine executor. Bypass the Swift/T language and run on top of the Turbine engines
in an MPI environment.
Here's a simple diagram
.. code:: python
| Data | Executor | IPC | External Process(es)
| Flow | | |
Task | Kernel | | |
+----->|-------->|------------>|outgoing_q -|-> Worker_Process
| | | | | | |
Parsl<---Fut-| | | | result exception
^ | | | | | |
| | | Q_mngmnt | | V V
| | | Thread<--|incoming_q<-|--- +---------+
| | | | | |
| | | | | |
+----update_fut-----+
'''
def _queue_management_worker(self):
''' The queue management worker is responsible for listening to the incoming_q
for task status messages and updating tasks with results/exceptions/updates
It expects the following messages:
{
"task_id" : <task_id>
"result" : serialized result object, if task succeeded
... more tags could be added later
}
{
"task_id" : <task_id>
"exception" : serialized exception object, on failure
}
We don't support these yet, but they could be added easily as heartbeat.
{
"task_id" : <task_id>
"cpu_stat" : <>
"mem_stat" : <>
"io_stat" : <>
"started" : tstamp
}
The None message is a die request.
'''
while True:
logger.debug("[MTHREAD] Management thread active")
try:
msg = self.incoming_q.get(block=True, timeout=1)
except queue.Empty as e:
# timed out.
pass
except IOError as e:
logger.debug("[MTHREAD] caught broken queue : %s : errno:%s", e, e.errno)
return
except Exception as e:
logger.debug("[MTHREAD] caught unknown exception : %s", e)
pass
else:
if msg is None:
logger.debug("[MTHREAD] Got None")
return
else:
logger.debug("[MTHREAD] Got message : %s", msg)
task_fut = self.tasks[msg['task_id']]
if 'result' in msg:
result, _ = deserialize_object(msg['result'])
task_fut.set_result(result)
elif 'exception' in msg:
exception, _ = deserialize_object(msg['exception'])
task_fut.set_exception(exception)
if not self.isAlive:
break
# When the executor gets lost, the weakref callback will wake up
# the queue management thread.
def weakref_cb(self, q=None):
''' We do not use this yet
'''
q.put(None)
def _start_queue_management_thread(self):
''' Method to start the management thread as a daemon.
Checks if a thread already exists, then starts it.
Could be used later as a restart if the management thread dies.
'''
logging.debug("In _start %s", "*"*40)
if self._queue_management_thread is None:
logging.debug("Starting management thread ")
self._queue_management_thread = threading.Thread (target=self._queue_management_worker)
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
else:
logging.debug("Management thread already exists, returning")
def shutdown(self):
''' Shutdown method, to kill the threads and workers.
'''
self.isAlive = False
logging.debug("Waking management thread")
self.incoming_q.put(None) # Wake up the thread
self._queue_management_thread.join() # Force join
logging.debug("Exiting thread")
self.worker.join()
return True
def __init__ (self, swift_attribs=None):
''' Initialize the thread pool
Trying to implement the emews model.
Kwargs:
            - swift_attribs : Takes a dict of swift attribs. For future use.
'''
logger.debug("In __init__")
self.mp_manager = mp.Manager()
self.outgoing_q = self.mp_manager.Queue()
self.incoming_q = self.mp_manager.Queue()
self.isAlive = True
self._queue_management_thread = None
self._start_queue_management_thread()
logger.debug("Created management thread : %s", self._queue_management_thread)
self.worker = mp.Process(target=runner, args = (self.outgoing_q, self.incoming_q))
self.worker.start()
logger.debug("Created worker : %s", self.worker)
self.tasks = {}
def submit (self, func, *args, **kwargs):
        ''' Submits work to the outgoing_q; an external process listens on this queue for new
        work. This method is simply a pass-through and behaves like a submit call as described
here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_
Args:
- func (callable) : Callable function
- *args (list) : List of arbitrary positional arguments.
Kwargs:
- **kwargs (dict) : A dictionary of arbitrary keyword args for func.
Returns:
Future
'''
task_id = uuid.uuid4()
logger.debug("Before pushing to queue : func:%s func_args:%s", func, args)
self.tasks[task_id] = Future()
fn_buf = pack_apply_message(func, args, kwargs,
buffer_threshold=1024*1024,
item_threshold=1024)
msg = {"task_id" : task_id,
"buffer" : fn_buf }
        # Post task to the outgoing queue
self.outgoing_q.put(msg)
# Return the future
return self.tasks[task_id]
def scale_out (self, workers=1):
        ''' Scales out the number of active workers by 1.
        This method is not implemented for threads and will raise an error if called.
This would be nice to have, and can be done
Raises:
NotImplemented exception
'''
raise NotImplementedError
def scale_in (self, workers=1):
        ''' Scale in the number of active workers by 1.
        This method is not implemented for threads and will raise an error if called.
Raises:
NotImplemented exception
'''
raise NotImplementedError
if __name__ == "__main__" :
print("Start")
turb_x = TurbineExecutor()
print("Done")
|
server.py
|
# Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import asyncio
import contextlib
import gzip
import mimetypes
import socket
import threading
from contextlib import closing
from http import HTTPStatus
import greenlet
from OpenSSL import crypto
from twisted.internet import reactor, ssl
from twisted.web import http
from playwright.path_utils import get_file_dirname
from playwright.sync_base import dispatcher_fiber
_dirname = get_file_dirname()
def _find_free_port():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(("", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
class Server:
protocol = "http"
def __init__(self):
self.PORT = _find_free_port()
self.EMPTY_PAGE = f"{self.protocol}://localhost:{self.PORT}/empty.html"
self.PREFIX = f"{self.protocol}://localhost:{self.PORT}"
self.CROSS_PROCESS_PREFIX = f"{self.protocol}://127.0.0.1:{self.PORT}"
# On Windows, this list can be empty, reporting text/plain for scripts.
mimetypes.add_type("text/html", ".html")
mimetypes.add_type("text/css", ".css")
mimetypes.add_type("application/javascript", ".js")
mimetypes.add_type("image/png", ".png")
mimetypes.add_type("font/woff2", ".woff2")
def __repr__(self) -> str:
return self.PREFIX
@abc.abstractmethod
def listen(self, factory):
pass
def start(self):
request_subscribers = {}
auth = {}
csp = {}
routes = {}
gzip_routes = set()
self.request_subscribers = request_subscribers
self.auth = auth
self.csp = csp
self.routes = routes
self.gzip_routes = gzip_routes
static_path = _dirname / "assets"
class TestServerHTTPHandler(http.Request):
def process(self):
request = self
self.post_body = request.content.read().decode()
request.content.seek(0, 0)
uri = request.uri.decode()
if request_subscribers.get(uri):
request_subscribers[uri].set_result(request)
request_subscribers.pop(uri)
if auth.get(uri):
authorization_header = request.requestHeaders.getRawHeaders(
"authorization"
)
creds_correct = False
if authorization_header:
creds_correct = auth.get(uri) == (
request.getUser(),
request.getPassword(),
)
if not creds_correct:
request.setHeader(
b"www-authenticate", 'Basic realm="Secure Area"'
)
request.setResponseCode(HTTPStatus.UNAUTHORIZED)
request.finish()
return
if csp.get(uri):
request.setHeader(b"Content-Security-Policy", csp[uri])
if routes.get(uri):
routes[uri](request)
return
file_content = None
try:
file_content = (
static_path / request.path.decode()[1:]
).read_bytes()
except (FileNotFoundError, IsADirectoryError):
request.setResponseCode(HTTPStatus.NOT_FOUND)
if file_content:
request.setHeader("Content-Type", mimetypes.guess_type(uri)[0])
if uri in gzip_routes:
request.setHeader("Content-Encoding", "gzip")
request.write(gzip.compress(file_content))
else:
request.write(file_content)
self.setResponseCode(HTTPStatus.OK)
self.finish()
class MyHttp(http.HTTPChannel):
requestFactory = TestServerHTTPHandler
class MyHttpFactory(http.HTTPFactory):
protocol = MyHttp
self.listen(MyHttpFactory())
async def wait_for_request(self, path):
if path in self.request_subscribers:
return await self.request_subscribers[path]
future = asyncio.Future()
self.request_subscribers[path] = future
return await future
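    # Illustrative usage sketch (test code, not part of this class): an async test can await the
    # next request that hits a given path and then inspect the raw twisted request, e.g.
    #   request = await server.wait_for_request("/empty.html")
    #   user_agent = request.getHeader("user-agent")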
@contextlib.contextmanager
def expect_request(self, path):
future = asyncio.create_task(self.wait_for_request(path))
class CallbackValue:
def __init__(self) -> None:
self._value = None
@property
def value(self):
return self._value
g_self = greenlet.getcurrent()
cb_wrapper = CallbackValue()
def done_cb(task):
cb_wrapper._value = future.result()
g_self.switch()
future.add_done_callback(done_cb)
yield cb_wrapper
while not future.done():
dispatcher_fiber.switch()
def set_auth(self, path: str, username: str, password: str):
self.auth[path] = (username, password)
def set_csp(self, path: str, value: str):
self.csp[path] = value
def reset(self):
self.request_subscribers.clear()
self.auth.clear()
self.csp.clear()
self.gzip_routes.clear()
self.routes.clear()
def set_route(self, path, callback):
self.routes[path] = callback
def enable_gzip(self, path):
self.gzip_routes.add(path)
def set_redirect(self, from_, to):
def handle_redirect(request):
request.setResponseCode(HTTPStatus.FOUND)
request.setHeader("location", to)
request.finish()
self.set_route(from_, handle_redirect)
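    # Illustrative sketch (not part of the test suite): a custom route handler receives the raw
    # twisted request, mirrors the handle_redirect pattern above, and must finish the request
    # itself, e.g.
    #   def handle_hello(request):
    #       request.setResponseCode(HTTPStatus.OK)
    #       request.write(b"hello")
    #       request.finish()
    #   server.set_route("/hello", handle_hello)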
class HTTPServer(Server):
def listen(self, factory):
reactor.listenTCP(self.PORT, factory)
class HTTPSServer(Server):
protocol = "https"
def listen(self, factory):
cert = ssl.PrivateCertificate.fromCertificateAndKeyPair(
ssl.Certificate.loadPEM(
(_dirname / "testserver" / "cert.pem").read_bytes()
),
ssl.KeyPair.load(
(_dirname / "testserver" / "key.pem").read_bytes(), crypto.FILETYPE_PEM
),
)
contextFactory = cert.options()
reactor.listenSSL(self.PORT, factory, contextFactory)
class TestServer:
def __init__(self) -> None:
self.server = HTTPServer()
self.https_server = HTTPSServer()
def start(self) -> None:
self.server.start()
self.https_server.start()
self.thread = threading.Thread(
target=lambda: reactor.run(installSignalHandlers=0)
)
self.thread.start()
def stop(self) -> None:
reactor.stop()
self.thread.join()
def reset(self) -> None:
self.server.reset()
self.https_server.reset()
test_server = TestServer()
|
Translator.py
|
import asyncio
import json
import os
import shutil
import threading
import zipfile
import aiohttp
import requests
from Util import Configuration, GearbotLogging, Emoji
LANGS = dict()
BOT = None
def initialize(bot_in):
global BOT
BOT = bot_in
load_translations()
def load_translations():
directory = os.fsencode("lang")
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".json"):
with open(f"lang/{filename}", encoding="UTF-8") as lang:
LANGS[filename[:-5]] = json.load(lang)
def assemble(emoji, key, location, **kwargs):
return f"{Emoji.get_chat_emoji(emoji)} {translate(key, location, **kwargs)}"
def translate(key, location, **kwargs):
lid = None
if location is not None:
if hasattr(location, "guild"):
location = location.guild
if location is not None and hasattr(location, "id"):
lid = location.id
else:
lid = location
if lid is None:
lang_key = "en_US"
else:
lang_key = Configuration.get_var(lid, "LANG")
if key in LANGS[lang_key].keys():
try:
return LANGS[lang_key][key].format(**kwargs)
except (KeyError, ValueError):
GearbotLogging.error(f"Corrupt translation detected in {lang_key}: {key}\n```\n{LANGS[lang_key][key]}```")
if key in LANGS["en_US"].keys():
return LANGS["en_US"][key].format(**kwargs)
return key
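# Illustrative usage sketch ("some_key", ctx and the kwargs are placeholders, not from this repo):
#   translate("some_key", ctx, user="Bob")
# resolves the guild's configured language via ctx.guild.id, falls back to the en_US string,
# and finally returns the key itself when no translation is available.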
async def update():
message = await GearbotLogging.bot_log(f"{Emoji.get_chat_emoji('REFRESH')} Updating translations")
crowdin_data = Configuration.get_master_var("CROWDIN")
session: aiohttp.ClientSession = BOT.aiosession
async with session.get(f"https://api.crowdin.com/api/project/Gearbot/export?login={crowdin_data['login']}&account-key={crowdin_data['key']}&json",) as reply:
        if reply.status != 200:
await GearbotLogging.bot_log(f"{Emoji.get_chat_emoji('WARNING')} Crowdin api error, got response code {reply.status}")
else:
response = await reply.json()
if response["success"]["status"] == "built": # only update translations if we actually got a new build, should be every time though unless this runs 2x within 30 mins for some reason
async with session.get(
f"https://api.crowdin.com/api/project/Gearbot/download/all.zip?login={crowdin_data['login']}&account-key={crowdin_data['key']}") as reply:
data = await reply.read()
with open("zip.zip", "wb") as file:
file.write(data)
with zipfile.ZipFile("zip.zip", "r") as archive:
tempdir = os.path.abspath("temp")
if os.path.isdir(tempdir):
shutil.rmtree(tempdir, ignore_errors=True)
os.mkdir(tempdir)
archive.extractall("temp")
for entry in archive.filelist:
if not entry.filename.endswith(".json"):
continue
                            filename = entry.filename[-10:]
if os.path.isfile(os.path.abspath(f"lang/{filename}")):
os.remove(os.path.abspath(f"lang/{filename}"))
archive.extract(entry, tempdir)
os.rename(os.path.abspath(f"temp/{entry.filename}"), os.path.abspath(f"lang/{filename}"))
shutil.rmtree("temp", ignore_errors=True)
load_translations()
await message.edit(content=f"{Emoji.get_chat_emoji('YES')} Translations have been updated")
else:
await message.edit(content=f"{Emoji.get_chat_emoji('WARNING')} Crowdin build status was `{response['success']['status']}`, no translation update required")
async def upload():
if Configuration.get_master_var("CROWDIN", None) is None:
return
message = await GearbotLogging.bot_log(f"{Emoji.get_chat_emoji('REFRESH')} Uploading translation file")
t = threading.Thread(target=upload_file)
t.start()
while t.is_alive():
await asyncio.sleep(1)
await message.edit(content=f"{Emoji.get_chat_emoji('YES')} Translations file has been uploaded")
def upload_file():
data = {'files[master/lang/en_US.json]': open('lang/en_US.json', 'r')}
crowdin_data = Configuration.get_master_var("CROWDIN")
requests.post(f"https://api.crowdin.com/api/project/gearbot/update-file?login={crowdin_data['login']}&account-key={crowdin_data['key']}&json", files=data)
|
test_cymj.py
|
import pytest
from numbers import Number
from io import BytesIO, StringIO
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from mujoco_py import (MjSim, MjSimPool, load_model_from_xml,
load_model_from_path, MjSimState,
ignore_mujoco_warnings,
load_model_from_mjb)
from mujoco_py import const, cymj
from mujoco_py.tests.utils import compare_imgs
import scipy.misc
from threading import Thread, Event
from multiprocessing import get_context
import sys
BASIC_MODEL_XML = """
<mujoco>
<worldbody>
<light name="light1" diffuse=".5 .5 .5" pos="0 0 3" dir="0 0 -1"/>
<camera name="camera1" pos="3 0 0" zaxis="1 0 0" />
<camera name="camera2" pos="4 0 0" zaxis="1 0 0" />
<geom name="geom1" pos="0.5 0.4 0.3" type="plane" size="1 1 0.1" rgba=".9 0 0 1"/>
<body pos="0 0 1" name="body1">
<joint name="joint1" type="free"/>
<geom name="geom2" pos="0 1 0" type="box" size=".1 .2 .3" rgba="0 .9 0 1"/>
<site name="site1" pos="1 0 0" size="0.1" type="sphere"/>
<site name="sensorsurf" pos="0 0.045 0" size=".03 .03 .03" type="ellipsoid" rgba="0.3 0.2 0.1 0.3"/>
</body>
<body pos="1 0 0" name="mocap1" mocap="true">
<geom conaffinity="0" contype="0" pos="0 0 0" size="0.01 0.01 0.01" type="box"/>
</body>
</worldbody>
<sensor>
<touch name="touchsensor" site="sensorsurf" />
</sensor>
</mujoco>
"""
def test_nested():
model = load_model_from_xml(BASIC_MODEL_XML)
model.vis.global_.fovy
model.vis.quality.shadowsize
def test_mj_sim_basics():
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model, nsubsteps=2)
sim.reset()
sim.step()
sim.reset()
sim.forward()
@pytest.mark.requires_rendering
def test_arrays_of_objs():
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model)
sim.forward()
renderer = cymj.MjRenderContext(sim, offscreen=True)
assert len(renderer.scn.camera) == 2, "Expecting scn.camera to be available"
def test_model_save_load():
model = load_model_from_xml(BASIC_MODEL_XML)
xml_from_model = model.get_xml()
model_from_xml = load_model_from_xml(xml_from_model)
assert(xml_from_model == model_from_xml.get_xml())
mjb_from_model = model.get_mjb()
model_from_mjb = load_model_from_mjb(mjb_from_model)
assert(mjb_from_model == model_from_mjb.get_mjb())
def test_sim_save():
model = load_model_from_xml(BASIC_MODEL_XML)
assert model.nkey == 0
sim = MjSim(model)
with StringIO() as f:
sim.save(f)
f.seek(0)
loaded_model = load_model_from_xml(f.read())
assert loaded_model.nkey == 1
with BytesIO() as f:
sim.save(f, format='mjb')
f.seek(0)
loaded_model = load_model_from_mjb(f.read())
assert loaded_model.nkey == 1
def test_mj_sim_buffers():
model = load_model_from_xml(BASIC_MODEL_XML)
# test no callback
sim = MjSim(model, nsubsteps=2)
assert(sim.udd_state == {})
sim.step()
assert(sim.udd_state == {})
# test with callback
foo = 10
d = {"foo": foo,
"foo_2": np.array([foo, foo])}
def udd_callback(sim):
return d
sim = MjSim(model, nsubsteps=2, udd_callback=udd_callback)
assert(sim.udd_state is not None)
assert(sim.udd_state["foo"] == foo)
assert(sim.udd_state["foo_2"].shape[0] == 2)
assert(sim.udd_state["foo_2"][0] == foo)
foo = 11
d = {"foo": foo,
"foo_2": np.array([foo, foo])}
sim.step()
assert(sim.udd_state is not None)
assert(sim.udd_state["foo"] == foo)
assert(sim.udd_state["foo_2"][0] == foo)
d = {}
with pytest.raises(AssertionError):
sim.step()
d = {"foo": foo,
"foo_2": np.array([foo, foo]),
"foo_3": foo}
with pytest.raises(AssertionError):
sim.step()
d = {"foo": foo,
"foo_2": np.array([foo, foo, foo])}
with pytest.raises(AssertionError):
sim.step()
d = {"foo": "haha",
"foo_2": np.array([foo, foo, foo])}
with pytest.raises(AssertionError):
sim.step()
def test_mj_sim_pool_buffers():
model = load_model_from_xml(BASIC_MODEL_XML)
foo = 10
def udd_callback(sim):
return {"foo": foo}
sims = [MjSim(model, udd_callback=udd_callback) for _ in range(2)]
sim_pool = MjSimPool(sims, nsubsteps=2)
for i in range(len(sim_pool.sims)):
assert(sim_pool.sims[i].udd_state is not None)
assert(sim_pool.sims[i].udd_state["foo"] == 10)
foo = 11
sim_pool.step()
for i in range(len(sim_pool.sims)):
assert(sim_pool.sims[i].udd_state is not None)
assert(sim_pool.sims[i].udd_state["foo"] == 11)
def test_mj_sim_pool_basics():
model = load_model_from_xml(BASIC_MODEL_XML)
sims = [MjSim(model) for _ in range(2)]
sim_pool = MjSimPool(sims, nsubsteps=2)
sim_pool.reset()
sim_pool.step()
sim_pool.forward()
def test_data_attribute_getters():
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model)
sim.forward()
assert_array_equal(sim.data.get_body_xpos("body1"), [0, 0, 1])
with pytest.raises(ValueError):
sim.data.get_body_xpos("body_foo")
with pytest.raises(RuntimeError):
sim.data.get_xpos("body1")
assert len(sim.data.get_body_xquat("body1")) == 4
assert_array_equal(sim.data.get_body_xmat("body1").shape, (3, 3))
# At (0, 1, 1) since the geom is displaced in the body
assert_array_equal(sim.data.get_body_xipos("body1"), [0, 1, 1])
assert_array_equal(sim.data.get_site_xpos("site1"), [1, 0, 1])
assert_array_equal(sim.data.get_site_xmat("site1").shape, (3, 3))
assert_array_equal(sim.data.get_geom_xpos("geom1"), [0.5, 0.4, 0.3])
assert_array_equal(sim.data.get_geom_xpos("geom2"), [0, 1, 1])
assert_array_equal(sim.data.get_geom_xmat("geom2").shape, (3, 3))
assert_array_equal(sim.data.get_light_xpos("light1"), [0, 0, 3])
assert_array_equal(sim.data.get_light_xdir("light1"), [0, 0, -1])
assert_array_equal(sim.data.get_camera_xpos("camera1"), [3, 0, 0])
assert_array_equal(sim.data.get_camera_xmat("camera1").shape, (3, 3))
assert_array_equal(sim.data.get_joint_xaxis("joint1"), [0, 0, 1])
assert_array_equal(sim.data.get_joint_xanchor("joint1"), [0, 0, 1])
def test_joint_qpos_qvel_ops():
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model)
sim.forward()
# Test setting one with a list
sim.data.set_joint_qpos("joint1", [1, 2, 3, 1, 0, 0, 0])
# And the other with an np.ndarray
sim.data.set_joint_qvel("joint1", np.array([1, 2, 3, 0.1, 0.1, 0.1]))
sim.forward()
assert_array_equal(sim.data.get_joint_qpos(
"joint1"), [1, 2, 3, 1, 0, 0, 0])
assert_array_equal(sim.data.get_joint_qvel(
"joint1"), [1, 2, 3, 0.1, 0.1, 0.1])
def test_mocap_ops():
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model)
sim.forward()
assert_array_equal(sim.data.get_body_xpos("mocap1"), [1, 0, 0])
assert_array_equal(sim.data.get_mocap_pos("mocap1"), [1, 0, 0])
assert_array_equal(sim.data.get_mocap_quat("mocap1"), [1, 0, 0, 0])
new_pos = [2, 1, 1]
new_quat = [0.707107, 0.707107, 0, 0]
sim.data.set_mocap_pos("mocap1", new_pos)
sim.data.set_mocap_quat("mocap1", new_quat)
sim.forward()
assert_array_equal(sim.data.get_mocap_pos("mocap1"), new_pos)
assert_array_almost_equal(sim.data.get_mocap_quat("mocap1"), new_quat)
assert_array_equal(sim.data.get_body_xpos("mocap1"), new_pos)
assert_array_almost_equal(sim.data.get_body_xquat("mocap1"), new_quat)
assert_array_almost_equal(sim.data.get_body_xmat("mocap1"),
[[1, 0, 0], [0, 0, -1], [0, 1, 0]])
def test_sim_state():
model = load_model_from_xml(BASIC_MODEL_XML)
foo = 10
d = {"foo": foo,
"foo_array": np.array([foo, foo, foo]),
"foo_2darray": np.reshape(np.array([foo, foo, foo, foo]), (2, 2)),
}
def udd_callback(sim):
return d
sim = MjSim(model, nsubsteps=2, udd_callback=udd_callback)
state = sim.get_state()
assert np.array_equal(state.time, sim.data.time)
assert np.array_equal(state.qpos, sim.data.qpos)
assert np.array_equal(state.qvel, sim.data.qvel)
assert np.array_equal(state.act, sim.data.act)
for k in state.udd_state.keys():
if (isinstance(state.udd_state[k], Number)):
assert state.udd_state[k] == sim.udd_state[k]
else:
assert np.array_equal(state.udd_state[k], sim.udd_state[k])
# test flatten, unflatten
a = state.flatten()
assert len(a) == (1 + sim.model.nq + sim.model.nv + sim.model.na + 8)
state2 = MjSimState.from_flattened(a, sim)
assert np.array_equal(state.time, sim.data.time)
assert np.array_equal(state.qpos, sim.data.qpos)
assert np.array_equal(state.qvel, sim.data.qvel)
assert np.array_equal(state.act, sim.data.act)
for k in state2.udd_state.keys():
if (isinstance(state2.udd_state[k], Number)):
assert state2.udd_state[k] == sim.udd_state[k]
else:
assert np.array_equal(state2.udd_state[k], sim.udd_state[k])
assert state2 == state
assert not state2 != state
# test equality with deleting keys
state2 = state2._replace(udd_state={"foo": foo})
assert state2 != state
assert not (state2 == state)
# test equality with changing contents of array
state2 = state2._replace(
udd_state={"foo": foo, "foo_array": np.array([foo, foo + 1])})
assert state2 != state
assert not (state2 == state)
# test equality with adding keys
d2 = dict(d)
d2.update({"not_foo": foo})
state2 = state2._replace(udd_state=d2)
assert state2 != state
assert not (state2 == state)
# test defensive copy
sim.set_state(state)
state.qpos[0] = -1
assert not np.array_equal(state.qpos, sim.data.qpos)
state3 = sim.get_state()
state3.qpos[0] = -1
assert not np.array_equal(state3.qpos, sim.data.qpos)
state3.udd_state["foo_array"][0] = -1
assert not np.array_equal(
state3.udd_state["foo_array"], sim.udd_state["foo_array"])
# test no callback
sim = MjSim(model, nsubsteps=2)
state = sim.get_state()
print("state.udd_state = %s" % state.udd_state)
assert state.udd_state == {}
# test flatten, unflatten
a = state.flatten()
assert len(a) == 1 + sim.model.nq + sim.model.nv + sim.model.na
state2 = MjSimState.from_flattened(a, sim)
assert np.array_equal(state.time, sim.data.time)
assert np.array_equal(state.qpos, sim.data.qpos)
assert np.array_equal(state.qvel, sim.data.qvel)
assert np.array_equal(state.act, sim.data.act)
assert state.udd_state == sim.udd_state
def test_mj_warning_raises():
''' Test that MuJoCo warnings cause exceptions. '''
# Two boxes on a plane need more than 1 contact (nconmax)
xml = '''
<mujoco>
<size nconmax="1"/>
<worldbody>
<geom type="plane" size="1 1 0.1"/>
<body pos="1 0 1"> <joint type="free"/> <geom size="1"/> </body>
<body pos="0 1 1"> <joint type="free"/> <geom size="1"/> </body>
</worldbody>
</mujoco>
'''
model = load_model_from_xml(xml)
sim = MjSim(model)
sim.reset()
with pytest.raises(Exception):
# This should raise an exception due to the mujoco warning callback
sim.step()
def test_ignore_mujoco_warnings():
# Two boxes on a plane need more than 1 contact (nconmax)
xml = '''
<mujoco>
<size nconmax="1"/>
<worldbody>
<geom type="plane" size="1 1 0.1"/>
<body pos="1 0 1"> <joint type="free"/> <geom size="1"/> </body>
<body pos="0 1 1"> <joint type="free"/> <geom size="1"/> </body>
</worldbody>
</mujoco>
'''
model = load_model_from_xml(xml)
sim = MjSim(model)
sim.reset()
with ignore_mujoco_warnings():
# This should raise an exception due to the mujoco warning callback,
# but it's suppressed by the context manager.
sim.step()
sim.reset()
with pytest.raises(Exception):
# test to make sure previous warning callback restored.
sim.step()
def test_jacobians():
xml = """
<mujoco>
<worldbody>
<body name="body1" pos="0 0 0">
<joint axis="1 0 0" name="a" pos="0 0 0" type="hinge"/>
<geom name="geom1" pos="0 0 0" size="1.0"/>
<body name="body2" pos="0 0 1">
<joint name="b" axis="1 0 0" pos="0 0 1" type="hinge"/>
<geom name="geom2" pos="1 1 1" size="0.5"/>
<site name="target" size="0.1"/>
</body>
</body>
</worldbody>
<actuator>
<motor joint="a"/>
<motor joint="b"/>
</actuator>
</mujoco>
"""
model = load_model_from_xml(xml)
sim = MjSim(model)
sim.reset()
# After reset jacobians are all zeros
target_jacp = np.zeros(3 * sim.model.nv)
sim.data.get_site_jacp('target', jacp=target_jacp)
np.testing.assert_allclose(target_jacp, np.zeros(3 * sim.model.nv))
# After first forward, jacobians are real
sim.forward()
sim.data.get_site_jacp('target', jacp=target_jacp)
target_test = np.array([0, 0, -1, 1, 0, 0])
np.testing.assert_allclose(target_jacp, target_test)
# Should be unchanged after steps (zero action)
for _ in range(2):
sim.step()
sim.forward()
sim.data.get_site_jacp('target', jacp=target_jacp)
assert np.linalg.norm(target_jacp - target_test) < 1e-3
# Apply a very large action, ensure jacobian unchanged after step
sim.reset()
sim.forward()
sim.data.ctrl[:] = np.ones(sim.model.nu) * 1e9
sim.step()
sim.data.get_site_jacp('target', jacp=target_jacp)
np.testing.assert_allclose(target_jacp, target_test)
# After large action, ensure jacobian changed after forward
sim.forward()
sim.data.get_site_jacp('target', jacp=target_jacp)
assert not np.allclose(target_jacp, target_test)
# Test the `site_jacp` property, which gets all at once
np.testing.assert_allclose(target_jacp, sim.data.site_jacp[0])
# Test not passing in array
sim.reset()
sim.forward()
target_jacp = sim.data.get_site_jacp('target')
np.testing.assert_allclose(target_jacp, target_test)
# Test passing in bad array (long instead of double)
    target_jacp = np.zeros(3 * sim.model.nv, dtype=np.int64)
with pytest.raises(ValueError):
sim.data.get_site_jacp('target', jacp=target_jacp)
# Test rotation jacobian - like above but 'jacr' instead of 'jacp'
# After reset jacobians are all zeros
sim.reset()
target_jacr = np.zeros(3 * sim.model.nv)
sim.data.get_site_jacr('target', jacr=target_jacr)
np.testing.assert_allclose(target_jacr, np.zeros(3 * sim.model.nv))
# After first forward, jacobians are real
sim.forward()
sim.data.get_site_jacr('target', jacr=target_jacr)
target_test = np.array([1, 1, 0, 0, 0, 0])
# Test allocating dedicated array
target_jacr = sim.data.get_site_jacr('target')
np.testing.assert_allclose(target_jacr, target_test)
# Test the batch getter (all sites at once)
np.testing.assert_allclose(target_jacr, sim.data.site_jacr[0])
# Test passing in bad array
    target_jacr = np.zeros(3 * sim.model.nv, dtype=np.int64)
with pytest.raises(ValueError):
sim.data.get_site_jacr('target', jacr=target_jacr)
def test_xvelp(): # xvelp = positional velocity in world frame
xml = """
<mujoco>
<worldbody>
<body name="body1" pos="0 0 0">
<joint name="a" axis="1 0 0" pos="0 0 0" type="slide"/>
<geom name="geom1" pos="0 0 0" size="1.0"/>
<body name="body2" pos="0 0 1">
<joint name="b" axis="1 0 0" pos="0 0 1" type="slide"/>
<geom name="geom2" pos="0 0 0" size="0.5"/>
<site name="site1" size="0.1"/>
</body>
</body>
</worldbody>
<actuator>
<motor joint="a"/>
<motor joint="b"/>
</actuator>
</mujoco>
"""
model = load_model_from_xml(xml)
sim = MjSim(model)
sim.reset()
sim.forward()
# Check that xvelp starts out at zero (since qvel is zero)
site1_xvelp = sim.data.get_site_xvelp('site1')
np.testing.assert_allclose(site1_xvelp, np.zeros(3))
# Push the base body and step forward to get it moving
sim.data.ctrl[0] = 1e9
sim.step()
sim.forward()
# Check that the first body has nonzero xvelp
body1_xvelp = sim.data.get_body_xvelp('body1')
assert not np.allclose(body1_xvelp, np.zeros(3))
# Check that the second body has zero xvelp (still)
body2_xvelp = sim.data.get_body_xvelp('body2')
np.testing.assert_allclose(body2_xvelp, np.zeros(3))
# Check that this matches the batch (gathered) getter property
np.testing.assert_allclose(body2_xvelp, sim.data.body_xvelp[2])
def test_xvelr(): # xvelr = rotational velocity in world frame
xml = """
<mujoco>
<worldbody>
<body name="body1" pos="0 0 0">
<joint name="a" axis="1 0 0" pos="0 0 0" type="hinge"/>
<geom name="geom1" pos="0 0 0" size="0.3"/>
<body name="body2" pos="0 0 1">
<joint name="b" axis="1 0 0" pos="0 0 0" type="hinge"/>
<geom name="geom2" pos="0 0 0" size="0.3"/>
<site name="site1" size="0.1"/>
</body>
</body>
</worldbody>
<actuator>
<motor joint="a"/>
<motor joint="b"/>
</actuator>
</mujoco>
"""
model = load_model_from_xml(xml)
sim = MjSim(model)
sim.reset()
sim.forward()
# Check that xvelr starts out at zero (since qvel is zero)
site1_xvelr = sim.data.get_site_xvelr('site1')
np.testing.assert_allclose(site1_xvelr, np.zeros(3))
# Push the base body and step forward to get it moving
sim.data.ctrl[0] = 1e9
sim.step()
sim.forward()
# Check that the first body has nonzero xvelr
body1_xvelr = sim.data.get_body_xvelr('body1')
assert not np.allclose(body1_xvelr, np.zeros(3))
# Check that the second body has zero xvelr (still)
body2_xvelr = sim.data.get_body_xvelr('body2')
np.testing.assert_allclose(body2_xvelr, np.zeros(3))
# Check that this matches the batch (gathered) getter property
np.testing.assert_allclose(body2_xvelr, sim.data.body_xvelr[2])
@pytest.mark.requires_rendering
def test_rendering():
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model)
sim.forward()
img, depth = sim.render(200, 200, depth=True)
assert img.shape == (200, 200, 3)
compare_imgs(img, 'test_rendering.freecam.png')
depth = (depth - np.min(depth)) / (np.max(depth) - np.min(depth))
depth = np.asarray(depth * 255, dtype=np.uint8)
assert depth.shape == (200, 200)
compare_imgs(depth, 'test_rendering.freecam.depth.png')
img = sim.render(100, 100, camera_name="camera1")
assert img.shape == (100, 100, 3)
compare_imgs(img, 'test_rendering.camera1.png')
img = sim.render(200, 100, camera_name="camera1")
assert img.shape == (100, 200, 3)
compare_imgs(img, 'test_rendering.camera1.narrow.png')
render_context = sim.render_contexts[0]
render_context.add_marker(size=np.array([.4, .5, .6]),
pos=np.array([.4, .5, .6]),
rgba=np.array([.7, .8, .9, 1.0]),
label="mark")
img = sim.render(200, 200, camera_name="camera1")
assert img.shape == (200, 200, 3)
compare_imgs(img, 'test_rendering_markers.camera1.png')
@pytest.mark.requires_rendering
def test_rendering_failing():
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model)
sim.forward()
sim.render(100, 100)
render_context = sim.render_contexts[0]
render_context.add_marker(size=np.array([.4, .5, .6]),
pos=np.array([.4, .5, .6]),
rgba=np.array([.7, .8, .9, 1.0]),
label="blaaaa")
img = sim.render(200, 200, camera_name="camera1")
assert img.shape == (200, 200, 3)
try:
compare_imgs(img, 'test_rendering_markers.camera1.png')
assert False
except Exception as e:
pass
@pytest.mark.requires_rendering
def test_viewercontext():
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model)
sim.forward()
renderer = cymj.MjRenderContext(sim, offscreen=True)
renderer.add_marker(type=const.GEOM_SPHERE,
size=np.ones(3) * 0.1,
pos=np.zeros(3),
mat=np.eye(3).flatten(),
rgba=np.ones(4),
label="mark")
@pytest.mark.requires_rendering
def test_many_sims_rendering():
model = load_model_from_xml(BASIC_MODEL_XML)
sims = [MjSim(model) for _ in range(5)]
pool = MjSimPool(sims)
pool.forward()
for sim in sims:
img, depth = sim.render(200, 200, depth=True)
assert img.shape == (200, 200, 3)
compare_imgs(img, 'test_rendering.freecam.png')
def test_xml_from_path():
model = load_model_from_path("mujoco_py/tests/test.xml")
sim = MjSim(model)
xml = model.get_xml()
assert xml.find("blabla") > -1, "include should be embeeded"
assert xml.find("include") == - \
1, "include should be parsed and not present"
def test_sensors():
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model)
sim.model.sensor_names
sim.data.get_sensor("touchsensor")
@pytest.mark.requires_rendering
@pytest.mark.skipif("Darwin" not in sys.platform,
reason="Only Darwin code is thread safe.")
def test_concurrent_rendering():
'''Best-effort testing that concurrent multi-threaded rendering works.
The test has no guarantees around being deterministic, but if it fails
you know something is wrong with concurrent rendering. If it passes,
things are probably working.'''
err = None
def func(sim, event):
event.wait()
sim.data.qpos[:] = 0.0
sim.forward()
img1 = sim.render(width=40, height=40, camera_name="camera1")
img2 = sim.render(width=40, height=40, camera_name="camera2")
try:
assert np.sum(img1[:]) == 23255
assert np.sum(img2[:]) == 12007
except Exception as e:
nonlocal err
err = e
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model)
sim.render(100, 100)
event = Event()
threads = []
for _ in range(100):
thread = Thread(target=func, args=(sim, event))
threads.append(thread)
thread.start()
event.set()
for thread in threads:
thread.join()
assert err is None, "Exception: %s" % (str(err))
@pytest.mark.requires_rendering
def test_high_res():
model = load_model_from_xml(BASIC_MODEL_XML)
sim = MjSim(model)
sim.forward()
img = sim.render(1000, 1000)
img = scipy.misc.imresize(img, (200, 200, 3))
assert img.shape == (200, 200, 3)
compare_imgs(img, 'test_rendering.freecam.png')
@pytest.mark.skipif(sys.platform.startswith("win"), reason="This test fails on windows.")
def test_multiprocess():
'''
Tests for importing mujoco_py from multiple processes.
'''
ctx = get_context('spawn')
processes = []
times = 3
queue = ctx.Queue()
for idx in range(3):
processes.append(ctx.Process(target=import_process, args=(queue, )))
for p in processes:
p.start()
for p in processes:
p.join()
for _ in range(times):
assert queue.get(), "One of processes failed."
def import_process(queue):
try:
from mujoco_py import builder
mjpro_path, key_path = builder.discover_mujoco()
builder.load_cython_ext(mjpro_path)
except Exception as e:
queue.put(False)
else:
queue.put(True)
|
cartwrap.py
|
import subprocess
import io
from threading import Thread
from queue import Queue
import re
import math
def reader(pipe, pipe_name, queue):
try:
with pipe:
for line in iter(pipe.readline, b''):
queue.put((pipe_name, line))
finally:
queue.put(None)
# This function invokes the C code to calculate a cartogram for a given gen and area input.
# It returns a generator that yields its output on stdout and stderr in the format:
#
# source, line
#
# where source is a string (either 'stdout' or 'stderr'), and line is a string.
#
# It takes as input:
#
# area_data: A string containing appropriately formated area data
# gen_file: A string containing the path to the appropriate .gen file
# cartogram_executable: A string containg the path to the C code executable
def generate_cartogram(area_data, gen_file, cartogram_executable, world=False):
flag = '-s'
if world == True:
flag = '-sw'
cartogram_process = subprocess.Popen([
cartogram_executable,
'-g',
gen_file,
flag
],stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE,bufsize=1)
q = Queue()
Thread(target=reader,args=[cartogram_process.stdout, "stdout", q]).start()
Thread(target=reader,args=[cartogram_process.stderr, "stderr", q]).start()
cartogram_process.stdin.write(str.encode(area_data))
cartogram_process.stdin.close()
for _ in range(2):
for source, line in iter(q.get, None):
yield source,line
#output, errors = cartogram_process.communicate(bytes(area_data, 'UTF-8'))
#return io.StringIO(output.decode())
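# Illustrative usage sketch (the paths and executable name are placeholders): the generator can
# be consumed line by line to stream progress from the C process; note that the yielded lines
# are bytes, since the reader iterates over the raw pipes.
#   for source, line in generate_cartogram(area_csv_string, "map.gen", "./cartogram"):
#       if source == "stderr":
#           print("cartogram error:", line.decode(), end="")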
|
e2e.py
|
"""
This is an end to end release test automation script used to kick off periodic
release tests, running on Anyscale.
The tool leverages app configs and compute templates.
Calling this script will run a single release test.
Example:
python e2e.py --test-config ~/ray/release/xgboost_tests/xgboost_tests.yaml --test-name tune_small
The following steps are then performed:
1. It will look up the test tune_small in the file xgboost_tests.yaml
2. It will fetch the specified app config and compute template and register
those with anyscale (if they don’t exist yet)
3. It waits until the app config is built
4. It then kicks off the script defined in the run block
5. When the script is finished, it will fetch the latest logs, the full log
output, and any artifacts specified in the artifacts block.
6. The full logs and artifacts will be stored in a s3 bucket
7. It will also fetch the json file specified in the run block as results.
This is the file where you should write your metrics to.
8. All results are then stored in a database.
Specifically it will store the following fields:
- Timestamp
- Test name
- Status (finished, error, timeout, invalid)
- Last logs (50 lines)
- results (see above)
- artifacts (links to s3 files)
Then the script exits. If an error occurs at any time, a fail result is
written to the database.
Exit codes
----------
The script exits with code 0 on success, i.e. if the test has been run
end to end without failures and the subsequent results checks have passed.
In all other cases, an exit code > 0 is returned.
Exit code 1 is the general failure exit code returned by Python when we
encounter an error that isn't caught by the rest of the script.
Generally, we try to catch errors as they occur, and return a specific exit
code that can be used in automation tools to e.g. retry a test when nodes
didn't come up in time.
These exit codes are defined in the ``ExitCode`` enum below.
Writing a new release test
--------------------------
Each release test requires the following:
1. It has to be added in a release test yaml file, describing meta information
about the test (e.g. name, command to run, timeout)
2. You need an app config yaml
3. You need a compute template yaml
4. You need to define a command to run. This is usually a python script.
The command should accept (or ignore) a single optional
`--smoke-test` argument.
Usually the command should write its result metrics to a json file.
The json filename is available in the TEST_OUTPUT_JSON env variable.
5. Add your test in release/.buildkite/build_pipeline.py.
The script will have access to these environment variables:
"RAY_ADDRESS": os.environ.get("RAY_ADDRESS", "auto")
"TEST_OUTPUT_JSON": results_json_filename
"IS_SMOKE_TEST": "1" if smoke_test else "0"
For an example, take a look at the XGBoost test suite:
https://github.com/ray-project/ray/blob/master/release/xgboost_tests/xgboost_tests.yaml
These all use the same app configs and similar compute templates. This means
that app configs can be re-used across runs and only have to be built once.
App configs and compute templates can interpret environment variables.
A notable one is the `RAY_WHEELS` variable which points to the wheels that
should be tested (e.g. latest master wheels). You might want to include
something like this in your `post_build_cmds`:
- pip3 install -U {{ env["RAY_WHEELS"] | default("ray") }}
If you want to force rebuilds, consider using something like
- echo {{ env["TIMESTAMP"] }}
so that your app config changes each time the script is executed. If you
only want to trigger rebuilds once per day, use `DATESTAMP` instead:
- echo {{ env["DATESTAMP"] }}
Local testing
-------------
Make sure to set these environment variables:
- ANYSCALE_CLI_TOKEN (should contain your anyscale credential token)
- ANYSCALE_PROJECT (should point to a project ID you have access to)
A test can then be run like this:
python e2e.py --test-config ~/ray/release/xgboost_tests/xgboost_tests.yaml --test-name tune_small
Using Compilation on Product + App Config Override
--------------------------------------------------
For quick iteration when debugging a release test, go/compile-on-product allows
you to easily modify and recompile Ray, such that the recompilation happens
within an app build step and can benefit from a warm Bazel cache. See
go/compile-on-product for more information.
After kicking off the app build, you can give the app config ID to this script
as an app config override, where the indicated app config will be used instead
of the app config given in the test config. E.g., running
python e2e.py --test-config ~/ray/benchmarks/benchmark_tests.yaml --test-name=single_node --app-config-id-override=apt_TBngEXXXrhipMXgexVcrpC9i
would run the single_node benchmark test with the apt_TBngEXXXrhipMXgexVcrpC9i
app config instead of the app config given in
~/ray/benchmarks/benchmark_tests.yaml. If the build for the app config is still
in progress, the script will wait until it completes, same as for a locally
defined app config.
Running on Head Node vs Running with Anyscale Connect
-----------------------------------------------------
By default release tests run their drivers on the head node. Support is being
added to run release tests that execute the driver as a subprocess and run
the workload on Anyscale product via Anyscale connect.
Note that when the driver in the test is a subprocess of releaser, releaser
cannot be terminated before the test finishes.
Other known feature gaps when running with Anyscale connect:
- Kicking off a test or checking progress is not supported.
- Downloading / uploading logs and artifacts are unsupported.
- Logs from remote may not have finished streaming before the driver exits.
Long running tests
------------------
Long running tests can be kicked off by adding the --kick-off-only
parameter to the e2e script. The status can then be checked with the
--check command.
Long running test sessions will be terminated after `timeout` seconds, after
which the latest result in the TEST_OUTPUT_JSON will be reported. Thus,
long running release tests should update this file periodically.
There are also two config options to configure behavior. The `time_key` is
needed to track the latest update of the TEST_OUTPUT_JSON and should contain
a floating point number (usually `time.time()`). The `max_update_delay` then
specifies the maximum time in seconds that may pass without an update
to the results json. If the output file hasn't been updated in e.g. 60 seconds,
this could indicate that the command is stale/frozen, and thus should fail.
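For illustration, a long running (hypothetical) workload could refresh the results
json periodically like this:
    import json, os, time
    for i in range(1000):
        time.sleep(30)  # placeholder for a unit of actual work
        with open(os.environ["TEST_OUTPUT_JSON"], "wt") as f:
            json.dump({"last_update": time.time(), "iteration": i}, f)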
Release test yaml example
-------------------------
- name: example
owner:
mail: "kai@anyscale.com" # Currently not used
slack: "@tune-team" # Currentl not used
cluster:
app_config: app_config.yaml # Relative to the release test yaml
compute_template: tpl_cpu.yaml
run:
timeout: 600 # in seconds
prepare: python wait_cluster.py 4 600 # prepare cmd to run before test
script: python workloads/train.py # actual release test command
# Only needed for long running test
time_key: last_update # Key in the results json indicating current time
max_update_delay: 30 # If state hasn't been updated in 30s, terminate
# This block is optional
artifacts:
# Artifact name: location on head node
- detailed_output: detailed_output.csv
# This block is optional. If present, the contents will be
# deep updated for smoke testing
smoke_test:
cluster:
compute_template: tpl_cpu_smoketest.yaml
""" # noqa: E501
import argparse
import enum
import boto3
import collections
import copy
import datetime
import hashlib
import jinja2
import json
import logging
import multiprocessing
import os
import requests
import shutil
import subprocess
import sys
import re
import tempfile
import time
from queue import Empty
from typing import Any, Dict, Optional, Tuple, List
import yaml
import anyscale
import anyscale.conf
from anyscale.api import instantiate_api_client
from anyscale.controllers.session_controller import SessionController
from anyscale.sdk.anyscale_client.sdk import AnyscaleSDK
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter(fmt="[%(levelname)s %(asctime)s] "
"%(filename)s: %(lineno)d "
"%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def getenv_default(key: str, default: Optional[str] = None):
"""Return environment variable with default value"""
# If the environment variable is set but "", still return default
return os.environ.get(key, None) or default
GLOBAL_CONFIG = {
"ANYSCALE_USER": getenv_default("ANYSCALE_USER",
"release-automation@anyscale.com"),
"ANYSCALE_HOST": getenv_default("ANYSCALE_HOST",
"https://beta.anyscale.com"),
"ANYSCALE_CLI_TOKEN": getenv_default("ANYSCALE_CLI_TOKEN"),
"ANYSCALE_CLOUD_ID": getenv_default(
"ANYSCALE_CLOUD_ID",
"cld_4F7k8814aZzGG8TNUGPKnc"), # anyscale_default_cloud
"ANYSCALE_PROJECT": getenv_default("ANYSCALE_PROJECT", ""),
"RAY_VERSION": getenv_default("RAY_VERSION", "1.9.0"),
"RAY_REPO": getenv_default("RAY_REPO",
"https://github.com/ray-project/ray.git"),
"RAY_BRANCH": getenv_default("RAY_BRANCH", "master"),
"RELEASE_AWS_BUCKET": getenv_default("RELEASE_AWS_BUCKET",
"ray-release-automation-results"),
"RELEASE_AWS_LOCATION": getenv_default("RELEASE_AWS_LOCATION", "dev"),
"RELEASE_AWS_DB_NAME": getenv_default("RELEASE_AWS_DB_NAME", "ray_ci"),
"RELEASE_AWS_DB_TABLE": getenv_default("RELEASE_AWS_DB_TABLE",
"release_test_result"),
"RELEASE_AWS_DB_SECRET_ARN": getenv_default(
"RELEASE_AWS_DB_SECRET_ARN",
"arn:aws:secretsmanager:us-west-2:029272617770:secret:"
"rds-db-credentials/cluster-7RB7EYTTBK2EUC3MMTONYRBJLE/ray_ci-MQN2hh",
),
"RELEASE_AWS_DB_RESOURCE_ARN": getenv_default(
"RELEASE_AWS_DB_RESOURCE_ARN",
"arn:aws:rds:us-west-2:029272617770:cluster:ci-reporting",
),
"RELEASE_RESULTS_DIR": getenv_default("RELEASE_RESULTS_DIR",
"/tmp/ray_release_test_artifacts"),
"DATESTAMP": str(datetime.datetime.now().strftime("%Y%m%d")),
"TIMESTAMP": str(int(datetime.datetime.now().timestamp())),
"EXPIRATION_1D": str((datetime.datetime.now() +
datetime.timedelta(days=1)).strftime("%Y-%m-%d")),
"EXPIRATION_2D": str((datetime.datetime.now() +
datetime.timedelta(days=2)).strftime("%Y-%m-%d")),
"EXPIRATION_3D": str((datetime.datetime.now() +
datetime.timedelta(days=3)).strftime("%Y-%m-%d")),
"REPORT_RESULT": getenv_default("REPORT_RESULT", ""),
}
REPORT_S = 30
RETRY_MULTIPLIER = 2
class ExitCode(enum.Enum):
UNSPECIFIED = 2
UNKNOWN = 3
RUNTIME_ERROR = 4
COMMAND_ERROR = 5
COMMAND_TIMEOUT = 6
PREPARE_TIMEOUT = 7
FILESYNC_TIMEOUT = 8
SESSION_TIMEOUT = 9
PREPARE_ERROR = 10
APPCONFIG_BUILD_ERROR = 11
INFRA_ERROR = 12
def exponential_backoff_retry(f, retry_exceptions, initial_retry_delay_s,
max_retries):
retry_cnt = 0
retry_delay_s = initial_retry_delay_s
while True:
try:
return f()
except retry_exceptions as e:
retry_cnt += 1
if retry_cnt > max_retries:
raise
logger.info(f"Retry function call failed due to {e} "
f"in {retry_delay_s} seconds...")
time.sleep(retry_delay_s)
retry_delay_s *= RETRY_MULTIPLIER
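# Note on exponential_backoff_retry above: `retry_exceptions` may be a single
# exception class or a tuple of classes. A concrete use is in report_result()
# below, which retries RDS calls hitting StatementTimeoutException with
# exponentially growing delays.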
def maybe_fetch_api_token():
if GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"] is None:
logger.info(
"Missing ANYSCALE_CLI_TOKEN, retrieving from AWS secrets store")
# NOTE(simon) This should automatically retrieve
# release-automation@anyscale.com's anyscale token
GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"] = boto3.client(
"secretsmanager", region_name="us-west-2"
).get_secret_value(
SecretId="arn:aws:secretsmanager:us-west-2:029272617770:secret:"
"release-automation/"
"anyscale-token20210505220406333800000001-BcUuKB")["SecretString"]
class PrepareCommandRuntimeError(RuntimeError):
pass
class ReleaseTestRuntimeError(RuntimeError):
pass
class ReleaseTestInfraError(ReleaseTestRuntimeError):
pass
class ReleaseTestTimeoutError(ReleaseTestRuntimeError):
pass
class SessionTimeoutError(ReleaseTestTimeoutError):
pass
class FileSyncTimeoutError(ReleaseTestTimeoutError):
pass
class CommandTimeoutError(ReleaseTestTimeoutError):
pass
class PrepareCommandTimeoutError(ReleaseTestTimeoutError):
pass
# e.g., App config failure.
class AppConfigBuildFailure(RuntimeError):
pass
class State:
def __init__(self, state: str, timestamp: float, data: Any):
self.state = state
self.timestamp = timestamp
self.data = data
sys.path.insert(0, anyscale.ANYSCALE_RAY_DIR)
def anyscale_project_url(project_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/projects/{project_id}" \
f"/?tab=session-list"
def anyscale_session_url(project_id: str, session_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/projects/{project_id}" \
f"/clusters/{session_id}"
def anyscale_compute_tpl_url(compute_tpl_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/configurations/cluster-computes" \
f"/{compute_tpl_id}"
def anyscale_app_config_build_url(build_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/configurations/app-config-details" \
f"/{build_id}"
def wheel_url(ray_version, git_branch, git_commit):
return f"https://s3-us-west-2.amazonaws.com/ray-wheels/" \
f"{git_branch}/{git_commit}/" \
f"ray-{ray_version}-cp37-cp37m-manylinux2014_x86_64.whl"
def wheel_exists(ray_version, git_branch, git_commit):
url = wheel_url(ray_version, git_branch, git_commit)
return requests.head(url).status_code == 200
def commit_or_url(commit_or_url: str) -> str:
if commit_or_url.startswith("http"):
url = None
# Directly return the S3 url
if "s3" in commit_or_url and "amazonaws.com" in commit_or_url:
url = commit_or_url
# Resolve the redirects for buildkite artifacts
# This is needed because otherwise pip won't recognize the file name.
elif "buildkite.com" in commit_or_url and "artifacts" in commit_or_url:
url = requests.head(commit_or_url, allow_redirects=True).url
if url is not None:
# Extract commit from url so that we can do the
# commit sanity check later.
p = re.compile("/([a-f0-9]{40})/")
m = p.search(url)
if m is not None:
os.environ["RAY_COMMIT"] = m.group(1)
return url
# Else, assume commit
os.environ["RAY_COMMIT"] = commit_or_url
return wheel_url(GLOBAL_CONFIG["RAY_VERSION"], GLOBAL_CONFIG["RAY_BRANCH"],
commit_or_url)
def get_latest_commits(repo: str, branch: str = "master") -> List[str]:
cur = os.getcwd()
with tempfile.TemporaryDirectory() as tmpdir:
os.chdir(tmpdir)
clone_cmd = [
"git",
"clone",
"--filter=tree:0",
"--no-checkout",
# "--single-branch",
# "--depth=10",
f"--branch={branch}",
repo,
tmpdir,
]
log_cmd = [
"git",
"log",
"-n",
"10",
"--pretty=format:%H",
]
subprocess.check_output(clone_cmd)
commits = subprocess.check_output(log_cmd).decode(
sys.stdout.encoding).split("\n")
os.chdir(cur)
return commits
def find_ray_wheels(repo: str, branch: str, version: str):
url = None
commits = get_latest_commits(repo, branch)
logger.info(f"Latest 10 commits for branch {branch}: {commits}")
for commit in commits:
if wheel_exists(version, branch, commit):
url = wheel_url(version, branch, commit)
os.environ["RAY_WHEELS"] = url
os.environ["RAY_COMMIT"] = commit
logger.info(
f"Found wheels URL for Ray {version}, branch {branch}: "
f"{url}")
break
return url
def populate_wheels_sanity_check(commit: Optional[str] = None):
if not commit:
cmd = ("python -c 'import ray; print("
"\"No commit sanity check available, but this is the "
"Ray wheel commit:\", ray.__commit__)'")
else:
cmd = (f"python -c 'import ray; "
f"assert ray.__commit__ == \"{commit}\", ray.__commit__'")
os.environ["RAY_WHEELS_SANITY_CHECK"] = cmd
def _check_stop(stop_event: multiprocessing.Event, timeout_type: str):
if stop_event.is_set():
if timeout_type == "prepare_command":
raise PrepareCommandTimeoutError(
"Process timed out in the prepare command stage.")
if timeout_type == "command":
raise CommandTimeoutError(
"Process timed out while running a command.")
elif timeout_type == "file_sync":
raise FileSyncTimeoutError(
"Process timed out while syncing files.")
elif timeout_type == "session":
raise SessionTimeoutError(
"Process timed out while starting a session.")
else:
assert False, "Unexpected timeout type."
def _deep_update(d, u):
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
d[k] = _deep_update(d.get(k, {}), v)
else:
d[k] = v
return d
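# Illustrative example of _deep_update above:
#   _deep_update({"run": {"timeout": 600}}, {"run": {"timeout": 60}})
# returns {"run": {"timeout": 60}} while leaving any other keys under "run" untouched.
# This is how the optional `smoke_test` block overrides the base test config.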
def _dict_hash(dt: Dict[Any, Any]) -> str:
json_str = json.dumps(dt, sort_keys=True, ensure_ascii=True)
sha = hashlib.sha256()
sha.update(json_str.encode())
return sha.hexdigest()
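# The hash computed above is used to derive deterministic compute template and app
# config names, so identical configs are looked up and re-used instead of re-created.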
def _load_config(local_dir: str, config_file: Optional[str]) -> Optional[Dict]:
if not config_file:
return None
config_path = os.path.join(local_dir, config_file)
with open(config_path, "rt") as f:
# Todo: jinja2 render
content = f.read()
env = copy.deepcopy(os.environ)
env.update(GLOBAL_CONFIG)
content = jinja2.Template(content).render(env=env)
return yaml.safe_load(content)
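# Example: with the jinja2 rendering above, an app config line such as
#   - pip3 install -U {{ env["RAY_WHEELS"] | default("ray") }}
# is expanded against the merged os.environ + GLOBAL_CONFIG mapping.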
def _wrap_app_config_pip_installs(app_config: Dict[Any, Any]):
"""Wrap pip package install in quotation marks"""
if app_config.get("python", {}).get("pip_packages"):
new_pip_packages = []
for pip_package in app_config["python"]["pip_packages"]:
new_pip_packages.append(f"\"{pip_package}\"")
app_config["python"]["pip_packages"] = new_pip_packages
def has_errored(result: Dict[Any, Any]) -> bool:
return result.get("status", "invalid") != "finished"
def maybe_get_alert_for_result(result_dict: Dict[str, Any]) -> Optional[str]:
# If we get a result dict, check if any alerts should be raised
from alert import SUITE_TO_FN, default_handle_result
logger.info("Checking if results are valid...")
# Copy dict because we modify kwargs here
handle_result_kwargs = result_dict.copy()
handle_result_kwargs["created_on"] = None
test_suite = handle_result_kwargs.get("test_suite", None)
handle_fn = SUITE_TO_FN.get(test_suite, None)
if not handle_fn:
logger.warning(f"No handle for suite {test_suite}")
alert = default_handle_result(**handle_result_kwargs)
else:
alert = handle_fn(**handle_result_kwargs)
return alert
def report_result(test_suite: str, test_name: str, status: str, last_logs: str,
results: Dict[Any, Any], artifacts: Dict[Any, Any],
category: str):
now = datetime.datetime.utcnow()
rds_data_client = boto3.client("rds-data", region_name="us-west-2")
schema = GLOBAL_CONFIG["RELEASE_AWS_DB_TABLE"]
sql = (
f"INSERT INTO {schema} "
f"(created_on, test_suite, test_name, status, last_logs, "
f"results, artifacts, category) "
f"VALUES (:created_on, :test_suite, :test_name, :status, :last_logs, "
f":results, :artifacts, :category)")
parameters = [{
"name": "created_on",
"typeHint": "TIMESTAMP",
"value": {
"stringValue": now.strftime("%Y-%m-%d %H:%M:%S")
},
}, {
"name": "test_suite",
"value": {
"stringValue": test_suite
}
}, {
"name": "test_name",
"value": {
"stringValue": test_name
}
}, {
"name": "status",
"value": {
"stringValue": status
}
}, {
"name": "last_logs",
"value": {
"stringValue": last_logs
}
}, {
"name": "results",
"typeHint": "JSON",
"value": {
"stringValue": json.dumps(results)
},
}, {
"name": "artifacts",
"typeHint": "JSON",
"value": {
"stringValue": json.dumps(artifacts)
},
}, {
"name": "category",
"value": {
"stringValue": category
}
}]
# Default boto3 call timeout is 45 seconds.
retry_delay_s = 64
MAX_RDS_RETRY = 3
exponential_backoff_retry(
lambda: rds_data_client.execute_statement(
database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
parameters=parameters,
secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
schema=schema,
sql=sql),
retry_exceptions=rds_data_client.exceptions.StatementTimeoutException,
initial_retry_delay_s=retry_delay_s,
max_retries=MAX_RDS_RETRY)
logger.info("Result has been persisted to the database")
def log_results_and_artifacts(result: Dict):
results = result.get("results", {})
if results:
msg = "Observed the following results:\n\n"
for key, val in results.items():
msg += f" {key} = {val}\n"
else:
msg = "Did not find any results."
logger.info(msg)
artifacts = result.get("artifacts", {})
if artifacts:
msg = "Saved the following artifacts:\n\n"
for key, val in artifacts.items():
msg += f" {key} = {val}\n"
else:
msg = "Did not find any artifacts."
logger.info(msg)
def _cleanup_session(sdk: AnyscaleSDK, session_id: str):
if session_id:
# Just trigger a request. No need to wait until session shutdown.
sdk.terminate_session(
session_id=session_id, terminate_session_options={})
def search_running_session(sdk: AnyscaleSDK, project_id: str,
session_name: str) -> Optional[str]:
session_id = None
logger.info(f"Looking for existing session with name {session_name}")
result = sdk.search_sessions(
project_id=project_id,
sessions_query=dict(name=dict(equals=session_name)))
if len(result.results) > 0 and result.results[0].state == "Running":
logger.info("Found existing session.")
session_id = result.results[0].id
return session_id
def find_cloud_by_name(sdk: AnyscaleSDK, cloud_name: str,
_repeat: bool = True) -> Optional[str]:
cloud_id = None
logger.info(f"Looking up cloud with name `{cloud_name}`. ")
paging_token = None
while not cloud_id:
result = sdk.search_clouds(
clouds_query=dict(
paging=dict(count=50, paging_token=paging_token)))
paging_token = result.metadata.next_paging_token
for res in result.results:
if res.name == cloud_name:
cloud_id = res.id
logger.info(
f"Found cloud with name `{cloud_name}` as `{cloud_id}`")
break
if not paging_token or cloud_id or not len(result.results):
break
return cloud_id
def create_or_find_compute_template(
sdk: AnyscaleSDK,
project_id: str,
compute_tpl: Dict[Any, Any],
_repeat: bool = True) -> Tuple[Optional[str], Optional[str]]:
compute_tpl_id = None
compute_tpl_name = None
if compute_tpl:
# As of Anyscale 0.4.1, it is an error to use the same compute template
# name within the same organization, between different projects.
compute_tpl_name = f"{project_id}/compute/{_dict_hash(compute_tpl)}"
logger.info(f"Tests uses compute template "
f"with name {compute_tpl_name}. Looking up existing "
f"templates.")
paging_token = None
while not compute_tpl_id:
result = sdk.search_compute_templates(
dict(
project_id=project_id,
name=dict(equals=compute_tpl_name),
include_anonymous=True),
paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for res in result.results:
if res.name == compute_tpl_name:
compute_tpl_id = res.id
logger.info(
f"Template already exists with ID {compute_tpl_id}")
break
if not paging_token:
break
if not compute_tpl_id:
logger.info(f"Compute template not found. "
f"Creating with name {compute_tpl_name}.")
try:
result = sdk.create_compute_template(
dict(
name=compute_tpl_name,
project_id=project_id,
config=compute_tpl))
compute_tpl_id = result.result.id
except Exception as e:
if _repeat:
logger.warning(
f"Got exception when trying to create compute "
f"template: {e}. Sleeping for 10 seconds and then "
f"try again once...")
time.sleep(10)
return create_or_find_compute_template(
sdk=sdk,
project_id=project_id,
compute_tpl=compute_tpl,
_repeat=False)
raise e
logger.info(f"Compute template created with ID {compute_tpl_id}")
return compute_tpl_id, compute_tpl_name
def create_or_find_app_config(
sdk: AnyscaleSDK,
project_id: str,
app_config: Dict[Any, Any],
_repeat: bool = True) -> Tuple[Optional[str], Optional[str]]:
app_config_id = None
app_config_name = None
if app_config:
app_config_name = f"{project_id}-{_dict_hash(app_config)}"
logger.info(f"Test uses an app config with hash {app_config_name}. "
f"Looking up existing app configs with this name.")
paging_token = None
while not app_config_id:
result = sdk.list_app_configs(
project_id=project_id, count=50, paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for res in result.results:
if res.name == app_config_name:
app_config_id = res.id
logger.info(
f"App config already exists with ID {app_config_id}")
break
if not paging_token or app_config_id:
break
if not app_config_id:
logger.info("App config not found. Creating new one.")
try:
result = sdk.create_app_config(
dict(
name=app_config_name,
project_id=project_id,
config_json=app_config))
app_config_id = result.result.id
except Exception as e:
if _repeat:
logger.warning(
f"Got exception when trying to create app "
f"config: {e}. Sleeping for 10 seconds and then "
f"try again once...")
time.sleep(10)
return create_or_find_app_config(
sdk=sdk,
project_id=project_id,
app_config=app_config,
_repeat=False)
raise e
logger.info(f"App config created with ID {app_config_id}")
return app_config_id, app_config_name
def run_bash_script(local_dir: str, bash_script: str):
previous_dir = os.getcwd()
bash_script_local_dir = os.path.dirname(bash_script)
file_name = os.path.basename(bash_script)
full_local_dir = os.path.join(local_dir, bash_script_local_dir)
os.chdir(full_local_dir)
subprocess.run("./" + file_name, shell=True, check=True)
os.chdir(previous_dir)
def install_app_config_packages(app_config: Dict[Any, Any]):
os.environ.update(app_config.get("env_vars", {}))
packages = app_config["python"]["pip_packages"]
for package in packages:
subprocess.check_output(["pip", "install", "-U", package], text=True)
def install_matching_ray():
wheel = os.environ.get("RAY_WHEELS", None)
if not wheel:
return
assert "manylinux2014_x86_64" in wheel, wheel
if sys.platform == "darwin":
platform = "macosx_10_15_intel"
elif sys.platform == "win32":
platform = "win_amd64"
else:
platform = "manylinux2014_x86_64"
wheel = wheel.replace("manylinux2014_x86_64", platform)
subprocess.check_output(["pip", "uninstall", "-y", "ray"], text=True)
subprocess.check_output(["pip", "install", "-U", wheel], text=True)
def wait_for_build_or_raise(sdk: AnyscaleSDK,
app_config_id: Optional[str]) -> Optional[str]:
if not app_config_id:
return None
# Fetch build
build_id = None
last_status = None
result = sdk.list_builds(app_config_id)
for build in sorted(result.results, key=lambda b: b.created_at):
build_id = build.id
last_status = build.status
if build.status == "failed":
continue
if build.status == "succeeded":
logger.info(f"Link to app config build: "
f"{anyscale_app_config_build_url(build_id)}")
return build_id
if last_status == "failed":
raise AppConfigBuildFailure("App config build failed.")
if not build_id:
raise AppConfigBuildFailure("No build found for app config.")
# Build found but not failed/finished yet
completed = False
start_wait = time.time()
next_report = start_wait + REPORT_S
logger.info(f"Waiting for build {build_id} to finish...")
logger.info(f"Track progress here: "
f"{anyscale_app_config_build_url(build_id)}")
while not completed:
now = time.time()
if now > next_report:
logger.info(f"... still waiting for build {build_id} to finish "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
result = sdk.get_build(build_id)
build = result.result
if build.status == "failed":
raise AppConfigBuildFailure(
f"App config build failed. Please see "
f"{anyscale_app_config_build_url(build_id)} for details")
if build.status == "succeeded":
logger.info("Build succeeded.")
return build_id
completed = build.status not in ["in_progress", "pending"]
if completed:
raise AppConfigBuildFailure(
f"Unknown build status: {build.status}. Please see "
f"{anyscale_app_config_build_url(build_id)} for details")
time.sleep(1)
return build_id
def run_job(cluster_name: str, compute_tpl_name: str, cluster_env_name: str,
job_name: str, min_workers: str, script: str,
script_args: List[str], env_vars: Dict[str, str],
autosuspend: int) -> Tuple[int, str]:
# Start cluster and job
address = f"anyscale://{cluster_name}?autosuspend={autosuspend}"
logger.info(f"Starting job {job_name} with Ray address: {address}")
env = copy.deepcopy(os.environ)
env.update(GLOBAL_CONFIG)
env.update(env_vars)
env["RAY_ADDRESS"] = address
env["RAY_JOB_NAME"] = job_name
env["RAY_RELEASE_MIN_WORKERS"] = str(min_workers)
proc = subprocess.Popen(
script.split(" ") + script_args,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True)
proc.stdout.reconfigure(line_buffering=True)
logs = ""
for line in proc.stdout:
logs += line
sys.stdout.write(line)
proc.wait()
return proc.returncode, logs
def create_and_wait_for_session(
sdk: AnyscaleSDK,
stop_event: multiprocessing.Event,
session_name: str,
session_options: Dict[Any, Any],
project_id: str,
) -> str:
# Create session
logger.info(f"Creating session {session_name}")
result = sdk.create_session(session_options)
session_id = result.result.id
# Trigger session start
logger.info(f"Starting session {session_name} ({session_id})")
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"], session_id=session_id)
logger.info(f"Link to session: {session_url}")
result = sdk.start_session(session_id, start_session_options={})
sop_id = result.result.id
completed = result.result.completed
# Wait for session
logger.info(f"Waiting for session {session_name}...")
start_wait = time.time()
next_report = start_wait + REPORT_S
while not completed:
# Sleep 1 sec before next check.
time.sleep(1)
session_operation_response = sdk.get_session_operation(
sop_id, _request_timeout=30)
session_operation = session_operation_response.result
completed = session_operation.completed
_check_stop(stop_event, "session")
now = time.time()
if now > next_report:
logger.info(f"... still waiting for session {session_name} "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
result = sdk.get_session(session_id)
    if result.result.state != "Active":
raise ReleaseTestInfraError(
f"Cluster did not come up - most likely the nodes are currently "
f"not available. Please check the cluster startup logs: "
f"{anyscale_session_url(project_id, session_id)}")
return session_id
def run_session_command(sdk: AnyscaleSDK,
session_id: str,
cmd_to_run: str,
result_queue: multiprocessing.Queue,
env_vars: Dict[str, str],
state_str: str = "CMD_RUN") -> Tuple[str, int]:
full_cmd = " ".join(f"{k}={v}"
for k, v in env_vars.items()) + " " + cmd_to_run
logger.info(f"Running command in session {session_id}: \n" f"{full_cmd}")
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"], session_id=session_id)
logger.info(f"Link to session: {session_url}")
result_queue.put(State(state_str, time.time(), None))
result = sdk.create_session_command(
dict(session_id=session_id, shell_command=full_cmd))
scd_id = result.result.id
return scd_id, result
def wait_for_session_command_to_complete(create_session_command_result,
sdk: AnyscaleSDK,
scd_id: str,
stop_event: multiprocessing.Event,
state_str: str = "CMD_RUN"):
result = create_session_command_result
completed = result.result.finished_at is not None
start_wait = time.time()
next_report = start_wait + REPORT_S
while not completed:
# Sleep 1 sec before next check.
time.sleep(1)
result = exponential_backoff_retry(
lambda: sdk.get_session_command(session_command_id=scd_id),
retry_exceptions=Exception,
initial_retry_delay_s=10,
max_retries=3)
completed = result.result.finished_at
if state_str == "CMD_RUN":
_check_stop(stop_event, "command")
elif state_str == "CMD_PREPARE":
_check_stop(stop_event, "prepare_command")
now = time.time()
if now > next_report:
logger.info(f"... still waiting for command to finish "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
status_code = result.result.status_code
runtime = time.time() - start_wait
if status_code != 0:
if state_str == "CMD_RUN":
raise RuntimeError(
f"Command returned non-success status: {status_code}")
elif state_str == "CMD_PREPARE":
raise PrepareCommandRuntimeError(
f"Prepare command returned non-success status: {status_code}")
return status_code, runtime
def get_command_logs(session_controller: SessionController,
scd_id: str,
lines: int = 50):
result = exponential_backoff_retry(
lambda: session_controller.api_client.get_execution_logs_api_v2_session_commands_session_command_id_execution_logs_get( # noqa: E501
session_command_id=scd_id,
start_line=-1 * lines,
end_line=0),
retry_exceptions=Exception,
initial_retry_delay_s=10,
max_retries=3)
return result.result.lines
def get_remote_json_content(
temp_dir: str,
session_name: str,
remote_file: Optional[str],
session_controller: SessionController,
):
if not remote_file:
logger.warning("No remote file specified, returning empty dict")
return {}
local_target_file = os.path.join(temp_dir, ".tmp.json")
session_controller.pull(
session_name=session_name,
source=remote_file,
target=local_target_file)
with open(local_target_file, "rt") as f:
return json.load(f)
def get_local_json_content(local_file: Optional[str]):
if not local_file:
logger.warning("No local file specified, returning empty dict")
return {}
with open(local_file, "rt") as f:
return json.load(f)
def pull_artifacts_and_store_in_cloud(
temp_dir: str,
logs: str,
session_name: str,
test_name: str,
artifacts: Optional[Dict[Any, Any]],
session_controller: SessionController,
):
output_log_file = os.path.join(temp_dir, "output.log")
with open(output_log_file, "wt") as f:
f.write(logs)
bucket = GLOBAL_CONFIG["RELEASE_AWS_BUCKET"]
location = f"{GLOBAL_CONFIG['RELEASE_AWS_LOCATION']}" \
f"/{session_name}/{test_name}"
saved_artifacts = {}
s3_client = boto3.client("s3")
s3_client.upload_file(output_log_file, bucket, f"{location}/output.log")
saved_artifacts["output.log"] = f"s3://{bucket}/{location}/output.log"
# Download artifacts
if artifacts:
for name, remote_file in artifacts.items():
logger.info(f"Downloading artifact `{name}` from "
f"{remote_file}")
local_target_file = os.path.join(temp_dir, name)
session_controller.pull(
session_name=session_name,
source=remote_file,
target=local_target_file)
# Upload artifacts to s3
s3_client.upload_file(local_target_file, bucket,
f"{location}/{name}")
saved_artifacts[name] = f"s3://{bucket}/{location}/{name}"
return saved_artifacts
def find_session_by_test_name(
sdk: AnyscaleSDK,
session_controller: SessionController,
temp_dir: str,
state_json: str,
project_id: str,
test_name: str,
) -> Optional[Tuple[str, str, Dict[Any, Any]]]:
paging_token = None
while True: # Will break if paging_token is None after first search
result = sdk.search_sessions(
project_id=project_id,
sessions_query=dict(
name=dict(contains=test_name),
state_filter=["Running"],
paging=dict(count=20, paging_token=paging_token)))
for session in result.results:
logger.info(f"Found sessions {session.name}")
if not session.name.startswith(test_name):
continue
try:
session_state = get_remote_json_content(
temp_dir=temp_dir,
session_name=session.name,
remote_file=state_json,
session_controller=session_controller)
except Exception as exc:
raise RuntimeError(f"Could not get remote json content "
f"for session {session.name}") from exc
if session_state.get("test_name") == test_name:
return session.id, session.name, session_state
session_token = result.metadata.next_paging_token
if not session_token:
return None
def get_latest_running_command_id(sdk: AnyscaleSDK, session_id: str
) -> Tuple[Optional[str], Optional[bool]]:
scd_id = None
paging_token = None
success = None
while not scd_id:
result = sdk.list_session_commands(
session_id=session_id, paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for cmd in result.results:
if not scd_id:
scd_id = cmd.id
completed = cmd.finished_at is not None
if completed:
if success is None:
success = True
success = success and cmd.status_code == 0
if not completed:
return cmd.id, None
return scd_id, success or False
def run_test_config(
local_dir: str,
project_id: str,
test_name: str,
test_config: Dict[Any, Any],
commit_url: str,
session_name: str = None,
smoke_test: bool = False,
no_terminate: bool = False,
kick_off_only: bool = False,
check_progress: bool = False,
upload_artifacts: bool = True,
keep_results_dir: bool = False,
app_config_id_override: Optional[str] = None,
) -> Dict[Any, Any]:
"""
Returns:
Dict with the following entries:
status (str): One of [finished, error, timeout]
command_link (str): Link to command (Anyscale web UI)
last_logs (str): Last logs (excerpt) to send to owner
artifacts (dict): Dict of artifacts
Key: Name
Value: S3 URL
"""
stop_event = multiprocessing.Event()
result_queue = multiprocessing.Queue()
if not session_name:
session_name = f"{test_name}_{int(time.time())}"
temp_dir = tempfile.mkdtemp()
# Result and state files
results_json = test_config["run"].get("results", None)
if results_json is None:
results_json = "/tmp/release_test_out.json"
state_json = test_config["run"].get("state", None)
if state_json is None:
state_json = "/tmp/release_test_state.json"
env_vars = {
"RAY_ADDRESS": os.environ.get("RAY_ADDRESS", "auto"),
"TEST_OUTPUT_JSON": results_json,
"TEST_STATE_JSON": state_json,
"IS_SMOKE_TEST": "1" if smoke_test else "0",
}
with open(os.path.join(local_dir, ".anyscale.yaml"), "wt") as f:
f.write(f"project_id: {project_id}")
os.chdir(local_dir)
# Setup interface
# Unfortunately, there currently seems to be no great way to
# transfer files with the Anyscale SDK.
# So we use the session controller instead.
sdk = AnyscaleSDK(auth_token=GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"])
session_controller = SessionController(
api_client=instantiate_api_client(
cli_token=GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"],
host=GLOBAL_CONFIG["ANYSCALE_HOST"],
),
anyscale_api_client=sdk.api_client,
)
cloud_id = test_config["cluster"].get("cloud_id", None)
cloud_name = test_config["cluster"].get("cloud_name", None)
if cloud_id and cloud_name:
raise RuntimeError(
f"You can't supply both a `cloud_name` ({cloud_name}) and a "
f"`cloud_id` ({cloud_id}) in the test cluster configuration. "
f"Please provide only one.")
elif cloud_name and not cloud_id:
cloud_id = find_cloud_by_name(sdk, cloud_name)
if not cloud_id:
raise RuntimeError(
f"Couldn't find cloud with name `{cloud_name}`.")
else:
cloud_id = cloud_id or GLOBAL_CONFIG["ANYSCALE_CLOUD_ID"]
# Overwrite global config so that `_load_config` sets the correct cloud
GLOBAL_CONFIG["ANYSCALE_CLOUD_ID"] = cloud_id
cluster_config_rel_path = test_config["cluster"].get(
"cluster_config", None)
cluster_config = _load_config(local_dir, cluster_config_rel_path)
app_config_rel_path = test_config["cluster"].get("app_config", None)
app_config = _load_config(local_dir, app_config_rel_path)
compute_tpl_rel_path = test_config["cluster"].get("compute_template", None)
compute_tpl = _load_config(local_dir, compute_tpl_rel_path)
timeout = test_config["run"].get("timeout", 1800)
if "RELEASE_OVERRIDE_TIMEOUT" in os.environ:
previous_timeout = timeout
timeout = int(os.environ.get("RELEASE_OVERRIDE_TIMEOUT", str(timeout)))
logger.warning(f"Release test timeout override: {timeout} "
f"(would have been {previous_timeout})")
# If a test is long running, timeout does not mean it failed
is_long_running = test_config["run"].get("long_running", False)
build_id_override = None
if test_config["run"].get("use_connect"):
autosuspend_mins = test_config["run"].get("autosuspend_mins", 5)
assert not kick_off_only, \
"Unsupported for running with Anyscale connect."
if app_config_id_override is not None:
logger.info(
"Using connect and an app config override, waiting until "
"build finishes so we can fetch the app config in order to "
"install its pip packages locally.")
build_id_override = wait_for_build_or_raise(
sdk, app_config_id_override)
response = sdk.get_cluster_environment_build(build_id_override)
app_config = response.result.config_json
install_app_config_packages(app_config)
install_matching_ray()
elif "autosuspend_mins" in test_config["run"]:
raise ValueError(
"'autosuspend_mins' is only supported if 'use_connect' is True.")
# Only wrap pip packages after we installed the app config packages
_wrap_app_config_pip_installs(app_config)
# Add information to results dict
def _update_results(results: Dict):
if "last_update" in results:
results["last_update_diff"] = time.time() - results["last_update"]
if smoke_test:
results["smoke_test"] = True
def _process_finished_command(session_controller: SessionController,
scd_id: str,
results: Optional[Dict] = None,
runtime: int = None,
commit_url: str = None,
session_url: str = None):
logger.info("Command finished successfully.")
if results_json:
results = results or get_remote_json_content(
temp_dir=temp_dir,
session_name=session_name,
remote_file=results_json,
session_controller=session_controller,
)
else:
results = {"passed": 1}
_update_results(results)
if scd_id:
try:
logs = get_command_logs(session_controller, scd_id,
test_config.get("log_lines", 50))
except Exception as e:
raise ReleaseTestInfraError(
f"Could not fetch command logs: {e}. This is an "
f"infrastructure error on the Anyscale side.")
else:
logs = "No command found to fetch logs for"
if upload_artifacts:
saved_artifacts = pull_artifacts_and_store_in_cloud(
temp_dir=temp_dir,
logs=logs, # Also save logs in cloud
session_name=session_name,
test_name=test_name,
artifacts=test_config.get("artifacts", {}),
session_controller=session_controller,
)
logger.info("Fetched results and stored on the cloud. Returning.")
else:
saved_artifacts = {}
logger.info("Usually I would have fetched the results and "
"artifacts and stored them on S3.")
# Add these metadata here to avoid changing SQL schema.
results["_runtime"] = runtime
results["_session_url"] = session_url
results["_commit_url"] = commit_url
results["_stable"] = test_config.get("stable", True)
result_queue.put(
State(
"END",
time.time(),
{
"status": "finished",
"last_logs": logs,
"results": results,
"artifacts": saved_artifacts,
},
))
# When running the test script in client mode, the finish command is a
# completed local process.
def _process_finished_client_command(returncode: int, logs: str):
if upload_artifacts:
saved_artifacts = pull_artifacts_and_store_in_cloud(
temp_dir=temp_dir,
logs=logs, # Also save logs in cloud
session_name=session_name,
test_name=test_name,
artifacts=None,
session_controller=None,
)
logger.info("Stored results on the cloud. Returning.")
else:
saved_artifacts = {}
logger.info("Usually I would have fetched the results and "
"artifacts and stored them on S3.")
if results_json:
results = get_local_json_content(local_file=results_json, )
else:
results = {
"passed": int(returncode == 0),
}
results["returncode"] = returncode
_update_results(results)
result_queue.put(
State(
"END",
time.time(),
{
"status": "finished",
"last_logs": logs,
"results": results,
"artifacts": saved_artifacts,
},
))
def _run(logger):
# These values will be set as the test runs.
session_url = None
runtime = None
anyscale.conf.CLI_TOKEN = GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"]
test_uses_ray_connect = test_config["run"].get("use_connect")
session_id = None
scd_id = None
try:
# First, look for running sessions
session_id = search_running_session(sdk, project_id, session_name)
compute_tpl_name = None
app_config_id = app_config_id_override
app_config_name = None
build_id = build_id_override
if not session_id:
logger.info("No session found.")
# Start session
session_options = dict(
name=session_name, project_id=project_id)
if cluster_config is not None:
logging.info("Starting session with cluster config")
cluster_config_str = json.dumps(cluster_config)
session_options["cluster_config"] = cluster_config_str
session_options["cloud_id"] = cloud_id
session_options["uses_app_config"] = False
else:
logging.info("Starting session with app/compute config")
# Find/create compute template
compute_tpl_id, compute_tpl_name = \
create_or_find_compute_template(
sdk, project_id, compute_tpl)
logger.info(f"Link to compute template: "
f"{anyscale_compute_tpl_url(compute_tpl_id)}")
# Find/create app config
if app_config_id is None:
(
app_config_id,
app_config_name,
) = create_or_find_app_config(sdk, project_id,
app_config)
else:
logger.info(
f"Using override app config {app_config_id}")
app_config_name = sdk.get_app_config(
app_config_id).result.name
if build_id is None:
# We might have already retrieved the build ID when
# installing app config packages locally if using
# connect, so only get the build ID if it's not set.
build_id = wait_for_build_or_raise(sdk, app_config_id)
session_options["compute_template_id"] = compute_tpl_id
session_options["build_id"] = build_id
session_options["uses_app_config"] = True
# Start session
session_id = create_and_wait_for_session(
sdk=sdk,
stop_event=stop_event,
session_name=session_name,
session_options=session_options,
project_id=project_id,
)
prepare_command = test_config["run"].get("prepare")
# Write test state json
test_state_file = os.path.join(local_dir, "test_state.json")
with open(test_state_file, "wt") as f:
json.dump({
"start_time": time.time(),
"test_name": test_name
}, f)
if prepare_command or not test_uses_ray_connect:
if test_uses_ray_connect:
logger.info("Found a prepare command, so pushing it "
"to the session.")
# Rsync up
logger.info("Syncing files to session...")
session_controller.push(
session_name=session_name,
source=None,
target=None,
config=None,
all_nodes=False,
)
logger.info("Syncing test state to session...")
session_controller.push(
session_name=session_name,
source=test_state_file,
target=state_json,
config=None,
all_nodes=False,
)
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"],
session_id=session_id)
_check_stop(stop_event, "file_sync")
# Optionally run preparation command
if prepare_command:
logger.info(
f"Running preparation command: {prepare_command}")
scd_id, result = run_session_command(
sdk=sdk,
session_id=session_id,
cmd_to_run=prepare_command,
result_queue=result_queue,
env_vars=env_vars,
state_str="CMD_PREPARE")
_, _ = wait_for_session_command_to_complete(
result,
sdk=sdk,
scd_id=scd_id,
stop_event=stop_event,
state_str="CMD_PREPARE")
if test_uses_ray_connect:
script_args = test_config["run"].get("args", [])
if smoke_test:
script_args += ["--smoke-test"]
min_workers = 0
for node_type in compute_tpl["worker_node_types"]:
min_workers += node_type["min_workers"]
# Build completed, use job timeout
result_queue.put(State("CMD_RUN", time.time(), None))
returncode, logs = run_job(
cluster_name=session_name,
compute_tpl_name=compute_tpl_name,
cluster_env_name=app_config_name,
job_name=session_name,
min_workers=min_workers,
script=test_config["run"]["script"],
script_args=script_args,
env_vars=env_vars,
autosuspend=autosuspend_mins)
_process_finished_client_command(returncode, logs)
return
# Run release test command
cmd_to_run = test_config["run"]["script"] + " "
args = test_config["run"].get("args", [])
if args:
cmd_to_run += " ".join(args) + " "
if smoke_test:
cmd_to_run += " --smoke-test"
scd_id, result = run_session_command(
sdk=sdk,
session_id=session_id,
cmd_to_run=cmd_to_run,
result_queue=result_queue,
env_vars=env_vars,
state_str="CMD_RUN")
if not kick_off_only:
_, runtime = wait_for_session_command_to_complete(
result,
sdk=sdk,
scd_id=scd_id,
stop_event=stop_event,
state_str="CMD_RUN")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
runtime=runtime,
session_url=session_url,
commit_url=commit_url)
else:
result_queue.put(
State("END", time.time(), {
"status": "kickoff",
"last_logs": ""
}))
except (ReleaseTestTimeoutError, Exception) as e:
logger.error(e, exc_info=True)
logs = str(e)
if scd_id is not None:
try:
logs = logs + "; Command logs:" + get_command_logs(
session_controller, scd_id,
test_config.get("log_lines", 50))
except Exception as e2:
logger.error(e2, exc_info=True)
# Long running tests are "finished" successfully when
# timed out
if isinstance(e, ReleaseTestTimeoutError) and is_long_running:
_process_finished_command(
session_controller=session_controller, scd_id=scd_id)
else:
runtime = None
if isinstance(e, CommandTimeoutError):
error_type = "timeout"
runtime = 0
exit_code = ExitCode.COMMAND_TIMEOUT
elif isinstance(e, PrepareCommandTimeoutError):
error_type = "infra_timeout"
runtime = None
exit_code = ExitCode.PREPARE_TIMEOUT
elif isinstance(e, FileSyncTimeoutError):
error_type = "infra_timeout"
runtime = None
exit_code = ExitCode.FILESYNC_TIMEOUT
elif isinstance(e, SessionTimeoutError):
error_type = "infra_timeout"
runtime = None
exit_code = ExitCode.SESSION_TIMEOUT
elif isinstance(e, PrepareCommandRuntimeError):
error_type = "infra_timeout"
runtime = None
exit_code = ExitCode.PREPARE_ERROR
elif isinstance(e, AppConfigBuildFailure):
error_type = "infra_timeout"
runtime = None
exit_code = ExitCode.APPCONFIG_BUILD_ERROR
elif isinstance(e, ReleaseTestInfraError):
error_type = "infra_error"
exit_code = ExitCode.INFRA_ERROR
elif isinstance(e, RuntimeError):
error_type = "runtime_error"
runtime = 0
exit_code = ExitCode.RUNTIME_ERROR
else:
error_type = "unknown timeout"
runtime = None
exit_code = ExitCode.UNKNOWN
# Add these metadata here to avoid changing SQL schema.
results = {}
results["_runtime"] = runtime
results["_session_url"] = session_url
results["_commit_url"] = commit_url
results["_stable"] = test_config.get("stable", True)
result_queue.put(
State(
"END", time.time(), {
"status": error_type,
"last_logs": logs,
"results": results,
"exit_code": exit_code.value
}))
finally:
if no_terminate:
logger.warning(
"`no_terminate` is set to True, so the session will "
"*not* be terminated!")
else:
_cleanup_session(sdk, session_id)
def _check_progress(logger):
anyscale.conf.CLI_TOKEN = GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"]
should_terminate = False
session_id = None
scd_id = None
try:
existing_session = find_session_by_test_name(
sdk=sdk,
session_controller=session_controller,
temp_dir=temp_dir,
state_json=state_json,
project_id=project_id,
test_name=test_name)
if existing_session is None:
logger.info(f"Found no existing session for {test_name}")
result_queue.put(
State("END", time.time(), {
"status": "nosession",
"last_logs": ""
}))
return
session_id, session_name, session_state = existing_session
logger.info(f"Found existing session for {test_name}: "
f"{session_name}")
scd_id, success = get_latest_running_command_id(
sdk=sdk, session_id=session_id)
latest_result = get_remote_json_content(
temp_dir=temp_dir,
session_name=session_name,
remote_file=results_json,
session_controller=session_controller,
)
# Fetch result json and check if it has been updated recently
result_time_key = test_config["run"].get("time_key", None)
maximum_update_delay = test_config["run"].get(
"max_update_delay", None)
if result_time_key and maximum_update_delay:
last_update = latest_result.get(result_time_key, None)
if not last_update:
result_queue.put(
State(
"END", time.time(), {
"status": "error",
"last_logs": f"Test did not store "
f"{result_time_key} in the "
f"results json."
}))
return
delay = time.time() - last_update
logger.info(f"Last update was at {last_update:.2f}. "
f"This was {delay:.2f} seconds ago "
f"(maximum allowed: {maximum_update_delay})")
if delay > maximum_update_delay:
raise RuntimeError(
f"Test did not update the results json within "
f"the last {maximum_update_delay} seconds.")
if time.time() - session_state["start_time"] > timeout:
# Long running test reached timeout
logger.info(
f"Test command reached timeout after {timeout} seconds")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
results=latest_result)
should_terminate = True
elif success:
logger.info("All commands finished.")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
results=latest_result)
should_terminate = True
else:
rest_time = timeout - time.time() + session_state["start_time"]
logger.info(f"Test command should continue running "
f"for {rest_time} seconds")
result_queue.put(
State("END", time.time(), {
"status": "kickoff",
"last_logs": "Test is still running"
}))
except Exception as e:
logger.error(e, exc_info=True)
logs = str(e)
if scd_id is not None:
try:
logs = get_command_logs(session_controller, scd_id,
test_config.get("log_lines", 50))
logs += f"\n{str(e)}"
except Exception as e2:
logger.error(e2, exc_info=True)
result_queue.put(
State("END", time.time(), {
"status": "error",
"last_logs": logs
}))
should_terminate = True
finally:
if should_terminate:
logger.warning("Terminating session")
_cleanup_session(sdk, session_id)
if not check_progress:
process = multiprocessing.Process(target=_run, args=(logger, ))
else:
process = multiprocessing.Process(
target=_check_progress, args=(logger, ))
build_timeout = test_config["run"].get("build_timeout", 1800)
prepare_timeout = test_config["run"].get("prepare_timeout", timeout)
project_url = anyscale_project_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"])
logger.info(f"Link to project: {project_url}")
msg = f"This will now run test {test_name}."
if smoke_test:
msg += " This is a smoke test."
if is_long_running:
msg += " This is a long running test."
logger.info(msg)
logger.info(f"Starting process with timeout {timeout} "
f"(prepare timeout {prepare_timeout}, "
f"build timeout {build_timeout})")
process.start()
# The timeout time will be updated after the build finished
# Build = App config + compute template build and session start
timeout_time = time.time() + build_timeout
result = {}
while process.is_alive():
try:
state: State = result_queue.get(timeout=1)
except (Empty, TimeoutError):
if time.time() > timeout_time:
stop_event.set()
logger.warning("Process timed out.")
if not is_long_running:
logger.warning("Terminating process in 10 seconds.")
time.sleep(10)
logger.warning("Terminating process now.")
process.terminate()
else:
logger.info("Process is long running. Give 2 minutes to "
"fetch result and terminate.")
start_terminate = time.time()
while time.time(
) < start_terminate + 120 and process.is_alive():
time.sleep(1)
if process.is_alive():
logger.warning("Terminating forcefully now.")
process.terminate()
else:
logger.info("Long running results collected.")
break
continue
if not isinstance(state, State):
raise RuntimeError(f"Expected `State` object, got {result}")
if state.state == "CMD_PREPARE":
# Reset timeout after build finished
timeout_time = state.timestamp + prepare_timeout
if state.state == "CMD_RUN":
# Reset timeout after prepare command or build finished
timeout_time = state.timestamp + timeout
elif state.state == "END":
result = state.data
break
while not result_queue.empty():
state = result_queue.get_nowait()
result = state.data
logger.info("Final check if everything worked.")
try:
result.setdefault("status", "error (status not found)")
except (TimeoutError, Empty):
result = {"status": "timeout", "last_logs": "Test timed out."}
logger.info(f"Final results: {result}")
log_results_and_artifacts(result)
if not keep_results_dir:
logger.info(f"Removing results dir {temp_dir}")
shutil.rmtree(temp_dir)
else:
# Write results.json
with open(os.path.join(temp_dir, "results.json"), "wt") as fp:
json.dump(result, fp)
out_dir = os.path.expanduser(GLOBAL_CONFIG["RELEASE_RESULTS_DIR"])
logger.info(f"Moving results dir {temp_dir} to persistent location "
f"{out_dir}")
shutil.rmtree(out_dir, ignore_errors=True)
shutil.copytree(temp_dir, out_dir)
logger.info(f"Dir contents: {os.listdir(out_dir)}")
return result
def run_test(test_config_file: str,
test_name: str,
project_id: str,
commit_url: str,
category: str = "unspecified",
smoke_test: bool = False,
no_terminate: bool = False,
kick_off_only: bool = False,
check_progress: bool = False,
report: bool = True,
keep_results_dir: bool = False,
session_name: Optional[str] = None,
app_config_id_override=None) -> Dict[str, Any]:
with open(test_config_file, "rt") as f:
test_configs = yaml.safe_load(f)
test_config_dict = {}
for test_config in test_configs:
name = test_config.pop("name")
test_config_dict[name] = test_config
if test_name not in test_config_dict:
raise ValueError(
f"Test with name `{test_name}` not found in test config file "
f"at `{test_config_file}`.")
test_config = test_config_dict[test_name]
if smoke_test and "smoke_test" in test_config:
smoke_test_config = test_config.pop("smoke_test")
test_config = _deep_update(test_config, smoke_test_config)
local_dir = os.path.dirname(test_config_file)
if "local_dir" in test_config:
# local_dir is relative to test_config_file
local_dir = os.path.join(local_dir, test_config["local_dir"])
if test_config["run"].get("use_connect"):
assert not kick_off_only, \
"--kick-off-only is unsupported when running with " \
"Anyscale connect."
assert not check_progress, \
"--check is unsupported when running with Anyscale connect."
if test_config.get("artifacts", {}):
logger.error(
"Saving artifacts are not yet supported when running with "
"Anyscale connect.")
# Perform necessary driver side setup.
driver_setup_script = test_config.get("driver_setup", None)
if driver_setup_script:
run_bash_script(local_dir, driver_setup_script)
result = run_test_config(
local_dir,
project_id,
test_name,
test_config,
commit_url,
session_name=session_name,
smoke_test=smoke_test,
no_terminate=no_terminate,
kick_off_only=kick_off_only,
check_progress=check_progress,
upload_artifacts=report,
keep_results_dir=keep_results_dir,
app_config_id_override=app_config_id_override)
status = result.get("status", "invalid")
if kick_off_only:
if status != "kickoff":
raise RuntimeError("Error kicking off test.")
logger.info("Kicked off test. It's now up to the `--check` "
"part of the script to track its process.")
return {}
else:
# `--check` or no kick off only
if status == "nosession":
logger.info(f"No running session found for test {test_name}, so "
f"assuming everything is fine.")
return {}
if status == "kickoff":
logger.info(f"Test {test_name} is still running.")
return {}
last_logs = result.get("last_logs", "No logs.")
test_suite = os.path.basename(test_config_file).replace(".yaml", "")
report_kwargs = dict(
test_suite=test_suite,
test_name=test_name,
status=status,
last_logs=last_logs,
results=result.get("results", {}),
artifacts=result.get("artifacts", {}),
category=category,
)
if not has_errored(result):
        # Check if the result criteria are met if the test succeeded
alert = maybe_get_alert_for_result(report_kwargs)
if alert:
# If we get an alert, the test failed.
logger.error(f"Alert has been raised for "
f"{test_suite}/{test_name} "
f"({category}): {alert}")
result["status"] = "error (alert raised)"
report_kwargs["status"] = "error (alert raised)"
# For printing/reporting to the database
report_kwargs["last_logs"] = alert
last_logs = alert
else:
logger.info(f"No alert raised for test "
f"{test_suite}/{test_name} "
f"({category}) - the test successfully passed!")
if report:
try:
report_result(**report_kwargs)
except Exception as e:
# On database error the test should still pass
# Todo: flag somewhere else?
logger.error(f"Error persisting results to database: {e}")
else:
logger.info(f"Usually I would now report the following results:\n"
f"{report_kwargs}")
if has_errored(result):
# If the script terminates due to an uncaught error, it
# will return exit code 1, so we use 2 per default to
# catch these cases.
exit_code = result.get("exit_code", ExitCode.UNSPECIFIED.value)
logger.error(last_logs)
sys.exit(exit_code)
return report_kwargs
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"--test-config", type=str, required=True, help="Test config file")
parser.add_argument("--test-name", type=str, help="Test name in config")
parser.add_argument(
"--ray-wheels", required=False, type=str, help="URL to ray wheels")
parser.add_argument(
"--no-terminate",
action="store_true",
default=False,
help="Don't terminate session after failure")
parser.add_argument(
"--report",
action="store_true",
default=False,
help="Do not report any results or upload to S3")
parser.add_argument(
"--kick-off-only",
action="store_true",
default=False,
help="Kick off only (don't wait for command to finish)")
parser.add_argument(
"--check",
action="store_true",
default=False,
help="Check (long running) status")
parser.add_argument(
"--keep-results-dir",
action="store_true",
default=False,
help="Keep results in directory (named RELEASE_RESULTS_DIR), e.g. "
"for Buildkite artifact upload.")
parser.add_argument(
"--category",
type=str,
default="unspecified",
help="Category name, e.g. `release-1.3.0` (will be saved in database)")
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
parser.add_argument(
"--session-name",
required=False,
type=str,
help="Name of the session to run this test.")
parser.add_argument(
"--app-config-id-override",
required=False,
type=str,
help=("An app config ID, which will override the test config app "
"config."))
args, _ = parser.parse_known_args()
if not GLOBAL_CONFIG["ANYSCALE_PROJECT"]:
raise RuntimeError(
"You have to set the ANYSCALE_PROJECT environment variable!")
ray_wheels = args.ray_wheels or os.environ.get("RAY_WHEELS", "")
maybe_fetch_api_token()
if ray_wheels:
logger.info(f"Using Ray wheels provided from URL/commit: "
f"{ray_wheels}")
url = commit_or_url(str(ray_wheels))
logger.info(f"Resolved url link is: {url}")
# Overwrite with actual URL
os.environ["RAY_WHEELS"] = url
elif not args.check:
url = find_ray_wheels(
GLOBAL_CONFIG["RAY_REPO"],
GLOBAL_CONFIG["RAY_BRANCH"],
GLOBAL_CONFIG["RAY_VERSION"],
)
if not url:
raise RuntimeError(f"Could not find wheels for "
f"Ray {GLOBAL_CONFIG['RAY_VERSION']}, "
f"branch {GLOBAL_CONFIG['RAY_BRANCH']}")
# RAY_COMMIT is set by commit_or_url and find_ray_wheels
populate_wheels_sanity_check(os.environ.get("RAY_COMMIT", ""))
test_config_file = os.path.abspath(os.path.expanduser(args.test_config))
    # The REPORT_RESULT global config flag, when set, overrides --report.
report = GLOBAL_CONFIG["REPORT_RESULT"]
if report.lower() == "1" or report.lower() == "true":
report = True
else:
report = args.report
run_test(
test_config_file=test_config_file,
test_name=args.test_name,
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"],
commit_url=url,
category=args.category,
smoke_test=args.smoke_test,
no_terminate=args.no_terminate or args.kick_off_only,
kick_off_only=args.kick_off_only,
check_progress=args.check,
report=report,
session_name=args.session_name,
keep_results_dir=args.keep_results_dir,
app_config_id_override=args.app_config_id_override,
)
|
python.thread.py
|
import threading
import time
event = threading.Event()
def lighter():
count = 0
    event.clear()  # start with a green light
while True:
if 5 < count <= 10:
            event.clear()  # red light: clear the flag
print("red light is on... ")
elif count > 10:
            event.set()  # green light: set the flag
count = 0
else:
print("green light is on... ")
time.sleep(1)
count += 1
def car(name):
while True:
        if event.is_set():  # check whether the flag is set
print('[%s] running.....' % name)
time.sleep(1)
else:
            print('[%s] sees red light, waiting...' % name)
event.wait()
            print('[%s] green light is on, start going...' % name)
startTime = time.time()
light = threading.Thread(target=lighter,)
light.start()
car = threading.Thread(target=car, args=('Car',))
car.start()
endTime = time.time()
print('Time Cost:', endTime-startTime)
# sudo py-spy record -o python.thread.svg -- python python.thread.py
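# (The py-spy invocation above is illustrative: `record` samples this script while it
# runs, `-o` names the flame-graph SVG to write, and `--` separates py-spy's own
# options from the command being profiled.)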
|
wait_for_topics.py
|
# Copyright 2021 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import string
from threading import Event
from threading import Thread
import rclpy
from rclpy.executors import SingleThreadedExecutor
from rclpy.node import Node
class WaitForTopics:
"""
Wait to receive messages on supplied topics.
Example usage:
--------------
from std_msgs.msg import String
# Method 1, using the 'with' keyword
def method_1():
topic_list = [('topic_1', String), ('topic_2', String)]
with WaitForTopics(topic_list, timeout=5.0):
# 'topic_1' and 'topic_2' received at least one message each
print('Given topics are receiving messages !')
# Method 2, calling wait() and shutdown() manually
def method_2():
topic_list = [('topic_1', String), ('topic_2', String)]
wait_for_topics = WaitForTopics(topic_list, timeout=5.0)
assert wait_for_topics.wait()
print('Given topics are receiving messages !')
print(wait_for_topics.topics_not_received()) # Should be an empty set
print(wait_for_topics.topics_received()) # Should be {'topic_1', 'topic_2'}
wait_for_topics.shutdown()
"""
def __init__(self, topic_tuples, timeout=5.0):
self.topic_tuples = topic_tuples
self.timeout = timeout
self.__ros_context = rclpy.Context()
rclpy.init(context=self.__ros_context)
self.__ros_executor = SingleThreadedExecutor(context=self.__ros_context)
self._prepare_ros_node()
# Start spinning
self.__running = True
self.__ros_spin_thread = Thread(target=self._spin_function)
self.__ros_spin_thread.start()
def _prepare_ros_node(self):
node_name = '_test_node_' +\
''.join(random.choices(string.ascii_uppercase + string.digits, k=10))
self.__ros_node = _WaitForTopicsNode(name=node_name, node_context=self.__ros_context)
self.__ros_executor.add_node(self.__ros_node)
def _spin_function(self):
while self.__running:
self.__ros_executor.spin_once(1.0)
def wait(self):
self.__ros_node.start_subscribers(self.topic_tuples)
return self.__ros_node.msg_event_object.wait(self.timeout)
def shutdown(self):
self.__running = False
self.__ros_spin_thread.join()
self.__ros_node.destroy_node()
rclpy.shutdown(context=self.__ros_context)
def topics_received(self):
"""Topics that received at least one message."""
return self.__ros_node.received_topics
def topics_not_received(self):
"""Topics that did not receive any messages."""
return self.__ros_node.expected_topics - self.__ros_node.received_topics
def __enter__(self):
if not self.wait():
raise RuntimeError('Did not receive messages on these topics: ',
self.topics_not_received())
return self
def __exit__(self, exep_type, exep_value, trace):
if exep_type is not None:
            raise Exception('Exception occurred, value: ', exep_value)
self.shutdown()
class _WaitForTopicsNode(Node):
"""Internal node used for subscribing to a set of topics."""
def __init__(self, name='test_node', node_context=None):
super().__init__(node_name=name, context=node_context)
self.msg_event_object = Event()
def start_subscribers(self, topic_tuples):
self.subscriber_list = []
self.expected_topics = {name for name, _ in topic_tuples}
self.received_topics = set()
for topic_name, topic_type in topic_tuples:
# Create a subscriber
self.subscriber_list.append(
self.create_subscription(
topic_type,
topic_name,
self.callback_template(topic_name),
10
)
)
def callback_template(self, topic_name):
def topic_callback(data):
if topic_name not in self.received_topics:
self.get_logger().debug('Message received for ' + topic_name)
self.received_topics.add(topic_name)
if self.received_topics == self.expected_topics:
self.msg_event_object.set()
return topic_callback
|
test_collection_count.py
|
import pdb
import pytest
import logging
import itertools
from time import sleep
import threading
from multiprocessing import Process
from milvus import IndexType, MetricType
from utils import *
dim = 128
index_file_size = 10
add_time_interval = 3
tag = "1970-01-01"
nb = 6000
class TestCollectionCount:
"""
    The fixture params supply different nb values; some values may trigger a segment merge and some may not.
"""
@pytest.fixture(
scope="function",
params=[
1,
5000,
20000,
],
)
def add_vectors_nb(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in cpu mode")
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("Skip PQ Temporary")
return request.param
def test_collection_rows_count(self, connect, collection, add_vectors_nb):
'''
target: test collection rows_count is correct or not
method: create collection and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
vectors = gen_vectors(nb, dim)
res = connect.add_vectors(collection_name=collection, records=vectors)
connect.flush([collection])
status, res = connect.count_collection(collection)
assert res == nb
def test_collection_rows_count_partition(self, connect, collection, add_vectors_nb):
'''
target: test collection rows_count is correct or not
method: create collection, create partition and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
vectors = gen_vectors(nb, dim)
status = connect.create_partition(collection, tag)
assert status.OK()
res = connect.add_vectors(collection_name=collection, records=vectors, partition_tag=tag)
connect.flush([collection])
status, res = connect.count_collection(collection)
assert res == nb
def test_collection_rows_count_multi_partitions_A(self, connect, collection, add_vectors_nb):
'''
target: test collection rows_count is correct or not
method: create collection, create partitions and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
new_tag = "new_tag"
nb = add_vectors_nb
vectors = gen_vectors(nb, dim)
status = connect.create_partition(collection, tag)
status = connect.create_partition(collection, new_tag)
assert status.OK()
res = connect.add_vectors(collection_name=collection, records=vectors)
connect.flush([collection])
status, res = connect.count_collection(collection)
assert res == nb
def test_collection_rows_count_multi_partitions_B(self, connect, collection, add_vectors_nb):
'''
target: test collection rows_count is correct or not
method: create collection, create partitions and add vectors in one of the partitions,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
new_tag = "new_tag"
nb = add_vectors_nb
vectors = gen_vectors(nb, dim)
status = connect.create_partition(collection, tag)
status = connect.create_partition(collection, new_tag)
assert status.OK()
res = connect.add_vectors(collection_name=collection, records=vectors, partition_tag=tag)
connect.flush([collection])
status, res = connect.count_collection(collection)
assert res == nb
def test_collection_rows_count_multi_partitions_C(self, connect, collection, add_vectors_nb):
'''
target: test collection rows_count is correct or not
method: create collection, create partitions and add vectors in one of the partitions,
assert the value returned by count_collection method is equal to length of vectors
expected: the collection count is equal to the length of vectors
'''
new_tag = "new_tag"
nb = add_vectors_nb
vectors = gen_vectors(nb, dim)
status = connect.create_partition(collection, tag)
status = connect.create_partition(collection, new_tag)
assert status.OK()
res = connect.add_vectors(collection_name=collection, records=vectors, partition_tag=tag)
res = connect.add_vectors(collection_name=collection, records=vectors, partition_tag=new_tag)
connect.flush([collection])
status, res = connect.count_collection(collection)
assert res == nb * 2
def test_collection_rows_count_after_index_created(self, connect, collection, get_simple_index):
'''
        target: test count_collection after an index has been created
        method: add vectors to the collection, create an index, then call count_collection with correct params
        expected: the count is equal to the length of vectors
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
nb = 100
vectors = gen_vectors(nb, dim)
res = connect.add_vectors(collection_name=collection, records=vectors)
connect.flush([collection])
connect.create_index(collection, index_type, index_param)
status, res = connect.count_collection(collection)
assert res == nb
@pytest.mark.level(2)
def test_count_without_connection(self, collection, dis_connect):
'''
target: test count_collection, without connection
method: calling count_collection with correct params, with a disconnected instance
expected: count_collection raise exception
'''
with pytest.raises(Exception) as e:
status = dis_connect.count_collection(collection)
def test_collection_rows_count_no_vectors(self, connect, collection):
'''
target: test collection rows_count is correct or not, if collection is empty
method: create collection and no vectors in it,
assert the value returned by count_collection method is equal to 0
expected: the count is equal to 0
'''
collection_name = gen_unique_str()
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size}
connect.create_collection(param)
status, res = connect.count_collection(collection)
assert res == 0
# TODO: enable
@pytest.mark.level(2)
@pytest.mark.timeout(20)
def _test_collection_rows_count_multiprocessing(self, connect, collection, args):
'''
target: test collection rows_count is correct or not with multiprocess
method: create collection and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nq = 2
vectors = gen_vectors(nq, dim)
res = connect.add_vectors(collection_name=collection, records=vectors)
        sleep(add_time_interval)
def rows_count(milvus):
status, res = milvus.count_collection(collection)
logging.getLogger().info(status)
assert res == nq
process_num = 8
processes = []
for i in range(process_num):
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
p = Process(target=rows_count, args=(milvus, ))
processes.append(p)
p.start()
logging.getLogger().info(p)
for p in processes:
p.join()
def test_collection_rows_count_multi_collections(self, connect):
'''
target: test collection rows_count is correct or not with multiple collections of L2
method: create collection and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nq = 100
vectors = gen_vectors(nq, dim)
collection_list = []
for i in range(20):
collection_name = gen_unique_str()
collection_list.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
res = connect.add_vectors(collection_name=collection_name, records=vectors)
connect.flush(collection_list)
for i in range(20):
status, res = connect.count_collection(collection_list[i])
assert status.OK()
assert res == nq
class TestCollectionCountIP:
"""
    The fixture params supply different nb values; some values may trigger a segment merge and some may not.
"""
@pytest.fixture(
scope="function",
params=[
1,
5000,
20000,
],
)
def add_vectors_nb(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in CPU mode")
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("Skip PQ Temporary")
return request.param
def test_collection_rows_count(self, connect, ip_collection, add_vectors_nb):
'''
target: test collection rows_count is correct or not
method: create collection and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
vectors = gen_vectors(nb, dim)
res = connect.add_vectors(collection_name=ip_collection, records=vectors)
connect.flush([ip_collection])
status, res = connect.count_collection(ip_collection)
assert res == nb
def test_collection_rows_count_after_index_created(self, connect, ip_collection, get_simple_index):
'''
        target: test count_collection after an index has been created
        method: add vectors to the collection, create an index, then call count_collection with correct params
        expected: the count is equal to the length of vectors
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
nb = 100
vectors = gen_vectors(nb, dim)
res = connect.add_vectors(collection_name=ip_collection, records=vectors)
connect.flush([ip_collection])
connect.create_index(ip_collection, index_type, index_param)
status, res = connect.count_collection(ip_collection)
assert res == nb
@pytest.mark.level(2)
def test_count_without_connection(self, ip_collection, dis_connect):
'''
target: test count_collection, without connection
method: calling count_collection with correct params, with a disconnected instance
expected: count_collection raise exception
'''
with pytest.raises(Exception) as e:
status = dis_connect.count_collection(ip_collection)
def test_collection_rows_count_no_vectors(self, connect, ip_collection):
'''
target: test collection rows_count is correct or not, if collection is empty
method: create collection and no vectors in it,
assert the value returned by count_collection method is equal to 0
expected: the count is equal to 0
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size}
connect.create_collection(param)
status, res = connect.count_collection(ip_collection)
assert res == 0
# TODO: enable
@pytest.mark.timeout(60)
def _test_collection_rows_count_multiprocessing(self, connect, ip_collection, args):
'''
target: test collection rows_count is correct or not with multiprocess
method: create collection and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nq = 2
vectors = gen_vectors(nq, dim)
res = connect.add_vectors(collection_name=ip_collection, records=vectors)
        sleep(add_time_interval)
def rows_count(milvus):
status, res = milvus.count_collection(ip_collection)
logging.getLogger().info(status)
assert res == nq
process_num = 8
processes = []
for i in range(process_num):
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
p = Process(target=rows_count, args=(milvus,))
processes.append(p)
p.start()
logging.getLogger().info(p)
for p in processes:
p.join()
def test_collection_rows_count_multi_collections(self, connect):
'''
target: test collection rows_count is correct or not with multiple collections of IP
method: create collection and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nq = 100
vectors = gen_vectors(nq, dim)
collection_list = []
for i in range(20):
collection_name = gen_unique_str('test_collection_rows_count_multi_collections')
collection_list.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
connect.create_collection(param)
res = connect.add_vectors(collection_name=collection_name, records=vectors)
connect.flush(collection_list)
for i in range(20):
status, res = connect.count_collection(collection_list[i])
assert status.OK()
assert res == nq
class TestCollectionCountJAC:
"""
    The fixture params supply different nb values; some values may trigger a segment merge and some may not.
"""
@pytest.fixture(
scope="function",
params=[
1,
5000,
20000,
],
)
def add_vectors_nb(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_jaccard_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
def test_collection_rows_count(self, connect, jac_collection, add_vectors_nb):
'''
target: test collection rows_count is correct or not
method: create collection and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
tmp, vectors = gen_binary_vectors(nb, dim)
res = connect.add_vectors(collection_name=jac_collection, records=vectors)
connect.flush([jac_collection])
status, res = connect.count_collection(jac_collection)
assert res == nb
def test_collection_rows_count_after_index_created(self, connect, jac_collection, get_jaccard_index):
'''
        target: test count_collection after an index has been created
        method: add vectors to the collection, create an index, then call count_collection with correct params
        expected: the count is equal to the length of vectors
'''
nb = 100
index_param = get_jaccard_index["index_param"]
index_type = get_jaccard_index["index_type"]
tmp, vectors = gen_binary_vectors(nb, dim)
res = connect.add_vectors(collection_name=jac_collection, records=vectors)
connect.flush([jac_collection])
connect.create_index(jac_collection, index_type, index_param)
status, res = connect.count_collection(jac_collection)
assert res == nb
@pytest.mark.level(2)
def test_count_without_connection(self, jac_collection, dis_connect):
'''
target: test count_collection, without connection
method: calling count_collection with correct params, with a disconnected instance
expected: count_collection raise exception
'''
with pytest.raises(Exception) as e:
status = dis_connect.count_collection(jac_collection)
def test_collection_rows_count_no_vectors(self, connect, jac_collection):
'''
target: test collection rows_count is correct or not, if collection is empty
method: create collection and no vectors in it,
assert the value returned by count_collection method is equal to 0
expected: the count is equal to 0
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size}
connect.create_collection(param)
status, res = connect.count_collection(jac_collection)
assert res == 0
def test_collection_rows_count_multi_collections(self, connect):
'''
        target: test collection rows_count is correct or not with multiple collections of JACCARD
method: create collection and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nq = 100
tmp, vectors = gen_binary_vectors(nq, dim)
collection_list = []
for i in range(20):
collection_name = gen_unique_str('test_collection_rows_count_multi_collections')
collection_list.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.JACCARD}
connect.create_collection(param)
res = connect.add_vectors(collection_name=collection_name, records=vectors)
connect.flush(collection_list)
for i in range(20):
status, res = connect.count_collection(collection_list[i])
assert status.OK()
assert res == nq
class TestCollectionCountBinary:
"""
    The fixture params supply different nb values; some values may trigger a segment merge and some may not.
"""
@pytest.fixture(
scope="function",
params=[
1,
5000,
20000,
],
)
def add_vectors_nb(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_hamming_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_substructure_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_superstructure_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
def test_collection_rows_count(self, connect, ham_collection, add_vectors_nb):
'''
target: test collection rows_count is correct or not
method: create collection and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
tmp, vectors = gen_binary_vectors(nb, dim)
res = connect.add_vectors(collection_name=ham_collection, records=vectors)
connect.flush([ham_collection])
status, res = connect.count_collection(ham_collection)
assert res == nb
def test_collection_rows_count_substructure(self, connect, substructure_collection, add_vectors_nb):
'''
target: test collection rows_count is correct or not
method: create collection and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
tmp, vectors = gen_binary_vectors(nb, dim)
res = connect.add_vectors(collection_name=substructure_collection, records=vectors)
connect.flush([substructure_collection])
status, res = connect.count_collection(substructure_collection)
assert res == nb
def test_collection_rows_count_superstructure(self, connect, superstructure_collection, add_vectors_nb):
'''
target: test collection rows_count is correct or not
method: create collection and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
tmp, vectors = gen_binary_vectors(nb, dim)
res = connect.add_vectors(collection_name=superstructure_collection, records=vectors)
connect.flush([superstructure_collection])
status, res = connect.count_collection(superstructure_collection)
assert res == nb
def test_collection_rows_count_after_index_created(self, connect, ham_collection, get_hamming_index):
'''
        target: test count_collection after an index has been created
        method: add vectors to the collection, create an index, then call count_collection with correct params
        expected: the count is equal to the length of vectors
'''
nb = 100
index_type = get_hamming_index["index_type"]
index_param = get_hamming_index["index_param"]
tmp, vectors = gen_binary_vectors(nb, dim)
res = connect.add_vectors(collection_name=ham_collection, records=vectors)
connect.flush([ham_collection])
connect.create_index(ham_collection, index_type, index_param)
status, res = connect.count_collection(ham_collection)
assert res == nb
def test_collection_rows_count_after_index_created_substructure(self, connect, substructure_collection, get_substructure_index):
'''
        target: test count_collection after an index has been created
        method: add vectors to the collection, create an index, then call count_collection with correct params
        expected: the count is equal to the length of vectors
'''
nb = 100
index_type = get_substructure_index["index_type"]
index_param = get_substructure_index["index_param"]
tmp, vectors = gen_binary_vectors(nb, dim)
res = connect.add_vectors(collection_name=substructure_collection, records=vectors)
connect.flush([substructure_collection])
connect.create_index(substructure_collection, index_type, index_param)
status, res = connect.count_collection(substructure_collection)
assert res == nb
def test_collection_rows_count_after_index_created_superstructure(self, connect, superstructure_collection, get_superstructure_index):
'''
        target: test count_collection after an index has been created
        method: add vectors to the collection, create an index, then call count_collection with correct params
        expected: the count is equal to the length of vectors
'''
nb = 100
index_type = get_superstructure_index["index_type"]
index_param = get_superstructure_index["index_param"]
tmp, vectors = gen_binary_vectors(nb, dim)
res = connect.add_vectors(collection_name=superstructure_collection, records=vectors)
connect.flush([superstructure_collection])
connect.create_index(superstructure_collection, index_type, index_param)
status, res = connect.count_collection(superstructure_collection)
assert res == nb
@pytest.mark.level(2)
def test_count_without_connection(self, ham_collection, dis_connect):
'''
target: test count_collection, without connection
method: calling count_collection with correct params, with a disconnected instance
expected: count_collection raise exception
'''
with pytest.raises(Exception) as e:
status = dis_connect.count_collection(ham_collection)
def test_collection_rows_count_no_vectors(self, connect, ham_collection):
'''
target: test collection rows_count is correct or not, if collection is empty
method: create collection and no vectors in it,
assert the value returned by count_collection method is equal to 0
expected: the count is equal to 0
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size}
connect.create_collection(param)
status, res = connect.count_collection(ham_collection)
assert res == 0
def test_collection_rows_count_multi_collections(self, connect):
'''
        target: test collection rows_count is correct or not with multiple collections of HAMMING
method: create collection and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nq = 100
tmp, vectors = gen_binary_vectors(nq, dim)
collection_list = []
for i in range(20):
collection_name = gen_unique_str('test_collection_rows_count_multi_collections')
collection_list.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.HAMMING}
connect.create_collection(param)
res = connect.add_vectors(collection_name=collection_name, records=vectors)
connect.flush(collection_list)
for i in range(20):
status, res = connect.count_collection(collection_list[i])
assert status.OK()
assert res == nq
class TestCollectionCountTANIMOTO:
"""
    The fixture params supply different nb values; some values may trigger a segment merge and some may not.
"""
@pytest.fixture(
scope="function",
params=[
1,
5000,
20000,
],
)
def add_vectors_nb(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_tanimoto_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
def test_collection_rows_count(self, connect, tanimoto_collection, add_vectors_nb):
'''
target: test collection rows_count is correct or not
method: create collection and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
tmp, vectors = gen_binary_vectors(nb, dim)
res = connect.add_vectors(collection_name=tanimoto_collection, records=vectors)
connect.flush([tanimoto_collection])
status, res = connect.count_collection(tanimoto_collection)
assert status.OK()
assert res == nb
|
postfix_message_info.py
|
import collections
import re
import os
import time
import king_phisher.plugins as plugin_opts
import king_phisher.server.database.manager as db_manager
import king_phisher.server.database.models as db_models
import king_phisher.server.fs_utilities as fs_utilities
import king_phisher.server.plugins as plugins
import king_phisher.server.signals as signals
import king_phisher.utilities as utilities
EXAMPLE_CONFIG = """\
log_file: /var/log/mail.log
"""
# Reduce debugging log lines L96:L98
#LOG_LINE_BLACKLIST = ['connect from localhost', 'disconnect from localhost', 'daemon started --']
def get_modified_time(path):
return os.stat(path).st_mtime
class LogInformation(object):
__slots__ = ('message_id', 'statuses', 'message_details')
def __init__(self, message_id):
self.message_id = message_id
self.statuses = collections.deque()
self.message_details = None
@property
def message_status(self):
if not self.statuses:
return None
return self.statuses[-1]
class Plugin(plugins.ServerPlugin):
authors = ['Skyler Knecht']
classifiers = ['Plugin :: Server']
title = 'Postfix Message Information'
description = """
A plugin that analyzes message information from the postfix logs to provide
King Phisher clients message status and detail information.
"""
homepage = 'https://github.com/securestate/king-phisher-plugins'
version = '1.0.1'
req_min_version = '1.14.0b1'
options = [
plugin_opts.OptionString(
name='log_file',
description='Location of the log file to parse through for information.',
default='/var/log/mail.log'
)
]
def initialize(self):
log_file = self.config['log_file']
setuid_username = self.root_config.get('server.setuid_username')
if setuid_username and not fs_utilities.access(log_file, mode=os.R_OK, user=setuid_username):
self.logger.error('permissions error, invalid access to {}'.format(log_file))
return False
signals.server_initialized.connect(self.on_server_initialized)
self.logger.info('{} has been initialized.'.format(self.title))
return True
def on_server_initialized(self, server):
self._worker_thread = utilities.Thread(target=self.check_file_change, args=(self.config['log_file'],))
self._worker_thread.start()
def finalize(self):
self._worker_thread.stop()
self._worker_thread.join()
def check_file_change(self, file):
old_modified_time = get_modified_time(file)
old_file_contents = self.get_file_contents(file)
while self._worker_thread.stop_flag.is_clear():
new_modified_time = get_modified_time(file)
if old_modified_time < new_modified_time:
new_file_contents = self.get_file_contents(file)
self.post_to_database(self.parse_logs(new_file_contents))
old_modified_time = new_modified_time
time.sleep(5)
@staticmethod
def get_file_contents(path):
with open(path, 'r') as file_h:
return file_h.readlines()
def parse_logs(self, log_lines):
results = {}
for line_number, line in enumerate(log_lines, 1):
log_id = re.search(r'postfix/[a-z]+\[\d+\]:\s+(?P<log_id>[0-9A-Z]{7,12}):\s+', line)
if not log_id:
# check blacklist strings to not spam log files
# if not any(string in line for string in LOG_LINE_BLACKLIST):
# self.logger.warning('failed to parse postfix log line: ' + str(line_number))
continue
log_id = log_id.group('log_id')
message_id = re.search(r'message-id=<(?P<mid>[0-9A-Za-z]{12,20})@', line)
status = re.search(r'status=(?P<status>[a-z]+)\s', line)
details = re.search(r'status=[a-z]+\s\((?P<details>.+)\)', line)
if log_id not in results and message_id:
results[log_id] = LogInformation(message_id=message_id.group('mid'))
if log_id in results and status:
results[log_id].statuses.append(status.group('status'))
if log_id in results and details:
results[log_id].message_details = details.group('details')
return results
@staticmethod
def post_to_database(results):
session = db_manager.Session
for log_info in results.values():
if not log_info.message_status:
continue
message = session.query(db_models.Message).filter_by(id=log_info.message_id).first()
if message:
message.delivery_status = log_info.message_status
message.delivery_details = log_info.message_details
session.add(message)
session.commit()
|
process_level.py
|
#!/usr/bin/python2
# TODO
# - Don't assume WGS84; instead read it from the input layer.
# - Find a better way to separate objects of the same type that have multiple
#   geometry types.
import subprocess
import multiprocessing as mp
from osgeo import osr
from osgeo import ogr
import sys
import fnmatch
import os
import time
import math
import socket
import itertools
rundir = "/opt/data/chart-installation/data_files_conversion/"
def send_progress(socket, min, max, current):
if socket:
s = "p:min={},max={},current={}\n".format(min, max, current)
try:
r = socket.sendall(s)
except:
print("Error sending progress information")
def process(level, source_path, output_path, progress_address=None, debug=None):
s = None
if progress_address:
try:
host, port = progress_address.split(":")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, int(port)))
except:
print("Error Creating socket")
print("CL{} | Starting processing".format(level))
# setup OGR environment options
os.environ["OGR_S57_OPTIONS"] = "SPLIT_MULTIPOINT=ON,ADD_SOUNDG_DEPTH=ON,RECODE_BY_DSSI=ON"
input_driver = "S57"
output_driver = "ESRI Shapefile"
output_path = os.path.join(output_path, str(level))
result = {}
os.makedirs(output_path)
files = glob(source_path, level)
driver = ogr.GetDriverByName(input_driver)
send_progress(s, 0, 5, 0)
print("CL{} | Merging S57 features".format(level))
for f in files:
# print f[:-4]
datasource = driver.Open(f, 0)
for layer in datasource:
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326) # dont assume WGS84!
objtype = layer.GetName()
for feature in layer:
                # need to find a better way to separate different geometry
                # types (Point, Polygon, Line, etc.) that share the same object
                # code (e.g. SOUNDG)
dtype = feature_datatype_name(feature)
if not dtype:
continue
key = objtype + "-" + dtype
if key not in result:
result[key] = []
result[key].append(feature)
    send_progress(s, 0, 5, 2)
out_driver = ogr.GetDriverByName(output_driver)
# dont assume WGS84
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
print("CL{} | Creating output shape files".format(level))
out_files = []
for key in result:
objtype, geom_type = key.split("-")
out_filename = get_name(geom_type, level, objtype)
out_files.append(out_filename)
filepath = os.path.join(output_path, out_filename)
featuresToFile(result[key], output_driver, filepath, srs, key)
send_progress(s, 0, 5, 3)
# PROCESS SOME OF THE LAYERS (SOUNDG, WRECKS and UWTROC)
print("CL{} | Preprocessing".format(level))
# SOUNDG
print("CL{} | Preprocessing soundg".format(level))
# get the soundg file path
try:
soundgs = [os.path.join(output_path, f)
for f in out_files if "SOUNDG" in f][0]
dsrc = ogr.Open(soundgs, 1)
layer = dsrc.GetLayer()
# add WHOLE NUM and FRACNUM fields
add_fields(layer, [("WHOLE_NUM", ogr.OFTInteger),
("FRAC_NUM", ogr.OFTInteger)])
# fill the WHOLE_NUM and FRAC_NUM fields
fill_fields(layer, [("WHOLE_NUM", fill_preproc_value_whole, ("DEPTH",)),
("FRAC_NUM", fill_preproc_value_frac, ("DEPTH",))])
except IndexError:
print("CL{} | WARNING {} is not available in data sources file".format(
level, dataset))
# WRECKS
print("CL{} | Preprocessing wrecks".format(level))
# get the wrecks file path
try:
wrecks = [os.path.join(output_path, f)
for f in out_files if "WRECKS" in f][0]
dsrc = ogr.Open(wrecks, 1)
layer = dsrc.GetLayer()
# add WHOLE NUM and FRACNUM fields
add_fields(layer, [("WHOLE_NUM", ogr.OFTInteger),
("FRAC_NUM", ogr.OFTInteger)])
# fill the WHOLE_NUM and FRAC_NUM fields
fill_fields(layer, [("WHOLE_NUM", fill_preproc_value_whole, ("VALSOU",)),
("FRAC_NUM", fill_preproc_value_frac, ("VALSOU",))])
except IndexError:
print("CL{} | WARNING {} is not available in data sources file".format(
level, dataset))
# UWTROC
print("CL{} | Preprocessing uwtroc".format(level))
# get the uwtroc file path
try:
uwtroc = [os.path.join(output_path, f)
for f in out_files if "UWTROC" in f][0]
dsrc = ogr.Open(uwtroc, 1)
layer = dsrc.GetLayer()
# add WHOLE NUM and FRACNUM fields
add_fields(layer, [("WHOLE_NUM", ogr.OFTInteger),
("FRAC_NUM", ogr.OFTInteger)])
# fill the WHOLE_NUM and FRAC_NUM fields
fill_fields(layer, [("WHOLE_NUM", fill_preproc_value_whole, ("VALSOU",)),
("FRAC_NUM", fill_preproc_value_frac, ("VALSOU",))])
except IndexError:
print("CL{} | WARNING {} is not available in data sources file".format(
level, dataset))
# SBDARE
print("CL{} | Preprocessing sbdare".format(level))
sbdares = [os.path.join(output_path, f)
for f in out_files if "SBDARE" in f]
for sbdare in sbdares:
dsrc = ogr.Open(sbdare, 1)
layer = dsrc.GetLayer()
# Add SEABED field for seabed material
add_fields(layer, [("SEABED", ogr.OFTString)])
# Fill the seabed material column with text
        fill_fields(layer, [("SEABED", fill_seabed, ())])
# MIPARE
print("CL{} | Preprocessing mipare".format(level))
# get the file
mipare = [os.path.join(output_path, f)
for f in out_files if ("MIPARE" in f) and ("poly" in f)]
for m in mipare:
        subprocess.call(os.path.join(
            os.getcwd(), "preproc_MIPARE.sh {}").format(m[:-4]), shell=True)
# RESARE
print("CL{} | Preprocessing resare".format(level))
mipare = [os.path.join(output_path, f)
for f in out_files if ("RESARE" in f) and ("poly" in f)]
for m in mipare:
subprocess.call(os.path.join(
os.getcwd(), "preproc_RESARE.sh {}").format(m[:-4]), shell=True)
# run shptree on the created shapefiles
print("CL{} | Indexing files".format(level))
send_progress(s, 0, 5, 4)
processes = []
for f in out_files:
processes.append(mp.Process(
target=shptree, args=(output_path + "/" + f,)))
# start
for p in processes:
p.start()
# join
for p in processes:
p.join()
send_progress(s, 0, 5, 5)
print("CL{} | Done".format(level))
if s:
s.close()
return
def glob(source_path, CL):
# Get all S57 basefiles (ending with .000) for a specific zoom level and
# return a list with the full path to all those files.
matches = []
for root, dirnames, filenames in os.walk(source_path):
for filename in fnmatch.filter(filenames, '??{CL}*.000'.format(CL=CL)):
matches.append(root + "/" + filename)
return matches
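# shptree is the MapServer utility that writes a .qix quadtree spatial index next to
# each shapefile; process() runs it in parallel over every output file so that map
# rendering can quickly skip features outside the requested extent.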
def shptree(f):
subprocess.call("shptree {} 1>/dev/null".format(f), shell=True)
def add_fields(layer, fielddefs):
# fielddefs = [(new_field_name, data_type),...]
#
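    # e.g. add_fields(layer, [("WHOLE_NUM", ogr.OFTInteger), ("FRAC_NUM", ogr.OFTInteger)])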
for fielddef in fielddefs:
layer.CreateField(ogr.FieldDefn(fielddef[0], fielddef[1]))
def fill_preproc_value_whole(feature, depth_field):
depth = feature.GetField(depth_field)
    if depth is None:
return None
return abs(int(depth))
def fill_preproc_value_frac(feature, depth_field):
depth = feature.GetField(depth_field)
    if depth is None:
return None
whole = feature.GetField("WHOLE_NUM")
frac = int(round(10 * (depth - math.floor(depth))))
if depth < 0:
return 10 - frac
else:
return frac
def fill_seabed(feature):
natsur_dic = {
4: "S",
1: "M",
2: "Cy",
3: "Si",
5: "St",
6: "G",
7: "P",
8: "Cb",
9: "R",
18: "Bo",
14: "Co",
17: "Sh"
}
natqua_dic = {
1: "f",
2: "m",
3: "c",
4: "bk",
5: "sy",
6: "so",
7: "sf",
8: "v",
9: "ca",
10: "h"
}
# natsur = seabed material
natsur = feature.GetField("NATSUR")
if natsur is not None:
try:
natsur = [int(n) for n in natsur.split(",")]
except:
print("ERROR: while processing natsur (%s)" % natsur)
else:
natsur = []
# natqua = seabed structure
natqua = feature.GetField("NATQUA")
if natqua is not None:
try:
natqua = [int(n) for n in natqua.split(",")]
except:
print("ERROR: while processing natqua (%s)" % natqua)
else:
natqua = []
# Merge the two seabed type columns
if natqua is not None:
data = itertools.zip_longest(natsur, natqua)
res = []
# build up the res list with strings to be merged to create the final text
for d in data:
natqua = d[1]
natsur = d[0]
if natsur is None:
natsur = ""
if natqua is None:
natqua = ""
if natsur in natsur_dic:
natsur_str = natsur_dic[natsur]
else:
natsur_str = ""
if natqua in natqua_dic:
natqua_str = natqua_dic[natqua]
else:
natqua_str = ""
a = natqua_str + natsur_str
res.append(a)
return ".".join(res)
def fill_fields(layer, fieldinfos):
# Fills the fields of all features in layer. Fieldinfo says which fields to
# change and to what.
# fieldinfo = [("Field_name_to_fill", function that takes a feature as first
# argument and returns the value to fill, optional extra args), ...]
for feature in layer:
for fieldinfo in fieldinfos:
field_name = fieldinfo[0]
func = fieldinfo[1]
args = fieldinfo[2]
data = func(feature, *args)
            if data is None:
feature.UnsetField(field_name)
else:
feature.SetField(field_name, data)
layer.SetFeature(feature)
def feature_datatype(feature):
# returns the geometry datatype (point, line, polygon) of the feature
geomref = feature.GetGeometryRef()
if geomref:
return geomref.GetGeometryType()
else:
return None
def feature_datatype_name(feature):
# returns the name of the geometry datatype of the feature
geomref = feature.GetGeometryRef()
if geomref:
return geomref.GetGeometryName()
else:
return None
def get_name(geom_type, level, objtype):
    # returns the final filename for a feature
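    # e.g. get_name("POINT", 3, "SOUNDG") -> "CL3-point-SOUNDG.shp" (illustrative values)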
if geom_type == "POLYGON":
geom_type = "poly"
elif geom_type == "LINESTRING":
geom_type = "line"
else:
geom_type = geom_type.lower()
return "CL{}-{}-{}.shp".format(level, geom_type, objtype)
def featuresToFile(features, dst_drv, dst_name, dst_srs, layer_name=None,
geomtype=None, overwrite=True):
if not features: # features is empty list
print("No Features Created")
return
drv = ogr.GetDriverByName(dst_drv)
if drv is None:
print("Driver not available ({})".format(dst_drv))
return
dsrc = drv.CreateDataSource(dst_name)
if dsrc is None:
print("DataSource creation failed")
return
if not geomtype:
f0 = features[0]
geomref = features[0].GetGeometryRef()
if geomref is not None:
geomtype = geomref.GetGeometryType()
else:
return
layer = dsrc.CreateLayer(layer_name, srs=dst_srs, geom_type=geomtype)
# Create the fields for the new file
for i in range(features[0].GetFieldCount()):
fieldDef = features[0].GetFieldDefnRef(i)
if "List" in ogr.GetFieldTypeName(fieldDef.GetType()):
t = ogr.GetFieldTypeName(fieldDef.GetType())[:-4]
if t == "String":
fieldDef = ogr.FieldDefn(fieldDef.GetName(), ogr.OFTString)
elif t == "Integer":
fieldDef = ogr.FieldDefn(fieldDef.GetName(), ogr.OFTInteger)
layer.CreateField(fieldDef)
# print layer_name
for feature in features:
layer.CreateFeature(feature)
|
find_spots_server.py
|
from __future__ import annotations
import http.server as server_base
import json
import logging
import multiprocessing
import sys
import time
import urllib.parse
import libtbx.phil
from cctbx import uctbx
from dxtbx.model.experiment_list import ExperimentListFactory
from libtbx.introspection import number_of_processors
from dials.algorithms.indexing import indexer
from dials.algorithms.integration.integrator import create_integrator
from dials.algorithms.profile_model.factory import ProfileModelFactory
from dials.algorithms.spot_finding import per_image_analysis
from dials.array_family import flex
from dials.command_line.find_spots import phil_scope as find_spots_phil_scope
from dials.command_line.index import phil_scope as index_phil_scope
from dials.command_line.integrate import phil_scope as integrate_phil_scope
from dials.util import Sorry, show_mail_handle_errors
from dials.util.options import ArgumentParser
logger = logging.getLogger("dials.command_line.find_spots_server")
help_message = """\
A client/server version of dials.find_spots with additional analysis including
estimation of resolution limits. Intended for quick feedback of image quality
during grid scans and data collections.
On the server machine::
dials.find_spots_server [nproc=8] [port=1234]
On the client machine::
dials.find_spots_client [host=hostname] [port=1234] [nproc=8] /path/to/image.cbf
The client will return a short xml string indicating the number of spots found
and several estimates of the resolution limit.
e.g.::
<response>
<image>/path/to/image_0001.cbf</image>
<spot_count>352</spot_count>
<spot_count_no_ice>263</spot_count_no_ice>
<d_min>1.46</d_min>
<d_min_method_1>1.92</d_min_method_1>
<d_min_method_2>1.68</d_min_method_2>
<total_intensity>56215</total_intensity>
</response>
* ``spot_count`` is the total number of spots found in given image
* ``spot_count_no_ice`` is the number of spots found excluding those at resolutions
where ice rings may be found
* ``d_min_method_1`` is equivalent to distl's resolution estimate method 1
* ``d_min_method_2`` is equivalent to distl's resolution estimate method 2
* ``total_intensity`` is the total intensity of all strong spots excluding those
at resolutions where ice rings may be found
Any valid ``dials.find_spots`` parameter may be passed to
``dials.find_spots_client``, e.g.::
dials.find_spots_client /path/to/image.cbf min_spot_size=2 d_min=2
To stop the server::
dials.find_spots_client stop [host=hostname] [port=1234]
"""
stop = False
def _filter_by_resolution(experiments, reflections, d_min=None, d_max=None):
reflections.centroid_px_to_mm(experiments)
reflections.map_centroids_to_reciprocal_space(experiments)
d_star_sq = flex.pow2(reflections["rlp"].norms())
reflections["d"] = uctbx.d_star_sq_as_d(d_star_sq)
# Filter based on resolution
if d_min is not None:
selection = reflections["d"] >= d_min
reflections = reflections.select(selection)
logger.debug(f"Selected {len(reflections)} reflections with d >= {d_min:f}")
# Filter based on resolution
if d_max is not None:
selection = reflections["d"] <= d_max
reflections = reflections.select(selection)
logger.debug(f"Selected {len(reflections)} reflections with d <= {d_max:f}")
return reflections
def work(filename, cl=None):
if cl is None:
cl = []
phil_scope = libtbx.phil.parse(
"""\
ice_rings {
filter = True
.type = bool
width = 0.004
.type = float(value_min=0.0)
}
index = False
.type = bool
integrate = False
.type = bool
indexing_min_spots = 10
.type = int(value_min=1)
"""
)
interp = phil_scope.command_line_argument_interpreter()
params, unhandled = interp.process_and_fetch(
cl, custom_processor="collect_remaining"
)
filter_ice = params.extract().ice_rings.filter
ice_rings_width = params.extract().ice_rings.width
index = params.extract().index
integrate = params.extract().integrate
indexing_min_spots = params.extract().indexing_min_spots
interp = find_spots_phil_scope.command_line_argument_interpreter()
phil_scope, unhandled = interp.process_and_fetch(
unhandled, custom_processor="collect_remaining"
)
logger.info("The following spotfinding parameters have been modified:")
logger.info(find_spots_phil_scope.fetch_diff(source=phil_scope).as_str())
params = phil_scope.extract()
# no need to write the hot mask in the server/client
params.spotfinder.write_hot_mask = False
experiments = ExperimentListFactory.from_filenames([filename])
if params.spotfinder.scan_range and len(experiments) > 1:
# This means we've imported a sequence of still image: select
# only the experiment, i.e. image, we're interested in
((start, end),) = params.spotfinder.scan_range
experiments = experiments[start - 1 : end]
# Avoid overhead of calculating per-pixel resolution masks in spotfinding
# and instead perform post-filtering of spot centroids by resolution
d_min = params.spotfinder.filter.d_min
d_max = params.spotfinder.filter.d_max
params.spotfinder.filter.d_min = None
params.spotfinder.filter.d_max = None
t0 = time.perf_counter()
reflections = flex.reflection_table.from_observations(experiments, params)
if d_min or d_max:
reflections = _filter_by_resolution(
experiments, reflections, d_min=d_min, d_max=d_max
)
t1 = time.perf_counter()
logger.info("Spotfinding took %.2f seconds", t1 - t0)
imageset = experiments.imagesets()[0]
reflections.centroid_px_to_mm(experiments)
reflections.map_centroids_to_reciprocal_space(experiments)
stats = per_image_analysis.stats_for_reflection_table(
reflections, filter_ice=filter_ice, ice_rings_width=ice_rings_width
)._asdict()
t2 = time.perf_counter()
logger.info("Resolution analysis took %.2f seconds", t2 - t1)
if index and stats["n_spots_no_ice"] > indexing_min_spots:
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
interp = index_phil_scope.command_line_argument_interpreter()
phil_scope, unhandled = interp.process_and_fetch(
unhandled, custom_processor="collect_remaining"
)
logger.info("The following indexing parameters have been modified:")
index_phil_scope.fetch_diff(source=phil_scope).show()
params = phil_scope.extract()
if (
imageset.get_goniometer() is not None
and imageset.get_scan() is not None
and imageset.get_scan().is_still()
):
imageset.set_goniometer(None)
imageset.set_scan(None)
try:
idxr = indexer.Indexer.from_parameters(
reflections, experiments, params=params
)
indexing_results = []
idxr.index()
indexed_sel = idxr.refined_reflections.get_flags(
idxr.refined_reflections.flags.indexed
)
indexed_sel &= ~(
idxr.refined_reflections.get_flags(
idxr.refined_reflections.flags.centroid_outlier
)
)
for i_expt, expt in enumerate(idxr.refined_experiments):
sel = idxr.refined_reflections["id"] == i_expt
sel &= indexed_sel
indexing_results.append(
{
"crystal": expt.crystal.to_dict(),
"n_indexed": sel.count(True),
"fraction_indexed": sel.count(True) / sel.size(),
}
)
stats["lattices"] = indexing_results
stats["n_indexed"] = indexed_sel.count(True)
stats["fraction_indexed"] = indexed_sel.count(True) / len(reflections)
except Exception as e:
logger.error(e)
stats["error"] = str(e)
finally:
t3 = time.perf_counter()
logger.info("Indexing took %.2f seconds", t3 - t2)
if integrate and "lattices" in stats:
interp = integrate_phil_scope.command_line_argument_interpreter()
phil_scope, unhandled = interp.process_and_fetch(
unhandled, custom_processor="collect_remaining"
)
logger.error("The following integration parameters have been modified:")
integrate_phil_scope.fetch_diff(source=phil_scope).show()
params = phil_scope.extract()
try:
params.profile.gaussian_rs.min_spots = 0
experiments = idxr.refined_experiments
reference = idxr.refined_reflections
predicted = flex.reflection_table.from_predictions_multi(
experiments,
dmin=params.prediction.d_min,
dmax=params.prediction.d_max,
margin=params.prediction.margin,
force_static=params.prediction.force_static,
)
matched, reference, unmatched = predicted.match_with_reference(
reference
)
assert len(matched) == len(predicted)
assert matched.count(True) <= len(reference)
if matched.count(True) == 0:
raise Sorry(
"""
Invalid input for reference reflections.
Zero reference spots were matched to predictions
"""
)
elif matched.count(True) != len(reference):
logger.info("")
logger.info("*" * 80)
logger.info(
"Warning: %d reference spots were not matched to predictions",
len(reference) - matched.count(True),
)
logger.info("*" * 80)
logger.info("")
# Compute the profile model
experiments = ProfileModelFactory.create(params, experiments, reference)
# Compute the bounding box
predicted.compute_bbox(experiments)
# Create the integrator
integrator = create_integrator(params, experiments, predicted)
# Integrate the reflections
reflections = integrator.integrate()
# print len(reflections)
stats["integrated_intensity"] = flex.sum(
reflections["intensity.sum.value"]
)
except Exception as e:
logger.error(e)
stats["error"] = str(e)
finally:
t4 = time.perf_counter()
logger.info("Integration took %.2f seconds", t4 - t3)
return stats
class handler(server_base.BaseHTTPRequestHandler):
def do_GET(self):
"""Respond to a GET request."""
if self.path == "/Ctrl-C":
self.send_response(200)
self.end_headers()
global stop
stop = True
return
filename = self.path.split(";")[0]
params = self.path.split(";")[1:]
# If we're passing a url through, then unquote and ignore leading /
if "%3A//" in filename:
filename = urllib.parse.unquote(filename[1:])
d = {"image": filename}
try:
stats = work(filename, params)
d.update(stats)
response = 200
except Exception as e:
d["error"] = str(e)
response = 500
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
response = json.dumps(d).encode()
self.wfile.write(response)
def serve(httpd):
try:
while not stop:
httpd.handle_request()
except KeyboardInterrupt:
pass
phil_scope = libtbx.phil.parse(
"""\
nproc = Auto
.type = int(value_min=1)
port = 1701
.type = int(value_min=1)
"""
)
def main(nproc, port):
server_class = server_base.HTTPServer
httpd = server_class(("", port), handler)
print(time.asctime(), "Serving %d processes on port %d" % (nproc, port))
for j in range(nproc - 1):
proc = multiprocessing.Process(target=serve, args=(httpd,))
proc.daemon = True
proc.start()
serve(httpd)
httpd.server_close()
print(time.asctime(), "done")
@show_mail_handle_errors()
def run(args=None):
usage = "dials.find_spots_server [options]"
# Python 3.8 on macOS... needs fork
if sys.hexversion >= 0x3080000 and sys.platform == "darwin":
multiprocessing.set_start_method("fork")
parser = ArgumentParser(usage=usage, phil=phil_scope, epilog=help_message)
params, options = parser.parse_args(args, show_diff_phil=True)
if params.nproc is libtbx.Auto:
params.nproc = number_of_processors(return_value_if_unknown=-1)
main(params.nproc, params.port)
if __name__ == "__main__":
run()
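# --- Illustrative client sketch (not part of the original command line program) ---
# The handler above accepts GET paths of the form "<image path>;<phil arg>;<phil arg>..."
# and replies with a JSON document of per-image statistics; "/Ctrl-C" asks the server
# to stop. The host, port and image path below are assumptions for demonstration only.
def _example_query(image_path, phil_args=(), host="localhost", port=1701):
    import json
    import urllib.request

    # Build the ";"-separated path that handler.do_GET splits apart.
    request_path = ";".join([image_path] + list(phil_args))
    url = "http://%s:%d/%s" % (host, port, request_path)
    with urllib.request.urlopen(url) as response:
        return json.loads(response.read().decode())

# e.g. stats = _example_query("/data/image_0001.cbf")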
|
managers.py
|
#
# Module providing the `SyncManager` class for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
#
# Imports
#
import os
import sys
import weakref
import threading
import array
import Queue
from traceback import format_exc
from multiprocessing import Process, current_process, active_children, Pool, util, connection
from multiprocessing.process import AuthenticationString
from multiprocessing.forking import exit, Popen, assert_spawning, ForkingPickler
from multiprocessing.util import Finalize, info
try:
from cPickle import PicklingError
except ImportError:
from pickle import PicklingError
#
# Register some things for pickling
#
def reduce_array(a):
return array.array, (a.typecode, a.tostring())
ForkingPickler.register(array.array, reduce_array)
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
#
# Type for identifying shared objects
#
class Token(object):
'''
    Type to uniquely identify a shared object
'''
__slots__ = ('typeid', 'address', 'id')
def __init__(self, typeid, address, id):
(self.typeid, self.address, self.id) = (typeid, address, id)
def __getstate__(self):
return (self.typeid, self.address, self.id)
def __setstate__(self, state):
(self.typeid, self.address, self.id) = state
def __repr__(self):
return 'Token(typeid=%r, address=%r, id=%r)' % \
(self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
'''
Send a message to manager using connection `c` and return response
'''
c.send((id, methodname, args, kwds))
kind, result = c.recv()
if kind == '#RETURN':
return result
raise convert_to_error(kind, result)
def convert_to_error(kind, result):
if kind == '#ERROR':
return result
elif kind == '#TRACEBACK':
assert type(result) is str
return RemoteError(result)
elif kind == '#UNSERIALIZABLE':
assert type(result) is str
return RemoteError('Unserializable message: %s\n' % result)
else:
return ValueError('Unrecognized message type')
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
#
# Functions for finding the method names of an object
#
def all_methods(obj):
'''
Return a list of names of methods of `obj`
'''
temp = []
for name in dir(obj):
func = getattr(obj, name)
if hasattr(func, '__call__'):
temp.append(name)
return temp
def public_methods(obj):
'''
Return a list of names of methods of `obj` which do not start with '_'
'''
return [name for name in all_methods(obj) if name[0] != '_']
#
# Server which is run in a process controlled by a manager
#
class Server(object):
'''
Server class which runs in a process controlled by a manager object
'''
public = ['shutdown', 'create', 'accept_connection', 'get_methods',
'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
def __init__(self, registry, address, authkey, serializer):
assert isinstance(authkey, bytes)
self.registry = registry
self.authkey = AuthenticationString(authkey)
Listener, Client = listener_client[serializer]
# do authentication later
self.listener = Listener(address=address, backlog=16)
self.address = self.listener.address
self.id_to_obj = {'0': (None, ())}
self.id_to_refcount = {}
self.mutex = threading.RLock()
self.stop = 0
def serve_forever(self):
'''
Run the server forever
'''
current_process()._manager_server = self
try:
try:
while 1:
try:
c = self.listener.accept()
except (OSError, IOError):
continue
t = threading.Thread(target=self.handle_request, args=(c,))
t.daemon = True
t.start()
except (KeyboardInterrupt, SystemExit):
pass
finally:
self.stop = 999
self.listener.close()
def handle_request(self, c):
'''
Handle a new connection
'''
funcname = result = request = None
try:
connection.deliver_challenge(c, self.authkey)
connection.answer_challenge(c, self.authkey)
request = c.recv()
ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
try:
result = func(c, *args, **kwds)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
msg = ('#RETURN', result)
try:
c.send(msg)
except Exception, e:
try:
c.send(('#TRACEBACK', format_exc()))
except Exception:
pass
util.info('Failure to send message: %r', msg)
util.info(' ... request was %r', request)
util.info(' ... exception was %r', e)
c.close()
def serve_client(self, conn):
'''
Handle requests from the proxies in a particular process/thread
'''
util.debug('starting server thread to service %r',
threading.current_thread().name)
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
while not self.stop:
try:
methodname = obj = None
request = recv()
ident, methodname, args, kwds = request
obj, exposed, gettypeid = id_to_obj[ident]
if methodname not in exposed:
raise AttributeError(
'method %r of %r object is not in exposed=%r' %
(methodname, type(obj), exposed)
)
function = getattr(obj, methodname)
try:
res = function(*args, **kwds)
except Exception, e:
msg = ('#ERROR', e)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
except AttributeError:
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(
self, conn, ident, obj, *args, **kwds
)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.current_thread().name)
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception, e:
send(('#UNSERIALIZABLE', repr(msg)))
except Exception, e:
util.info('exception in thread serving %r',
threading.current_thread().name)
util.info(' ... message was %r', msg)
util.info(' ... exception was %r', e)
conn.close()
sys.exit(1)
def fallback_getvalue(self, conn, ident, obj):
return obj
def fallback_str(self, conn, ident, obj):
return str(obj)
def fallback_repr(self, conn, ident, obj):
return repr(obj)
fallback_mapping = {
'__str__':fallback_str,
'__repr__':fallback_repr,
'#GETVALUE':fallback_getvalue
}
def dummy(self, c):
pass
def debug_info(self, c):
'''
Return some info --- useful to spot problems with refcounting
'''
self.mutex.acquire()
try:
result = []
keys = self.id_to_obj.keys()
keys.sort()
for ident in keys:
if ident != '0':
result.append(' %s: refcount=%s\n %s' %
(ident, self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
return '\n'.join(result)
finally:
self.mutex.release()
def number_of_objects(self, c):
'''
Number of shared objects
'''
return len(self.id_to_obj) - 1 # don't count ident='0'
def shutdown(self, c):
'''
Shutdown this process
'''
try:
try:
util.debug('manager received shutdown message')
c.send(('#RETURN', None))
if sys.stdout != sys.__stdout__:
util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
util._run_finalizers(0)
for p in active_children():
util.debug('terminating a child process of manager')
p.terminate()
for p in active_children():
util.debug('terminating a child process of manager')
p.join()
util._run_finalizers()
util.info('manager exiting with exitcode 0')
except:
import traceback
traceback.print_exc()
finally:
exit(0)
def create(self, c, typeid, *args, **kwds):
'''
Create a new shared object and return its id
'''
self.mutex.acquire()
try:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
if callable is None:
assert len(args) == 1 and not kwds
obj = args[0]
else:
obj = callable(*args, **kwds)
if exposed is None:
exposed = public_methods(obj)
if method_to_typeid is not None:
assert type(method_to_typeid) is dict
exposed = list(exposed) + list(method_to_typeid)
ident = '%x' % id(obj) # convert to string because xmlrpclib
# only has 32 bit signed integers
util.debug('%r callable returned object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = 0
# increment the reference count immediately, to avoid
# this object being garbage collected before a Proxy
# object for it can be created. The caller of create()
# is responsible for doing a decref once the Proxy object
# has been created.
self.incref(c, ident)
return ident, tuple(exposed)
finally:
self.mutex.release()
def get_methods(self, c, token):
'''
Return the methods of the shared object indicated by token
'''
return tuple(self.id_to_obj[token.id][1])
def accept_connection(self, c, name):
'''
Spawn a new thread to serve this connection
'''
threading.current_thread().name = name
c.send(('#RETURN', None))
self.serve_client(c)
def incref(self, c, ident):
self.mutex.acquire()
try:
self.id_to_refcount[ident] += 1
finally:
self.mutex.release()
def decref(self, c, ident):
self.mutex.acquire()
try:
assert self.id_to_refcount[ident] >= 1
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_obj[ident], self.id_to_refcount[ident]
util.debug('disposing of obj with id %r', ident)
finally:
self.mutex.release()
#
# Class to represent state of a manager
#
class State(object):
__slots__ = ['value']
INITIAL = 0
STARTED = 1
SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
listener_client = {
'pickle' : (connection.Listener, connection.Client),
'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
}
#
# Definition of BaseManager
#
class BaseManager(object):
'''
Base class for managers
'''
_registry = {}
_Server = Server
def __init__(self, address=None, authkey=None, serializer='pickle'):
if authkey is None:
authkey = current_process().authkey
self._address = address # XXX not final address if eg ('', 0)
self._authkey = AuthenticationString(authkey)
self._state = State()
self._state.value = State.INITIAL
self._serializer = serializer
self._Listener, self._Client = listener_client[serializer]
def __reduce__(self):
return type(self).from_address, \
(self._address, self._authkey, self._serializer)
def get_server(self):
'''
Return server object with serve_forever() method and address attribute
'''
assert self._state.value == State.INITIAL
return Server(self._registry, self._address,
self._authkey, self._serializer)
def connect(self):
'''
Connect manager object to the server process
'''
Listener, Client = listener_client[self._serializer]
conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
def start(self, initializer=None, initargs=()):
'''
Spawn a server process for this manager object
'''
assert self._state.value == State.INITIAL
if initializer is not None and not hasattr(initializer, '__call__'):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server
reader, writer = connection.Pipe(duplex=False)
# spawn process which runs a server
self._process = Process(
target=type(self)._run_server,
args=(self._registry, self._address, self._authkey,
self._serializer, writer, initializer, initargs),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = util.Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey,
self._state, self._Client),
exitpriority=0
)
@classmethod
def _run_server(cls, registry, address, authkey, serializer, writer,
initializer=None, initargs=()):
'''
Create a server, report its address and run it
'''
if initializer is not None:
initializer(*initargs)
# create server
server = cls._Server(registry, address, authkey, serializer)
# inform parent process of the server's address
writer.send(server.address)
writer.close()
# run the manager
util.info('manager serving at %r', server.address)
server.serve_forever()
def _create(self, typeid, *args, **kwds):
'''
Create a new shared object; return the token and exposed tuple
'''
assert self._state.value == State.STARTED, 'server not yet started'
conn = self._Client(self._address, authkey=self._authkey)
try:
id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
finally:
conn.close()
return Token(typeid, self._address, id), exposed
def join(self, timeout=None):
'''
Join the manager process (if it has been spawned)
'''
self._process.join(timeout)
def _debug_info(self):
'''
        Return some info about the server's shared objects and connections
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'debug_info')
finally:
conn.close()
def _number_of_objects(self):
'''
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'number_of_objects')
finally:
conn.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
@staticmethod
def _finalize_manager(process, address, authkey, state, _Client):
'''
Shutdown the manager process; will be registered as a finalizer
'''
if process.is_alive():
util.info('sending shutdown message to manager')
try:
conn = _Client(address, authkey=authkey)
try:
dispatch(conn, None, 'shutdown')
finally:
conn.close()
except Exception:
pass
process.join(timeout=0.2)
if process.is_alive():
util.info('manager still alive')
if hasattr(process, 'terminate'):
util.info('trying to `terminate()` manager process')
process.terminate()
process.join(timeout=0.1)
if process.is_alive():
util.info('manager still alive after terminate')
state.value = State.SHUTDOWN
try:
del BaseProxy._address_to_local[address]
except KeyError:
pass
address = property(lambda self: self._address)
@classmethod
def register(cls, typeid, callable=None, proxytype=None, exposed=None,
method_to_typeid=None, create_method=True):
'''
Register a typeid with the manager type
'''
if '_registry' not in cls.__dict__:
cls._registry = cls._registry.copy()
if proxytype is None:
proxytype = AutoProxy
exposed = exposed or getattr(proxytype, '_exposed_', None)
method_to_typeid = method_to_typeid or \
getattr(proxytype, '_method_to_typeid_', None)
if method_to_typeid:
for key, value in method_to_typeid.items():
assert type(key) is str, '%r is not a string' % key
assert type(value) is str, '%r is not a string' % value
cls._registry[typeid] = (
callable, exposed, method_to_typeid, proxytype
)
if create_method:
def temp(self, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
token, exp = self._create(typeid, *args, **kwds)
proxy = proxytype(
token, self._serializer, manager=self,
authkey=self._authkey, exposed=exp
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
temp.__name__ = typeid
setattr(cls, typeid, temp)
#
# Subclass of set which get cleared after a fork
#
class ProcessLocalSet(set):
def __init__(self):
util.register_after_fork(self, lambda obj: obj.clear())
def __reduce__(self):
return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
'''
A base for proxies of shared objects
'''
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True):
BaseProxy._mutex.acquire()
try:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
finally:
BaseProxy._mutex.release()
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._id = self._token.id
self._manager = manager
self._serializer = serializer
self._Client = listener_client[serializer][1]
if authkey is not None:
self._authkey = AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
self._authkey = current_process().authkey
if incref:
self._incref()
util.register_after_fork(self, BaseProxy._after_fork)
def _connect(self):
util.debug('making connection to manager')
name = current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
def _callmethod(self, methodname, args=(), kwds={}):
'''
        Try to call a method of the referent and return a copy of the result
'''
try:
conn = self._tls.connection
except AttributeError:
util.debug('thread %r does not own a connection',
threading.current_thread().name)
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
token.address = self._token.address
proxy = proxytype(
token, self._serializer, manager=self._manager,
authkey=self._authkey, exposed=exposed
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
raise convert_to_error(kind, result)
def _getvalue(self):
'''
Get a copy of the value of the referent
'''
return self._callmethod('#GETVALUE')
def _incref(self):
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'incref', (self._id,))
util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = util.Finalize(
self, BaseProxy._decref,
args=(self._token, self._authkey, state,
self._tls, self._idset, self._Client),
exitpriority=10
)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _Client(token.address, authkey=authkey)
dispatch(conn, None, 'decref', (token.id,))
except Exception, e:
util.debug('... decref failed %s', e)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more proxies so closing conn',
threading.current_thread().name)
tls.connection.close()
del tls.connection
def _after_fork(self):
self._manager = None
try:
self._incref()
except Exception, e:
# the proxy may just be for a manager which has shutdown
util.info('incref failed: %s' % e)
def __reduce__(self):
kwds = {}
if Popen.thread_is_spawning():
kwds['authkey'] = self._authkey
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy,
(AutoProxy, self._token, self._serializer, kwds))
else:
return (RebuildProxy,
(type(self), self._token, self._serializer, kwds))
def __deepcopy__(self, memo):
return self._getvalue()
def __repr__(self):
return '<%s object, typeid %r at %s>' % \
(type(self).__name__, self._token.typeid, '0x%x' % id(self))
def __str__(self):
'''
Return representation of the referent (or a fall-back if that fails)
'''
try:
return self._callmethod('__repr__')
except Exception:
return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
'''
Function used for unpickling proxy objects.
If possible the shared object is returned, or otherwise a proxy for it.
'''
server = getattr(current_process(), '_manager_server', None)
if server and server.address == token.address:
return server.id_to_obj[token.id][0]
else:
incref = (
kwds.pop('incref', True) and
not getattr(current_process(), '_inheriting', False)
)
return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
'''
Return a proxy type whose methods are given by `exposed`
'''
exposed = tuple(exposed)
try:
return _cache[(name, exposed)]
except KeyError:
pass
dic = {}
for meth in exposed:
exec '''def %s(self, *args, **kwds):
return self._callmethod(%r, args, kwds)''' % (meth, meth) in dic
ProxyType = type(name, (BaseProxy,), dic)
ProxyType._exposed_ = exposed
_cache[(name, exposed)] = ProxyType
return ProxyType
def AutoProxy(token, serializer, manager=None, authkey=None,
exposed=None, incref=True):
'''
Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
exposed = dispatch(conn, None, 'get_methods', (token,))
finally:
conn.close()
if authkey is None and manager is not None:
authkey = manager._authkey
if authkey is None:
authkey = current_process().authkey
ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
incref=incref)
proxy._isauto = True
return proxy
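# --- Illustrative sketch (not part of the original module) ---
# MakeProxyType() builds a BaseProxy subclass whose methods simply forward to
# _callmethod(), and BaseManager.register() wires a typeid to a callable and a proxy
# type. The Counter class, proxy type and manager below are illustration only.
class _ExampleCounter(object):
    def __init__(self):
        self.count = 0
    def increment(self):
        self.count += 1
        return self.count

_ExampleCounterProxy = MakeProxyType('_ExampleCounterProxy', ('increment',))

class _ExampleManager(BaseManager):
    pass

# After this, _ExampleManager().start() would expose manager.Counter() proxies
# whose increment() calls run in the manager's server process.
_ExampleManager.register('Counter', _ExampleCounter, proxytype=_ExampleCounterProxy)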
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = self.__dict__.items()
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def get(self):
return self._value
def set(self, value):
self._value = value
def __repr__(self):
return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
value = property(get, set)
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
# XXX remove methods for Py3.0 and Py2.6
_exposed_ = ('__next__', 'next', 'send', 'throw', 'close')
def __iter__(self):
return self
def __next__(self, *args):
return self._callmethod('__next__', args)
def next(self, *args):
return self._callmethod('next', args)
def send(self, *args):
return self._callmethod('send', args)
def throw(self, *args):
return self._callmethod('throw', args)
def close(self, *args):
return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
_exposed_ = ('acquire', 'release')
def acquire(self, blocking=True):
return self._callmethod('acquire', (blocking,))
def release(self):
return self._callmethod('release')
def __enter__(self):
return self._callmethod('acquire')
def __exit__(self, exc_type, exc_val, exc_tb):
return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
    # XXX will Condition.notifyAll() name be available in Py3.0?
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def notify(self):
return self._callmethod('notify')
def notify_all(self):
return self._callmethod('notify_all')
class EventProxy(BaseProxy):
_exposed_ = ('is_set', 'set', 'clear', 'wait')
def is_set(self):
return self._callmethod('is_set')
def set(self):
return self._callmethod('set')
def clear(self):
return self._callmethod('clear')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
class NamespaceProxy(BaseProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
def __getattr__(self, key):
if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__getattribute__', (key,))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__setattr__', (key, value))
def __delattr__(self, key):
if key[0] == '_':
return object.__delattr__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
return self._callmethod('get')
def set(self, value):
return self._callmethod('set', (value,))
value = property(get, set)
BaseListProxy = MakeProxyType('BaseListProxy', (
'__add__', '__contains__', '__delitem__', '__delslice__',
'__getitem__', '__getslice__', '__len__', '__mul__',
'__reversed__', '__rmul__', '__setitem__', '__setslice__',
'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
'reverse', 'sort', '__imul__'
)) # XXX __getslice__ and __setslice__ unneeded in Py3.0
class ListProxy(BaseListProxy):
def __iadd__(self, value):
self._callmethod('extend', (value,))
return self
def __imul__(self, value):
self._callmethod('__imul__', (value,))
return self
DictProxy = MakeProxyType('DictProxy', (
'__contains__', '__delitem__', '__getitem__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
))
ArrayProxy = MakeProxyType('ArrayProxy', (
'__len__', '__getitem__', '__setitem__', '__getslice__', '__setslice__'
)) # XXX __getslice__ and __setslice__ unneeded in Py3.0
PoolProxy = MakeProxyType('PoolProxy', (
'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
'map', 'map_async', 'terminate'
))
PoolProxy._method_to_typeid_ = {
'apply_async': 'AsyncResult',
'map_async': 'AsyncResult',
'imap': 'Iterator',
'imap_unordered': 'Iterator'
}
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
'''
Subclass of `BaseManager` which supports a number of shared object types.
The types registered are those intended for the synchronization
of threads, plus `dict`, `list` and `Namespace`.
The `multiprocessing.Manager()` function creates started instances of
this class.
'''
SyncManager.register('Queue', Queue.Queue)
SyncManager.register('JoinableQueue', Queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Pool', Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)
# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
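# --- Illustrative sketch (not part of the original module) ---
# Typical use of the SyncManager defined above: start a server process, create a few
# proxied objects, and shut the server down again (the Finalize registered in start()
# would also do this at interpreter exit). Written in the Python 2 dialect used here.
def _sync_manager_example():
    manager = SyncManager()
    manager.start()
    try:
        shared_list = manager.list()      # ListProxy backed by the server process
        shared_dict = manager.dict()      # DictProxy
        shared_list.append(1)
        shared_dict['answer'] = 42
        return list(shared_list), shared_dict.copy()
    finally:
        manager.shutdown()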
|
threading.py
|
"""A threading based handler.
The :class:`SequentialThreadingHandler` is intended for regular Python
environments that use threads.
.. warning::
Do not use :class:`SequentialThreadingHandler` with applications
using asynchronous event loops (like gevent). Use the
:class:`~kazoo.handlers.gevent.SequentialGeventHandler` instead.
"""
from __future__ import absolute_import
import atexit
import logging
import select
import socket
import threading
import time
try:
import Queue
except ImportError: # pragma: nocover
import queue as Queue
from kazoo.handlers.utils import create_tcp_socket, create_tcp_connection
# sentinel objects
_NONE = object()
_STOP = object()
log = logging.getLogger(__name__)
class TimeoutError(Exception):
pass
class AsyncResult(object):
"""A one-time event that stores a value or an exception"""
def __init__(self, handler):
self._handler = handler
self.value = None
self._exception = _NONE
self._condition = threading.Condition()
self._callbacks = []
def ready(self):
"""Return true if and only if it holds a value or an
exception"""
return self._exception is not _NONE
def successful(self):
"""Return true if and only if it is ready and holds a value"""
return self._exception is None
@property
def exception(self):
if self._exception is not _NONE:
return self._exception
def set(self, value=None):
"""Store the value. Wake up the waiters."""
with self._condition:
self.value = value
self._exception = None
for callback in self._callbacks:
self._handler.completion_queue.put(
lambda: callback(self)
)
self._condition.notify_all()
def set_exception(self, exception):
"""Store the exception. Wake up the waiters."""
with self._condition:
self._exception = exception
for callback in self._callbacks:
self._handler.completion_queue.put(
lambda: callback(self)
)
self._condition.notify_all()
def get(self, block=True, timeout=None):
"""Return the stored value or raise the exception.
        If the value is not available in time, raise TimeoutError.
"""
with self._condition:
if self._exception is not _NONE:
if self._exception is None:
return self.value
raise self._exception
elif block:
self._condition.wait(timeout)
if self._exception is not _NONE:
if self._exception is None:
return self.value
raise self._exception
            # if we get to this point we timed out
raise TimeoutError()
def get_nowait(self):
"""Return the value or raise the exception without blocking.
If nothing is available, raises TimeoutError
"""
return self.get(block=False)
def wait(self, timeout=None):
"""Block until the instance is ready."""
with self._condition:
self._condition.wait(timeout)
return self._exception is not _NONE
def rawlink(self, callback):
"""Register a callback to call when a value or an exception is
set"""
with self._condition:
# Are we already set? Dispatch it now
if self.ready():
self._handler.completion_queue.put(
lambda: callback(self)
)
return
if callback not in self._callbacks:
self._callbacks.append(callback)
def unlink(self, callback):
"""Remove the callback set by :meth:`rawlink`"""
with self._condition:
if self.ready():
# Already triggered, ignore
return
if callback in self._callbacks:
self._callbacks.remove(callback)
class SequentialThreadingHandler(object):
"""Threading handler for sequentially executing callbacks.
This handler executes callbacks in a sequential manner. A queue is
created for each of the callback events, so that each type of event
has its callback type run sequentially. These are split into two
queues, one for watch events and one for async result completion
callbacks.
Each queue type has a thread worker that pulls the callback event
off the queue and runs it in the order the client sees it.
This split helps ensure that watch callbacks won't block session
re-establishment should the connection be lost during a Zookeeper
client call.
Watch and completion callbacks should avoid blocking behavior as
the next callback of that type won't be run until it completes. If
you need to block, spawn a new thread and return immediately so
callbacks can proceed.
.. note::
Completion callbacks can block to wait on Zookeeper calls, but
no other completion callbacks will execute until the callback
returns.
"""
name = "sequential_threading_handler"
timeout_exception = TimeoutError
sleep_func = staticmethod(time.sleep)
queue_impl = Queue.Queue
queue_empty = Queue.Empty
def __init__(self):
"""Create a :class:`SequentialThreadingHandler` instance"""
self.callback_queue = self.queue_impl()
self.completion_queue = self.queue_impl()
self._running = False
self._state_change = threading.Lock()
self._workers = []
def _create_thread_worker(self, queue):
def thread_worker(): # pragma: nocover
while True:
try:
func = queue.get()
try:
if func is _STOP:
break
func()
except Exception:
log.exception("Exception in worker queue thread")
finally:
queue.task_done()
except self.queue_empty:
continue
t = threading.Thread(target=thread_worker)
# Even though these should be joined, it's possible stop might
# not issue in time so we set them to daemon to let the program
# exit anyways
t.daemon = True
t.start()
return t
def start(self):
"""Start the worker threads."""
with self._state_change:
if self._running:
return
# Spawn our worker threads, we have
# - A callback worker for watch events to be called
# - A completion worker for completion events to be called
for queue in (self.completion_queue, self.callback_queue):
w = self._create_thread_worker(queue)
self._workers.append(w)
self._running = True
atexit.register(self.stop)
def stop(self):
"""Stop the worker threads and empty all queues."""
with self._state_change:
if not self._running:
return
self._running = False
for queue in (self.completion_queue, self.callback_queue):
queue.put(_STOP)
self._workers.reverse()
while self._workers:
worker = self._workers.pop()
worker.join()
# Clear the queues
self.callback_queue = self.queue_impl()
self.completion_queue = self.queue_impl()
if hasattr(atexit, "unregister"):
atexit.unregister(self.stop)
def select(self, *args, **kwargs):
return select.select(*args, **kwargs)
def socket(self):
return create_tcp_socket(socket)
def create_connection(self, *args, **kwargs):
return create_tcp_connection(socket, *args, **kwargs)
def event_object(self):
"""Create an appropriate Event object"""
return threading.Event()
def lock_object(self):
"""Create a lock object"""
return threading.Lock()
def rlock_object(self):
"""Create an appropriate RLock object"""
return threading.RLock()
def async_result(self):
"""Create a :class:`AsyncResult` instance"""
return AsyncResult(self)
def spawn(self, func, *args, **kwargs):
t = threading.Thread(target=func, args=args, kwargs=kwargs)
t.daemon = True
t.start()
return t
def dispatch_callback(self, callback):
"""Dispatch to the callback object
The callback is put on separate queues to run depending on the
type as documented for the :class:`SequentialThreadingHandler`.
"""
self.callback_queue.put(lambda: callback.func(*callback.args))
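# --- Illustrative sketch (not part of the original module) ---
# The handler above is normally driven by a kazoo client, but the AsyncResult /
# worker-queue flow can be exercised on its own as below. Everything here uses only
# names defined in this module plus the standard library.
def _handler_example():
    handler = SequentialThreadingHandler()
    handler.start()
    try:
        result = handler.async_result()
        # rawlink callbacks are dispatched on the completion-queue worker thread.
        result.rawlink(lambda r: log.info("async result ready: %r", r.get_nowait()))
        handler.spawn(result.set, "done")   # set the value from a daemon thread
        return result.get(timeout=5)        # returns once set() wakes the waiters
    finally:
        handler.stop()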
|
__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# NextionDisplay Version 0.0.9.6
# Assembled by JamFfm
#
# sudo pip install pyserial # install serial, unlikely you have to
# # install it because usually it is already installed
# python -m serial.tools.list_ports # List all ports in command-box
# dmesg | grep tty # List serial Connections
from modules import cbpi, fermenter, app
import time
import serial
import socket # ip adr
import fcntl # ip adr
import struct # ip adr
from time import strftime # Time display
from time import sleep
import threading
DEBUG = False # toggle writing of debug information in the app.log
TERMINATOR = bytearray([0xFF, 0xFF, 0xFF])
liste = []
listetarget = []
FERMLISTE = []
FERMLISTETARGET = []
global max_value_old
max_value_old = 0
global min_value_old
min_value_old = 0
global fmax_value_old
fmax_value_old = 0
global fmin_value_old
fmin_value_old = 0
def nx_setsys(ser, sysvar, value): # Set system variables. sysvar as text. example: sysvar='dim'
# Possible commands: 'bkcmd', 'dp', 'dim', 'dims', 'baud', 'bauds', 'ussp', 'thsp', 'thup', 'delay', 'sleep'
# see instruction set of NEXTION device to see possible values for each system variable
setdisplay = ('%s=%s' % (sysvar, str(value)))
if DEBUG: cbpi.app.logger.info('NextionDisplay - nx_setsys:%s' % setdisplay)
ser.write(setdisplay)
ser.write(TERMINATOR)
def writingDigittoNextion(ser, kettleID=1):
unit = "°" + str(set_nextion_unit())
ctemp = currenttemp_float(kettleID)
# Current Temperature in text field
TextDigitTxt2 = ("%6.2f%s" % (ctemp, unit)) # UTF8° = U+00B0
textCurrTemp2 = str(TextDigitTxt2)
# if DEBUG: cbpi.app.logger.info('NextionDisplay - digit CurrTempTxt.txt:%s' % (textCurrTemp2))
NextionwriteString(ser, "CurrTempTxt", textCurrTemp2)
# Target Temp in Text Field
TextDigitTxt3 = ("%6.2f%s" % (float(TempTargTemp(kettleID)), unit))
textCurrTemp3 = str(TextDigitTxt3)
# if DEBUG: cbpi.app.logger.info('NextionDisplay - TargTempTxt.txt:%s' % (textCurrTemp3))
NextionwriteString(ser, "TargetTempTxt", textCurrTemp3)
# Current Kettlename in text field
kname = kettlename(kettleID)
textname = "Temperature of %s" % kname
NextionwriteString(ser, "t3", textname)
def writing_multi_to_nextion(ser):
unit = ("°" + str(set_nextion_unit()))
NextionwriteString(ser, "unitMBrew", str(unit))
stepnameMTxt = restname()
NextionwriteString(ser, "StepnameMTxt", stepnameMTxt)
for x in range(1, 5):
try:
kettle_name_field = "KettlenMTxt" + str(x)
KettlenMTxt = kettlename(x)
current_temp_field = "CurrMTemp" + str(x)
CurrMTemp = ("%6.2f%s" % (currenttemp_float(x), "°"))
target_temp_field = "TarTempMTxt" + str(x)
TarTempMTxt = ("%6.2f%s" % (targettemp_float(x), "°"))
heater_status_field = "heaterstatusM" + str(x)
NextionwriteString(ser, kettle_name_field, KettlenMTxt)
NextionwriteString(ser, current_temp_field, CurrMTemp)
NextionwriteString(ser, target_temp_field, TarTempMTxt)
if heater_status(x) == "on":
if DEBUG: cbpi.app.logger.info("NextionDisplay - writing_multi_to_nextion: heater status on")
Nextionshowhideobject(ser, heater_status_field, "on")
else:
Nextionshowhideobject(ser, heater_status_field, "off")
except:
if DEBUG: cbpi.app.logger.info("NextionDisplay - writing_multi_to_nextion: no Kettle %s found" % x)
heater_status_field = "heaterstatusM" + str(x)
Nextionshowhideobject(ser, heater_status_field, "off")
pass
pass
    time_remaining(ser)  # note: the name of the text field is defined inside the function (currently "remBrewTime")
val = multiprogressbarvalue()
Nextionprogressbar(ser, "RemainTimeMBar", val)
def Nextionshowhideobject(ser, objectname, onoff):
if onoff == "on":
onoff = 1
elif onoff == "off":
onoff = 0
else:
cbpi.app.logger.info("NextionDisplay - Nextionshowhideobject onoff is not on or off: %s" % onoff)
if onoff == 1 or onoff == 0:
command = ('vis %s,%s' % (objectname, onoff))
ser.write(command)
ser.write(TERMINATOR)
pass
def multiprogressbarvalue():
s = cbpi.cache.get("active_step")
if DEBUG: cbpi.app.logger.info('NextionDisplay - multiprogressbarvalue: %s' % s)
try:
if s.timer_end is not None:
val = int(100 - (((s.timer_end - time.time()) * 100) / (s.timer*60)))
if DEBUG: cbpi.app.logger.info('NextionDisplay - multiprogressbarvalue: %s%s' % (val, "%"))
return val
else:
if DEBUG: cbpi.app.logger.info('NextionDisplay - multiprogressbarvalue: no timer running')
return 0
pass
except:
if DEBUG: cbpi.app.logger.info('NextionDisplay - multiprogressbarvalue: no active step ')
return 0
pass
pass
def Nextionprogressbar(ser, barname, val):
try:
command = ("%s.val=%s" % (barname, str(val)))
if DEBUG: cbpi.app.logger.info('NextionDisplay - Nextionprogressbar command Txt:%s' % command)
ser.write(command)
ser.write(TERMINATOR)
except:
        cbpi.app.logger.info('NextionDisplay - Nextionprogressbar cannot convert val into a string: %s' % val)
pass
def NextionwriteString(ser, TextLableName, string):
"""
:param ser: name of the serial connection
    :param TextLableName: name of the text label on the Nextion
    :param string: the string to write into this label
use like NextionwriteString(ser, "TextLabelName", "string")
"""
command = ('%s.txt="%s"' % (TextLableName, string))
# if DEBUG: cbpi.app.logger.info('NextionDisplay - command Txt:%s' % command)
ser.write(command)
ser.write(TERMINATOR)
def NextionwriteWave(ser, WaveID, Channnel, intValue):
command = ('add %s,%s,%s' % (WaveID, Channnel, intValue))
# if DEBUG: cbpi.app.logger.info('NextionDisplay - command Wave:%s' % command)
ser.write(command)
ser.write(TERMINATOR)
def NextionwriteNumber(ser, NumberLableName, integer):
command = ('%s.val=%s' % (NumberLableName, integer))
# if DEBUG: cbpi.app.logger.info('NextionDisplay - command Number:%s' % command)
ser.write(command)
ser.write(TERMINATOR)
def NextionwriteClear(ser, WaveID, channel):
command = ('cle %s,%s' % (WaveID, channel))
# if DEBUG: cbpi.app.logger.info('NextionDisplay - command Number:%s' % command)
ser.write(command)
ser.write(TERMINATOR)
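# --- Illustrative sketch (not part of the original plugin) ---
# Every command sent to the Nextion in this module is a plain command string followed
# by the three 0xFF terminator bytes (TERMINATOR above). The serial device path, baud
# rate and the "n0" component name below are assumptions for demonstration only.
def _nextion_write_example():
    ser = serial.Serial("/dev/ttyAMA0", 9600, timeout=1)
    try:
        nx_setsys(ser, "dim", 100)                       # backlight to full brightness
        NextionwriteString(ser, "CurrTempTxt", "65.00")  # update a text component
        NextionwriteNumber(ser, "n0", 42)                # update a number component
    finally:
        ser.close()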
def Nextion_ref_wave(ser, stop_start):
"""
    :param ser: name of the serial connection
    :param stop_start: either "ref_stop" or "ref_star"
    use as: Nextion_ref_wave(ser, "ref_stop") or Nextion_ref_wave(ser, "ref_star")
    this is like a substitute for the Nextion addt function:
    it stops and restarts refreshing of the wave graph
"""
if stop_start == "ref_stop" or stop_start == "ref_star":
command = stop_start
ser.write(command)
ser.write(TERMINATOR)
else:
cbpi.app.logger.info("NextionDisplay - ref_wave error: stop_start not ref_stop or ref_star: %s" % stop_start)
pass
def Nextion_refresh_wave(ser, waveid, channnel, amountofbytes, addtliste):
"""
not used anymore because addt function is not reliable, use ref_stop or ref_star instead
:param ser: serial object
:param waveid: id of the wave item on the Nextion as integer
:param channnel: channel of wave as integer
    :param amountofbytes: number of bytes, equal to the number of values to send, as integer
    :param addtliste: the list of values to send
"""
command = ('addt %s,%s,%s' % (waveid, channnel, amountofbytes))
# if DEBUG: cbpi.app.logger.info('NextionDisplay - command Wave:%s' % command)
ser.write(command)
ser.write(TERMINATOR)
sleep(0.6)
ser.write(addtliste)
ser.write(TERMINATOR)
def writewave(ser, kettleID=1, erase=False, rewrite=False, dubbleline=True):
currenttemp = currenttemp_float(kettleID)
targettemp = targettemp_float(kettleID)
unit = set_nextion_unit()
# Current Temperature in text field
TextDigitTxt0 = ("%6.2f%s" % (currenttemp, ("°"+str(unit))))
textCurrTemp0 = str(TextDigitTxt0)
NextionwriteString(ser, "CurrTempBrwTxt", textCurrTemp0)
# Target Temp in Text Field
TextDigitTxt1 = ("%6.2f%s" % (targettemp, ("°"+str(unit))))
textCurrTemp1 = str(TextDigitTxt1)
NextionwriteString(ser, "TargTempBrwTxt", textCurrTemp1)
# Current Kettlename in text field
kettlen = kettlename(kettleID)
NextionwriteString(ser, "KettleNameTxt", kettlen)
# rest name
restn = restname()
NextionwriteString(ser, "RestNameTxt", restn)
# remaining time of step
time_remaining(ser)
# build liste of current temp values
if erase is True:
del liste[:]
    elif len(liste) < 406:  # the width of the wave object on Nextion; if you change the wave width this has to be adjusted
liste.append(currenttemp)
else:
del liste[0]
liste.append(currenttemp)
# if DEBUG: cbpi.app.logger.info('NextionDisplay - TempListe bigger 407:%s' % (len(liste)))
if DEBUG: cbpi.app.logger.info('NextionDisplay - TempListe len(liste):%s' % (len(liste)))
# build liste of current targettemp values len(listetarget) can be different to len(liste)
if erase is True:
del listetarget[:]
    if len(listetarget) < 406:  # the width of the wave object on
        # Nextion; if you change the wave width this has to be adjusted
listetarget.append(targettemp)
else:
del listetarget[0]
listetarget.append(targettemp)
# if DEBUG: cbpi.app.logger.info('NextionDisplay - targetListe len(listetarget):%s' % (len(listetarget)))
# min max labels for scale
max_value = round((max(liste)+0.1), 1)
min_value = round((min(liste)-0.1), 1)
NextionwriteString(ser, "tmax", "%s%s" % (max_value, ("°"+str(unit))))
NextionwriteString(ser, "tmin", "%s%s" % (min_value, ("°"+str(unit))))
NextionwriteString(ser, "tavarage", "%s%s" % (round(((max_value+min_value)/2), 1), ("°"+str(unit))))
# get the scaling-factor
offset = (max_value - min_value)
xpixel = 202 # the height of the wave object on Nextion if you change the wave-height this has to be adjusted
factor = (xpixel / offset)
global min_value_old
global max_value_old
if max_value != max_value_old or min_value != min_value_old or rewrite is True:
if DEBUG: cbpi.app.logger.info('NextionDisplay - rewrite')
NextionwriteClear(ser, 1, 0) # BrewTemp
NextionwriteClear(ser, 1, 1) # BrewTemp try to adjust thickness of line
NextionwriteClear(ser, 1, 2) # TargetTemp
NextionwriteClear(ser, 1, 3) # TargetTemp try to adjust thickness of line
i = 0
Nextion_ref_wave(ser, "ref_stop")
while i < len(liste):
if DEBUG: cbpi.app.logger.info('NextionDisplay - liste:%s' % (liste[i]))
digit = (round(float((liste[i] - min_value) * factor), 2))
digit2 = digit + 1 # try to adjust thickness of line
string = (str(round(float(digit)))[:-2])
string2 = (str(round(float(digit2)))[:-2]) # try to adjust thickness of line
NextionwriteWave(ser, 1, 0, string)
if dubbleline: NextionwriteWave(ser, 1, 1, string2) # try to adjust thickness of line
if DEBUG: cbpi.app.logger.info('NextionDisplay - dubbleline rewrite: %s' % dubbleline)
# targettemp
# if DEBUG: cbpi.app.logger.info('NextionDisplay - listetarget:%s' % (listetarget[i]))
target = (round(float((listetarget[i] - min_value) * factor), 2))
target2 = target + 1
tstring = (str(round(float(target)))[:-2])
tstring2 = (str(round(float(target2)))[:-2])
if 0 < target < xpixel: # do not write target line if not in temp/screen range
NextionwriteWave(ser, 1, 2, tstring)
if dubbleline: NextionwriteWave(ser, 1, 3, tstring2)
if DEBUG: cbpi.app.logger.info(
'NextionDisplay - listetarget[i], target, tstring: %s, %s, %s' % (listetarget[i], target, tstring))
pass
if DEBUG: cbpi.app.logger.info('NextionDisplay - liste(i), digit, string: %s, %s, %s' % (liste[i], digit, string))
i += 1
Nextion_ref_wave(ser, "ref_star")
else:
digit = (round(float((currenttemp - min_value) * factor), 2))
digit2 = digit+1 # try to adjust thickness of line
string = (str(round(float(digit)))[:-2])
string2 = (str(round(float(digit2)))[:-2]) # try to adjust thickness of line
NextionwriteWave(ser, 1, 0, string)
if dubbleline: NextionwriteWave(ser, 1, 1, string2) # try to adjust thickness of line
if DEBUG: cbpi.app.logger.info('NextionDisplay - currenttemp, digit, string: %s, %s, %s' % (currenttemp, digit, string))
# target Temp
target = (round(float((targettemp - min_value) * factor), 2))
target2 = target + 1
tstring = (str(round(float(target)))[:-2])
tstring2 = (str(round(float(target2)))[:-2]) # try to adjust thickness of line
if 0 < target < xpixel: # do not write target line if not in temp/ screen range
NextionwriteWave(ser, 1, 2, tstring)
if dubbleline: NextionwriteWave(ser, 1, 3, tstring2) # try to adjust thickness of line
if DEBUG: cbpi.app.logger.info(
'NextionDisplay - targettemp, target, tstring: %s, %s, %s' % (targettemp, target, tstring))
else:
pass
pass
# if DEBUG: cbpi.app.logger.info('NextionDisplay - max and min value: %s, %s' % (max_value, min_value))
global max_value_old
max_value_old = max_value
global min_value_old
min_value_old = min_value
return None
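# --- Illustrative sketch (not part of the original plugin) ---
# The scaling above maps a temperature t in [min_value, max_value] onto the 0..xpixel
# pixel range of the Nextion waveform; this standalone helper shows just that
# arithmetic (the name _scale_to_wave is an illustration, not used elsewhere).
def _scale_to_wave(t, min_value, max_value, xpixel=202):
    factor = float(xpixel) / (max_value - min_value)   # pixels per degree
    return int(round((t - min_value) * factor))        # y value passed to the "add" command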
def writefermwave(ser, fermid=1, erase=False, frewrite=False, dubbleline=True):
unit = set_nextion_unit()
# Current Temperature in text field
cfermtemp = currentfermtemp(fermid)
TextDigitTxt0 = ("%6.2f%s" % (cfermtemp, ("°"+str(unit))))
textCurrTemp0 = str(TextDigitTxt0)
NextionwriteString(ser, "CurFermTmpTxt", textCurrTemp0)
# Target Temp in Text Field
tfermtemp = targetfermtemp(fermid)
TextDigitTxt1 = ("%6.2f%s" % (tfermtemp, ("°"+str(unit))))
textCurrTemp1 = str(TextDigitTxt1)
NextionwriteString(ser, "targFermTmpTxt", textCurrTemp1)
# Current Kettlename and beername in text field
fermn = ferm_name(fermid)
fbeername = ferm_beername(fermid)
displayfername = ("%s, %s" % (fermn, fbeername))
NextionwriteString(ser, "FermNameTxt", displayfername)
# rest name
fstepname = fermstepname(fermid)
NextionwriteString(ser, "FermStepName", fstepname)
# remaining time of step
fermtime_remaining(ser, fermid)
# build liste of current temp values
if erase is True:
del FERMLISTE[:]
    elif len(FERMLISTE) < 406:  # the width of the wave object on Nextion; if you change the wave width this has to be
        # adjusted
FERMLISTE.append(cfermtemp)
else:
del FERMLISTE[0]
FERMLISTE.append(cfermtemp)
if DEBUG: cbpi.app.logger.info('NextionDisplay - fermTempListe len(liste):%s' % (len(FERMLISTE)))
# build liste of current targettemp values len(listetarget) can be different to len(liste)
if erase is True:
del FERMLISTETARGET[:]
    if len(FERMLISTETARGET) < 406:  # the width of the wave object on
        # Nextion; if you change the wave width this has to be adjusted
FERMLISTETARGET.append(tfermtemp)
else:
del FERMLISTETARGET[0]
FERMLISTETARGET.append(tfermtemp)
# if DEBUG: cbpi.app.logger.info('NextionDisplay - targetListe len(FERMLISTETARGET):%s' % (len(FERMLISTETARGET)))
# min max labels for scale
fmax_value = round((max(FERMLISTE)+0.1), 1)
fmin_value = round((min(FERMLISTE)-0.1), 1)
NextionwriteString(ser, "tfermmax", "%s%s" % (fmax_value, ("°"+str(unit))))
NextionwriteString(ser, "tfermmin", "%s%s" % (fmin_value, ("°"+str(unit))))
NextionwriteString(ser, "tfermavarage", "%s%s" % (round(((fmax_value+fmin_value)/2), 1), ("°"+str(unit))))
# get the scaling-factor
offset = (fmax_value - fmin_value)
xpixel = 202 # the height of the wave object on Nextion if you change the wave height this has to be adjusted
ffactor = (xpixel / offset)
global fmin_value_old
global fmax_value_old
if fmax_value != fmax_value_old or fmin_value != fmin_value_old or frewrite is True:
if DEBUG: cbpi.app.logger.info('NextionDisplay - fermenter rewrite')
NextionwriteClear(ser, 5, 0) # BrewTemp
NextionwriteClear(ser, 5, 1) # BrewTemp bold line
NextionwriteClear(ser, 5, 2) # TargetTemp
NextionwriteClear(ser, 5, 3) # TargetTemp bold line
i = 0
Nextion_ref_wave(ser, "ref_stop")
while i < len(FERMLISTE):
# if DEBUG: cbpi.app.logger.info('NextionDisplay - FERMLISTE:%s' % (FERMLISTE[i]))
digit = (round(float((FERMLISTE[i] - fmin_value) * ffactor), 2))
digit2 = digit + 1
string = (str(round(float(digit)))[:-2])
string2 = (str(round(float(digit2)))[:-2])
NextionwriteWave(ser, 5, 0, string)
if dubbleline: NextionwriteWave(ser, 5, 1, string2) # try to adjust thickness of line
if DEBUG: cbpi.app.logger.info('NextionDisplay - dubbleline fermenter rewrite: %s' % dubbleline)
# targettemp
# if DEBUG: cbpi.app.logger.info('NextionDisplay - FERMLISTETARGET:%s' % (FERMLISTETARGET[i]))
target = (round(float((FERMLISTETARGET[i] - fmin_value) * ffactor), 2))
targetbold = target + 1
tstring = (str(round(float(target)))[:-2])
tstringbold = (str(round(float(targetbold)))[:-2])
if 0 < target < xpixel: # do not write target line if not in temp/screen range
NextionwriteWave(ser, 5, 2, tstring)
if dubbleline: NextionwriteWave(ser, 5, 3, tstringbold) # try to adjust thickness of line
if DEBUG: cbpi.app.logger.info(
'NextionDisplay - fermlistetarget[i], target, tstring: %s, %s, %s' % (FERMLISTETARGET[i], target, tstring))
pass
if DEBUG: cbpi.app.logger.info('NextionDisplay - FERMLISTE(i), digit, string: %s, %s, %s' % (FERMLISTE[i], digit, string))
i += 1
Nextion_ref_wave(ser, "ref_star")
else:
digit = (round(float((cfermtemp - fmin_value) * ffactor), 2))
digitbold = digit + 1
if digit != 0: # prevent digit from becoming zero
string = (str(round(float(digit)))[:-2])
stringbold = (str(round(float(digitbold)))[:-2])
NextionwriteWave(ser, 5, 0, string)
if dubbleline: NextionwriteWave(ser, 5, 1, stringbold)
pass
if DEBUG: cbpi.app.logger.info('NextionDisplay - currentfermtemp, digit, string: %s, %s, %s' % (cfermtemp, digit, string))
# target Temp
target = (round(float((tfermtemp - fmin_value) * ffactor), 2))
targetbold = target + 1
tstring = (str(round(float(target)))[:-2])
tstringbold = (str(round(float(targetbold)))[:-2])
if 0 < target < xpixel: # do not write target line if not in temp/ screen range
NextionwriteWave(ser, 5, 2, tstring)
if dubbleline: NextionwriteWave(ser, 5, 3, tstringbold)
if DEBUG: cbpi.app.logger.info(
'NextionDisplay - targettemp, target, tstring: %s, %s, %s' % (tfermtemp, target, tstring))
else:
pass
pass
# if DEBUG: cbpi.app.logger.info('NextionDisplay - max and min value: %s, %s' % (max_value, min_value))
global fmax_value_old
fmax_value_old = fmax_value
global fmin_value_old
fmin_value_old = fmin_value
return None
def cbidecode(string):
# if DEBUG: cbpi.app.logger.info('NextionDisplay - string:%s' % string)
udata=string.encode("utf-8")
asciidata=udata.decode("utf-8", "ignore")
# if DEBUG: cbpi.app.logger.info('NextionDisplay - encoded_str:%s' % (asciidata))
return asciidata.encode("utf-8")
def time_remaining(ser):
s = cbpi.cache.get("active_step")
try:
if s.timer_end is not None:
time_remain = time.strftime("%H:%M:%S", time.gmtime(s.timer_end - time.time()))
if DEBUG: cbpi.app.logger.info('NextionDisplay - time_remaining:%s %s %s' % (s.timer_end, time.time(), s.timer))
NextionwriteString(ser, "remBrewTime", time_remain)
else:
NextionwriteString(ser, "remBrewTime", "")
pass
except:
NextionwriteString(ser, "remBrewTime", "")
pass
def time_remaining1(ser):
while True:
s = cbpi.cache.get("active_step")
try:
if s.timer_end is not None:
time_remain = time.strftime("%H:%M:%S", time.gmtime(s.timer_end - time.time()))
if DEBUG: cbpi.app.logger.info('NextionDisplay - time_remaining:%s %s %s' % (s.timer_end, time.time(), s.timer))
NextionwriteString(ser, "remBrewTime", time_remain)
else:
NextionwriteString(ser, "remBrewTime", "")
pass
except:
NextionwriteString(ser, "remBrewTime", "")
pass
pass
# todo: seems not to be used
def fermtime_remaining(ser, fermid):
try:
if cbpi.cache.get("fermenter_task")[(int(fermid))].timer_start is not None:
ftimeremain = (cbpi.cache.get("fermenter_task").get(fermid).timer_start - time.time())
ftimeremain = interval(ftimeremain)
# if DEBUG: cbpi.app.logger.info('NextionDisplay - fermstepname:%s' % ftimeremain)
NextionwriteString(ser, "remainfer", ftimeremain)
else:
NextionwriteString(ser, "remainfer", "")
pass
except:
NextionwriteString(ser, "remainfer", "")
def set_nextion_unit():
try:
nx_unit = cbpi.get_config_parameter("unit", None)
return nx_unit
except:
pass
pass
def fermstepname(fermid):
if is_fermenter_step_running() == "active":
try:
fstepname = cbpi.cache.get("fermenter_task")[(int(fermid))].name
fstepname = cbidecode(fstepname)
if DEBUG: cbpi.app.logger.info('NextionDisplay - fermstepname:%s' % fstepname)
return fstepname
except:
            if DEBUG: cbpi.app.logger.info('NextionDisplay - fermstepname not running?')
return "error"
pass
else:
return "no active ferm step"
def is_fermenter_step_running():
for key, value2 in cbpi.cache["fermenter_task"].items():
if value2.state == "A":
return "active"
else:
pass
def currentfermtemp(fermid):
# read the current temperature of fermenter with fermenter ID from parameters
current_sensor_value_ferm = (cbpi.get_sensor_value(int(cbpi.cache.get("fermenter").get(fermid).sensor)))
if DEBUG: cbpi.app.logger.info('NextionDisplay - currentfermtemp.txt:%s' % current_sensor_value_ferm)
return current_sensor_value_ferm
def targetfermtemp(fermid):
# cbpi.app.logger.info("NEXTIONDisplay - Target Temp detect")
current_sensor_value_temptargid = cbpi.cache.get("fermenter")[(int(fermid))].target_temp
# targfermTemp = ("%6.2f" % (float(current_sensor_value_temptargid)))
if DEBUG: cbpi.app.logger.info("NEXTIONDisplay - TargfermTemp: %s" % current_sensor_value_temptargid)
return current_sensor_value_temptargid
def currenttemp_float(kettleID):
temp = float(Temp(kettleID))
return temp
def targettemp_float(kettleID):
targettemp = float(TempTargTemp(kettleID))
return targettemp
def heater_status(kettleid):
heater_of_kettle = int(cbpi.cache.get("kettle").get(kettleid).heater)
heater_status_of_kettle = int(cbpi.cache.get("actors").get(heater_of_kettle).state)
if heater_status_of_kettle == 1:
return "on"
else:
return "off"
pass
def ferm_name(fermid):
try:
fname = ('%s' % cbpi.cache.get("fermenter").get(fermid).name)
fname = cbidecode(fname)
# if DEBUG: cbpi.app.logger.info('NextionDisplay - ferm_name:%s' % fname)
return fname
except:
return "not found"
pass
def ferm_beername(fermid):
try:
beername = cbpi.cache.get("fermenter").get(fermid).brewname
beername = cbidecode(beername)
# if DEBUG: cbpi.app.logger.info('NextionDisplay - ferm_beername:%s' % beername)
return beername
except:
return "not found"
pass
def kettlename(kettleID):
brewkettlename = ('%s' % cbpi.cache.get("kettle").get(int(kettleID)).name)
# if DEBUG: cbpi.app.logger.info('NextionDisplay - KettleNameTxt.txt:%s' % (brewkettlename))
brewkettlename = cbidecode(brewkettlename)
# if DEBUG: cbpi.app.logger.info('NextionDisplay - decodeKettleNameTxt.txt:%s' % (brewkettlename))
return brewkettlename
def restname():
s = cbpi.cache.get("active_step")
if s is not None:
try:
brewrestname = s.name
# if DEBUG: cbpi.app.logger.info('NextionDisplay - brewrestname:%s' % (brewrestname))
brewrestname = cbidecode(brewrestname)
return brewrestname
except:
pass
return "restart not detected"
else:
return "no active rest"
pass
def Temp(kkid):
# if DEBUG: cbpi.app.logger.info("NEXTIONDisplay - Temp detect")
current_sensor_value_id3 = (cbpi.get_sensor_value(int(cbpi.cache.get("kettle").get(int(kkid)).sensor)))
curTemp = ("%6.2f" % (float(current_sensor_value_id3)))
# if DEBUG: cbpi.app.logger.info("NEXTIONDisplay - Temp: %s" % (curTemp))
return curTemp
def TempTargTemp(temptargid):
# if DEBUG: cbpi.app.logger.info("NEXTIONDisplay - Target Temp detect")
current_sensor_value_temptargid = cbpi.cache.get("kettle")[(int(temptargid))].target_temp
targTemp = ("%6.2f" % (float(current_sensor_value_temptargid)))
# if DEBUG: cbpi.app.logger.info("NEXTIONDisplay - TargTemp: %s" % (targTemp))
return targTemp
def get_ip(interface):
ip_addr = "Not connected"
so = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
ip_addr = socket.inet_ntoa(fcntl.ioctl(so.fileno(), 0x8915, struct.pack('256s', interface[:15]))[20:24])
    except Exception:
        pass
return ip_addr
def set_ip():
if get_ip('wlan0') != 'Not connected':
ip = get_ip('wlan0')
elif get_ip('eth0') != 'Not connected':
ip = get_ip('eth0')
elif get_ip('enxb827eb488a6e') != 'Not connected':
ip = get_ip('enxb827eb488a6e')
else:
ip = 'Not connected'
pass
return ip
def get_version_fo(path):
version = ""
try:
        if path != "":
fo = open(path, "r")
else:
fo = open("/home/pi/craftbeerpi3/config/version.yaml", "r")
version = fo.read()
fo.close()
finally:
return version
def interval(seconds):
"""
gives back interval as tuple
@return: (weeks, days, hours, minutes, seconds)
formats string for fermtime_remaining
returns the formatted string for text-field
"""
WEEK = 60 * 60 * 24 * 7
DAY = 60 * 60 * 24
HOUR = 60 * 60
MINUTE = 60
weeks = seconds // WEEK
seconds = seconds % WEEK
days = seconds // DAY
seconds = seconds % DAY
hours = seconds // HOUR
seconds = seconds % HOUR
minutes = seconds // MINUTE
seconds = seconds % MINUTE
if weeks >= 1:
remaining_time = ("Week:%d Days:%d Hours:%02d:%02d" % (int(weeks), int(days), int(hours), int(minutes)))
return remaining_time
elif weeks == 0 and days >= 1:
remaining_time = ("Days:%d Hours:%02d:%02d:%02d" % (int(days), int(hours), int(minutes), int(seconds)))
return remaining_time
elif weeks == 0 and days == 0:
remaining_time = ("Hours:%02d:%02d:%02d" % (int(hours), int(minutes), int(seconds)))
return remaining_time
else:
pass
pass
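# Worked example of interval() above (hypothetical input, shown for illustration only):
#   interval(90061) -> "Days:1 Hours:01:01:01"   (1 day, 1 hour, 1 minute, 1 second)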
def set_parameter_kettleID():
kettleid = cbpi.get_config_parameter("NEXTION_Kettle_ID", None)
if kettleid is None:
kettleid = 1
cbpi.add_config_parameter("NEXTION_Kettle_ID", 1, "kettle", "Select kettle (Number), NO! CBPi reboot required")
cbpi.app.logger.info("NEXTIONDisplay - KettleID added: %s" % kettleid)
return kettleid
def set_parameter_fermID():
fermid = cbpi.get_config_parameter("NEXTION_Fermenter_ID", None)
if fermid is None:
fermid = 1
cbpi.add_config_parameter("NEXTION_Fermenter_ID", 1, "number", "Select Fermenter (Number), NO! CBPi reboot required")
cbpi.app.logger.info("NEXTIONDisplay - FermenterID added: %s" % fermid)
return fermid
def set_serial_port():
port = cbpi.get_config_parameter("NEXTION_Serial_Port", None)
if port is None:
port = "/dev/ttyUSB0"
cbpi.add_config_parameter("NEXTION_Serial_Port", "/dev/ttyUSB0", "string",
"Select the Serial Port, Windows like COM1, Linux like dev/ttyS0,"
" /dev/ttyAM0, /dev/ttyUSB0, etc.")
cbpi.app.logger.info("TFTDisplay - NEXTION_Serial_Port added: %s" % port)
return port
def set_parameter_dubbleline(ser):
dubbleline = cbpi.get_config_parameter("NEXTION_bold_line", None)
if dubbleline is None:
dubbleline = "on"
cbpi.add_config_parameter("NEXTION_bold_line", "on", "select", "Select if chart is drawn in bold line or not, "
"CBPi reboot recommended", ["on", "off"])
cbpi.app.logger.info("TFTDisplay - NEXTION_bold_line added: %s" % dubbleline)
if dubbleline == "on":
return True
else:
NextionwriteClear(ser, 1, 1) # BrewTemp try to adjust thickness of line # brew
NextionwriteClear(ser, 1, 3) # BrewTemp try to adjust thickness of line # brew target
NextionwriteClear(ser, 5, 1) # BrewTemp try to adjust thickness of line # ferment
NextionwriteClear(ser, 5, 3) # BrewTemp try to adjust thickness of line # ferment target
return False
pass
def set_time(ser):
look_time = 1 # in seconds
while True:
timestr = ((strftime("%Y-%m-%d %H:%M:%S", time.localtime())).ljust(20))
NextionwriteString(ser, "t3start", timestr)
# if DEBUG: cbpi.app.logger.info("NextionDisplay - thread set_time " + timestr)
sleep(look_time) # showing time only every second <look_time>
def detect_touch(ser, kettleID, dubbleline):
look_touch = 1 # in seconds
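    # Touch frames sent by the display are assumed to follow the Nextion return-data format:
    # 0x65 <page id> <component id> <event>, terminated by 0xFF 0xFF 0xFF, which is what the
    # byte-by-byte decoding below relies on.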
while True:
touch = ser.read_until(TERMINATOR)
if len(touch) != 0:
istouch = hex(ord(touch[0]))
if istouch == "0x65":
cbpi.app.logger.info("NextionDisplay - touch: A button has been pushed %s" % istouch)
pageID_touch = hex(ord(touch[1]))
compID_touch = hex(ord(touch[2]))
event_touch = hex(ord(touch[3]))
cbpi.app.logger.info("NextionDisplay - page:%s, component:%s, event:%s" % (pageID_touch, compID_touch, event_touch))
# if pageID_touch == "0x1" and compID_touch == "0x10":
if (pageID_touch == "0x1" or pageID_touch == "0x5") and compID_touch == "0x5":
cbpi.app.logger.info("NextionDisplay - touch: Clearbutton of Brewpage pushed")
writewave(ser, kettleID, erase=True, dubbleline=dubbleline)
elif pageID_touch == "0x0" and compID_touch == "0x3":
cbpi.app.logger.info("NextionDisplay - touch: Brewpage button pushed")
if DEBUG: cbpi.app.logger.info("NextionDisplay - touch: dubbleline = %s" % dubbleline)
writewave(ser, kettleID, erase=False, rewrite=True, dubbleline=dubbleline)
elif (pageID_touch == "0x3" and compID_touch == "0x3") or (pageID_touch == "0x6" and compID_touch == "0x4"):
cbpi.app.logger.info("NextionDisplay - touch: Clearbutton of Fermpage pushed")
writefermwave(ser, erase=True)
elif pageID_touch == "0x0" and compID_touch == "0x5":
cbpi.app.logger.info("NextionDisplay - touch: Fermpage button pushed")
writefermwave(ser, erase=False, frewrite=True)
else:
pass
sleep(look_touch) # timeout the bigger the larger the chance of missing a push
@cbpi.initalizer(order=3150)
def initNextion(app):
port = set_serial_port()
time.sleep(3)
ser = serial.Serial(
port=port,
baudrate=38400,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=0.1
)
ser.reset_output_buffer()
try:
cbpi.app.logger.info("NEXTIONDisplay - NEXTION_KetteID: %s" % set_parameter_kettleID())
cbpi.app.logger.info("NEXTIONDisplay - NEXTION_Serial Port: %s" % port)
cbpi.app.logger.info("NEXTIONDisplay - NEXTION_Fermenter_ID: %s" % set_parameter_fermID())
cbpi.app.logger.info("NEXTIONDisplay - NEXTION_bold_line: %s" % set_parameter_dubbleline(ser))
except:
pass
# nx_setsys(ser, 'bauds', 38400) # already set in display
# nx_setsys(ser, 'bkcmd', 0) # already set in display
# Time as thread
t_timethread = threading.Thread(target=set_time, name='Time Display', args=(ser,))
t_timethread.start()
cbpi.app.logger.info("NEXTIONDisplay - NEXTION init passed")
# end of init
@cbpi.backgroundtask(key="Nextionjob", interval=6) # 4 = 27 min, 5 = 33.8 min, 6 = 40.6 min
def Nextionjob(api):
# This is the main job
kettleID = int(set_parameter_kettleID())
fermid = int(set_parameter_fermID())
dubbleline = set_parameter_dubbleline(ser)
ip = set_ip()
cbpi_version = "CBPi %s" % (get_version_fo(""))
        # for some reason the first written value gets dropped, so this first write is just a fake and does nothing
NextionwriteString(ser, "t1startfake", cbpi_version)
NextionwriteString(ser, "t1start", cbpi_version)
iptext = "IP: %s" % ip
NextionwriteString(ser, "t2start", iptext)
writing_multi_to_nextion(ser)
writewave(ser, kettleID, dubbleline=dubbleline)
writingDigittoNextion(ser, kettleID)
writefermwave(ser, fermid, dubbleline=dubbleline)
# THREAD - DETECT push buttons
threadnames = threading.enumerate()
# if DEBUG: cbpi.app.logger.info("NextionDisplay - names current thread %s" % threadnames)
threadnames = str(threadnames)
if "<Thread(read serial," in threadnames:
# if DEBUG: cbpi.app.logger.info("NextionDisplay - thread read serial detected")
pass
else:
t_serialread = threading.Thread(target=detect_touch, name='read serial', args=(ser, kettleID, dubbleline))
t_serialread.start()
if DEBUG: cbpi.app.logger.info("NextionDisplay - threads Thread started")
pass
|
parallel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2019 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Parallel """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import threading
import functools
import collections
def foreach(source, action, n_jobs=0):
"""
Parallel for
If n_jobs == 0, there is no limit to the number of jobs, i.e. a thread is
launched for each item in `source`.
If n_jobs == 1, this is equivalent to:
>>> for i in source:
>>> action(i)
"""
if not n_jobs:
_foreach_no_limit(source, action)
elif n_jobs == 1:
for item in source:
action(item)
else:
_foreach_with_limit(source, action, n_jobs)
def _foreach_worker(q, action):
""" Foreach Worker """
while True:
try:
item = q.popleft()
except IndexError:
break
action(item)
def _foreach_with_limit(source, action, n_jobs):
""" Foreach With Limit """
assert n_jobs >= 0
q = collections.deque()
for item in source:
q.append(item)
threads = [threading.Thread(target=functools.partial(_foreach_worker, q, action))
for _ in range(n_jobs)]
for t in threads:
t.start()
for t in threads:
t.join()
def _foreach_no_limit(source, action):
""" Foreach No Limit """
threads = [
threading.Thread(target=functools.partial(action, item))
for item in source
]
for t in threads:
t.start()
for t in threads:
t.join()
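# A minimal usage sketch (not part of the original module; the names below are illustrative):
# square numbers concurrently with at most 4 worker threads and collect the results.
if __name__ == "__main__":
    results = []
    lock = threading.Lock()

    def collect_square(x):
        with lock:  # the results list is shared between worker threads
            results.append(x * x)

    foreach(range(10), collect_square, n_jobs=4)
    print(sorted(results))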
|
FTP_photogrammetry_Client.py
|
'''
*
* FTP Client to pull images from FTP server
* hosted on RPi for 3D image reconstruction.
*
* AUTHOR : Mohammad Odeh
* DATE WRITTEN : Aug. 10th, 2018 Year of Our Lord
* LAST CONTRIBUTION DATE : Aug. 14th, 2018 Year of Our Lord
*
'''
# Import modules
from time import sleep # Add delays and wait times
from threading import Thread # Use threads to free up main()
from subprocess import Popen # Run batch script from within Python
from ftplib import FTP # For file transfer
import paho.mqtt.client as mqtt # For general communications
# ************************************************************************
# ============================> DEFINE CLASS <============================
# ************************************************************************
class FTP_photogrammetery_Client( object ):
def __init__( self, MQTT_broker_ip, username, password ):
'''
Initialize class
'''
self.MQTT_topics = { "IP_addr": "ftp/IP_addr", # For IP address communications
"status" : "ftp/status" , # For handshakes communications
"images" : "ftp/images" , # For image name communications
"general": "ftp/general" } # For things that are not in any previous category
self.FTP_server_ip = None # FTP IP address placeholder
self.MQTT_client_setup( MQTT_broker_ip ) # Setup MQTT client
self.USER, self.PASS = username, password # FTP Username and Password
self.run() # Run program
# ------------------------------------------------------------------------
def MQTT_client_setup( self, addr ):
'''
Setup MQTT client
'''
        # Error handling in case MQTT communication setup fails (1/2)
try:
self.client = mqtt.Client( client_id="Client", # Initialize MQTT client object
clean_session=True ) # ...
self.client.max_inflight_messages_set( 60 ) # Max number of messages that can be part of network flow at once
self.client.max_queued_messages_set( 0 ) # Size 0 == unlimited
self.client.will_set( self.MQTT_topics[ "status" ], # "Last Will" message. Sent when connection is
"CONERR_CLNT", qos=1, retain=False ) # ...lost (aka, disconnect was not called)
self.client.reconnect_delay_set( min_delay=1, # Min/max wait time in case of reconnection
max_delay=2 ) # ...
self.client.on_connect = self.on_connect # Assign callback functions
self.client.on_message = self.on_message # ...
self.client.connect( addr, port=1883, keepalive=60 ) # Connect to MQTT network
self.t_client_loop=Thread(target=self.client_loop, args=()) # Start threaded MQTT data processing loop()
            self.t_client_loop.daemon = True                            # Allow program to shutdown even if thread is running
self.t_client_loop.start() # ...
sleep( 0.5 ) # Allow some time for connection to be established
        # Error handling in case MQTT communication setup fails (2/2)
except Exception as e:
print( "Could NOT setup MQTT communications" ) # Indicate error type and arguments
print( "Error Type : {}".format(type(e))) # ...
print( "Error Arguments : {}".format(e.args) ) # ...
sleep( 1.0 )
quit() # Shutdown entire program
# ------------------------------------------------------------------------
def on_connect( self, client, userdata, flags, rc ):
'''
Callback function for when connection is established/attempted.
Prints connection status and subscribes to ftp/# topic on
successful connection.
'''
if ( rc == 0 ): # Upon successful connection
print( "MQTT Connection Successful" ) # Subscribe to topic of choice
self.client.subscribe( "ftp/#", qos=1 ) # ...
elif( rc == 1 ): # Otherwise if connection failed
print( "Connection Refused - Incorrect Protocol Version" ) # Troubleshoot
elif( rc == 2 ): # Same ^
print( "Connection Refused - Invalid Client Identifier" ) # ...
elif( rc == 3 ):
print( "Connection Refused - Server Unavailable" )
elif( rc == 4 ):
print( "Connection Refused - Bad Username or Password" )
elif( rc == 5 ):
print( "Connection Refused - Not Authorized" )
else:
print( "Troubleshoot RPi - Result Code {}".format(rc) )
print( "Terminating Program" )
quit()
# ------------------------------------------------------------------------
def on_message( self, client, userdata, msg ):
'''
Callback function for when a message is received.
'''
if( msg.topic == self.MQTT_topics[ "IP_addr" ] ): # If we receive something on the IP topic
inData = msg.payload.decode( "utf-8" ) # Decode payload
if( len(inData) < 4 ): pass # Check it is a valid IP address
else: # If IP address is valid
self.FTP_server_ip = inData # Store IP address
self.client.publish( self.MQTT_topics[ "status" ], # Send SOH to indicate that we are ready
"SOH", qos=1, retain=False ) # ...
print( "Using IP: {}\n".format(self.FTP_server_ip) ) # [INFO] ...
elif( msg.topic == self.MQTT_topics[ "images" ] ): # If we receive something on the images topic
img_name = msg.payload.decode( "utf-8" ) # Decode image name
if( img_name == '' ): pass # If empty string (used to clear retained messages), pass
else: self.get_file( img_name ) # Else, retrieve it from FTP folder on server
elif( msg.topic == self.MQTT_topics[ "status" ] ): # If we receive something on the status topic
status = msg.payload.decode( "utf-8" ) # Decode it and determine next action
if( status == "EOT" ): # If end of transmission is indicated
print( "Disconnectiong MQTT" ) , # [INFO] ...
self.client.publish( self.MQTT_topics[ "status" ], # Send EOT to inform server to
"EOT", qos=1, retain=False ) # ...shuwtdown MQTT client as
self.loop = False # Set loop flag to FALSE
sleep( 0.10 ) # Allow time for state of flag to change
self.client.disconnect() # Disconnect MQTT client
print( "...DONE!" ) # [INFO] ...
else : pass
else: pass
# ------------------------------------------------------------------------
def client_loop( self ):
'''
        A simple, basic workaround for the MQTT library's threaded
        implementation of loop(), which did not work reliably here.
'''
self.loop = True # Boolean loop flag
        while( self.loop ):                                             # Loop forever while loop flag is TRUE
            self.client.loop( timeout=1.0 )                             # Poll message queue for new data
# ------------------------------------------------------------------------
def get_file( self, file_name ):
'''
Get file from FTP directory
INPUT:
- file_name: Name of file we want to get
'''
print( "Retrieving {}".format(file_name) ) , # [INFO] ...
localfile = r".\imgs\{}".format(file_name) # Specify image storage path
with open( localfile, 'wb' ) as f: # Open file for writing
self.ftp.retrbinary( "RETR "+file_name, # Retrieve file from FTP directory and copy
f.write, 2048 ) # contents to localfile at 2048 bytes chunks
print( "...DONE!" ) # [INFO] ...
# ------------------------------------------------------------------------
def run( self ):
'''
Main thread
'''
sleep( 0.5 ) # Sleep for stability
print( "Client Initialized" ) # [INFO] ...
print( "Waiting for FTP IP address" ) # [INFO] ...
cntr = 0 # Counter for displaying "waiting" dots
        while( self.FTP_server_ip is None ):                            # Wait until we get a valid IP address
sleep( 1 ) # ...
print( '.' ) , # ...
cntr += 1 # ...
if( cntr == 15 ): # If we already printed 15 dots
cntr = 0 # Reset counter
print( '' ) # Start a new line
        if( cntr != 0 ): print( '' )                                    # Start a new line
print( "Client Ready\n" ) # [INFO] ...
self.ftp = FTP( self.FTP_server_ip ) # Connect to host using default port
        self.ftp.login( self.USER, self.PASS )                          # Login as a known user (NOT anonymous user)
## self.ftp.cwd( "/home/pi/FTP/" ) # Change current working directory to FTP directory (Raspbian)
self.ftp.cwd( "./Pictures/" ) # Change current working directory to FTP directory (DietPi)
## self.ftp.retrlines( "LIST" ) # List contents of FTP directory (make sure things are working)
while( self.loop ): # While we are still receiving data (images)
sleep( 0.1 ) # Stay in loop to waste time
print( "Running VisualSFM" ) , # [INFO] ...
p = Popen( [r".\main.bat"] ) # Call batch file with VisualSFM commands
stdout, stderr = p.communicate() # ...
print( "...DONE!" ) # {INFO] ...
# ************************************************************************
# ===========================> SETUP PROGRAM <===========================
# ************************************************************************
MQTT_IP_ADDRESS = "192.168.42.1" # IP address for MQTT broker
##FTP_USER, FTP_PASS = "pi", "raspberry" # FTP login credentials (Raspbian)
FTP_USER, FTP_PASS = "dietpi", "dietpi" # FTP login credentials (DietPi)
prog = FTP_photogrammetery_Client( MQTT_IP_ADDRESS, FTP_USER, FTP_PASS )# Start program
|
fifo.py
|
import threading
import time
mutex = threading.Semaphore(1)
procesos = [['A', 'B', 'C', 'D', 'E'], [3, 5, 2, 5, 5], [0, 1, 3, 9, 12], [0, 0, 0, 0, 0]]
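# Rows of `procesos`: process names, burst times, arrival times and (filled in during the
# run) waiting times. `resultados` apparently accumulates totals and averages for
# turnaround (T), waiting (E) and penalty (P), matching promedio/tiempo/penalizacion below.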
resultados = [['T', 'E', 'P'], [0, 0, 0], [0, 0, 0]]
tiempo_espera = procesos[2][0]
tiempo_total = 0
def proceso(proc_actual):
global procesos
actual = procesos[0][proc_actual]
print "Ah llegado el proceso %s" %actual
dura_proceso(proc_actual)
def dura_proceso(proc_actual):
global procesos
actual = procesos[0][proc_actual]
tiempo_eje = procesos[1][proc_actual]
mutex.acquire()
tiempo_de_espera(proc_actual)
print "Inicia ejecucion proceso %s" %actual
time.sleep(tiempo_eje)
print "Termina ejecuacion proceso %s" %actual
mutex.release()
def lanza_proceso(proc_actual):
global procesos
tiempo_actual = procesos[2][proc_actual]
time.sleep(tiempo_actual)
threading.Thread(target=proceso, args=[proc_actual]).start()
def tiempo_de_espera(proc_actual):
global tiempo_espera
global tiempo_total
tiempo_espera = tiempo_espera - procesos[2][proc_actual]
print "El proceso %s espero %d segundos" %(procesos[0][proc_actual], tiempo_espera)
procesos[3][proc_actual] = tiempo_espera
tiempo_espera = tiempo_espera + procesos[2][proc_actual]
tiempo_espera = tiempo_espera + procesos[1][proc_actual]
for i in range(len(procesos[0])):
threading.Thread(target=lanza_proceso, args=[i]).start()
time.sleep(40)
def promedio():
prom = 0
num_max = len(procesos[0])
for i in range (num_max):
prom = prom + procesos[3][i]
resultados[1][1] = prom
prom = (float(prom) / 5.0)
resultados[2][1] = prom
def tiempo():
prom = 0
num_max = len(procesos[0])
for i in range (num_max):
prom = prom + procesos[1][i]
resultados[1][0] = prom + resultados[1][1]
prom = prom + resultados[1][1]
prom = (float(prom) / 5.0)
resultados[2][0] = prom
def penalizacion():
prom = 0
num_max = len(procesos[0])
for i in range (num_max):
a = procesos[1][i] + procesos[3][i]
a = (float(a) / procesos[1][i])
prom = prom + a
resultados[1][2] = "{0:.2f}".format(prom)
prom = (float(prom) / 5.0)
resultados[2][2] = "{0:.2f}".format(prom)
promedio()
tiempo()
penalizacion()
print (resultados)
|
threads.py
|
# Sleeping doesn't block one complete process, it blocks one thread. By the POSIX/Linux/UNIX
# definitions a process (for instance one python program) may include multiple threads which all
# share the same memory, but behave like other processes would in all other ways.
# This means processes are more separated than threads are.
# Python's threading module includes a way to run a function on another thread.
from threading import Thread
import time
# Define our function to be run by the threads
def sleep_print(s):
time.sleep(s)
print(s)
threads = []
for i in range(0, 4):
# Create the thread. This thread will run target with args like target(args)
th = Thread(target=sleep_print, args=(i,))
# Actually start the thread
th.start()
# append the thread to the threads list
threads.append(th)
# Wait for all threads to finish
for th in threads:
th.join()
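# With the sleeps above, the expected output is 0, 1, 2 and 3 on separate lines, each
# printed roughly i seconds after start (an illustrative note; exact timing is not guaranteed).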
# TODO(side note about concurrency vs parallelism, thread types and threading models)
|
robotLibrary.py
|
# Copyright 2020 Jan Feitsma (Falcons)
# SPDX-License-Identifier: Apache-2.0
#!/usr/bin/env python3
#
# Python library for on-robot commands.
import sys, os, signal
import traceback
import logging
import readline
from time import sleep
import threading
try:
import EnvironmentField
except:
print("warning: EnvironmentField is not yet compatible with python3, joystick functionality won't work")
import robotControlInterface
# globals
logging.basicConfig(level=logging.INFO)
def printHelp():
print(helpText())
def helpText():
return """
most commands are blocking, which means you cannot type anything until they have finished
(TODO: implement ctrl-C to interrupt such blocking commands and return to the prompt)
COMMANDS:
h / help / ? : show this help
q / quit / shutdown : exit program
ctrl-C : (TODO) interrupt running command
stop : stop whatever robot is doing
bh on|off : enable/disable ball handlers
velocity vx vy vphi s : give a relative velocity setpoint (vx,vy,vphi) for s seconds
move x y phi : move robot to (x,y,phi) using motionPlanning
getBall : get the ball
kick power height : (TODO) kick ball with given power (30..180) and optional height (0..180)
pass x y : pass to (x,y)
shoot x y [z] : straight shot at (x,y,z)
lob x y [z] : lob shot at (x,y,z)
keeper : start acting as keeper
behavior name : perform a behavior
"""
class RobotLibrary():
def __init__(self, robotId, joystick=True):
"""
Setup the robot interface.
"""
self._robotId = robotId
self._rci = robotControlInterface.RobotControlInterface(robotId)
self._rci.connect()
self._shutdown = False
# Initialize joystickThread
# daemon=True means thread is killed during shutdown of main thread, and not block main thread from shutdown
if joystick:
self._joystickThreadIsAlive = True
self._joystickThread = threading.Thread(target=self._joystickThreadExecutor)
self._joystickThread.setDaemon(True)
self._joystickThread.start()
else:
self._joystickThreadIsAlive = False
self._joystickThread = None
self.initializeLogger()
def initializeLogger(self):
"""
Setup the logging environment
"""
log = logging.getLogger() # root logger
log.setLevel(logging.INFO)
format_str = '%(asctime)s.%(msecs)03d - %(levelname)-8s - %(message)s'
date_format = '%Y-%m-%dT%H:%M:%S'
formatter = logging.Formatter(format_str, date_format)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
log.handlers = [] # clear
log.addHandler(stream_handler)
return logging.getLogger(__name__)
def shutdown(self):
"""
Cleanup such that robot is in a good state to play (refbox).
"""
self._joystickThreadIsAlive = False
# make all threads stop, see e.g. ballMonitor
self._shutdown = True
sleep(0.1)
self._rci.disconnect()
def prompt(self):
"""
Provide a prompt to users, so they can enter commands.
"""
logging.info("(enter h for help)")
while True:
try:
# TODO: prepend with timestamp, nicely align with logging?
s = input("enter command: ")
except:
logging.error("error while reading input")
break
if s in ["q", "quit", "shutdown"]:
break
try:
self.parse(s)
except:
logging.warning("something went wrong while evaluating '%s'" % (s))
traceback.print_exc(file=sys.stdout)
logging.info("shutting down ...")
self.shutdown()
def stop(self):
self._rci.setMotionPlanningAction("STOP", 0.0, 0.0, 0.0, "NORMAL", False)
self._rci.clearStimulation()
self._rci.setRobotVelocity(0.0, 0.0, 0.0)
def enableBallHandlers(self):
self._rci.setBallHandlers(True)
def disableBallHandlers(self):
self._rci.setBallHandlers(False)
def velocitySetpoint(self, vx, vy, vphi, duration):
self._rci.setRobotPosVel("VEL_ONLY", 0.0, 0.0, 0.0, vx, vy, vphi, "NORMAL")
# next line was old style, before VC was split out of PP, so no acc limiter:
#self._rci.setRobotVelocity(0.0, 0.0, 0.0)
sleep(duration)
self.stop()
def move(self, x, y, phi, motionType="NORMAL"):
# TODO: is there still a use case to overrule (pp) tolerance limits?
# pathPlanning tolerances might be configured a bit too tight for smooth testing
bh = self._rci.getBallHandlersEnabled() # maintain current state
self._rci.setMotionPlanningAction("MOVE", x, y, phi, motionType, bh)
self._rci.blockUntilTPOverridePassedOrFailed()
self.stop()
def getBall(self):
self._rci.setTeamplayAction("GET_BALL", {})
self._rci.blockUntilTPOverridePassedOrFailed()
self.stop()
def interceptBall(self):
self._rci.setTeamplayAction("INTERCEPT_BALL", {})
self._rci.blockUntilTPOverridePassedOrFailed()
self.stop()
def passTo(self, x, y):
self._rci.setMotionPlanningAction("PASS", x, y, 0.0, "NORMAL", True)
self._rci.blockUntilTPOverridePassedOrFailed()
self.stop()
def shootAt(self, x, y, z):
self._rci.setMotionPlanningAction("SHOOT", x, y, z, "NORMAL", True)
self._rci.blockUntilTPOverridePassedOrFailed()
self.stop()
def lobShotAt(self, x, y, z):
self._rci.setMotionPlanningAction("LOB", x, y, z, "NORMAL", True)
self._rci.blockUntilTPOverridePassedOrFailed()
self.stop()
def kick(self, power, height=0.0):
self._rci.setMotionPlanningAction("KICK", power, height, 0.0, "NORMAL", True)
self._rci.blockUntilTPOverridePassedOrFailed()
self.stop()
def keeper(self):
self.behavior("B_GOALKEEPER")
def behavior(self, behaviorName, params={}):
        self._rci.setTeamplayBehavior(behaviorName, params)
self._rci.blockUntilTPOverridePassedOrFailed()
self._rci.clearStimulation()
def parse(self, commandString):
"""
Parse given command string.
"""
logging.info("parsing command: '%s'" % (commandString))
# split string
words = commandString.split()
if len(words) == 0:
return
mode = words[0].lower()
args = words[1:]
# determine what to do, catch interrupts
try:
if mode in ['h', 'help', '?']:
printHelp()
elif mode in ['stop']:
self.stop()
elif mode in ['bh']:
if len(args) == 1:
if str(args[0]) == "on":
self.enableBallHandlers()
if str(args[0]) == "off":
self.disableBallHandlers()
elif mode in ['velocity']:
vx = float(args[0])
vy = float(args[1])
vphi = float(args[2])
duration = float(args[3])
self.velocitySetpoint(vx, vy, vphi, duration)
elif mode in ['move', 'target']:
x = float(args[0])
y = float(args[1])
# phi not required, default to zero
if len(args) > 2:
phi = float(args[2])
else:
phi = 0.0
self.move(x, y, phi)
elif mode in ['getball']:
if self._robotId == 1:
logging.info("skipping: r1 can not grab ball")
else:
self.getBall()
elif mode in ['pass']:
x = float(args[0])
y = float(args[1])
self.passTo(x, y)
elif mode in ['shoot']:
x = float(args[0])
y = float(args[1])
# z not required, default to zero
if len(args) > 2:
z = float(args[2])
else:
z = 0
self.shootAt(x, y, z)
elif mode in ['lob']:
x = float(args[0])
y = float(args[1])
# z not required, default to zero
if len(args) > 2:
z = float(args[2])
else:
z = 0
self.lobShotAt(x, y, z)
elif mode in ['kick']:
power = float(args[0])
height = 0
if len(args) > 1:
height = float(args[1])
self.kick(power, height)
elif mode in ['keeper']:
self.keeper()
elif mode in ['behavior']:
# allow parameters as key=value strings, for instance 'role=defenderMain'
params = {}
if len(args) > 1:
for s in args[1:]:
kv = s.split("=")
if len(kv) == 2:
params[kv[0]] = kv[1]
self.behavior(args[0], params)
elif mode in ['listscenarios']:
print("Loaded scenarios are:")
print(listScenarios())
print("\n To execute, type scenario <name>")
elif mode in ['scenario']:
# If no arguments given, prompt with a selection menu
if len(args) == 0:
# No argument given, prompt for the scenarios
scenarios = listScenarios()
scenarios.sort()
idx = 1
print("Scenarios available for execution:")
print("0: Cancel")
for scenario in scenarios:
print("%s: %s" % (idx, scenario))
idx += 1
userinput = input("Choose a scenario to run: ")
                    if userinput.isdigit():  # input() returns a string; accept only digits
if int(userinput) > 0:
runScenario( scenarios[int(userinput) - 1] ) # TODO Add support for arguments when prompting selection menu
robot.restoreAllConfigs()
else:
# relay to scenario selector/executor
runScenario(*args)
# After running a scenario, restore all dynamic_reconfigure configs
# We do this to reset the state of the robot after every scenario
robot.restoreAllConfigs()
else:
logging.error("invalid or not-yet-implemented mode '%s'" % (mode))
"""TODO
except KeyboardInterruptException:
# probably a blocking call was interrupted, ok to continue (if prompted)
logging.warning("--interrupted--")
return
"""
except:
logging.warning("something went wrong while evaluating '%s'" % (commandString))
traceback.print_exc(file=sys.stdout)
###################
# Joystick Thread #
###################
# This thread is always running
def _joystickThreadExecutor(self):
while self._joystickThreadIsAlive:
item = self._rci._robotControl._rtdb2Store.get(0, "JOYSTICK_CONTROL_" + str(self._robotId), timeout=False) # robotId = 0 = coach
if item != None and item.age() < 1.0:
self._rci.setBallHandlers(item.value[2])
self._rci.setRobotVelocity(item.value[1][0], item.value[1][1], item.value[1][2])
if item.value[4] > 0:
self._rci.setMotionPlanningAction("KICK", item.value[4], item.value[3], 0.0, "NORMAL", True)
self._rci.blockUntilTPOverridePassedOrFailed()
self._rci.clearStimulation()
action = item.value[5] # blocking actions
if action == "getBall":
self._rci.setTeamplayAction("GET_BALL", {})
self._rci.blockUntilTPOverridePassedOrFailed()
self._rci.clearStimulation()
elif action == "passToTeamMember":
# TODO
#passToTeamMember()
pass
elif action == "shootAtGoal":
goalPos = EnvironmentField.cEnvironmentField.getInstance().getFieldPOI( EnvironmentField.P_OPP_GOALLINE_CENTER )
self._rci.setMotionPlanningAction("SHOOT", goalPos.x, goalPos.y, 0.7, "NORMAL", True)
self._rci.blockUntilTPOverridePassedOrFailed()
self._rci.clearStimulation()
elif item != None and item.age() >= 1.0:
                # When the age > 1 second, clearStimulation and remove JOYSTICK_CONTROL so that on the next tick the joystick code is not triggered (item == None)
self._rci.clearStimulation()
self._rci._robotControl._rtdb2Store.get_and_clear(0, "JOYSTICK_CONTROL_" + str(self._robotId)) # robotId = 0 = coach
# Sleep to maintain 30Hz
sleep(1.0 / 30.0)
|
batcher.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications Copyright 2017 Abigail See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file contains code to process data into batches"""
import queue as Queue
from random import shuffle
from threading import Thread
import time
import numpy as np
import tensorflow as tf
import data
class Example(object):
"""Class representing a train/val/test example for text summarization."""
def __init__(self, article, abstract_sentences, vocab, hps):
"""Initializes the Example, performing tokenization and truncation to produce the encoder, decoder and target sequences, which are stored in self.
Args:
article: source text; a string. each token is separated by a single space.
abstract_sentences: list of strings, one per abstract sentence. In each sentence, each token is separated by a single space.
vocab: Vocabulary object
hps: hyperparameters
"""
self.hps = hps
# Get ids of special tokens
start_decoding = vocab.word2id(data.START_DECODING)
stop_decoding = vocab.word2id(data.STOP_DECODING)
# Process the article
article_words = article.split()
if len(article_words) > hps.max_enc_steps:
article_words = article_words[:hps.max_enc_steps]
self.enc_len = len(article_words) # store the length after truncation but before padding
self.enc_input = [vocab.word2id(w) for w in article_words] # list of word ids; OOVs are represented by the id for UNK token
# Process the abstract
abstract = ' '.join(abstract_sentences) # string
abstract_words = abstract.split() # list of strings
abs_ids = [vocab.word2id(w) for w in abstract_words] # list of word ids; OOVs are represented by the id for UNK token
# Get the decoder input sequence and target sequence
self.dec_input, self.target = self.get_dec_inp_targ_seqs(abs_ids, hps.max_dec_steps, start_decoding, stop_decoding)
self.dec_len = len(self.dec_input)
# If using pointer-generator mode, we need to store some extra info
if hps.pointer_gen:
# Store a version of the enc_input where in-article OOVs are represented by their temporary OOV id; also store the in-article OOVs words themselves
self.enc_input_extend_vocab, self.article_oovs = data.article2ids(article_words, vocab)
            # Get a version of the reference summary where in-article OOVs are represented by their temporary article OOV id
abs_ids_extend_vocab = data.abstract2ids(abstract_words, vocab, self.article_oovs)
# Overwrite decoder target sequence so it uses the temp article OOV ids
_, self.target = self.get_dec_inp_targ_seqs(abs_ids_extend_vocab, hps.max_dec_steps, start_decoding, stop_decoding)
# Store the original strings
self.original_article = article
self.original_abstract = abstract
self.original_abstract_sents = abstract_sentences
def get_dec_inp_targ_seqs(self, sequence, max_len, start_id, stop_id):
"""Given the reference summary as a sequence of tokens, return the input sequence for the decoder, and the target sequence which we will use to calculate loss. The sequence will be truncated if it is longer than max_len. The input sequence must start with the start_id and the target sequence must end with the stop_id (but not if it's been truncated).
Args:
sequence: List of ids (integers)
max_len: integer
start_id: integer
stop_id: integer
Returns:
inp: sequence length <=max_len starting with start_id
target: sequence same length as input, ending with stop_id only if there was no truncation
"""
inp = [start_id] + sequence[:]
target = sequence[:]
if len(inp) > max_len: # truncate
inp = inp[:max_len]
target = target[:max_len] # no end_token
else: # no truncation
target.append(stop_id) # end token
assert len(inp) == len(target)
return inp, target
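    # Worked example for get_dec_inp_targ_seqs above (illustrative values, not from the original code):
    #   sequence=[3, 4, 5], max_len=4, start_id=1, stop_id=2
    #   inp    -> [1, 3, 4, 5]   (prepend start_id; length fits within max_len)
    #   target -> [3, 4, 5, 2]   (nothing truncated, so stop_id is appended)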
def pad_decoder_inp_targ(self, max_len, pad_id):
"""Pad decoder input and target sequences with pad_id up to max_len."""
while len(self.dec_input) < max_len:
self.dec_input.append(pad_id)
while len(self.target) < max_len:
self.target.append(pad_id)
def pad_encoder_input(self, max_len, pad_id):
"""Pad the encoder input sequence with pad_id up to max_len."""
while len(self.enc_input) < max_len:
self.enc_input.append(pad_id)
if self.hps.pointer_gen:
while len(self.enc_input_extend_vocab) < max_len:
self.enc_input_extend_vocab.append(pad_id)
class Batch(object):
"""Class representing a minibatch of train/val/test examples for text summarization."""
def __init__(self, example_list, hps, vocab):
"""Turns the example_list into a Batch object.
Args:
example_list: List of Example objects
hps: hyperparameters
vocab: Vocabulary object
"""
self.pad_id = vocab.word2id(data.PAD_TOKEN) # id of the PAD token used to pad sequences
self.init_encoder_seq(example_list, hps) # initialize the input to the encoder
self.init_decoder_seq(example_list, hps) # initialize the input and targets for the decoder
self.store_orig_strings(example_list) # store the original strings
def init_encoder_seq(self, example_list, hps):
"""Initializes the following:
self.enc_batch:
numpy array of shape (batch_size, <=max_enc_steps) containing integer ids (all OOVs represented by UNK id), padded to length of longest sequence in the batch
self.enc_lens:
numpy array of shape (batch_size) containing integers. The (truncated) length of each encoder input sequence (pre-padding).
self.enc_padding_mask:
numpy array of shape (batch_size, <=max_enc_steps), containing 1s and 0s. 1s correspond to real tokens in enc_batch and target_batch; 0s correspond to padding.
If hps.pointer_gen, additionally initializes the following:
self.max_art_oovs:
maximum number of in-article OOVs in the batch
self.art_oovs:
list of list of in-article OOVs (strings), for each example in the batch
self.enc_batch_extend_vocab:
Same as self.enc_batch, but in-article OOVs are represented by their temporary article OOV number.
"""
# Determine the maximum length of the encoder input sequence in this batch
max_enc_seq_len = max([ex.enc_len for ex in example_list])
# Pad the encoder input sequences up to the length of the longest sequence
for ex in example_list:
ex.pad_encoder_input(max_enc_seq_len, self.pad_id)
# Initialize the numpy arrays
# Note: our enc_batch can have different length (second dimension) for each batch because we use dynamic_rnn for the encoder.
self.enc_batch = np.zeros((hps.batch_size, max_enc_seq_len), dtype=np.int32)
self.enc_lens = np.zeros((hps.batch_size), dtype=np.int32)
self.enc_padding_mask = np.zeros((hps.batch_size, max_enc_seq_len), dtype=np.float32)
# Fill in the numpy arrays
for i, ex in enumerate(example_list):
self.enc_batch[i, :] = ex.enc_input[:]
self.enc_lens[i] = ex.enc_len
for j in range(ex.enc_len):
self.enc_padding_mask[i][j] = 1
# For pointer-generator mode, need to store some extra info
if hps.pointer_gen:
# Determine the max number of in-article OOVs in this batch
self.max_art_oovs = max([len(ex.article_oovs) for ex in example_list])
# Store the in-article OOVs themselves
self.art_oovs = [ex.article_oovs for ex in example_list]
# Store the version of the enc_batch that uses the article OOV ids
self.enc_batch_extend_vocab = np.zeros((hps.batch_size, max_enc_seq_len), dtype=np.int32)
for i, ex in enumerate(example_list):
self.enc_batch_extend_vocab[i, :] = ex.enc_input_extend_vocab[:]
def init_decoder_seq(self, example_list, hps):
"""Initializes the following:
self.dec_batch:
numpy array of shape (batch_size, max_dec_steps), containing integer ids as input for the decoder, padded to max_dec_steps length.
self.target_batch:
numpy array of shape (batch_size, max_dec_steps), containing integer ids for the target sequence, padded to max_dec_steps length.
self.dec_padding_mask:
numpy array of shape (batch_size, max_dec_steps), containing 1s and 0s. 1s correspond to real tokens in dec_batch and target_batch; 0s correspond to padding.
"""
# Pad the inputs and targets
for ex in example_list:
ex.pad_decoder_inp_targ(hps.max_dec_steps, self.pad_id)
# Initialize the numpy arrays.
# Note: our decoder inputs and targets must be the same length for each batch (second dimension = max_dec_steps) because we do not use a dynamic_rnn for decoding. However I believe this is possible, or will soon be possible, with Tensorflow 1.0, in which case it may be best to upgrade to that.
self.dec_batch = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.int32)
self.target_batch = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.int32)
self.dec_padding_mask = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.float32)
# Fill in the numpy arrays
for i, ex in enumerate(example_list):
self.dec_batch[i, :] = ex.dec_input[:]
self.target_batch[i, :] = ex.target[:]
for j in range(ex.dec_len):
self.dec_padding_mask[i][j] = 1
def store_orig_strings(self, example_list):
"""Store the original article and abstract strings in the Batch object"""
self.original_articles = [ex.original_article for ex in example_list] # list of lists
self.original_abstracts = [ex.original_abstract for ex in example_list] # list of lists
self.original_abstracts_sents = [ex.original_abstract_sents for ex in example_list] # list of list of lists
class Batcher(object):
"""A class to generate minibatches of data. Buckets examples together based on length of the encoder sequence."""
BATCH_QUEUE_MAX = 100 # max number of batches the batch_queue can hold
def __init__(self, data_path, vocab, hps, single_pass, abstract=None, article=None):
"""Initialize the batcher. Start threads that process the data into batches.
Args:
data_path: tf.Example filepattern.
vocab: Vocabulary object
hps: hyperparameters
single_pass: If True, run through the dataset exactly once (useful for when you want to run evaluation on the dev or test set). Otherwise generate random batches indefinitely (useful for training).
"""
self._data_path = data_path
self._vocab = vocab
self._hps = hps
self._single_pass = single_pass
self._abstract = abstract
self._article = article
# Initialize a queue of Batches waiting to be used, and a queue of Examples waiting to be batched
self._batch_queue = Queue.Queue(self.BATCH_QUEUE_MAX)
self._example_queue = Queue.Queue(self.BATCH_QUEUE_MAX * self._hps.batch_size)
# Different settings depending on whether we're in single_pass mode or not
if single_pass:
self._num_example_q_threads = 1 # just one thread, so we read through the dataset just once
self._num_batch_q_threads = 1 # just one thread to batch examples
self._bucketing_cache_size = 1 # only load one batch's worth of examples before bucketing; this essentially means no bucketing
self._finished_reading = False # this will tell us when we're finished reading the dataset
else:
self._num_example_q_threads = 16 # num threads to fill example queue
self._num_batch_q_threads = 4 # num threads to fill batch queue
self._bucketing_cache_size = 100 # how many batches-worth of examples to load into cache before bucketing
# Start the threads that load the queues
self._example_q_threads = []
for _ in range(self._num_example_q_threads):
self._example_q_threads.append(Thread(target=self.fill_example_queue))
self._example_q_threads[-1].daemon = True
self._example_q_threads[-1].start()
self._batch_q_threads = []
for _ in range(self._num_batch_q_threads):
self._batch_q_threads.append(Thread(target=self.fill_batch_queue))
self._batch_q_threads[-1].daemon = True
self._batch_q_threads[-1].start()
# Start a thread that watches the other threads and restarts them if they're dead
if not single_pass: # We don't want a watcher in single_pass mode because the threads shouldn't run forever
self._watch_thread = Thread(target=self.watch_threads)
self._watch_thread.daemon = True
self._watch_thread.start()
def next_batch(self):
"""Return a Batch from the batch queue.
If mode='decode' then each batch contains a single example repeated beam_size-many times; this is necessary for beam search.
Returns:
batch: a Batch object, or None if we're in single_pass mode and we've exhausted the dataset.
"""
# If the batch queue is empty, print a warning
if self._batch_queue.qsize() == 0:
tf.logging.warning('Bucket input queue is empty when calling next_batch. Bucket queue size: %i, Input queue size: %i', self._batch_queue.qsize(), self._example_queue.qsize())
if self._single_pass and self._finished_reading:
tf.logging.info("Finished reading dataset in single_pass mode.")
return None
batch = self._batch_queue.get() # get the next Batch
return batch
def fill_example_queue(self):
"""Reads data from file and processes into Examples which are then placed into the example queue."""
if self._abstract is not None and self._article is not None:
input_gen = self.text_generator(data.example_generator2(self._abstract, self._article))
else:
input_gen = self.text_generator(data.example_generator(self._data_path, self._single_pass))
while True:
try:
(article, abstract) = next(input_gen) # read the next example from file. article and abstract are both strings.
except StopIteration: # if there are no more examples:
tf.logging.info("The example generator for this example queue filling thread has exhausted data.")
if self._single_pass:
tf.logging.info("single_pass mode is on, so we've finished reading dataset. This thread is stopping.")
self._finished_reading = True
break
else:
raise Exception("single_pass mode is off but the example generator is out of data; error.")
abstract_sentences = [sent.strip() for sent in data.abstract2sents(abstract)] # Use the <s> and </s> tags in abstract to get a list of sentences.
example = Example(article, abstract_sentences, self._vocab, self._hps) # Process into an Example.
self._example_queue.put(example) # place the Example in the example queue.
def fill_batch_queue(self):
"""Takes Examples out of example queue, sorts them by encoder sequence length, processes into Batches and places them in the batch queue.
In decode mode, makes batches that each contain a single example repeated.
"""
while True:
if self._hps.mode != 'decode':
# Get bucketing_cache_size-many batches of Examples into a list, then sort
inputs = []
for _ in range(self._hps.batch_size * self._bucketing_cache_size):
inputs.append(self._example_queue.get())
inputs = sorted(inputs, key=lambda inp: inp.enc_len) # sort by length of encoder sequence
# Group the sorted Examples into batches, optionally shuffle the batches, and place in the batch queue.
batches = []
for i in range(0, len(inputs), self._hps.batch_size):
batches.append(inputs[i:i + self._hps.batch_size])
if not self._single_pass:
shuffle(batches)
for b in batches: # each b is a list of Example objects
self._batch_queue.put(Batch(b, self._hps, self._vocab))
else: # beam search decode mode
ex = self._example_queue.get()
b = [ex for _ in range(self._hps.batch_size)]
self._batch_queue.put(Batch(b, self._hps, self._vocab))
def watch_threads(self):
"""Watch example queue and batch queue threads and restart if dead."""
while True:
time.sleep(60)
for idx,t in enumerate(self._example_q_threads):
if not t.is_alive(): # if the thread is dead
tf.logging.error('Found example queue thread dead. Restarting.')
new_t = Thread(target=self.fill_example_queue)
self._example_q_threads[idx] = new_t
new_t.daemon = True
new_t.start()
for idx,t in enumerate(self._batch_q_threads):
if not t.is_alive(): # if the thread is dead
tf.logging.error('Found batch queue thread dead. Restarting.')
new_t = Thread(target=self.fill_batch_queue)
self._batch_q_threads[idx] = new_t
new_t.daemon = True
new_t.start()
def text_generator(self, example_generator):
"""Generates article and abstract text from tf.Example.
Args:
example_generator: a generator of tf.Examples from file. See data.example_generator"""
while True:
e = next(example_generator) # e is a tf.Example
try:
article_text = e.features.feature['article'].bytes_list.value[0].decode() # the article text was saved under the key 'article' in the data files
abstract_text = e.features.feature['abstract'].bytes_list.value[0].decode() # the abstract text was saved under the key 'abstract' in the data files
except ValueError:
tf.logging.error('Failed to get article or abstract from example')
continue
if len(article_text)==0: # See https://github.com/abisee/pointer-generator/issues/1
tf.logging.warning('Found an example with empty article text. Skipping it.')
else:
yield (article_text, abstract_text)
|
stats.py
|
#!/usr/bin/env python3
# coding=utf-8
#
# Python Script
#
# Copyleft © Manoel Vilela
#
#
import pandas as pd # sudo pip install pandas
import numpy as np # sudo pip install numpy
from distutils.spawn import find_executable
from optparse import OptionParser
from os import path
import os
import time
import itertools
import threading
import subprocess
import re
import sys
import hashlib
import fileinput
import signal
# #
# Building classes
# #
SOLUTION_TIMEOUT_VALUE = 60
class TimeOutController:
class TimeOut(Exception):
pass
def __init__(self, sec=SOLUTION_TIMEOUT_VALUE):
signal.signal(signal.SIGALRM, self.raise_timeout)
signal.alarm(sec)
def cancel(self):
signal.alarm(0) # disable alarm
def raise_timeout(self, a, n):
raise TimeOutController.TimeOut()
class Checker(object):
checked = []
def __init__(self, compiler, path):
self.compiler = compiler.split()
self.path = os.path.abspath(path)
self.check()
def check(self):
binary = self.compiler[0]
if binary not in self.checked and not find_executable(binary):
raise EnvironmentError("{!r} not found. Do you have the compilers?".format(binary)) # noqa
elif binary not in self.checked:
            self.checked.append(binary)
class Execute(Checker):
"""Interactive languages building"""
def enter_dir(self):
self.old_dir = os.getcwd()
os.chdir(path.dirname(self.path))
def exit_dir(self):
os.chdir(self.old_dir)
def execute(self):
self.enter_dir()
before = time.time()
args = self.compiler
args += [self.path]
try:
toc = TimeOutController()
program = subprocess.Popen(args, stdout=subprocess.PIPE)
out, _ = program.communicate()
except TimeOutController.TimeOut:
out = b"TIMEOUT"
program.kill()
finally:
toc.cancel()
time_passed = time.time() - before
self.exit_dir()
return out, program.returncode, time_passed
class Build(Checker):
"""For compiled languages: C++, C for example"""
fout = "compiled.out"
def compile(self):
args = [self.compiler[0], self.path, "-o", self.output] + self.compiler[1:]
program = subprocess.Popen(args, stdout=subprocess.PIPE)
return program.wait() == 0
def execute(self):
self.output = path.join(path.dirname(self.path), self.fout)
if self.compile():
compiled = path.abspath(self.output)
program = Execute("bash -c", "{}".format(compiled))
output = program.execute()
os.remove(compiled)
return output
return b"compiles fails", EnvironmentError, 0
ERASE_LINE = "\x1b[2K"
BUILD_SUPPORT = [
"Python", # you need python | pacman -Su python
"Go", # you need golang | pacman -Su golang
"Clojure", # you need clojure | pacman -Su clojure
"CommonLisp", # you need clisp | pacman -Su clisp
"Haskell", # you need ghc | pacman -Su ghc
"Lua", # you need lua | pacman -Su lua5.3
"Ruby", # you need ruby | pacman -Su ruby
"C", # you need gcc | pacman -Su gcc
"C++", # you need | pacman -Su g++
"Elixir", # you need elixir | pacman -Su elixir
"PHP", # you need php | pacman -Su php
# "Swift", # you need swift | yaourt -Su swift
# "Objective-C", # you need gcc-objc | pacman -Su gcc-objc
"Scheme",
"Racket",
"Bash", # hmm, i think you already have this
]
BUILD_FILES = ["stats.py", "stats.exs", "test", "add"]
BUILD_MACHINE = {
"Python": {
"cmdline": "python3",
"builder": Execute
},
"Go": {
"cmdline": "go run",
"builder": Execute
},
"Clojure": {
"cmdline": "clojure",
"builder": Execute
},
"CommonLisp": {
"cmdline": "sbcl --script",
"builder": Execute
},
"Racket": {
"cmdline": "racket --script",
"builder": Execute
},
"Scheme": {
"cmdline": "racket --script",
"builder": Execute
},
"Haskell": {
"cmdline": "runhaskell",
"builder": Execute
},
"C": {
"cmdline": "gcc -std=c99 -lm",
"builder": Build
},
"C++": {
"cmdline": "g++ -std=c++0x",
"builder": Build
},
"Lua": {
"cmdline": "lua",
"builder": Execute
},
"Ruby": {
"cmdline": "ruby",
"builder": Execute
},
"Bash": {
"cmdline": "bash",
"builder": Execute
},
"Elixir": {
"cmdline": "elixir",
"builder": Execute
},
"Objective-C": {
"cmdline": "gcc -Wall -lm -lobjc",
"builder": Build
},
"PHP": {
"cmdline": "php",
"builder": Execute
},
"Swift": {
"cmdline": "swift",
"builder": Execute
}
}
# CLI INTERFACE
# -l (list languages with solutions)
# -c (do count solutions)
# -p (print the path)
# -a all languages selected
# -s language (search)
# -b (build)
# Examples of usage:
# python stats.py --list
# python stats.py --list --count
# python stats.py --all --path
# python stats.py --all --count
# python stats.py -s Python -s Haskell -c
# #
# Cmdline parsing definitions
# #
def _callback(option, opt_str, value, parser):
"""
Used to parse several arguments for one option, knowing that arguments
never start with a `-` and `--`
"""
assert value is None
value = []
for arg in parser.rargs:
# stop on --foo like options
if arg[:2] == "--" and len(arg) > 2:
break
if arg[:1] == "-" and len(arg) > 1:
break
value.append(arg)
del parser.rargs[:len(value)]
setattr(parser.values, option.dest, value)
parser = OptionParser()
parser.add_option(
"-l", "--list",
help="Print a list of the languages whose have solutions",
dest="list",
action="store_true",
default=False,
)
parser.add_option(
"-s", "--search",
help="Choose the languages for print information",
dest="search",
action="append",
default=[],
nargs=1,
)
parser.add_option(
"-f", "--files",
help="Receive a list of file paths to build them",
dest="files",
action="callback",
callback=_callback,
)
parser.add_option(
"-c", "--count",
help="Print the count of each solution",
dest="count",
action="store_true",
default=False,
)
parser.add_option(
"-b", "--build",
help="Execute the solutions and print each solution",
dest="build",
action="store_true",
default=False,
)
parser.add_option(
"-p", "--path",
help="Print the path of each solution",
dest="path",
action="store_true",
default=False,
)
parser.add_option(
"-a", "--all",
help="Select all the languages for search",
dest="all",
action="store_true",
default=False,
)
parser.add_option(
"-m", "--blame",
help="Show the slowest solutions that needs help",
dest="blame",
action="store_true",
default=False,
)
parser.add_option(
"-g", "--graph",
help="Make a cool graph with the final DataFrame data",
dest="graph",
action="store_true",
default=False,
)
parser.usage = "%prog [-s language] [-al] [-cpb] [--blame] [--files] [-g]"
def walk_problems(root="."):
"""
Function: walk_problems
    Summary: Walk the repository and collect the os.walk entry of each ProblemXXX directory
    Examples: Uniq behavior
    Returns: list of os.walk 3-tuples <(dirpath, dirnames, filenames), ...>
"""
problem = re.compile("./Problem[0-9]{3}/")
problems = []
for x in os.walk(root):
if problem.match(x[0]) and "pycache" not in x[0]:
problems.append(x)
return problems
def read_hashfile(fpath):
"""Read .hash based on fpath and clean the weird chars"""
return open(fpath).read().strip(' -\n')
def get_problem_hashes():
"""
Function: get_problem_hashes
    Summary: Walk each problem directory and read its .hash file content
    Returns: dict <problem_name: string -> hash_content: string>
"""
hash_pattern = re.compile("./Problem[0-9]{3}")
hashes = {}
for file_tuple in os.walk("."):
if hash_pattern.match(file_tuple[0]) and ".hash" in file_tuple[-1]:
problem = file_tuple[0]
hash_path = path.join(problem, '.hash')
hash_content = read_hashfile(hash_path)
hashes[problem.strip('./')] = hash_content
return hashes
def digest_answer(answer):
clean_answer = answer.strip(' \n')
return hashlib.md5(clean_answer.encode('utf-8')).hexdigest()
def search_language(query, languages):
"""
Function: search_language
    Summary: Select the queried languages that are actually available (set intersection)
    Examples:
        >>> search_language(["C", "Ruby"], ["C", "C++", "Python"])
        {'C'}
Attributes:
@param (query): list of languages for search
@param (languages): collections of languages normalized
Returns: list of results as strings <list (string)>
"""
return set(query) & set(languages)
def split_problem_language(path):
"""
Function: split_problem_language
Summary: Get a path and split into problem and language
Examples:
>>> split_problem_language("./Problem001/Python")
["Problem001", "Python]
Attributes:
@param (path): path like ./Folder/Language
Returns: [Problem, Language] <(string, string)>
"""
return path.strip("./").split("/")
def is_solution(string):
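    """Match filenames that start with 'solution_' and are not output files (e.g. 'solution_out')."""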
solution = re.compile("solution_+(?!out)")
return solution.match(string)
def parse_solutions(problems):
"""
Function: parse_solutions
Summary: Organize the solutions of problems
Examples: <NONE>
Attributes:
@param (problems): os.walk functions output
Returns: problem:lang -> [solutions] <dict>
"""
map_solutions = {}
for problem_path, dirs, files in problems:
problem, lang = split_problem_language(problem_path)
map_solutions.setdefault(problem, {}).setdefault(lang, [])
for file in files:
if is_solution(file):
map_solutions[problem][lang].append(file)
return map_solutions
def load_dataframe():
"""
Function: load_dataframe
Summary: Load all solutions of repository at dataframe
Examples:
        >>> df = load_dataframe()
>>> py = df["Python"]
Python
Problem001 [solution_1.py, solution_2.py]
Problem002 [solution_1.py]
Problem003 [solution_1.py]
Problem004 [solution_1.py]
If you observe: (index + column_name) <- list_solutions -> filepaths!
Returns: pd.DataFrame
"""
return pd.DataFrame.from_dict(parse_solutions(walk_problems()), "index")
def solutions_paths(df, from_files=None):
"""
    Function: solutions_paths
Summary: Get each filepath of solutions based on pd.DataFrame
Examples:
>>> df = load_dataframe()
>>> py = df[["CommonLisp"]]
        >>> solutions_paths(py)
["..."]
Attributes:
@param (df): pd.DataFrame
Returns: list of file paths
"""
paths = []
if from_files:
for problem, lang, s in from_files:
paths.append((lang, path.join(problem, lang, s)))
return paths
for column in df.columns:
solutions = df[df[column].notnull()][column]
lang = solutions.name
problems = solutions.index
for problem in problems:
p = ((lang, path.join(problem, lang, s))
for s in solutions[problem])
paths.extend(p)
return paths
def count_solutions(df, solutions=True):
"""
Function: count_solutions
Summary: Count the number of solutions of each problem and language
    Examples: <NONE>
Attributes:
@param (df): pd.DataFrame
Returns: pd.DataFrame
"""
df = df.dropna(axis=1, how='all') # columns all nan
df = df.dropna(how='all') # rows all nan
df_ = pd.DataFrame()
df_ = df.applymap(lambda x: len(x) if x is not np.NAN else 0)
if len(df.columns) > 1 and solutions:
df_["Solutions"] = df_[df_.columns].apply(tuple, axis=1).map(sum)
df_ = df_[df_.Solutions > 0]
return df_
def handle_files(files):
"""
    Analyse files and return two lists:
    - solutions: files that look like solutions, as 3-tuples of strings
      in the format (ProblemXXX, 'Lang', 'solution_x.y')
    - build_files: files that belong to the build tooling (stats.py,
      stats.exs, ...)
"""
solutions = []
build_files = []
for f in files:
if f.count("/") == 2:
solutions.append(tuple(f.split("/")))
elif f.count("/") == 1 and f.startswith("./"):
dic = parse_solutions(walk_problems(f))
problem = list(dic.keys())[0]
for lang in dic[problem]:
for solution in dic[problem][lang]:
solutions.append((problem, lang, solution))
elif f.count("/") == 0 and f in BUILD_FILES:
build_files.append(f)
return list(filter(lambda x: is_solution(x[2]), solutions)), build_files
# Print a spinner animation with the elapsed time, refreshing until control.done is set.
def spinner(control):
animation = r"⣾⣽⣻⢿⡿⣟⣯"
sys.stdout.write(3 * " ")
for c in itertools.cycle(animation):
current_time = time.time() - control.time
message = "(" + c + ")" + " t: {:.2f}".format(current_time)
sys.stdout.write(message)
time.sleep(0.1)
sys.stdout.write(len(message) * "\010")
sys.stdout.flush()
if control.done:
break
# Look up the builder class and command line configured for the language in BUILD_MACHINE and instantiate it.
def choose_builder(lang, fpath):
try:
if lang in BUILD_MACHINE:
builder = BUILD_MACHINE[lang]['builder']
cmdline = BUILD_MACHINE[lang]['cmdline']
b = builder(cmdline, fpath)
else:
raise Exception("Builder not configured for {!r}! Call the developer".format(lang)) # noqa
except Exception as e:
print("\n", e)
os._exit(1)
finally:
return b
# Run the builder, print the solution's answer and timing, and abort on build errors.
def execute_builder(b):
out, err, t = b.execute()
answer = out.decode("utf-8").strip("\n")
if err:
print(err)
os._exit(1)
sys.stdout.write(ERASE_LINE)
building = "\rBuilt {}: Answer: {}: {:.2f}s\n".format(b.path, answer, t)
sys.stdout.write(building)
sys.stdout.flush()
return answer, t
# Build every selected solution, check its answer against the stored hashes and collect the results in a DataFrame.
def build_result(df, ignore_errors=False, blame=False, only=()):
class Control: # to handle the spinner time at each solution
time = time.time()
done = False
control = Control()
columns = ["Problem", "Language", "Time", "Answer", "Correct"]
data = []
hashes = get_problem_hashes()
spin_thread = threading.Thread(target=spinner, args=(control,))
spin_thread.start()
_problems = only if only else solutions_paths(df)
for lang, spath in _problems:
if "slow" in spath and not blame:
sys.stdout.write("\rIgnored {}: bad solution (slow).\n".format(spath)) # noqa
continue
if lang in BUILD_SUPPORT:
sys.stdout.write("@Building next {}: {}".format(spath, 12 * ' '))
b = choose_builder(lang, spath)
problem = split_problem_language(spath)[0]
outtimed = False
correct = False
control.time = time.time()
answer, t = execute_builder(b)
outtimed = answer == "TIMEOUT"
if (not outtimed) and problem in hashes:
answer_hash = digest_answer(answer)
correct = answer_hash == hashes[problem]
data.append([problem, lang, t, answer, correct])
elif not ignore_errors:
sys.stdout.write("\r{}: Don't have support yet for {!r}!\n".format(spath, lang)) # noqa
sys.stdout.write("\r\n")
sys.stdout.flush()
control.done = True
spin_thread.join()
final_df = pd.DataFrame(data, columns=columns)
return final_df.sort_values("Problem")
def list_by_count(df):
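    """Return the total number of solutions per language, sorted in descending order."""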
df_ = count_solutions(df, solutions=False)
count = [sum(df_[lang]) for lang in df_.columns]
table = pd.DataFrame(count, index=df_.columns,
columns=["Solutions"])
return table.sort_values("Solutions", ascending=False)
def blame_solutions(df):
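    """Keep only the solutions whose filename contains 'slow' (the ones that need help); everything else becomes NaN."""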
df_ = df.applymap(
lambda solutions:
[x for x in solutions if 'slow' in x] or np.NAN
if solutions is not np.NAN else np.NAN
)
return df_
# Problem015 -> 15
def remove_problem(df):
df_ = df
    df_.Problem = df.Problem.map(lambda x: x.replace("Problem", "").lstrip('0'))
return df_
def build_per_language(df):
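    """Pivot the build results into a problems-by-language DataFrame of execution times."""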
index = df.Problem.map(int).max()
languages = set(df.Language)
data = {lang: np.full(index, np.nan) for lang in languages}
for _, row in df.iterrows():
data[row['Language']][int(row['Problem']) - 1] = row['Time']
df_ = pd.DataFrame(data, index=range(1, index + 1)).dropna(how='all')
df_.index.name = 'Problems'
return df_
def header(opts):
return "Command: " + ' '.join([x.capitalize() for x in opts if opts[x]])
def handle_graph(df, options):
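    """Plot the results: per-language times for --build, or a stacked bar chart for --list --count."""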
import matplotlib.pyplot as plt
import matplotlib
    cycle_colors = itertools.cycle(['b', 'r', 'g', 'y', 'k'])
    my_colors = itertools.islice(cycle_colors, None, len(df))
matplotlib.style.use('ggplot')
if options.build:
df = build_per_language(remove_problem(df))
df.plot()
elif options.list and options.count:
# Make a list by cycling through the colors you care about
# to match the length of your data.
df.plot(kind='barh', stacked=True, color=list(my_colors))
plt.show()
def handle_options(options):
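    """Translate the parsed CLI options into DataFrame operations, print the result and exit non-zero on wrong answers."""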
df = load_dataframe()
langs = {x.lower(): x for x in df.columns}
query = [x.lower() for x in options.search]
uncommited_solutions = []
uncommited_core_files = []
tbsolutions = []
count_ws = 0 # wrong solutions
langs_selected = [langs[x] for x in search_language(query, langs)]
if options.files:
uncommited_solutions, uncommited_core_files = handle_files(options.files)
if not uncommited_solutions and uncommited_core_files:
sys.stdout.write(
"\rForced to exit: No solutions to build\nChanged_core_files : \n {}".format(
uncommited_core_files)
)
sys.exit(0)
tbsolutions = solutions_paths(df, from_files=uncommited_solutions)
if options.all:
langs_selected = [x for x in langs.values()]
if options.blame:
df = blame_solutions(df)
if options.list:
if options.count:
df = list_by_count(df)
elif options.path:
langs_selected = [x for x in langs.values()]
else:
df = '\n'.join(sorted(df.dropna(axis=1, how='all').columns))
else:
df = df[langs_selected]
if options.count and not options.list:
df = count_solutions(df)
elif options.build:
try:
df = build_result(df[langs_selected],
options.all,
options.blame,
only=tbsolutions)
count_ws = list(df["Correct"]).count(False)
correct_ratio = 1 - count_ws/len(df) if count_ws else 1
sys.stdout.write(
"Correct solutions ratio : {0}% \n".format(correct_ratio * 100)
)
except(SystemExit, KeyboardInterrupt):
os._exit(1)
elif options.path:
df = '\n'.join(path for _, path in solutions_paths(df[langs_selected]))
pd.set_option("display.max_rows", len(df))
print(df)
if count_ws:
sys.exit(1)
if options.graph:
handle_graph(df, options)
def main():
options, _ = parser.parse_args()
if not any(options.__dict__.values()):
parser.print_help()
os._exit(0)
print(header(options.__dict__))
handle_options(options)
if __name__ == "__main__":
main()
|
__init__.py
|
#!/usr/bin/env python
"""
fs.tests: testcases for the fs module
"""
from __future__ import with_statement
# Send any output from the logging module to stdout, so it will
# be captured by nose and reported appropriately
import sys
import logging
logging.basicConfig(level=logging.ERROR, stream=sys.stdout)
from fs.base import *
from fs.path import *
from fs.errors import *
from fs.filelike import StringIO
import datetime
import unittest
import os
import os.path
import pickle
import random
import copy
import time
try:
import threading
except ImportError:
import dummy_threading as threading
import six
from six import PY3, b
class FSTestCases(object):
"""Base suite of testcases for filesystem implementations.
Any FS subclass should be capable of passing all of these tests.
To apply the tests to your own FS implementation, simply use FSTestCase
as a mixin for your own unittest.TestCase subclass and have the setUp
method set self.fs to an instance of your FS implementation.
NB. The Filesystem being tested must have a capacity of at least 3MB.
This class is designed as a mixin so that it's not detected by test
loading tools such as nose.
"""
def check(self, p):
"""Check that a file exists within self.fs"""
return self.fs.exists(p)
def test_invalid_chars(self):
"""Check paths validate ok"""
        # Will have to be overridden selectively for custom validatepath methods
self.assertEqual(self.fs.validatepath(''), None)
self.assertEqual(self.fs.validatepath('.foo'), None)
self.assertEqual(self.fs.validatepath('foo'), None)
self.assertEqual(self.fs.validatepath('foo/bar'), None)
self.assert_(self.fs.isvalidpath('foo/bar'))
def test_meta(self):
"""Checks getmeta / hasmeta are functioning"""
# getmeta / hasmeta are hard to test, since there is no way to validate
# the implementation's response
meta_names = ["read_only",
"network",
"unicode_paths"]
stupid_meta = 'thismetashouldnotexist!"r$$%^&&*()_+'
self.assertRaises(NoMetaError, self.fs.getmeta, stupid_meta)
self.assertFalse(self.fs.hasmeta(stupid_meta))
self.assertEquals(None, self.fs.getmeta(stupid_meta, None))
self.assertEquals(3.14, self.fs.getmeta(stupid_meta, 3.14))
for meta_name in meta_names:
try:
meta = self.fs.getmeta(meta_name)
self.assertTrue(self.fs.hasmeta(meta_name))
except NoMetaError:
self.assertFalse(self.fs.hasmeta(meta_name))
def test_root_dir(self):
self.assertTrue(self.fs.isdir(""))
self.assertTrue(self.fs.isdir("/"))
# These may be false (e.g. empty dict) but mustn't raise errors
self.fs.getinfo("")
self.assertTrue(self.fs.getinfo("/") is not None)
def test_getsyspath(self):
try:
syspath = self.fs.getsyspath("/")
except NoSysPathError:
pass
else:
self.assertTrue(isinstance(syspath, unicode))
syspath = self.fs.getsyspath("/", allow_none=True)
if syspath is not None:
self.assertTrue(isinstance(syspath, unicode))
def test_debug(self):
str(self.fs)
repr(self.fs)
self.assert_(hasattr(self.fs, 'desc'))
def test_open_on_directory(self):
self.fs.makedir("testdir")
try:
f = self.fs.open("testdir")
except ResourceInvalidError:
pass
except Exception:
ecls = sys.exc_info()[0]
assert False, "%s raised instead of ResourceInvalidError" % (ecls,)
else:
f.close()
assert False, "ResourceInvalidError was not raised"
def test_writefile(self):
self.assertRaises(ResourceNotFoundError, self.fs.open, "test1.txt")
f = self.fs.open("test1.txt", "wb")
f.write(b("testing"))
f.close()
self.assertTrue(self.check("test1.txt"))
f = self.fs.open("test1.txt", "rb")
self.assertEquals(f.read(), b("testing"))
f.close()
f = self.fs.open("test1.txt", "wb")
f.write(b("test file overwrite"))
f.close()
self.assertTrue(self.check("test1.txt"))
f = self.fs.open("test1.txt", "rb")
self.assertEquals(f.read(), b("test file overwrite"))
f.close()
def test_createfile(self):
test = b('now with content')
self.fs.createfile("test.txt")
self.assert_(self.fs.exists("test.txt"))
self.assertEqual(self.fs.getcontents("test.txt", "rb"), b(''))
self.fs.setcontents("test.txt", test)
self.fs.createfile("test.txt")
self.assertEqual(self.fs.getcontents("test.txt", "rb"), test)
self.fs.createfile("test.txt", wipe=True)
self.assertEqual(self.fs.getcontents("test.txt", "rb"), b(''))
def test_readline(self):
text = b"Hello\nWorld\n"
self.fs.setcontents('a.txt', text)
with self.fs.open('a.txt', 'rb') as f:
line = f.readline()
self.assertEqual(line, b"Hello\n")
def test_setcontents(self):
# setcontents() should accept both a string...
self.fs.setcontents("hello", b("world"))
self.assertEquals(self.fs.getcontents("hello", "rb"), b("world"))
# ...and a file-like object
self.fs.setcontents("hello", StringIO(b("to you, good sir!")))
self.assertEquals(self.fs.getcontents(
"hello", "rb"), b("to you, good sir!"))
# setcontents() should accept both a string...
self.fs.setcontents("hello", b("world"), chunk_size=2)
self.assertEquals(self.fs.getcontents("hello", "rb"), b("world"))
# ...and a file-like object
self.fs.setcontents("hello", StringIO(
b("to you, good sir!")), chunk_size=2)
self.assertEquals(self.fs.getcontents(
"hello", "rb"), b("to you, good sir!"))
self.fs.setcontents("hello", b(""))
self.assertEquals(self.fs.getcontents("hello", "rb"), b(""))
def test_setcontents_async(self):
# setcontents() should accept both a string...
self.fs.setcontents_async("hello", b("world")).wait()
self.assertEquals(self.fs.getcontents("hello", "rb"), b("world"))
# ...and a file-like object
self.fs.setcontents_async("hello", StringIO(
b("to you, good sir!"))).wait()
self.assertEquals(self.fs.getcontents("hello"), b("to you, good sir!"))
self.fs.setcontents_async("hello", b("world"), chunk_size=2).wait()
self.assertEquals(self.fs.getcontents("hello", "rb"), b("world"))
# ...and a file-like object
self.fs.setcontents_async("hello", StringIO(
b("to you, good sir!")), chunk_size=2).wait()
self.assertEquals(self.fs.getcontents(
"hello", "rb"), b("to you, good sir!"))
def test_isdir_isfile(self):
self.assertFalse(self.fs.exists("dir1"))
self.assertFalse(self.fs.isdir("dir1"))
self.assertFalse(self.fs.isfile("a.txt"))
self.fs.setcontents("a.txt", b(''))
self.assertFalse(self.fs.isdir("dir1"))
self.assertTrue(self.fs.exists("a.txt"))
self.assertTrue(self.fs.isfile("a.txt"))
self.assertFalse(self.fs.exists("a.txt/thatsnotadir"))
self.fs.makedir("dir1")
self.assertTrue(self.fs.isdir("dir1"))
self.assertTrue(self.fs.exists("dir1"))
self.assertTrue(self.fs.exists("a.txt"))
self.fs.remove("a.txt")
self.assertFalse(self.fs.exists("a.txt"))
def test_listdir(self):
def check_unicode(items):
for item in items:
self.assertTrue(isinstance(item, unicode))
self.fs.setcontents(u"a", b(''))
self.fs.setcontents("b", b(''))
self.fs.setcontents("foo", b(''))
self.fs.setcontents("bar", b(''))
# Test listing of the root directory
d1 = self.fs.listdir()
self.assertEqual(len(d1), 4)
self.assertEqual(sorted(d1), [u"a", u"b", u"bar", u"foo"])
check_unicode(d1)
d1 = self.fs.listdir("")
self.assertEqual(len(d1), 4)
self.assertEqual(sorted(d1), [u"a", u"b", u"bar", u"foo"])
check_unicode(d1)
d1 = self.fs.listdir("/")
self.assertEqual(len(d1), 4)
check_unicode(d1)
# Test listing absolute paths
d2 = self.fs.listdir(absolute=True)
self.assertEqual(len(d2), 4)
self.assertEqual(sorted(d2), [u"/a", u"/b", u"/bar", u"/foo"])
check_unicode(d2)
# Create some deeper subdirectories, to make sure their
        # contents are not inadvertently included
self.fs.makedir("p/1/2/3", recursive=True)
self.fs.setcontents("p/1/2/3/a", b(''))
self.fs.setcontents("p/1/2/3/b", b(''))
self.fs.setcontents("p/1/2/3/foo", b(''))
self.fs.setcontents("p/1/2/3/bar", b(''))
self.fs.makedir("q")
# Test listing just files, just dirs, and wildcards
dirs_only = self.fs.listdir(dirs_only=True)
files_only = self.fs.listdir(files_only=True)
contains_a = self.fs.listdir(wildcard="*a*")
self.assertEqual(sorted(dirs_only), [u"p", u"q"])
self.assertEqual(sorted(files_only), [u"a", u"b", u"bar", u"foo"])
self.assertEqual(sorted(contains_a), [u"a", u"bar"])
check_unicode(dirs_only)
check_unicode(files_only)
check_unicode(contains_a)
# Test listing a subdirectory
d3 = self.fs.listdir("p/1/2/3")
self.assertEqual(len(d3), 4)
self.assertEqual(sorted(d3), [u"a", u"b", u"bar", u"foo"])
check_unicode(d3)
        # Test listing a subdirectory with absolute and full paths
d4 = self.fs.listdir("p/1/2/3", absolute=True)
self.assertEqual(len(d4), 4)
self.assertEqual(sorted(d4), [u"/p/1/2/3/a", u"/p/1/2/3/b", u"/p/1/2/3/bar", u"/p/1/2/3/foo"])
check_unicode(d4)
d4 = self.fs.listdir("p/1/2/3", full=True)
self.assertEqual(len(d4), 4)
self.assertEqual(sorted(d4), [u"p/1/2/3/a", u"p/1/2/3/b", u"p/1/2/3/bar", u"p/1/2/3/foo"])
check_unicode(d4)
# Test that appropriate errors are raised
self.assertRaises(ResourceNotFoundError, self.fs.listdir, "zebra")
self.assertRaises(ResourceInvalidError, self.fs.listdir, "foo")
def test_listdirinfo(self):
def check_unicode(items):
for (nm, info) in items:
self.assertTrue(isinstance(nm, unicode))
def check_equal(items, target):
names = [nm for (nm, info) in items]
self.assertEqual(sorted(names), sorted(target))
self.fs.setcontents(u"a", b(''))
self.fs.setcontents("b", b(''))
self.fs.setcontents("foo", b(''))
self.fs.setcontents("bar", b(''))
# Test listing of the root directory
d1 = self.fs.listdirinfo()
self.assertEqual(len(d1), 4)
check_equal(d1, [u"a", u"b", u"bar", u"foo"])
check_unicode(d1)
d1 = self.fs.listdirinfo("")
self.assertEqual(len(d1), 4)
check_equal(d1, [u"a", u"b", u"bar", u"foo"])
check_unicode(d1)
d1 = self.fs.listdirinfo("/")
self.assertEqual(len(d1), 4)
check_equal(d1, [u"a", u"b", u"bar", u"foo"])
check_unicode(d1)
# Test listing absolute paths
d2 = self.fs.listdirinfo(absolute=True)
self.assertEqual(len(d2), 4)
check_equal(d2, [u"/a", u"/b", u"/bar", u"/foo"])
check_unicode(d2)
# Create some deeper subdirectories, to make sure their
        # contents are not inadvertently included
self.fs.makedir("p/1/2/3", recursive=True)
self.fs.setcontents("p/1/2/3/a", b(''))
self.fs.setcontents("p/1/2/3/b", b(''))
self.fs.setcontents("p/1/2/3/foo", b(''))
self.fs.setcontents("p/1/2/3/bar", b(''))
self.fs.makedir("q")
# Test listing just files, just dirs, and wildcards
dirs_only = self.fs.listdirinfo(dirs_only=True)
files_only = self.fs.listdirinfo(files_only=True)
contains_a = self.fs.listdirinfo(wildcard="*a*")
check_equal(dirs_only, [u"p", u"q"])
check_equal(files_only, [u"a", u"b", u"bar", u"foo"])
check_equal(contains_a, [u"a", u"bar"])
check_unicode(dirs_only)
check_unicode(files_only)
check_unicode(contains_a)
# Test listing a subdirectory
d3 = self.fs.listdirinfo("p/1/2/3")
self.assertEqual(len(d3), 4)
check_equal(d3, [u"a", u"b", u"bar", u"foo"])
check_unicode(d3)
        # Test listing a subdirectory with absolute and full paths
d4 = self.fs.listdirinfo("p/1/2/3", absolute=True)
self.assertEqual(len(d4), 4)
check_equal(d4, [u"/p/1/2/3/a", u"/p/1/2/3/b", u"/p/1/2/3/bar", u"/p/1/2/3/foo"])
check_unicode(d4)
d4 = self.fs.listdirinfo("p/1/2/3", full=True)
self.assertEqual(len(d4), 4)
check_equal(d4, [u"p/1/2/3/a", u"p/1/2/3/b", u"p/1/2/3/bar", u"p/1/2/3/foo"])
check_unicode(d4)
# Test that appropriate errors are raised
self.assertRaises(ResourceNotFoundError, self.fs.listdirinfo, "zebra")
self.assertRaises(ResourceInvalidError, self.fs.listdirinfo, "foo")
def test_walk(self):
self.fs.setcontents('a.txt', b('hello'))
self.fs.setcontents('b.txt', b('world'))
self.fs.makeopendir('foo').setcontents('c', b('123'))
sorted_walk = sorted([(d, sorted(fs)) for (d, fs) in self.fs.walk()])
self.assertEquals(sorted_walk,
[("/", ["a.txt", "b.txt"]),
("/foo", ["c"])])
# When searching breadth-first, shallow entries come first
found_a = False
for _, files in self.fs.walk(search="breadth"):
if "a.txt" in files:
found_a = True
if "c" in files:
break
assert found_a, "breadth search order was wrong"
# When searching depth-first, deep entries come first
found_c = False
for _, files in self.fs.walk(search="depth"):
if "c" in files:
found_c = True
if "a.txt" in files:
break
assert found_c, "depth search order was wrong: " + \
str(list(self.fs.walk(search="depth")))
def test_walk_wildcard(self):
self.fs.setcontents('a.txt', b('hello'))
self.fs.setcontents('b.txt', b('world'))
self.fs.makeopendir('foo').setcontents('c', b('123'))
self.fs.makeopendir('.svn').setcontents('ignored', b(''))
for dir_path, paths in self.fs.walk(wildcard='*.txt'):
for path in paths:
self.assert_(path.endswith('.txt'))
for dir_path, paths in self.fs.walk(wildcard=lambda fn: fn.endswith('.txt')):
for path in paths:
self.assert_(path.endswith('.txt'))
def test_walk_dir_wildcard(self):
self.fs.setcontents('a.txt', b('hello'))
self.fs.setcontents('b.txt', b('world'))
self.fs.makeopendir('foo').setcontents('c', b('123'))
self.fs.makeopendir('.svn').setcontents('ignored', b(''))
for dir_path, paths in self.fs.walk(dir_wildcard=lambda fn: not fn.endswith('.svn')):
for path in paths:
self.assert_('.svn' not in path)
def test_walkfiles(self):
self.fs.makeopendir('bar').setcontents('a.txt', b('123'))
self.fs.makeopendir('foo').setcontents('b', b('123'))
self.assertEquals(sorted(
self.fs.walkfiles()), ["/bar/a.txt", "/foo/b"])
self.assertEquals(sorted(self.fs.walkfiles(
dir_wildcard="*foo*")), ["/foo/b"])
self.assertEquals(sorted(self.fs.walkfiles(
wildcard="*.txt")), ["/bar/a.txt"])
def test_walkdirs(self):
self.fs.makeopendir('bar').setcontents('a.txt', b('123'))
self.fs.makeopendir('foo').makeopendir(
"baz").setcontents('b', b('123'))
self.assertEquals(sorted(self.fs.walkdirs()), [
"/", "/bar", "/foo", "/foo/baz"])
self.assertEquals(sorted(self.fs.walkdirs(
wildcard="*foo*")), ["/", "/foo", "/foo/baz"])
def test_unicode(self):
alpha = u"\N{GREEK SMALL LETTER ALPHA}"
beta = u"\N{GREEK SMALL LETTER BETA}"
self.fs.makedir(alpha)
self.fs.setcontents(alpha + "/a", b(''))
self.fs.setcontents(alpha + "/" + beta, b(''))
self.assertTrue(self.check(alpha))
self.assertEquals(sorted(self.fs.listdir(alpha)), ["a", beta])
def test_makedir(self):
check = self.check
self.fs.makedir("a")
self.assertTrue(check("a"))
self.assertRaises(
ParentDirectoryMissingError, self.fs.makedir, "a/b/c")
self.fs.makedir("a/b/c", recursive=True)
self.assert_(check("a/b/c"))
self.fs.makedir("foo/bar/baz", recursive=True)
self.assert_(check("foo/bar/baz"))
self.fs.makedir("a/b/child")
self.assert_(check("a/b/child"))
self.assertRaises(DestinationExistsError, self.fs.makedir, "/a/b")
self.fs.makedir("/a/b", allow_recreate=True)
self.fs.setcontents("/a/file", b(''))
self.assertRaises(ResourceInvalidError, self.fs.makedir, "a/file")
def test_remove(self):
self.fs.setcontents("a.txt", b(''))
self.assertTrue(self.check("a.txt"))
self.fs.remove("a.txt")
self.assertFalse(self.check("a.txt"))
self.assertRaises(ResourceNotFoundError, self.fs.remove, "a.txt")
self.fs.makedir("dir1")
self.assertRaises(ResourceInvalidError, self.fs.remove, "dir1")
self.fs.setcontents("/dir1/a.txt", b(''))
self.assertTrue(self.check("dir1/a.txt"))
self.fs.remove("dir1/a.txt")
self.assertFalse(self.check("/dir1/a.txt"))
def test_removedir(self):
check = self.check
self.fs.makedir("a")
self.assert_(check("a"))
self.fs.removedir("a")
self.assertRaises(ResourceNotFoundError, self.fs.removedir, "a")
self.assert_(not check("a"))
self.fs.makedir("a/b/c/d", recursive=True)
self.assertRaises(DirectoryNotEmptyError, self.fs.removedir, "a/b")
self.fs.removedir("a/b/c/d")
self.assert_(not check("a/b/c/d"))
self.fs.removedir("a/b/c")
self.assert_(not check("a/b/c"))
self.fs.removedir("a/b")
self.assert_(not check("a/b"))
# Test recursive removal of empty parent dirs
self.fs.makedir("foo/bar/baz", recursive=True)
self.fs.removedir("foo/bar/baz", recursive=True)
self.assert_(not check("foo/bar/baz"))
self.assert_(not check("foo/bar"))
self.assert_(not check("foo"))
self.fs.makedir("foo/bar/baz", recursive=True)
self.fs.setcontents("foo/file.txt", b("please don't delete me"))
self.fs.removedir("foo/bar/baz", recursive=True)
self.assert_(not check("foo/bar/baz"))
self.assert_(not check("foo/bar"))
self.assert_(check("foo/file.txt"))
# Ensure that force=True works as expected
self.fs.makedir("frollic/waggle", recursive=True)
self.fs.setcontents("frollic/waddle.txt", b("waddlewaddlewaddle"))
self.assertRaises(DirectoryNotEmptyError, self.fs.removedir, "frollic")
self.assertRaises(
ResourceInvalidError, self.fs.removedir, "frollic/waddle.txt")
self.fs.removedir("frollic", force=True)
self.assert_(not check("frollic"))
# Test removing unicode dirs
kappa = u"\N{GREEK CAPITAL LETTER KAPPA}"
self.fs.makedir(kappa)
self.assert_(self.fs.isdir(kappa))
self.fs.removedir(kappa)
self.assertRaises(ResourceNotFoundError, self.fs.removedir, kappa)
self.assert_(not self.fs.isdir(kappa))
self.fs.makedir(pathjoin("test", kappa), recursive=True)
self.assert_(check(pathjoin("test", kappa)))
self.fs.removedir("test", force=True)
self.assert_(not check("test"))
def test_rename(self):
check = self.check
# test renaming a file in the same directory
self.fs.setcontents("foo.txt", b("Hello, World!"))
self.assert_(check("foo.txt"))
self.fs.rename("foo.txt", "bar.txt")
self.assert_(check("bar.txt"))
self.assert_(not check("foo.txt"))
# test renaming a directory in the same directory
self.fs.makedir("dir_a")
self.fs.setcontents("dir_a/test.txt", b("testerific"))
self.assert_(check("dir_a"))
self.fs.rename("dir_a", "dir_b")
self.assert_(check("dir_b"))
self.assert_(check("dir_b/test.txt"))
self.assert_(not check("dir_a/test.txt"))
self.assert_(not check("dir_a"))
# test renaming a file into a different directory
self.fs.makedir("dir_a")
self.fs.rename("dir_b/test.txt", "dir_a/test.txt")
self.assert_(not check("dir_b/test.txt"))
self.assert_(check("dir_a/test.txt"))
# test renaming a file into a non-existent directory
self.assertRaises(ParentDirectoryMissingError,
self.fs.rename, "dir_a/test.txt", "nonexistent/test.txt")
def test_info(self):
test_str = b("Hello, World!")
self.fs.setcontents("info.txt", test_str)
info = self.fs.getinfo("info.txt")
self.assertEqual(info['size'], len(test_str))
self.fs.desc("info.txt")
self.assertRaises(ResourceNotFoundError, self.fs.getinfo, "notafile")
self.assertRaises(
ResourceNotFoundError, self.fs.getinfo, "info.txt/inval")
def test_infokeys(self):
test_str = b("Hello, World!")
self.fs.setcontents("info.txt", test_str)
info = self.fs.getinfo("info.txt")
for k, v in info.iteritems():
self.assertEqual(self.fs.getinfokeys('info.txt', k), {k: v})
test_info = {}
if 'modified_time' in info:
test_info['modified_time'] = info['modified_time']
if 'size' in info:
test_info['size'] = info['size']
self.assertEqual(self.fs.getinfokeys('info.txt', 'size', 'modified_time'), test_info)
self.assertEqual(self.fs.getinfokeys('info.txt', 'thiscantpossiblyexistininfo'), {})
def test_getsize(self):
test_str = b("*") * 23
self.fs.setcontents("info.txt", test_str)
size = self.fs.getsize("info.txt")
self.assertEqual(size, len(test_str))
def test_movefile(self):
check = self.check
contents = b(
"If the implementation is hard to explain, it's a bad idea.")
def makefile(path):
self.fs.setcontents(path, contents)
def checkcontents(path):
check_contents = self.fs.getcontents(path, "rb")
self.assertEqual(check_contents, contents)
return contents == check_contents
self.fs.makedir("foo/bar", recursive=True)
makefile("foo/bar/a.txt")
self.assert_(check("foo/bar/a.txt"))
self.assert_(checkcontents("foo/bar/a.txt"))
self.fs.move("foo/bar/a.txt", "foo/b.txt")
self.assert_(not check("foo/bar/a.txt"))
self.assert_(check("foo/b.txt"))
self.assert_(checkcontents("foo/b.txt"))
self.fs.move("foo/b.txt", "c.txt")
self.assert_(not check("foo/b.txt"))
self.assert_(check("/c.txt"))
self.assert_(checkcontents("/c.txt"))
makefile("foo/bar/a.txt")
self.assertRaises(
DestinationExistsError, self.fs.move, "foo/bar/a.txt", "/c.txt")
self.assert_(check("foo/bar/a.txt"))
self.assert_(check("/c.txt"))
self.fs.move("foo/bar/a.txt", "/c.txt", overwrite=True)
self.assert_(not check("foo/bar/a.txt"))
self.assert_(check("/c.txt"))
def test_movedir(self):
check = self.check
contents = b(
"If the implementation is hard to explain, it's a bad idea.")
def makefile(path):
self.fs.setcontents(path, contents)
self.assertRaises(ResourceNotFoundError, self.fs.movedir, "a", "b")
self.fs.makedir("a")
self.fs.makedir("b")
makefile("a/1.txt")
makefile("a/2.txt")
makefile("a/3.txt")
self.fs.makedir("a/foo/bar", recursive=True)
makefile("a/foo/bar/baz.txt")
self.fs.movedir("a", "copy of a")
self.assert_(self.fs.isdir("copy of a"))
self.assert_(check("copy of a/1.txt"))
self.assert_(check("copy of a/2.txt"))
self.assert_(check("copy of a/3.txt"))
self.assert_(check("copy of a/foo/bar/baz.txt"))
self.assert_(not check("a/1.txt"))
self.assert_(not check("a/2.txt"))
self.assert_(not check("a/3.txt"))
self.assert_(not check("a/foo/bar/baz.txt"))
self.assert_(not check("a/foo/bar"))
self.assert_(not check("a/foo"))
self.assert_(not check("a"))
self.fs.makedir("a")
self.assertRaises(
DestinationExistsError, self.fs.movedir, "copy of a", "a")
self.fs.movedir("copy of a", "a", overwrite=True)
self.assert_(not check("copy of a"))
self.assert_(check("a/1.txt"))
self.assert_(check("a/2.txt"))
self.assert_(check("a/3.txt"))
self.assert_(check("a/foo/bar/baz.txt"))
def test_cant_copy_from_os(self):
sys_executable = os.path.abspath(os.path.realpath(sys.executable))
self.assertRaises(FSError, self.fs.copy, sys_executable, "py.exe")
def test_copyfile(self):
check = self.check
contents = b(
"If the implementation is hard to explain, it's a bad idea.")
def makefile(path, contents=contents):
self.fs.setcontents(path, contents)
def checkcontents(path, contents=contents):
check_contents = self.fs.getcontents(path, "rb")
self.assertEqual(check_contents, contents)
return contents == check_contents
self.fs.makedir("foo/bar", recursive=True)
makefile("foo/bar/a.txt")
self.assert_(check("foo/bar/a.txt"))
self.assert_(checkcontents("foo/bar/a.txt"))
# import rpdb2; rpdb2.start_embedded_debugger('password');
self.fs.copy("foo/bar/a.txt", "foo/b.txt")
self.assert_(check("foo/bar/a.txt"))
self.assert_(check("foo/b.txt"))
self.assert_(checkcontents("foo/bar/a.txt"))
self.assert_(checkcontents("foo/b.txt"))
self.fs.copy("foo/b.txt", "c.txt")
self.assert_(check("foo/b.txt"))
self.assert_(check("/c.txt"))
self.assert_(checkcontents("/c.txt"))
makefile("foo/bar/a.txt", b("different contents"))
self.assert_(checkcontents("foo/bar/a.txt", b("different contents")))
self.assertRaises(
DestinationExistsError, self.fs.copy, "foo/bar/a.txt", "/c.txt")
self.assert_(checkcontents("/c.txt"))
self.fs.copy("foo/bar/a.txt", "/c.txt", overwrite=True)
self.assert_(checkcontents("foo/bar/a.txt", b("different contents")))
self.assert_(checkcontents("/c.txt", b("different contents")))
def test_copydir(self):
check = self.check
contents = b(
"If the implementation is hard to explain, it's a bad idea.")
def makefile(path):
self.fs.setcontents(path, contents)
def checkcontents(path):
check_contents = self.fs.getcontents(path)
self.assertEqual(check_contents, contents)
return contents == check_contents
self.fs.makedir("a")
self.fs.makedir("b")
makefile("a/1.txt")
makefile("a/2.txt")
makefile("a/3.txt")
self.fs.makedir("a/foo/bar", recursive=True)
makefile("a/foo/bar/baz.txt")
self.fs.copydir("a", "copy of a")
self.assert_(check("copy of a/1.txt"))
self.assert_(check("copy of a/2.txt"))
self.assert_(check("copy of a/3.txt"))
self.assert_(check("copy of a/foo/bar/baz.txt"))
checkcontents("copy of a/1.txt")
self.assert_(check("a/1.txt"))
self.assert_(check("a/2.txt"))
self.assert_(check("a/3.txt"))
self.assert_(check("a/foo/bar/baz.txt"))
checkcontents("a/1.txt")
self.assertRaises(DestinationExistsError, self.fs.copydir, "a", "b")
self.fs.copydir("a", "b", overwrite=True)
self.assert_(check("b/1.txt"))
self.assert_(check("b/2.txt"))
self.assert_(check("b/3.txt"))
self.assert_(check("b/foo/bar/baz.txt"))
checkcontents("b/1.txt")
def test_copydir_with_dotfile(self):
check = self.check
contents = b(
"If the implementation is hard to explain, it's a bad idea.")
def makefile(path):
self.fs.setcontents(path, contents)
self.fs.makedir("a")
makefile("a/1.txt")
makefile("a/2.txt")
makefile("a/.hidden.txt")
self.fs.copydir("a", "copy of a")
self.assert_(check("copy of a/1.txt"))
self.assert_(check("copy of a/2.txt"))
self.assert_(check("copy of a/.hidden.txt"))
self.assert_(check("a/1.txt"))
self.assert_(check("a/2.txt"))
self.assert_(check("a/.hidden.txt"))
def test_readwriteappendseek(self):
def checkcontents(path, check_contents):
read_contents = self.fs.getcontents(path, "rb")
self.assertEqual(read_contents, check_contents)
return read_contents == check_contents
test_strings = [b("Beautiful is better than ugly."),
b("Explicit is better than implicit."),
b("Simple is better than complex.")]
all_strings = b("").join(test_strings)
self.assertRaises(ResourceNotFoundError, self.fs.open, "a.txt", "r")
self.assert_(not self.fs.exists("a.txt"))
f1 = self.fs.open("a.txt", "wb")
pos = 0
for s in test_strings:
f1.write(s)
pos += len(s)
self.assertEqual(pos, f1.tell())
f1.close()
self.assert_(self.fs.exists("a.txt"))
self.assert_(checkcontents("a.txt", all_strings))
f2 = self.fs.open("b.txt", "wb")
f2.write(test_strings[0])
f2.close()
self.assert_(checkcontents("b.txt", test_strings[0]))
f3 = self.fs.open("b.txt", "ab")
# On win32, tell() gives zero until you actually write to the file
# self.assertEquals(f3.tell(),len(test_strings[0]))
f3.write(test_strings[1])
self.assertEquals(f3.tell(), len(test_strings[0])+len(test_strings[1]))
f3.write(test_strings[2])
self.assertEquals(f3.tell(), len(all_strings))
f3.close()
self.assert_(checkcontents("b.txt", all_strings))
f4 = self.fs.open("b.txt", "wb")
f4.write(test_strings[2])
f4.close()
self.assert_(checkcontents("b.txt", test_strings[2]))
f5 = self.fs.open("c.txt", "wb")
for s in test_strings:
f5.write(s+b("\n"))
f5.close()
f6 = self.fs.open("c.txt", "rb")
for s, t in zip(f6, test_strings):
self.assertEqual(s, t+b("\n"))
f6.close()
f7 = self.fs.open("c.txt", "rb")
f7.seek(13)
word = f7.read(6)
self.assertEqual(word, b("better"))
f7.seek(1, os.SEEK_CUR)
word = f7.read(4)
self.assertEqual(word, b("than"))
f7.seek(-9, os.SEEK_END)
word = f7.read(7)
self.assertEqual(word, b("complex"))
f7.close()
self.assertEqual(self.fs.getcontents("a.txt", "rb"), all_strings)
def test_truncate(self):
def checkcontents(path, check_contents):
read_contents = self.fs.getcontents(path, "rb")
self.assertEqual(read_contents, check_contents)
return read_contents == check_contents
self.fs.setcontents("hello", b("world"))
checkcontents("hello", b("world"))
self.fs.setcontents("hello", b("hi"))
checkcontents("hello", b("hi"))
self.fs.setcontents("hello", b("1234567890"))
checkcontents("hello", b("1234567890"))
with self.fs.open("hello", "rb+") as f:
f.truncate(7)
checkcontents("hello", b("1234567"))
with self.fs.open("hello", "rb+") as f:
f.seek(5)
f.truncate()
checkcontents("hello", b("12345"))
def test_truncate_to_larger_size(self):
with self.fs.open("hello", "wb") as f:
f.truncate(30)
self.assertEquals(self.fs.getsize("hello"), 30)
# Some file systems (FTPFS) don't support both reading and writing
if self.fs.getmeta('file.read_and_write', True):
with self.fs.open("hello", "rb+") as f:
f.seek(25)
f.write(b("123456"))
with self.fs.open("hello", "rb") as f:
f.seek(25)
self.assertEquals(f.read(), b("123456"))
def test_write_past_end_of_file(self):
if self.fs.getmeta('file.read_and_write', True):
with self.fs.open("write_at_end", "wb") as f:
f.seek(25)
f.write(b("EOF"))
with self.fs.open("write_at_end", "rb") as f:
self.assertEquals(f.read(), b("\x00")*25 + b("EOF"))
def test_with_statement(self):
# This is a little tricky since 'with' is actually new syntax.
# We use eval() to make this method safe for old python versions.
import sys
        if sys.version_info >= (2, 5):
# A successful 'with' statement
contents = "testing the with statement"
code = "from __future__ import with_statement\n"
code += "with self.fs.open('f.txt','wb-') as testfile:\n"
code += " testfile.write(contents)\n"
code += "self.assertEquals(self.fs.getcontents('f.txt', 'rb'),contents)"
code = compile(code, "<string>", 'exec')
eval(code)
# A 'with' statement raising an error
contents = "testing the with statement"
code = "from __future__ import with_statement\n"
code += "with self.fs.open('f.txt','wb-') as testfile:\n"
code += " testfile.write(contents)\n"
code += " raise ValueError\n"
code = compile(code, "<string>", 'exec')
self.assertRaises(ValueError, eval, code, globals(), locals())
self.assertEquals(self.fs.getcontents('f.txt', 'rb'), contents)
def test_pickling(self):
if self.fs.getmeta('pickle_contents', True):
self.fs.setcontents("test1", b("hello world"))
fs2 = pickle.loads(pickle.dumps(self.fs))
self.assert_(fs2.isfile("test1"))
fs3 = pickle.loads(pickle.dumps(self.fs, -1))
self.assert_(fs3.isfile("test1"))
else:
# Just make sure it doesn't throw an exception
fs2 = pickle.loads(pickle.dumps(self.fs))
def test_big_file(self):
"""Test handling of a big file (1MB)"""
chunk_size = 1024 * 256
num_chunks = 4
def chunk_stream():
"""Generate predictable-but-randomy binary content."""
r = random.Random(0)
randint = r.randint
int2byte = six.int2byte
for _i in xrange(num_chunks):
c = b("").join(int2byte(randint(
0, 255)) for _j in xrange(chunk_size//8))
yield c * 8
f = self.fs.open("bigfile", "wb")
try:
for chunk in chunk_stream():
f.write(chunk)
finally:
f.close()
chunks = chunk_stream()
f = self.fs.open("bigfile", "rb")
try:
try:
while True:
if chunks.next() != f.read(chunk_size):
assert False, "bigfile was corrupted"
except StopIteration:
if f.read() != b(""):
assert False, "bigfile was corrupted"
finally:
f.close()
def test_settimes(self):
def cmp_datetimes(d1, d2):
"""Test datetime objects are the same to within the timestamp accuracy"""
dts1 = time.mktime(d1.timetuple())
dts2 = time.mktime(d2.timetuple())
return int(dts1) == int(dts2)
d1 = datetime.datetime(2010, 6, 20, 11, 0, 9, 987699)
d2 = datetime.datetime(2010, 7, 5, 11, 0, 9, 500000)
self.fs.setcontents('/dates.txt', b('check dates'))
# If the implementation supports settimes, check that the times
# can be set and then retrieved
try:
self.fs.settimes('/dates.txt', d1, d2)
except UnsupportedError:
pass
else:
info = self.fs.getinfo('/dates.txt')
self.assertTrue(cmp_datetimes(d1, info['accessed_time']))
self.assertTrue(cmp_datetimes(d2, info['modified_time']))
def test_removeroot(self):
self.assertRaises(RemoveRootError, self.fs.removedir, "/")
def test_zero_read(self):
"""Test read(0) returns empty string"""
self.fs.setcontents('foo.txt', b('Hello, World'))
with self.fs.open('foo.txt', 'rb') as f:
self.assert_(len(f.read(0)) == 0)
with self.fs.open('foo.txt', 'rt') as f:
self.assert_(len(f.read(0)) == 0)
# May be disabled - see end of file
class ThreadingTestCases(object):
"""Testcases for thread-safety of FS implementations."""
# These are either too slow to be worth repeating,
# or cannot possibly break cross-thread.
_dont_retest = ("test_pickling", "test_multiple_overwrite",)
__lock = threading.RLock()
def _yield(self):
# time.sleep(0.001)
# Yields without a delay
time.sleep(0)
def _lock(self):
self.__lock.acquire()
def _unlock(self):
self.__lock.release()
def _makeThread(self, func, errors):
def runThread():
try:
func()
except Exception:
errors.append(sys.exc_info())
thread = threading.Thread(target=runThread)
thread.daemon = True
return thread
def _runThreads(self, *funcs):
check_interval = sys.getcheckinterval()
sys.setcheckinterval(1)
try:
errors = []
threads = [self._makeThread(f, errors) for f in funcs]
for t in threads:
t.start()
for t in threads:
t.join()
for (c, e, t) in errors:
raise e, None, t
finally:
sys.setcheckinterval(check_interval)
def test_setcontents_threaded(self):
def setcontents(name, contents):
f = self.fs.open(name, "wb")
self._yield()
try:
f.write(contents)
self._yield()
finally:
f.close()
def thread1():
c = b("thread1 was 'ere")
setcontents("thread1.txt", c)
self.assertEquals(self.fs.getcontents("thread1.txt", 'rb'), c)
def thread2():
c = b("thread2 was 'ere")
setcontents("thread2.txt", c)
self.assertEquals(self.fs.getcontents("thread2.txt", 'rb'), c)
self._runThreads(thread1, thread2)
def test_setcontents_threaded_samefile(self):
def setcontents(name, contents):
f = self.fs.open(name, "wb")
self._yield()
try:
f.write(contents)
self._yield()
finally:
f.close()
def thread1():
c = b("thread1 was 'ere")
setcontents("threads.txt", c)
self._yield()
self.assertEquals(self.fs.listdir("/"), ["threads.txt"])
def thread2():
c = b("thread2 was 'ere")
setcontents("threads.txt", c)
self._yield()
self.assertEquals(self.fs.listdir("/"), ["threads.txt"])
def thread3():
c = b("thread3 was 'ere")
setcontents("threads.txt", c)
self._yield()
self.assertEquals(self.fs.listdir("/"), ["threads.txt"])
try:
self._runThreads(thread1, thread2, thread3)
except ResourceLockedError:
# that's ok, some implementations don't support concurrent writes
pass
def test_cases_in_separate_dirs(self):
class TestCases_in_subdir(self.__class__, unittest.TestCase):
"""Run all testcases against a subdir of self.fs"""
def __init__(this, subdir):
super(TestCases_in_subdir, this).__init__("test_listdir")
this.subdir = subdir
for meth in dir(this):
if not meth.startswith("test_"):
continue
if meth in self._dont_retest:
continue
if not hasattr(FSTestCases, meth):
continue
if self.fs.exists(subdir):
self.fs.removedir(subdir, force=True)
self.assertFalse(self.fs.isdir(subdir))
self.assertTrue(self.fs.isdir("/"))
self.fs.makedir(subdir)
self._yield()
getattr(this, meth)()
@property
def fs(this):
return self.fs.opendir(this.subdir)
def check(this, p):
return self.check(pathjoin(this.subdir, relpath(p)))
def thread1():
TestCases_in_subdir("thread1")
def thread2():
TestCases_in_subdir("thread2")
def thread3():
TestCases_in_subdir("thread3")
self._runThreads(thread1, thread2, thread3)
def test_makedir_winner(self):
errors = []
def makedir():
try:
self.fs.makedir("testdir")
except DestinationExistsError, e:
errors.append(e)
def makedir_noerror():
try:
self.fs.makedir("testdir", allow_recreate=True)
except DestinationExistsError, e:
errors.append(e)
def removedir():
try:
self.fs.removedir("testdir")
except (ResourceNotFoundError, ResourceLockedError), e:
errors.append(e)
# One thread should succeed, one should error
self._runThreads(makedir, makedir)
self.assertEquals(len(errors), 1)
self.fs.removedir("testdir")
# One thread should succeed, two should error
errors = []
self._runThreads(makedir, makedir, makedir)
if len(errors) != 2:
raise AssertionError(errors)
self.fs.removedir("testdir")
# All threads should succeed
errors = []
self._runThreads(makedir_noerror, makedir_noerror, makedir_noerror)
self.assertEquals(len(errors), 0)
self.assertTrue(self.fs.isdir("testdir"))
self.fs.removedir("testdir")
# makedir() can beat removedir() and vice-versa
errors = []
self._runThreads(makedir, removedir)
if self.fs.isdir("testdir"):
self.assertEquals(len(errors), 1)
self.assertFalse(isinstance(errors[0], DestinationExistsError))
self.fs.removedir("testdir")
else:
self.assertEquals(len(errors), 0)
def test_concurrent_copydir(self):
self.fs.makedir("a")
self.fs.makedir("a/b")
self.fs.setcontents("a/hello.txt", b("hello world"))
self.fs.setcontents("a/guido.txt", b("is a space alien"))
self.fs.setcontents("a/b/parrot.txt", b("pining for the fiords"))
def copydir():
self._yield()
self.fs.copydir("a", "copy of a")
def copydir_overwrite():
self._yield()
self.fs.copydir("a", "copy of a", overwrite=True)
# This should error out since we're not overwriting
self.assertRaises(
DestinationExistsError, self._runThreads, copydir, copydir)
self.assert_(self.fs.isdir('a'))
self.assert_(self.fs.isdir('a'))
copydir_overwrite()
self.assert_(self.fs.isdir('a'))
# This should run to completion and give a valid state, unless
# files get locked when written to.
try:
self._runThreads(copydir_overwrite, copydir_overwrite)
except ResourceLockedError:
pass
self.assertTrue(self.fs.isdir("copy of a"))
self.assertTrue(self.fs.isdir("copy of a/b"))
self.assertEqual(self.fs.getcontents(
"copy of a/b/parrot.txt", 'rb'), b("pining for the fiords"))
self.assertEqual(self.fs.getcontents(
"copy of a/hello.txt", 'rb'), b("hello world"))
self.assertEqual(self.fs.getcontents(
"copy of a/guido.txt", 'rb'), b("is a space alien"))
def test_multiple_overwrite(self):
contents = [b("contents one"), b(
"contents the second"), b("number three")]
def thread1():
for i in xrange(30):
for c in contents:
self.fs.setcontents("thread1.txt", c)
self.assertEquals(self.fs.getsize("thread1.txt"), len(c))
self.assertEquals(self.fs.getcontents(
"thread1.txt", 'rb'), c)
def thread2():
for i in xrange(30):
for c in contents:
self.fs.setcontents("thread2.txt", c)
self.assertEquals(self.fs.getsize("thread2.txt"), len(c))
self.assertEquals(self.fs.getcontents(
"thread2.txt", 'rb'), c)
self._runThreads(thread1, thread2)
# Uncomment to temporarily disable threading tests
# class ThreadingTestCases(object):
# _dont_retest = ()
|
api.py
|
import json
import time
import random
import threading
import signal
import os
from vk_api.http_util import HttpUtil
from urllib.parse import urlparse, parse_qs
from vk_api.logger import Logger
from vk_api.tokenizer import Tokenizer
class VkApi:
def __init__(self, options):
"""
        Create the API object
:param options:
"""
self.commands = []
self.options = options
self.authorize_url = "https://oauth.vk.com/authorize"
self.redirect_uri = "http://oauth.vk.com/blank.html"
self.action = "https://login.vk.com/?act=login&soft=1&utf8=1"
self.method_action = "https://api.vk.com/method/"
self.auth_config = {
"client_id": self.options['client_id'],
"redirect_uri": self.redirect_uri,
"display": "mobile",
"response_type": "token",
"scope": self.options['scope'],
"v": self.options['api_v'],
"revoke": 1
}
self.token = None
self.logger = Logger.get_logger('vk_api')
self.symbol_command = '@'
self.symbol_answer = '#'
signal.signal(signal.SIGINT, self.handler_signal)
def get_token(self, api):
"""
        Check whether a token has already been obtained,
        or fetch it again if it is missing
:return:
"""
if 'token' not in self.options:
token = Tokenizer('.token', api)
token.token_init()
else:
            self.logger.info('Authorization via group token')
self.set_token(self.options['token'])
def set_token(self, token):
"""
        Set the token
:param token:
:return:
"""
if token:
self.token = token
else:
            self.logger.error('Check the configuration settings!')
exit()
def login(self):
"""
        Log in and obtain a VK API token
"""
        self.logger.log('Logging in')
http = HttpUtil()
auth_page = http.get_parse(self.authorize_url, self.auth_config)
form_el = {}
for el in auth_page['parse'].find_all('input'):
if 'name' in el.attrs and 'value' in el.attrs:
form_el.update({el.attrs['name']: el.attrs['value']})
form_el.update({'email': self.options['login'], 'pass': self.options['password']})
response = http.parse(http.post(self.action, form_el)['content'])
action = response.find('div', class_='form_item').form.attrs['action']
response_get_token = http.get(action)
url_token = urlparse(response_get_token['url'])
try:
self.set_token(parse_qs(url_token.fragment, encoding='utf8')['access_token'][0])
except KeyError:
self.logger.error(json.loads(response_get_token['content']))
if self.token is not None:
            self.logger.log('Authorization complete')
else:
            self.logger.error('Authorization error')
exit()
def query(self, name, params):
"""
        Execute a request against the VK API
:param name:
:param params:
:return:
"""
http = HttpUtil()
query_param = []
for k, v in params.items():
query_param.append(str(str(k) + "=" + str(v)))
url = "https://api.vk.com/method/" + name + "?" + '&'.join(query_param) + \
"&access_token=" + self.token + "&v=" + self.options['api_v']
query = http.get(url)
response = json.loads(query['content'])
time.sleep(self.timeout())
if 'error' in response:
return json.loads(query['content'])['error']
else:
return json.loads(query['content'])['response']
def get_long_poll(self):
"""
        Initialize getLongPoll
:return:
"""
pts = self.query('groups.getLongPollServer', {'group_id': self.options['group_id']})
return pts
def get_ts(self, server, key, ts):
"""
        Get the TS value for the getLongPoll API
:param server:
:param key:
:param ts:
:return:
"""
http = HttpUtil()
url_server = server + '?act=a_check&key=' + key + '&ts=' + ts + '&wait=25'
response = http.get(url_server)
return json.loads(response['content'])
def init_long_poll(self):
"""
        Implementation of the getLongPoll connection loop
:return:
"""
pts = self.get_long_poll()
key = pts['key']
server = pts['server']
while True:
pts = self.get_ts(server, key, pts['ts'])
if 'updates' not in pts:
pts = self.get_long_poll()
key = pts['key']
server = pts['server']
else:
self.middleware(pts['updates'])
def long_poll(self):
"""
        Initialize and start the LongPoll thread
:return:
"""
thread = threading.Thread(target=self.init_long_poll)
thread.start()
        self.logger.info('Long poll started')
def middleware(self, items):
"""
        Handle incoming events
:param items:
:return:
"""
if items is not None:
for item in items:
thread = threading.Thread(target=self.thread_object, args=[self.is_command, item])
thread.start()
def handler_signal(self, signal_proc, frame):
"""
        Kill the process
:param signal_proc:
:param frame:
:return:
"""
pid = os.getpid()
os.kill(pid, signal.SIGKILL)
def register_commands(self, *commands):
"""
        Register commands
:param commands:
:return:
"""
for cmd in commands:
command = cmd()
self.commands.append({'name': command.name, 'event': command.event,
'result': command.result, 'filter': command.filter})
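    # A hypothetical command sketch (class name and values below are illustrative,
    # not part of this module); each command must expose the name, event, filter
    # and result attributes collected above:
    #
    #     class PingCommand:
    #         name = 'ping'
    #         event = 'message_new'
    #
    #         @staticmethod
    #         def filter(message, symbol_command, symbol_answer):
    #             return message.get('text', '').startswith(symbol_command + 'ping')
    #
    #         @staticmethod
    #         def result(api, item):
    #             return 'pong'
    #
    #     api.register_commands(PingCommand)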
def register_symbol_command(self, symbol):
"""
        Register the command prefix symbol
:param symbol:
:return:
"""
self.symbol_command = symbol
def register_symbol_answer(self, symbol):
"""
        Register the answer prefix symbol
:param symbol:
:return:
"""
self.symbol_answer = symbol
def is_command(self, item):
"""
        Check whether the message is a command
:param item:
:return:
"""
for cmd in self.commands:
event = item['type']
if event == cmd['event']:
if cmd['filter'](item['object'], self.symbol_command, self.symbol_answer):
                    response = cmd['result'](self, item)
                    if response:
                        self.replay(response, item)
def replay(self, text, item):
"""
        Reply, if the command provides one
:param text:
:param item:
:return:
"""
peer_id = item['object']['peer_id']
random_id = random.randint(11111111111, 99999999999)
message = text
self.query('messages.send', {'peer_id': peer_id, 'random_id': random_id, 'message': message,
'group_id': self.options['group_id']})
def timeout(self):
"""
        Return the timeout between API requests
:return:
"""
return self.options['max_timeout']
@staticmethod
def thread_object(cmd, *args):
"""
        Thread target wrapper
:param cmd:
:param args:
:return:
"""
cmd(*args)
exit()
|
split_video.py
|
from multiprocessing.context import Process
import os
import glob
from moviepy.editor import VideoFileClip
path = "../GRID/"
origin = "../video_org"
file = "GRID_files.txt"
def get_duration(filename):
clip = VideoFileClip(filename)
return clip.duration
def run(origins, videos, dests):
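    """Cut each source video into short clips with ffmpeg and append the generated clip paths to GRID_files.txt."""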
clips = []
for i in range(0, len(videos)):
j = 0
rate = get_duration(origins[i])
n = int(rate / 3.3)
pc = rate % 3.3
for j in range(0, n):
base = j * 4
cmd = "ffmpeg -ss {} -i {} -r 29.97 -to {} -vcodec copy -acodec copy -async 1 -strict -2 {}.mp4 -y".format(base, origins[i], 3.3, os.path.join(dests[i], str(j)))
clips.append(os.path.join(dests[i], str(j)) + '.mp4')
os.system(cmd)
base = n * 4
cmd = "ffmpeg -ss {} -i {} -r 29.97 -to {} -vcodec copy -acodec copy -async 1 -strict -2 {}.mp4 -y".format(base, origins[i], 3.3, os.path.join(dests[i], str(n)))
clips.append(os.path.join(dests[i], str(n)) + '.mp4')
os.system(cmd)
with open(file, 'a') as f:
for clip in clips:
f.write(clip)
f.write('\n')
if __name__ == '__main__':
files = glob.glob(origin + "/*.mp4")
videos = [os.path.basename(file).replace(".mp4", "") for file in files]
paths = [os.path.join(path, video + '/') for video in videos]
for path in paths:
        if not os.path.exists(path):
os.makedirs(path)
processes = []
n_p = 1
for i in range(n_p):
p = Process(target=run, args=(files, videos, paths))
p.start()
processes.append(p)
for p in processes:
p.join()
|
massReport.py
|
from colored import fg, attr
import requests
import threading
import time
import random
r = fg(241) # Setup color variables
r2 = fg(255)
b = fg(31)
w = fg(15)
def start():
token = input(f"\n {r2}[{b}?{r2}] Token: ")
guildId = input(f" {r2}[{b}?{r2}] Server Id: ")
channelId = input(f" {r2}[{b}?{r2}] Channel Id: ")
messageId = input(f" {r2}[{b}?{r2}] Message Id: ")
reason = input(f" {r2}[{b}?{r2}] Reason: ")
headers = {
"Content-Type" : "application/json",
"Authorization" : token,
"User-Agent" : "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:87.0) Gecko/20100101 Firefox/87.0"
}
payload = {"guild_id" : guildId, "channel_id" : channelId, "message_id" : messageId, "reason" : reason}
def report():
while True:
response = requests.post(
'https://discord.com/api/v6/report',
headers = headers,
json = payload
)
if response.status_code == 201:
print(f" {r2}[{b}+{r2}] Report sent successfully")
elif response.status_code == 429:
print(f" {r2}[{b}!{r2}] Ratelimited, waiting 5 seconds")
time.sleep(5)
elif response.status_code == 401:
print(f" {r2}[{b}!{r2}] Invalid token")
return
else:
print(f" {r2}[{b}!{r2}] Unknown error: {response.status_code}")
for i in range(500):
threading.Thread(target = report).start()
if __name__ == '__main__':
start()
|
tst_flash.py
|
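# LED flash smoke test: run the flash pattern in a background thread for about a minute, then switch the LEDs off and release the GPIO pins.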
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
from lib_utils import *
from lib_leds import LEDS
import time
import threading
leds = LEDS()
threading.Thread(target=leds.flash).start()
leds.flash_on()
t_start = time.time()
while time.time() - t_start < 60:
time.sleep(0.5)
leds.off()
leds.terminate()
GPIO.cleanup()
|
redshift.py
|
# pylint: disable=C0111,R0903
"""Displays the current color temperature of redshift
Requires the following executable:
* redshift
"""
import threading
import bumblebee.input
import bumblebee.output
import bumblebee.engine
import bumblebee.util
def is_terminated():
for thread in threading.enumerate():
if thread.name == "MainThread" and not thread.is_alive():
return True
return False
def get_redshift_value(widget):
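    """Worker loop: wait on the widget's condition variable, run 'redshift -p' and store temperature, day/night state and transition on the widget."""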
while True:
if is_terminated():
return
widget.get("condition").acquire()
while True:
try:
widget.get("condition").wait(1)
except RuntimeError:
continue
break
widget.get("condition").release()
try:
res = bumblebee.util.execute("redshift -p")
except Exception:
res = ""
widget.set("temp", "n/a")
widget.set("transition", None)
widget.set("state", "day")
for line in res.split("\n"):
line = line.lower()
if "temperature" in line:
widget.set("temp", line.split(" ")[2])
if "period" in line:
state = line.split(" ")[1]
if "day" in state:
widget.set("state", "day")
elif "night" in state:
widget.set("state", "night")
else:
widget.set("state", "transition")
widget.set("transition", " ".join(line.split(" ")[2:]))
class Module(bumblebee.engine.Module):
def __init__(self, engine, config):
widget = bumblebee.output.Widget(full_text=self.text)
super(Module, self).__init__(engine, config, widget)
self._text = ""
self._condition = threading.Condition()
widget.set("condition", self._condition)
self._thread = threading.Thread(target=get_redshift_value, args=(widget,))
self._thread.start()
self._condition.acquire()
self._condition.notify()
self._condition.release()
def text(self, widget):
return "{}".format(self._text)
def update(self, widgets):
widget = widgets[0]
self._condition.acquire()
self._condition.notify()
self._condition.release()
temp = widget.get("temp", "n/a")
self._text = temp
transition = widget.get("transition", None)
if transition:
self._text = "{} {}".format(temp, transition)
def state(self, widget):
return widget.get("state", None)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
pserve.py
|
# (c) 2005 Ian Bicking and contributors; written for Paste
# (http://pythonpaste.org) Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
#
# For discussion of daemonizing:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731
#
# Code taken also from QP: http://www.mems-exchange.org/software/qp/ From
# lib/site.py
import atexit
import ctypes
import errno
import logging
import optparse
import os
import re
import subprocess
import sys
import textwrap
import threading
import time
import traceback
from paste.deploy import loadserver
from paste.deploy import loadapp
from pyramid.compat import PY3
from pyramid.compat import WIN
from pyramid.paster import setup_logging
from pyramid.scripts.common import parse_vars
MAXFD = 1024
if WIN and not hasattr(os, 'kill'): # pragma: no cover
# py 2.6 on windows
def kill(pid, sig=None):
"""kill function for Win32"""
# signal is ignored, semibogus raise message
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 0, pid)
if (0 == kernel32.TerminateProcess(handle, 0)):
raise OSError('No such process %s' % pid)
else:
kill = os.kill
def main(argv=sys.argv, quiet=False):
command = PServeCommand(argv, quiet=quiet)
return command.run()
class DaemonizeException(Exception):
pass
class PServeCommand(object):
usage = '%prog config_uri [start|stop|restart|status] [var=value]'
description = """\
This command serves a web application that uses a PasteDeploy
configuration file for the server and application.
If start/stop/restart is given, then --daemon is implied, and it will
start (normal operation), stop (--stop-daemon), or do both.
You can also include variable assignments like 'http_port=8080'
and then use %(http_port)s in your config files.
"""
default_verbosity = 1
parser = optparse.OptionParser(
usage,
description=textwrap.dedent(description)
)
parser.add_option(
'-n', '--app-name',
dest='app_name',
metavar='NAME',
help="Load the named application (default main)")
parser.add_option(
'-s', '--server',
dest='server',
metavar='SERVER_TYPE',
help="Use the named server.")
parser.add_option(
'--server-name',
dest='server_name',
metavar='SECTION_NAME',
help=("Use the named server as defined in the configuration file "
"(default: main)"))
if hasattr(os, 'fork'):
parser.add_option(
'--daemon',
dest="daemon",
action="store_true",
help="Run in daemon (background) mode")
parser.add_option(
'--pid-file',
dest='pid_file',
metavar='FILENAME',
help=("Save PID to file (default to pyramid.pid if running in "
"daemon mode)"))
parser.add_option(
'--log-file',
dest='log_file',
metavar='LOG_FILE',
help="Save output to the given log file (redirects stdout)")
parser.add_option(
'--reload',
dest='reload',
action='store_true',
help="Use auto-restart file monitor")
parser.add_option(
'--reload-interval',
dest='reload_interval',
default=1,
help=("Seconds between checking files (low number can cause "
"significant CPU usage)"))
parser.add_option(
'--monitor-restart',
dest='monitor_restart',
action='store_true',
help="Auto-restart server if it dies")
parser.add_option(
'--status',
action='store_true',
dest='show_status',
help="Show the status of the (presumably daemonized) server")
parser.add_option(
'-v', '--verbose',
default=default_verbosity,
dest='verbose',
action='count',
help="Set verbose level (default "+str(default_verbosity)+")")
parser.add_option(
'-q', '--quiet',
action='store_const',
const=0,
dest='verbose',
help="Suppress verbose output")
if hasattr(os, 'setuid'):
# I don't think these are available on Windows
parser.add_option(
'--user',
dest='set_user',
metavar="USERNAME",
help="Set the user (usually only possible when run as root)")
parser.add_option(
'--group',
dest='set_group',
metavar="GROUP",
help="Set the group (usually only possible when run as root)")
parser.add_option(
'--stop-daemon',
dest='stop_daemon',
action='store_true',
help=('Stop a daemonized server (given a PID file, or default '
'pyramid.pid file)'))
_scheme_re = re.compile(r'^[a-z][a-z]+:', re.I)
_reloader_environ_key = 'PYTHON_RELOADER_SHOULD_RUN'
_monitor_environ_key = 'PASTE_MONITOR_SHOULD_RUN'
possible_subcommands = ('start', 'stop', 'restart', 'status')
def __init__(self, argv, quiet=False):
self.options, self.args = self.parser.parse_args(argv[1:])
if quiet:
self.options.verbose = 0
def out(self, msg): # pragma: no cover
if self.options.verbose > 0:
print(msg)
def get_options(self):
if (len(self.args) > 1
and self.args[1] in self.possible_subcommands):
restvars = self.args[2:]
else:
restvars = self.args[1:]
return parse_vars(restvars)
def run(self): # pragma: no cover
if self.options.stop_daemon:
return self.stop_daemon()
if not hasattr(self.options, 'set_user'):
# Windows case:
self.options.set_user = self.options.set_group = None
# @@: Is this the right stage to set the user at?
self.change_user_group(
self.options.set_user, self.options.set_group)
if not self.args:
self.out('You must give a config file')
return 2
app_spec = self.args[0]
if (len(self.args) > 1
and self.args[1] in self.possible_subcommands):
cmd = self.args[1]
else:
cmd = None
if self.options.reload:
if os.environ.get(self._reloader_environ_key):
if self.options.verbose > 1:
self.out('Running reloading file monitor')
install_reloader(int(self.options.reload_interval), [app_spec])
# if self.requires_config_file:
# watch_file(self.args[0])
else:
return self.restart_with_reloader()
if cmd not in (None, 'start', 'stop', 'restart', 'status'):
self.out(
'Error: must give start|stop|restart (not %s)' % cmd)
return 2
if cmd == 'status' or self.options.show_status:
return self.show_status()
if cmd == 'restart' or cmd == 'stop':
result = self.stop_daemon()
if result:
if cmd == 'restart':
self.out("Could not stop daemon; aborting")
else:
self.out("Could not stop daemon")
return result
if cmd == 'stop':
return result
self.options.daemon = True
if cmd == 'start':
self.options.daemon = True
app_name = self.options.app_name
vars = self.get_options()
if not self._scheme_re.search(app_spec):
app_spec = 'config:' + app_spec
server_name = self.options.server_name
if self.options.server:
server_spec = 'egg:pyramid'
assert server_name is None
server_name = self.options.server
else:
server_spec = app_spec
base = os.getcwd()
if getattr(self.options, 'daemon', False):
if not self.options.pid_file:
self.options.pid_file = 'pyramid.pid'
if not self.options.log_file:
self.options.log_file = 'pyramid.log'
# Ensure the log file is writeable
if self.options.log_file:
try:
writeable_log_file = open(self.options.log_file, 'a')
except IOError as ioe:
msg = 'Error: Unable to write to log file: %s' % ioe
raise ValueError(msg)
writeable_log_file.close()
# Ensure the pid file is writeable
if self.options.pid_file:
try:
writeable_pid_file = open(self.options.pid_file, 'a')
except IOError as ioe:
msg = 'Error: Unable to write to pid file: %s' % ioe
raise ValueError(msg)
writeable_pid_file.close()
if getattr(self.options, 'daemon', False):
try:
self.daemonize()
except DaemonizeException as ex:
if self.options.verbose > 0:
self.out(str(ex))
return 2
if (self.options.monitor_restart
and not os.environ.get(self._monitor_environ_key)):
return self.restart_with_monitor()
if self.options.pid_file:
self.record_pid(self.options.pid_file)
if self.options.log_file:
stdout_log = LazyWriter(self.options.log_file, 'a')
sys.stdout = stdout_log
sys.stderr = stdout_log
logging.basicConfig(stream=stdout_log)
log_fn = app_spec
if log_fn.startswith('config:'):
log_fn = app_spec[len('config:'):]
elif log_fn.startswith('egg:'):
log_fn = None
if log_fn:
log_fn = os.path.join(base, log_fn)
setup_logging(log_fn)
server = self.loadserver(server_spec, name=server_name,
relative_to=base, global_conf=vars)
app = self.loadapp(app_spec, name=app_name, relative_to=base,
global_conf=vars)
if self.options.verbose > 0:
if hasattr(os, 'getpid'):
msg = 'Starting server in PID %i.' % os.getpid()
else:
msg = 'Starting server.'
self.out(msg)
def serve():
try:
server(app)
except (SystemExit, KeyboardInterrupt) as e:
if self.options.verbose > 1:
raise
if str(e):
msg = ' ' + str(e)
else:
msg = ''
self.out('Exiting%s (-v to see traceback)' % msg)
serve()
def loadapp(self, app_spec, name, relative_to, **kw): # pragma: no cover
return loadapp(app_spec, name=name, relative_to=relative_to, **kw)
def loadserver(self, server_spec, name, relative_to, **kw):# pragma:no cover
return loadserver(
server_spec, name=name, relative_to=relative_to, **kw)
def quote_first_command_arg(self, arg): # pragma: no cover
"""
There's a bug in Windows when running an executable that's
located inside a path with a space in it. This method handles
that case, or on non-Windows systems or an executable with no
spaces, it just leaves well enough alone.
"""
if (sys.platform != 'win32' or ' ' not in arg):
# Problem does not apply:
return arg
try:
import win32api
except ImportError:
raise ValueError(
"The executable %r contains a space, and in order to "
"handle this issue you must have the win32api module "
"installed" % arg)
arg = win32api.GetShortPathName(arg)
return arg
def daemonize(self): # pragma: no cover
pid = live_pidfile(self.options.pid_file)
if pid:
raise DaemonizeException(
"Daemon is already running (PID: %s from PID file %s)"
% (pid, self.options.pid_file))
if self.options.verbose > 0:
self.out('Entering daemon mode')
pid = os.fork()
if pid:
# The forked process also has a handle on resources, so we
# *don't* want proper termination of the process, we just
# want to exit quick (which os._exit() does)
os._exit(0)
# Make this the session leader
os.setsid()
# Fork again for good measure!
pid = os.fork()
if pid:
os._exit(0)
# @@: Should we set the umask and cwd now?
import resource # Resource usage information.
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if (maxfd == resource.RLIM_INFINITY):
maxfd = MAXFD
# Iterate through and close all file descriptors.
for fd in range(0, maxfd):
try:
os.close(fd)
except OSError: # ERROR, fd wasn't open to begin with (ignored)
pass
if (hasattr(os, "devnull")):
REDIRECT_TO = os.devnull
else:
REDIRECT_TO = "/dev/null"
os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)
# Duplicate standard input to standard output and standard error.
os.dup2(0, 1) # standard output (1)
os.dup2(0, 2) # standard error (2)
def _remove_pid_file(self, written_pid, filename, verbosity):
current_pid = os.getpid()
if written_pid != current_pid:
# A forked process must be exiting, not the process that
# wrote the PID file
return
if not os.path.exists(filename):
return
with open(filename) as f:
content = f.read().strip()
try:
pid_in_file = int(content)
except ValueError:
pass
else:
if pid_in_file != current_pid:
msg = "PID file %s contains %s, not expected PID %s"
self.out(msg % (filename, pid_in_file, current_pid))
return
if verbosity > 0:
self.out("Removing PID file %s" % filename)
try:
os.unlink(filename)
return
except OSError as e:
# Record, but don't give traceback
self.out("Cannot remove PID file: (%s)" % e)
# well, at least lets not leave the invalid PID around...
try:
with open(filename, 'w') as f:
f.write('')
except OSError as e:
self.out('Stale PID left in file: %s (%s)' % (filename, e))
else:
self.out('Stale PID removed')
def record_pid(self, pid_file):
pid = os.getpid()
if self.options.verbose > 1:
self.out('Writing PID %s to %s' % (pid, pid_file))
with open(pid_file, 'w') as f:
f.write(str(pid))
atexit.register(self._remove_pid_file, pid, pid_file, self.options.verbose)
def stop_daemon(self): # pragma: no cover
pid_file = self.options.pid_file or 'pyramid.pid'
if not os.path.exists(pid_file):
self.out('No PID file exists in %s' % pid_file)
return 1
pid = read_pidfile(pid_file)
if not pid:
self.out("Not a valid PID file in %s" % pid_file)
return 1
pid = live_pidfile(pid_file)
if not pid:
self.out("PID in %s is not valid (deleting)" % pid_file)
try:
os.unlink(pid_file)
except (OSError, IOError) as e:
self.out("Could not delete: %s" % e)
return 2
return 1
for j in range(10):
if not live_pidfile(pid_file):
break
import signal
kill(pid, signal.SIGTERM)
time.sleep(1)
else:
self.out("failed to kill web process %s" % pid)
return 3
if os.path.exists(pid_file):
os.unlink(pid_file)
return 0
def show_status(self): # pragma: no cover
pid_file = self.options.pid_file or 'pyramid.pid'
if not os.path.exists(pid_file):
self.out('No PID file %s' % pid_file)
return 1
pid = read_pidfile(pid_file)
if not pid:
self.out('No PID in file %s' % pid_file)
return 1
pid = live_pidfile(pid_file)
if not pid:
self.out('PID %s in %s is not running' % (pid, pid_file))
return 1
self.out('Server running in PID %s' % pid)
return 0
def restart_with_reloader(self): # pragma: no cover
self.restart_with_monitor(reloader=True)
def restart_with_monitor(self, reloader=False): # pragma: no cover
if self.options.verbose > 0:
if reloader:
self.out('Starting subprocess with file monitor')
else:
self.out('Starting subprocess with monitor parent')
while 1:
args = [self.quote_first_command_arg(sys.executable)] + sys.argv
new_environ = os.environ.copy()
if reloader:
new_environ[self._reloader_environ_key] = 'true'
else:
new_environ[self._monitor_environ_key] = 'true'
proc = None
try:
try:
_turn_sigterm_into_systemexit()
proc = subprocess.Popen(args, env=new_environ)
exit_code = proc.wait()
proc = None
except KeyboardInterrupt:
self.out('^C caught in monitor process')
if self.options.verbose > 1:
raise
return 1
finally:
if proc is not None:
import signal
try:
kill(proc.pid, signal.SIGTERM)
except (OSError, IOError):
pass
if reloader:
# Reloader always exits with code 3; but if we are
# a monitor, any exit code will restart
if exit_code != 3:
return exit_code
if self.options.verbose > 0:
self.out('%s %s %s' % ('-' * 20, 'Restarting', '-' * 20))
def change_user_group(self, user, group): # pragma: no cover
if not user and not group:
return
import pwd, grp
uid = gid = None
if group:
try:
gid = int(group)
group = grp.getgrgid(gid).gr_name
except ValueError:
import grp
try:
entry = grp.getgrnam(group)
except KeyError:
raise ValueError(
"Bad group: %r; no such group exists" % group)
gid = entry.gr_gid
try:
uid = int(user)
user = pwd.getpwuid(uid).pw_name
except ValueError:
try:
entry = pwd.getpwnam(user)
except KeyError:
raise ValueError(
"Bad username: %r; no such user exists" % user)
if not gid:
gid = entry.pw_gid
uid = entry.pw_uid
if self.options.verbose > 0:
self.out('Changing user to %s:%s (%s:%s)' % (
user, group or '(unknown)', uid, gid))
if gid:
os.setgid(gid)
if uid:
os.setuid(uid)
class LazyWriter(object):
"""
File-like object that opens a file lazily when it is first written
to.
"""
def __init__(self, filename, mode='w'):
self.filename = filename
self.fileobj = None
self.lock = threading.Lock()
self.mode = mode
def open(self):
if self.fileobj is None:
with self.lock:
self.fileobj = open(self.filename, self.mode)
return self.fileobj
def close(self):
fileobj = self.fileobj
if fileobj is not None:
fileobj.close()
def __del__(self):
self.close()
def write(self, text):
fileobj = self.open()
fileobj.write(text)
fileobj.flush()
def writelines(self, text):
fileobj = self.open()
fileobj.writelines(text)
fileobj.flush()
def flush(self):
self.open().flush()
def live_pidfile(pidfile): # pragma: no cover
"""(pidfile:str) -> int | None
Returns an int found in the named file, if there is one,
and if there is a running process with that process id.
Return None if no such process exists.
"""
pid = read_pidfile(pidfile)
if pid:
try:
kill(int(pid), 0)
return pid
except OSError as e:
if e.errno == errno.EPERM:
return pid
return None
def read_pidfile(filename):
if os.path.exists(filename):
try:
with open(filename) as f:
content = f.read()
return int(content.strip())
except (ValueError, IOError):
return None
else:
return None
def ensure_port_cleanup(
bound_addresses, maxtries=30, sleeptime=2): # pragma: no cover
"""
This makes sure any open ports are closed.
Does this by connecting to them until they give connection
refused. Servers should call like::
ensure_port_cleanup([80, 443])
"""
atexit.register(_cleanup_ports, bound_addresses, maxtries=maxtries,
sleeptime=sleeptime)
def _cleanup_ports(
bound_addresses, maxtries=30, sleeptime=2): # pragma: no cover
# Wait for the server to bind to the port.
import socket
import errno
for bound_address in bound_addresses:
for attempt in range(maxtries):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(bound_address)
except socket.error as e:
if e.args[0] != errno.ECONNREFUSED:
raise
break
else:
time.sleep(sleeptime)
else:
raise SystemExit('Timeout waiting for port.')
sock.close()
def _turn_sigterm_into_systemexit(): # pragma: no cover
"""
Attempts to turn a SIGTERM exception into a SystemExit exception.
"""
try:
import signal
except ImportError:
return
def handle_term(signo, frame):
raise SystemExit
signal.signal(signal.SIGTERM, handle_term)
def install_reloader(poll_interval=1, extra_files=None): # pragma: no cover
"""
Install the reloading monitor.
On some platforms server threads may not terminate when the main
thread does, causing ports to remain open/locked. The
    ``raise_keyboard_interrupt`` option creates an unignorable signal
    which causes the whole application to shut down (rudely).
"""
mon = Monitor(poll_interval=poll_interval)
if extra_files is None:
extra_files = []
mon.extra_files.extend(extra_files)
t = threading.Thread(target=mon.periodic_reload)
t.setDaemon(True)
t.start()
class classinstancemethod(object):
"""
Acts like a class method when called from a class, like an
instance method when called by an instance. The method should
take two arguments, 'self' and 'cls'; one of these will be None
depending on how the method was called.
"""
def __init__(self, func):
self.func = func
self.__doc__ = func.__doc__
def __get__(self, obj, type=None):
return _methodwrapper(self.func, obj=obj, type=type)
class _methodwrapper(object):
def __init__(self, func, obj, type):
self.func = func
self.obj = obj
self.type = type
def __call__(self, *args, **kw):
assert not 'self' in kw and not 'cls' in kw, (
"You cannot use 'self' or 'cls' arguments to a "
"classinstancemethod")
return self.func(*((self.obj, self.type) + args), **kw)
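# A minimal sketch (not part of the original source) of how classinstancemethod
# dispatches; ``Demo`` and ``show`` are hypothetical names used only here:
#
#     class Demo(object):
#         def show(self, cls, value):
#             return (self, cls, value)
#         show = classinstancemethod(show)
#
#     Demo.show(1)     # -> (None, Demo, 1)    'self' is None on the class
#     Demo().show(1)   # -> (<Demo ...>, Demo, 1)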
class Monitor(object): # pragma: no cover
"""
A file monitor and server restarter.
Use this like:
    .. code-block:: python
install_reloader()
Then make sure your server is installed with a shell script like::
err=3
while test "$err" -eq 3 ; do
python server.py
err="$?"
done
or is run from this .bat file (if you use Windows)::
@echo off
:repeat
python server.py
if %errorlevel% == 3 goto repeat
or run a monitoring process in Python (``pserve --reload`` does
this).
Use the ``watch_file(filename)`` function to cause a reload/restart for
other non-Python files (e.g., configuration files). If you have
a dynamic set of files that grows over time you can use something like::
def watch_config_files():
return CONFIG_FILE_CACHE.keys()
add_file_callback(watch_config_files)
Then every time the reloader polls files it will call
``watch_config_files`` and check all the filenames it returns.
"""
instances = []
global_extra_files = []
global_file_callbacks = []
def __init__(self, poll_interval):
self.module_mtimes = {}
self.keep_running = True
self.poll_interval = poll_interval
self.extra_files = list(self.global_extra_files)
self.instances.append(self)
self.file_callbacks = list(self.global_file_callbacks)
def _exit(self):
# use os._exit() here and not sys.exit() since within a
# thread sys.exit() just closes the given thread and
# won't kill the process; note os._exit does not call
# any atexit callbacks, nor does it do finally blocks,
        # flush open files, etc. In other words, it is rude.
os._exit(3)
def periodic_reload(self):
while True:
if not self.check_reload():
self._exit()
break
time.sleep(self.poll_interval)
def check_reload(self):
filenames = list(self.extra_files)
for file_callback in self.file_callbacks:
try:
filenames.extend(file_callback())
except:
print(
"Error calling reloader callback %r:" % file_callback)
traceback.print_exc()
for module in sys.modules.values():
try:
filename = module.__file__
except (AttributeError, ImportError):
continue
if filename is not None:
filenames.append(filename)
for filename in filenames:
try:
stat = os.stat(filename)
if stat:
mtime = stat.st_mtime
else:
mtime = 0
except (OSError, IOError):
continue
if filename.endswith('.pyc') and os.path.exists(filename[:-1]):
mtime = max(os.stat(filename[:-1]).st_mtime, mtime)
if not filename in self.module_mtimes:
self.module_mtimes[filename] = mtime
elif self.module_mtimes[filename] < mtime:
print("%s changed; reloading..." % filename)
return False
return True
def watch_file(self, cls, filename):
"""Watch the named file for changes"""
filename = os.path.abspath(filename)
if self is None:
for instance in cls.instances:
instance.watch_file(filename)
cls.global_extra_files.append(filename)
else:
self.extra_files.append(filename)
watch_file = classinstancemethod(watch_file)
def add_file_callback(self, cls, callback):
"""Add a callback -- a function that takes no parameters -- that will
return a list of filenames to watch for changes."""
if self is None:
for instance in cls.instances:
instance.add_file_callback(callback)
cls.global_file_callbacks.append(callback)
else:
self.file_callbacks.append(callback)
add_file_callback = classinstancemethod(add_file_callback)
watch_file = Monitor.watch_file
add_file_callback = Monitor.add_file_callback
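# Usage sketch (illustrative, not from the original module): thanks to the
# classinstancemethod wrapper these module-level aliases affect every Monitor
# instance, as described in the Monitor docstring. File names are hypothetical.
#
#     install_reloader(poll_interval=1)
#     watch_file('development.ini')              # restart when the config changes
#     add_file_callback(lambda: ['extra.cfg'])   # dynamic list of files to watch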
# For paste.deploy server instantiation (egg:pyramid#wsgiref)
def wsgiref_server_runner(wsgi_app, global_conf, **kw): # pragma: no cover
from wsgiref.simple_server import make_server
host = kw.get('host', '0.0.0.0')
port = int(kw.get('port', 8080))
server = make_server(host, port, wsgi_app)
print('Starting HTTP server on http://%s:%s' % (host, port))
server.serve_forever()
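# Example PasteDeploy server section (illustrative values) that selects the
# runner above via its egg entry point:
#
#     [server:main]
#     use = egg:pyramid#wsgiref
#     host = 0.0.0.0
#     port = 6543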
# For paste.deploy server instantiation (egg:pyramid#cherrypy)
def cherrypy_server_runner(
app, global_conf=None, host='127.0.0.1', port=None,
ssl_pem=None, protocol_version=None, numthreads=None,
server_name=None, max=None, request_queue_size=None,
timeout=None
): # pragma: no cover
"""
Entry point for CherryPy's WSGI server
Serves the specified WSGI app via CherryPyWSGIServer.
``app``
The WSGI 'application callable'; multiple WSGI applications
may be passed as (script_name, callable) pairs.
``host``
This is the ipaddress to bind to (or a hostname if your
nameserver is properly configured). This defaults to
127.0.0.1, which is not a public interface.
``port``
The port to run on, defaults to 8080 for HTTP, or 4443 for
HTTPS. This can be a string or an integer value.
``ssl_pem``
This an optional SSL certificate file (via OpenSSL) You can
generate a self-signed test PEM certificate file as follows:
$ openssl genrsa 1024 > host.key
$ chmod 400 host.key
$ openssl req -new -x509 -nodes -sha1 -days 365 \\
-key host.key > host.cert
$ cat host.cert host.key > host.pem
$ chmod 400 host.pem
``protocol_version``
The protocol used by the server, by default ``HTTP/1.1``.
``numthreads``
The number of worker threads to create.
``server_name``
The string to set for WSGI's SERVER_NAME environ entry.
``max``
The maximum number of queued requests. (defaults to -1 = no
limit).
``request_queue_size``
The 'backlog' argument to socket.listen(); specifies the
maximum number of queued connections.
``timeout``
The timeout in seconds for accepted connections.
"""
is_ssl = False
if ssl_pem:
port = port or 4443
is_ssl = True
if not port:
if ':' in host:
host, port = host.split(':', 1)
else:
port = 8080
bind_addr = (host, int(port))
kwargs = {}
for var_name in ('numthreads', 'max', 'request_queue_size', 'timeout'):
var = locals()[var_name]
if var is not None:
kwargs[var_name] = int(var)
from cherrypy import wsgiserver
server = wsgiserver.CherryPyWSGIServer(bind_addr, app,
server_name=server_name, **kwargs)
if ssl_pem is not None:
if not PY3:
server.ssl_certificate = server.ssl_private_key = ssl_pem
else:
# creates wsgiserver.ssl_builtin as side-effect
wsgiserver.get_ssl_adapter_class()
server.ssl_adapter = wsgiserver.ssl_builtin.BuiltinSSLAdapter(
ssl_pem, ssl_pem)
if protocol_version:
server.protocol = protocol_version
try:
protocol = is_ssl and 'https' or 'http'
if host == '0.0.0.0':
print('serving on 0.0.0.0:%s view at %s://127.0.0.1:%s' %
(port, protocol, port))
else:
print('serving on %s://%s:%s' % (protocol, host, port))
server.start()
except (KeyboardInterrupt, SystemExit):
server.stop()
return server
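# Example PasteDeploy server section (illustrative values) for the CherryPy
# runner; the keys map onto the keyword arguments documented above:
#
#     [server:main]
#     use = egg:pyramid#cherrypy
#     host = 127.0.0.1
#     port = 8080
#     numthreads = 10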
|
main.py
|
# -*- coding: utf-8 -*-
"""
author: ZSAIm
github: https://github.com/ZSAIm/iqiyi-parser
"""
import gui
import wx, time, sys
import GUIEventBinder
import socket
import threading
import flow
import handler
from handler.logs import STDRedirect
import io
socket.setdefaulttimeout(3)
def main():
threading.Thread(target=__main__).start()
def __main__():
wx.CallAfter(flow.Entry.handle)
def wait_thread():
"""can't run while debugging"""
main_thread = threading.main_thread()
for i in threading.enumerate():
if i != main_thread:
i.join()
if __name__ == '__main__':
# with open('error.log', 'a+') as ferr:
# ferr.write('------------------------\n')
# ferr.write('%s\n' % time.asctime(time.localtime(time.time())))
# ferr.write('------------------------\n')
# sys.stderr = ferr
gui.init()
GUIEventBinder.init()
sys.stdout = STDRedirect(sys.stdout)
sys.stderr = STDRedirect(sys.stderr)
main()
gui.MainLoop()
# handler.downloader.join()
# wait_thread()
|
main.py
|
import requests, zlib, hashlib, json
import time, threading
import logging
import random
from multiprocessing.pool import ThreadPool
from enum import Enum
class GameConstants:
RUBLES = '5449016a4bdc2d6f028b456f'
DOLLARS = '5696686a4bdc2da3298b456a'
EURO = '569668774bdc2da2298b4568'
Therapist = '54cb57776803fa99248b456e'
gasan = '590a3efd86f77437d351a25b'
labKey = '5c94bbff86f7747ee735c08f'
salewa = '544fb45d4bdc2dee738b4568'
class FleaOffer:
def __init__(self, offer: dict):
self.offer = offer
self.id = offer['_id']
self.user = dict()
self.user['id'] = offer['user']['id']
self.user['nickname'] = offer['user']['nickname']
self.item_tpl = offer['items'][0]['_tpl']
self.count = offer['items'][0]['upd']['StackObjectsCount']
self.requirements = list()
self.requirements = offer['requirements']
self.summary_cost = offer['summaryCost']
self.start_time = offer['startTime']
self.end_time = offer['endTime']
def __str__(self):
return f'{self.user["nickname"]} - {self.summary_cost} - x{self.count}'
def _repr_(self):
return str(self)
def remove_white_space(s):
return s.replace(' ', '').replace('\n', '')
class GameRequest:
def __init__(self, url: str, data: str, cookies={}):
self.request = requests.post(url, data, cookies=cookies)
self.cookies = self.request.cookies.get_dict()
def _get_content(self):
return zlib.decompress(self.request.content).decode()
def __str__(self):
return self._get_content()
def __repr__(self):
return str(self)
def get_json(self) -> dict:
return json.loads(self._get_content())
class GameConnection:
logger = logging.getLogger("GameConnection")
logger.setLevel("DEBUG")
cookies = None
def __init__(self, email="", password="", cookies=''):
self.logger.debug("Connecting to game")
if cookies != '':
self.cookies = {'PHPSESSID': cookies}
else:
self.email = email
self.password = password
self.cookies = self._get_cookies()
self.logger.debug(f"Using cookies: {self.cookies}")
def _get_cookies(self):
loginReq = self._login()
return loginReq.cookies
def _login(self):
if self.email == "" or self.password == "":
raise ValueError("Email or password are invalid")
device_id = "ENTER_YOUR_DEVICE_ID_HERE"
major_v = "ENTER_MAJOR_GAME_VERSION_HERE" # eg. "0.11.7.3087"
# minor_v = "bgkidft87ddd"
data = dict()
data['version'] = {}
data['version']['major'] = major_v
data['version']['backend'] = 6
data['device_id'] = device_id
data['develop'] = True
data['email'] = self.email
data['pass'] = self.password
req = self.prod_request('/client/game/login', json.dumps(data))
self.logger.debug("Login request: " + str(req))
# TODO: require Hardware code if login is done for the first time
return req
def _send_request(self, path, data):
data = zlib.compress(remove_white_space(data).encode())
cookies = {}
if self.cookies is not None:
cookies = self.cookies
req = GameRequest(path, data=data, cookies=cookies)
return req
def prod_request(self, path: str, data: str) -> GameRequest:
return self._send_request("http://prod.escapefromtarkov.com" + path, data)
def trading_request(self, path: str, data: str) -> GameRequest:
return self._send_request("http://trading.escapefromtarkov.com" + path, data)
def ragfair_request(self, path: str, data: str) -> GameRequest:
return self._send_request("http://ragfair.escapefromtarkov.com" + path, data)
class FleaBuyResult(Enum):
OK = 0,
BOUGHT = 1
OUTOFSPACE = 2
UNKNOWN = -1
class Game:
logger = logging.getLogger("Game")
logger.setLevel("DEBUG")
profileLock = threading.Lock()
def __init__(self, email="", password="", cookies=None):
self.logger.debug("Initializing game")
self.connection = GameConnection(email, password, cookies)
self.keep_alive_thread = threading.Thread(target=self._keep_alive)
self.keep_alive_thread.daemon = True
self.keep_alive_thread.start()
self.moneyStacks = {}
self.PMC = None
self.update_profile()
self.connection.prod_request('/client/game/profile/select',
json.dumps({'uid': self.PMC['_id']}))
self.all_item_list = self.connection.prod_request('/client/items', '{}').get_json()['data']
"""
Method running in separate thread. Sends alive req to server to keep cookies valid.
"""
def _keep_alive(self):
while True:
self.connection.prod_request('/client/game/keepalive', '')
time.sleep(5 * 60)
"""
Reloads information about PMC, including money, inventory items etc.
"""
def update_profile(self):
with self.profileLock:
list_req = self.connection.prod_request("/client/game/profile/list", "{}")
profile_list = list_req.get_json()
for item in profile_list['data']:
if item['Info']['LastTimePlayedAsSavage'] == 0:
self.PMC = item
self._inventory = dict()
for item in self.PMC['Inventory']['items']:
self._inventory[item['_id']] = item
for currency in (GameConstants.RUBLES, GameConstants.DOLLARS, GameConstants.EURO):
self.moneyStacks[currency] = {}
for item_id, item in self._inventory.items():
for currency in (GameConstants.RUBLES, GameConstants.DOLLARS, GameConstants.EURO):
if item['_tpl'] == currency:
count = item['upd']['StackObjectsCount']
self.moneyStacks[currency][item_id] = count
"""
:return dictionary of pairs item_id -> item_desc
"""
def get_inventory(self):
with self.profileLock:
return self._inventory
"""
Returns money stack ids, which sum >= value
"""
def find_moneystack(self, money: int, currency=GameConstants.RUBLES) -> list:
with self.profileLock:
result = []
for (id, value) in self.moneyStacks[currency].items():
if value >= money:
result.append((id, money))
break
else:
money -= value
result.append((id, value))
return result
"""
Get inventory item ids by item template
"""
def inventory_items_ids(self, item_tpl: str) -> list:
return [item['_id'] for item in self.PMC['Inventory']['items'] if item['_tpl'] == item_tpl]
def get_traders_list(self):
req = self.connection.trading_request('/client/trading/api/getTradersList', '')
result = dict()
for trader in req.get_json()['data']:
result[trader['_id']] = trader
return result
def get_trader_assort(self, trader_id: str) -> list:
req = self.connection.trading_request('/client/trading/api/getTraderAssort/' + trader_id, '')
return req.get_json()['data']
def flea_find(self, limit=15, priceFrom=0, priceTo=0,
removeBartering=True, removeMerchantOffers=True, item_tpl=''):
data = {
"page": 0,
"limit": limit,
"sortType": 5,
"sortDirection": 0,
"currency": 0,
"priceFrom": priceFrom,
"priceTo": priceTo,
"quantityFrom": 0,
"quantityTo": 0,
"conditionFrom": 0,
"conditionTo": 100,
"oneHourExpiration": False,
"onlyPrioritized": False,
"removeBartering": removeBartering,
"removeMerchantOffers": removeMerchantOffers,
"onlyFunctional": True,
"updateOfferCount": True,
"handbookId": item_tpl,
"linkedSearchId": "",
"neededSearchId": ""
}
req = self.connection.ragfair_request('/client/ragfair/search', json.dumps(data))
# for item in req.get_json()['data']['offers']:
offers = req.get_json()['data']['offers']
result = list()
for offer in offers:
result.append(FleaOffer(offer))
result.sort(key=lambda x: x.summary_cost)
return result
def flea_buy(self, offer: FleaOffer) -> FleaBuyResult:
self.logger.info(f'------------ Buying {offer.id} x {offer.count} for {offer.summary_cost} ------------')
spent_time = time.time() - offer.start_time
start_from = 56
if spent_time < start_from:
to_wait = start_from - spent_time
self.logger.info(f"Need to wait {to_wait}")
time.sleep(to_wait)
while time.time() < offer.end_time:
try:
time.sleep(0.05)
data = {
"data": [
{
"Action": "RagFairBuyOffer",
"offerId": offer.id,
"count": offer.count,
"items": []
}
]
}
# TODO: support not only rubbles purchases
stacks = self.find_moneystack(offer.summary_cost * offer.count, GameConstants.RUBLES)
for stack in stacks:
stack_info = dict()
stack_info['id'] = stack[0]
stack_info['count'] = stack[1]
data['data'][0]['items'].append(stack_info)
req = self.connection.prod_request('/client/game/profile/items/moving', json.dumps(data))
result_data = req.get_json()
# still is not available
if result_data['err'] in (228, 1512):
continue
if result_data['err'] == 1505:
return FleaBuyResult.OUTOFSPACE
# this means that transaction is okay
if result_data['err'] == 0:
self.update_profile()
# offer was sold out
if len(result_data['data']['badRequest']) > 0:
return FleaBuyResult.BOUGHT
# added new item to inventory
elif len(result_data['data']['items'].keys()) > 0:
return FleaBuyResult.OK
print(result_data)
return FleaBuyResult.UNKNOWN
except Exception as e:
self.logger.exception(str(e))
def get_stash_size(self):
stash_id = self.PMC['Inventory']['stash']
stash_tpl = self.get_inventory()[stash_id]['tpl']
stash_props = self.all_item_list[stash_tpl]['_props']['Grids'][0]['_props']
return stash_props['cellsV'], stash_props['cellsH']
class FleaBuyThread(ThreadPool):
def __init__(self, game: Game, offer: FleaOffer):
super().__init__(processes=1)
self.game = game
self.offer = offer
self.async_result = None
def start(self):
self.async_result = self.apply_async(self.game.flea_buy, [self.offer])
def is_ready(self):
return self.async_result.ready()
def get_result(self, timeout=None):
return self.async_result.get(timeout)
class TarkovBot:
logger = logging.getLogger("TarkovBot")
logger.setLevel("DEBUG")
def __init__(self, email='', password='', cookies=''):
FORMAT = '%(asctime)s: %(message)s'
logging.basicConfig(format=FORMAT, )
self.logger.debug("Initializing bot")
self.game = Game(email, password, cookies)
def filter_inventory(self, item_tpl):
inv = self.game.get_inventory()
return list(filter(lambda x: x[1]['_tpl'] == item_tpl, inv.items()))
def flea_market_buy(self, item_tpl: str, upper_price: int, offer_count=5, until_amount=None, delay_from=5, delay_to=10):
if until_amount is None:
until_amount = 1000
offer_container = list()
offer_id_set = set()
while len(self.filter_inventory(item_tpl)) < until_amount:
try:
container_copy = list(offer_container)
for offer_thread in offer_container:
if offer_thread.is_ready():
offer_id_set.remove(offer_thread.offer.id)
container_copy.remove(offer_thread)
result = offer_thread.get_result()
offer = offer_thread.offer
assert isinstance(offer, FleaOffer)
if result == FleaBuyResult.OK:
self.logger.info(
f'------------ Successfully bought offer {offer.id}'
f' for {offer.summary_cost} -----------'
)
else:
self.logger.info(
f'------------ Failed to buy offer {offer.id} - {result} -----------'
)
offer_container = container_copy
if len(offer_container) < offer_count:
new_offers = self.game.flea_find(limit=15, priceTo=upper_price, item_tpl=item_tpl)
new_offers = [item for item in new_offers if item.id not in offer_id_set]
if len(new_offers) != 0:
can_add_count = offer_count - len(offer_container)
self.logger.info(f'Found {len(new_offers)} offers. Can add {can_add_count}')
for i in range(min(can_add_count, len(new_offers))):
buy_thread = FleaBuyThread(self.game, new_offers[i])
buy_thread.start()
offer_container.append(buy_thread)
offer_id_set.add(new_offers[i].id)
except KeyboardInterrupt as keyBoard:
for offer_thread in offer_container:
self.logger.debug(f"Terminating thread for {offer_thread.offer.id}")
offer_thread.terminate()
break
except Exception as e:
self.logger.exception(str(e))
finally:
try:
time.sleep(random.randint(delay_from, delay_to))
except KeyboardInterrupt as keyBoard:
for offer_thread in offer_container:
self.logger.debug(f"Terminating thread for {offer_thread.offer.id}")
offer_thread.terminate()
break
"""
Tries to free some space by merging and transfering ruble stacks
"""
def merge_all_rubles(self):
all_rubles = sorted(list(bot.game.moneyStacks[GameConstants.RUBLES].items()), key=lambda x: x[1])
all_rubles = [item for item in all_rubles if item[1] != 500000]
merge_data = []
for i in range(len(all_rubles)):
itemI = list(all_rubles[i])
if itemI[1] == 500000:
continue
for j in range(i + 1, len(all_rubles)):
itemJ = list(all_rubles[j])
# merge i to j
if itemI[1] == 0 or itemJ[1] == 500000:
continue
can_merge = 500000 - itemJ[1]
if itemI[1] > can_merge:
itemI[1] -= can_merge
itemJ[1] = 500000
merge_data.append([itemI[0], itemJ[0], can_merge])
else:
itemJ[1] += itemI[1]
itemI[1] = 0
merge_data.append([itemI[0], itemJ[0]])
all_rubles[i] = itemI
all_rubles[j] = itemJ
if itemI[1] == 0:
break
data = {
'data': []
}
for merge in merge_data:
if len(merge) == 2:
d = {"Action":"Merge","item":merge[0],"with":merge[1]}
else:
d = {"Action":"Transfer","item":merge[0],"with":merge[1], 'count': merge[2]}
data['data'].append(d)
if len(data['data']) > 0:
req = bot.game.connection.prod_request('/client/game/profile/items/moving', json.dumps(data))
print(req)
bot.game.update_profile()
email = "ENTER_YOUR_EMAIL_HERE"
password = "ENTER_YOUR_PASSWORD_HASH_HERE"
cookie = 'ENTER_COOKIE_IF_NEEDED_HERE'
bot = TarkovBot(email=email, password=password, cookies=cookie)
|
AMTP.py
|
import socket
import sys
import threading
class Message_from_server:
def __init__(self,raw_message):
lines = raw_message.split("\n")
self.response_code = int(lines[0])
self.headers = {}
for i in range(1,len(lines)):
parts = lines[i].split(":")
self.headers[parts[0].strip()] = parts[1].strip()
class Message_to_server:
def __init__(self,command, headers, protocol="AMTP/0.0"):
self.command = command
self.headers = headers
self.protocol = protocol
def __str__(self):
header_block = ""
for key in self.headers:
header_block += key+": "+self.headers[key]+"\n"
return self.protocol+" "+self.command+"\n"+header_block+"\n"
class AMTP_client:
def __init__(self,token, message_handler):
self.message_handler = message_handler
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server = ("localhost",1805)
self.sock.connect(server)
self.my_slot = -1
self.buffer = ""
threading.Thread(target=self.read_thread).start()
auth_require_message = Message_to_server("CLAIM",{"Role":"Player","Secret":token,"Identifier":"Auth"})
self.send(auth_require_message)
print(auth_require_message)
def send(self,msg: Message_to_server):
self.sock.sendall(str(msg).encode())
def read_thread(self):
while True:
data = self.sock.recv(1024)
self.buffer += data.decode()
while "\n\n" in self.buffer:
raw_message = self.buffer[:self.buffer.index("\n\n")]
self.buffer = self.buffer[len(raw_message)+2:]
try:
self.on_message(raw_message)
except:
pass
def on_message(self,raw_message):
message = Message_from_server(raw_message)
if "Identifier" in message.headers:
if message.response_code == 9:
print("Server requesting shutdown")
sys.exit(0)
if message.headers["Identifier"] == "Auth":
if message.response_code == 0:
self.my_slot = int(message.headers["Slot"])
print("Authentication successful: ",self.my_slot)
else:
print("ERROR: Authentication failed, exiting")
sys.exit(1)
if "ActionRequiredBy" in message.headers:
arb_header = message.headers["ActionRequiredBy"]
if arb_header == "*" or arb_header == str(self.my_slot):
self.message_handler(message,True)
else:
self.message_handler(message,False)
else:
self.message_handler(message,False)
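if __name__ == '__main__':
    # Usage sketch (illustrative, not part of the original module). The handler
    # and the token below are placeholders; a server must already be listening
    # on localhost:1805 or the connection attempt will fail.
    def print_handler(message, action_required):
        print(message.response_code, message.headers, "action required:", action_required)

    client = AMTP_client("REPLACE_WITH_TOKEN", print_handler)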
|
06_semaphore.py
|
import threading
def worker(i, semaphore):
assert isinstance(semaphore, threading.Semaphore)
semaphore.acquire()
print('Working', i)
semaphore = threading.Semaphore(0)
n_workers = 10
for i in range(n_workers):
t = threading.Thread(target=worker, args=(i, semaphore,))
t.start()
for i in range(n_workers):
semaphore.release()
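# A second sketch (added for illustration, not in the original example): a
# BoundedSemaphore used the other way around, as a cap on how many workers may
# run concurrently. The names bounded_worker/max_concurrent are ours.
import time

def bounded_worker(i, sem):
    with sem:  # at most max_concurrent workers are inside this block at once
        print('Bounded working', i)
        time.sleep(0.1)

max_concurrent = 3
bounded = threading.BoundedSemaphore(max_concurrent)
bounded_threads = [threading.Thread(target=bounded_worker, args=(i, bounded))
                   for i in range(n_workers)]
for t in bounded_threads:
    t.start()
for t in bounded_threads:
    t.join()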
|
2.Multiprocessing.Lock.py
|
# -*- coding: utf-8 -*-
import multiprocessing
import sys
def worker_with(lock, f):
with lock:
fs = open(f, 'a+')
n = 10
while n > 1:
fs.write("Lockd acquired via with\n")
n -= 1
fs.close()
def worker_no_with(lock, f):
lock.acquire()
try:
fs = open(f, 'a+')
n = 10
while n > 1:
fs.write("Lock acquired directly\n")
n -= 1
fs.close()
finally:
lock.release()
if __name__ == "__main__":
lock = multiprocessing.Lock()
f = "file.txt"
w = multiprocessing.Process(target = worker_with, args=(lock, f))
nw = multiprocessing.Process(target = worker_no_with, args=(lock, f))
w.start()
nw.start()
print "end"
|
hierarchy.py
|
# Copyright 2019 Sebastian Kahl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Hierarchy
Created on 16.08.2017
@author: skahl
"""
# Imports from __future__ in case we're running Python 2
from __future__ import division, print_function
from __future__ import absolute_import, unicode_literals
# system modules
import threading
from collections import deque # from Queue import Queue
import sys
import os
# own modules
from .configurator import Config
from .visionlayer import VisionLayer
from .motorcontrol import MotorControl
from .sequencelayer import SequenceLayer
from .clusterlayer import ClusterLayer
from .toplayer import TopLayer
from .realizations import Realizations
from .goals import Goals
from .ment_rule_parser import Parser
from .ment import MetaCommunication
from .functions import *
# timing for information update
from time import time
class Hierarchy(object):
def __init__(self, config):
""" Initialize the hierarchy by a config list, containing name and type dictionaries.
"""
# logging setup
self.logger = Logger('White', 'HPBU')
self.log = self.logger.log
self.error = self.logger.error
# hierarchy layer setup
self.is_stopped = False
self.updates_enabled = False
self.config = config
self.layer_io = None
self.layer_vision = None
self.layer_between = []
self.layer_top = None
self.update_delay = self.config.parameters['update_delay'] # seconds
# optional interaction layer modules
self.personmodel = None
self.time_step = self.config.parameters['time_step'] # seconds
self.update_info_delay = 0.1 # seconds TODO: decrease!!
self.time_since_update = 0
self.last_time_info = time()
# TODO: make use of self.config.parameters['memory_len'] # currently defaulting to 5 everywhere
# hierarchy, possible modules
layer_dict = {
'MotorControl': MotorControl,
'Vision': VisionLayer,
'Sequence': SequenceLayer,
'Cluster': ClusterLayer,
'Top': TopLayer,
'Realizations': Realizations,
'Goals': Goals
}
# load and parse PersonModel from xml, if parameter is given
if self.config.parameters["personmodel_file"] is not "":
personmodel_file = self.config.parameters["personmodel_file"]
# if we are processing PersonModel knowledge for 'Seq' layer, load, parse and connect it
self.log(1, "Loading PersonModel knowledge from xml...", personmodel_file)
resource_path = os.environ.get('RESOURCE_PATH')
# select xml-file specific parser
parser_selector = Parser(filename=resource_path + os.sep + personmodel_file)
parser = parser_selector.select_parser()
# parse all available personmodels
personmodels = parser.parse()
            if personmodels is not None and personmodels != {}:
# select the one personmodel identified in config by my_id
my_id = self.config.parameters.get("my_id", None)
if my_id in personmodels:
personmodel = personmodels[my_id]
personmodel.my_id = my_id
self.log(1, "Selected PersonModel:\n", personmodel)
self.personmodel = personmodel
else:
print(my_id)
self.error("Configured my_id cannot be found in available PersonModels:", my_id)
sys.exit(1)
# if PersonModel was available, also try to configure a MetaCommunication object
if self.personmodel is not None:
meta_comm_queue = deque(maxlen=100)
meta_communication = MetaCommunication(self.personmodel.my_id, self.personmodel.agents, meta_comm_queue)
self.personmodel.set_meta_communication(meta_communication)
# set PersonModel interaction_influence_mode parameter
if self.config.parameters["interaction_influence_mode"] is not None:
self.personmodel.interaction_influence_mode = self.config.parameters["interaction_influence_mode"]
# load and parse configured layers
for line in self.config.layers:
layer = layer_dict[line['type']]
if line['type'] in ['MotorControl']:
self.layer_io = layer(line['name'])
self.layer_io.set_logger(line['color'])
self.layer_io.set_parameters(line['parameters'] if "parameters" in line else {})
# add global parameters to local layer parameters
for key, val in self.config.parameters.items():
self.layer_io.params[key] = val
# reference personmodel, if available
self.layer_io.personmodel = self.personmodel
# reference to hierarchy methods
self.layer_io.hierarchy_sleep = self.hierarchy_sleep
elif line['type'] in ['Top', 'Goals']:
self.layer_top = layer(line['name'])
self.layer_top.set_logger(line['color'])
self.layer_top.set_parameters(line['parameters'] if "parameters" in line else {})
# add global parameters to local layer parameters
for key, val in self.config.parameters.items():
self.layer_top.params[key] = val
# reference personmodel, if available
self.layer_top.personmodel = self.personmodel
# reference to hierarchy methods
self.layer_top.hierarchy_sleep = self.hierarchy_sleep
if "filename" in line and line['filename'] is not None:
""" loading routine to parse MENT-knowledge and load into hierarchy
"""
self.log(1, "Loading top layer knowledge from xml...", line['filename'])
resource_path = os.environ.get('RESOURCE_PATH')
# select xml-file specific parser
parser_selector = Parser(filename=resource_path + os.sep + line['filename'])
parser = parser_selector.select_parser()
# parse
_dict = parser.parse()
# print(line['name'], "layer xml deserialization:", _dict)
# load into layer structure
num_hypos = len(_dict)
for hypo_id, hypo in _dict.items():
self.layer_top.hypotheses.add_hypothesis_from_existing_repr(hypo, 1. / num_hypos)
elif "Vision" == line['type']:
self.layer_vision = layer(line['name'])
self.layer_vision.set_logger(line['color'])
self.layer_vision.set_parameters(line['parameters'] if "parameters" in line else {})
# add global parameters to local layer parameters
for key, val in self.config.parameters.items():
self.layer_vision.params[key] = val
# reference personmodel, if available
self.layer_vision.personmodel = self.personmodel
# reference to hierarchy methods
self.layer_vision.hierarchy_sleep = self.hierarchy_sleep
else:
self.layer_between.append(layer(line['name']))
self.layer_between[-1].set_logger(line['color'])
self.layer_between[-1].set_parameters(line['parameters'] if "parameters" in line else {})
# add global parameters to local layer parameters
for key, val in self.config.parameters.items():
self.layer_between[-1].params[key] = val
# reference personmodel, if available
self.layer_between[-1].personmodel = self.personmodel
# reference to hierarchy methods
self.layer_between[-1].hierarchy_sleep = self.hierarchy_sleep
if "filename" in line and line['filename'] is not None:
""" loading routine to parse MENT-knowledge and load into hierarchy
"""
self.log(1, "Loading in-between layer knowledge from xml...", line['filename'])
resource_path = os.environ.get('RESOURCE_PATH')
# select xml-file specific parser
parser_selector = Parser(filename=resource_path + os.sep + line['filename'])
parser = parser_selector.select_parser()
# parse
_dict = parser.parse()
# print(line['name'], "layer xml deserialization:", _dict)
# load into layer structure
num_hypos = len(_dict)
for hypo_id, hypo in _dict.items():
self.layer_between[-1].hypotheses.add_hypothesis_from_existing_repr(hypo, 1. / num_hypos)
if self.layer_io is None or self.layer_top is None:
self.error("At least one layer of type 'MotorControl' and 'Top' have to be defined!")
sys.exit(1)
else:
self.log(1, "Successfully set up the hierarchy with the following layers:")
self.log(1, self.layer_top)
[self.log(1, l) for l in self.layer_between[::-1]]
self.log(1, self.layer_vision)
self.log(1, self.layer_io)
# mark top-layer as top-layer
self.layer_top.is_top_layer = True
""" loading routine to retrieve already learned layer knowledge from data storage
"""
if self.config.parameters["read_knowledge"]:
self.log(1, "Restoring layer knowledge from data storage...")
self.layer_top, self.layer_between, self.layer_io, self.layer_vision = self.config.restore_knowledge_from_storage(self.layer_top, self.layer_between, self.layer_io, self.layer_vision)
""" Define influence, output and introspection necessities """
self.evidence_top_layer = None
self.long_range_projection = {}
self.lower_level_hypotheses = {}
# prepare lower_level_hypotheses update house keeping
lower_level = self.layer_vision
for l in self.layer_between:
self.lower_level_hypotheses[l.name] = lower_level.hypotheses
lower_level = l
self.lower_level_hypotheses[self.layer_top.name] = lower_level.hypotheses
# prepare evidence update to next higher level house keeping
self.evidences = {}
self.evidences[self.layer_top.name] = [None, None, None]
[self.evidences.update({l.name: [None, None, None]}) for l in self.layer_between]
self.evidences[self.layer_io.name] = [None]
""" Define multithreading necessities """
self.queue_input = deque(maxlen=100)
self.queue_output = deque(maxlen=100)
self.queue_info = deque(maxlen=100)
self.queue_top_influence = deque(maxlen=100)
self.queue_long_range_projection = deque(maxlen=100)
# @profile
def update(self):
""" Full hierarchy update routine.
This is a "prediction-first" update, so we update with the TopLayer first,
traversing the hierarchy down until we reach the IOLayer.
Send input via queue_input using dict {"input":your input}
Quit the update thread using dict {"control":"quit"} in queue_input.
Receive output via queue_output.
"""
# control prep
control = ""
# visualization information collection prep
collected_info = {}
# highly specialized variables to make sure realization intentions are stored in model info
realizations_intention = None
#################################
while control != "quit":
# input prep
input_proprioception = None
input_vision = None
input_meta = None # meta-communicative signals
influence_top_layer = [None, None]
prediction = [None, None]
prunable = None
long_range_projection = {}
higher_layer_name = None
lrp = None
# highly specialized variables to make sure realization intentions are stored in model info
realizations_intention_changed = False
# Per-layer clean-up
self.layer_top.clean_up()
for layer in self.layer_between:
layer.clean_up()
self.layer_vision.clean_up()
self.layer_io.clean_up()
# timer
self.time_since_update = time() - self.last_time_info
# hierarchy update
# Check queue for sensory input or control input
# print("update input queue:", len(self.queue_input))
if len(self.queue_input) > 0:
_input = self.queue_input.pop()
_val = _input[list(_input.keys())[0]]
if "control" in _val:
control = _val["control"]
if "proprioception" in _val:
input_proprioception = _val["proprioception"]
if "vision" in _val:
# filter by attention on interaction partner agent
if self.personmodel is not None:
if "agent_name" in _val and _val["agent_name"] in [self.personmodel["me_focus"], self.personmodel["my_id"], None]:
input_vision = _val["vision"]
elif "agent_name" in _val:
print("unfocused agent:", _val["agent_name"], "focus:", [self.personmodel["me_focus"], self.personmodel["my_id"], None])
else:
print("received behavior from unknown agent")
else:
input_vision = _val["vision"]
if "meta" in _val:
input_meta = _val["meta"]
if self.personmodel is not None:
# todo: could be encapsulated in external comm_channel extending method
self.personmodel.meta_communication.comm_channel.appendleft({"in": input_meta, "agent_name": _val["agent_name"]})
self.personmodel.meta_communication.rcv_meta()
# Check queue for influencing top-layer input
if len(self.queue_top_influence) > 0:
_input = self.queue_top_influence.pop()
if "input" in _input:
influence_top_layer = _input["input"]
# inhibit updates if necessary
if not self.updates_enabled:
sleep(0.1)
else:
# Check queue for outside long-range projection input
if len(self.queue_long_range_projection) > 0:
_lr = self.queue_long_range_projection.pop()
for target, com in _lr.items():
long_range_projection[target] = com
# print("\nlrp:", long_range_projection)
# TopLayer update
self.layer_top.receive_prediction(influence_top_layer) # from next higher layer, or external source
self.layer_top.receive_long_range_projection(long_range_projection.get(self.layer_top.name, None))
self.layer_top.receive_lower_level_hypos(self.lower_level_hypotheses[self.layer_top.name])
self.layer_top.receive_evidence(self.evidences[self.layer_top.name])
self.layer_top.update()
prediction = self.layer_top.send_prediction()
prunable = self.layer_top.send_prunable()
lrp = self.layer_top.send_long_range_projection()
self.set_long_range_projection(lrp)
higher_layer_name = self.layer_top.name
# collect layer information for visualization
if len(self.layer_top.hypotheses) > 0:
collected_info[self.layer_top.name] = {"hypotheses": self.layer_top.hypotheses.dpd.tolist(),
"free_energy": self.layer_top.free_energy,
"precision": self.layer_top.K,
"self": self.layer_top.self_estimate,
"intention": str(self.layer_top.intention)}
# Traverse the hierarchy in reverse order (top-down)
for layer in self.layer_between[::-1]:
# enable PersonModel influence in Seq layer if personmodel is available
if layer.name == "Seq" and self.personmodel is not None:
layer.personmodel_influence = True # enable PersonModel influence to allow prior knowledge to influence belief updates
layer.receive_prediction(prediction)
layer.receive_long_range_projection(long_range_projection.get(layer.name, None))
layer.receive_lower_level_hypos(self.lower_level_hypotheses[layer.name])
layer.receive_evidence(self.evidences[layer.name])
layer.receive_prunable(prunable)
layer.update()
self.lower_level_hypotheses[higher_layer_name] = layer.send_level_hypos()
self.evidences[higher_layer_name] = layer.send_evidence() # store evidence dict by layer name on the fly
higher_layer_name = layer.name # save this layer's name so that next lower layer knows where to send evidence
prediction = layer.send_prediction()
prunable = layer.send_prunable()
lrp = layer.send_long_range_projection()
self.set_long_range_projection(lrp)
str_intention = str(layer.intention)
# highly specialized realizations intention query
if layer.name == "Realizations" and layer.intention is not None:
if str_intention != realizations_intention:
realizations_intention_changed = True
realizations_intention = str_intention
# collect layer information for visualization
if len(layer.hypotheses) > 0:
collected_info[layer.name] = {"hypotheses": layer.hypotheses.dpd.tolist(),
"free_energy": layer.free_energy,
"precision": layer.K,
"self": layer.self_estimate,
"intention": str_intention}
# VisionLayer update (only receives visual updates and sends evidence to next higher layer)
self.layer_vision.receive_prediction(prediction)
self.layer_vision.receive_long_range_projection(long_range_projection.get(self.layer_vision.name, None))
self.layer_vision.receive_evidence(input_vision) # input from vision
self.layer_vision.receive_prunable(prunable)
self.layer_vision.update()
# no predictions in this layer and new higher_layer_name for evidence
self.evidences[higher_layer_name] = self.layer_vision.send_evidence()
lrp = self.layer_vision.send_long_range_projection()
self.lower_level_hypotheses[higher_layer_name] = self.layer_vision.send_level_hypos()
self.set_long_range_projection(lrp)
# collect layer information for visualization
if len(self.layer_vision.hypotheses) > 0:
collected_info[self.layer_vision.name] = {"hypotheses": self.layer_vision.hypotheses.dpd.tolist(),
"free_energy": self.layer_vision.free_energy,
"precision": self.layer_vision.K,
"self": self.layer_vision.self_estimate,
"intention": str(self.layer_vision.intention)}
# motor update
# IOLayer update (only receives proprioceptive updates and sends evidence to next higher layer, skipping VisionLayer)
self.layer_io.receive_prediction(prediction)
self.layer_io.receive_long_range_projection(long_range_projection.get(self.layer_io.name, None))
self.layer_io.receive_evidence(input_proprioception) # input from proprioception
self.layer_io.receive_prunable(prunable)
self.layer_io.update()
lrp = self.layer_io.send_long_range_projection()
self.set_long_range_projection(lrp)
# no new higher_layer_name for evidence
# self.evidences[higher_layer_name] = self.layer_io.send_evidence()
# collect layer information for visualization
if len(self.layer_io.hypotheses) > 0:
collected_info[self.layer_io.name] = {"hypotheses": self.layer_io.hypotheses.dpd.tolist(),
"free_energy": self.layer_io.free_energy,
"precision": self.layer_io.K,
"self": self.layer_io.self_estimate,
"intention": str(self.layer_io.intention)}
# output (active inference)
prediction = self.layer_io.send_prediction()
if prediction is not None and prediction[0] is not None:
only_prediction = prediction[0]
self.queue_output.appendleft({"angle": only_prediction})
# output of meta-communicative signals
if self.personmodel is not None:
comm_channel = self.personmodel.meta_communication.comm_channel
if len(comm_channel) > 0:
if "out" in comm_channel[-1]:
# received a meta-communicative signal
signal = comm_channel.pop()["out"]
self.queue_output.appendleft({"meta": signal})
# communicative intention available in Realizations?
# check time since info updates
if self.time_since_update > self.update_info_delay or realizations_intention_changed:
self.time_since_update = 0.
self.last_time_info = time()
# Collect Layer info and queue as info
# self.log(0, "storing collected information")
self.queue_info.appendleft(collected_info)
# sleep a bit to save processor time
sleep(self.update_delay)
#################################
################ finalize
self.finalize()
self.log(0, "... quit hierarchy update process.")
def disable_updates(self):
self.updates_enabled = False
def enable_updates(self):
self.updates_enabled = True
def get_output(self):
""" Return output from the hierarchy if there is some.
"""
if len(self.queue_output) > 0:
return self.queue_output.pop()
else:
return None
def set_input(self, uuid, _input):
""" Put given input dict into the queue.
A dict is expected with either a "control", "proprioception", or "vision" key.
"""
        # reject new input early if the queue is already backed up
        if len(self.queue_input) > 99:
            self.error("Input queue is full!")
            return False
        # the source agent must not be this model itself when meta-communication is going on
if "control" in _input or "proprioception" in _input or "vision" in _input or\
("meta" in _input and "agent_name" in _input and _input["agent_name"] != self.personmodel.my_id):
if len(self.queue_input) > 0:
_prior_input = self.queue_input[0]
if uuid in _prior_input:
new_input = _prior_input[uuid]
new_input.update(_input)
self.queue_input[0] = {uuid: new_input}
else:
self.queue_input.appendleft({uuid: _input})
# print("appended:", uuid)
else:
self.queue_input.appendleft({uuid: _input})
# print("no prior entries:", uuid)
# print(len(self.queue_input), self.queue_input)
return True
else:
self.error("A dict is expected with at least one of these keys: control, proprioception, vision, or meta")
return False
def set_topdown_influence(self, _input):
""" Set a specific influence boost in the top-layer.
"""
if "input" in _input:
self.queue_top_influence.appendleft(_input)
else:
self.error("A dict is expected with an 'input' key.")
if len(self.queue_top_influence) > 99:
self.error("Top-down influence queue is full!")
def set_long_range_projection(self, _input):
""" Set a specific input to a layer.
Input expects a list of [layer_name, influence].
"""
if _input is not None:
if len(_input) > 0:
self.queue_long_range_projection.appendleft(_input)
else:
self.error("A list is expected with structure [layer_name, influence].")
        if len(self.queue_long_range_projection) > 99:
self.error("Long range projection queue is full!")
def finalize(self):
""" Call finalize method for each layer.
"""
self.layer_top.finalize()
for layer in self.layer_between[::-1]:
layer.finalize()
self.layer_io.finalize()
self.layer_vision.finalize()
# signal hierarchy processing stopping
self.is_stopped = True
def print_report(self):
""" Display a report on model statistics and content.
"""
self.log(0, "\n\n")
self.log(0, "-" * 10, "Hierarchy output:")
self.log(0, self.layer_top.print_out())
for layer in self.layer_between[::-1]:
self.log(0, layer.print_out())
self.log(0, self.layer_io.print_out())
self.log(0, self.layer_vision.print_out())
def hierarchy_sleep(self, seconds):
sleep(seconds)
def run(self):
""" Start the thread updating the hierarchy to have
a non-blocking instance of the predictive hierarchy.
"""
update_thread = threading.Thread(target=self.update)
update_thread.daemon = False
update_thread.start()
self.log(1, "Running hierarchy update process ...")
def hierarchy_is_stopped(self):
return self.is_stopped
|
_logs.py
|
from __future__ import annotations
import itertools
import logging
import os
import shlex
import subprocess
import sys
import threading
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any, TextIO, cast
import porcupine
from porcupine import dirs
log = logging.getLogger(__name__)
FILENAME_FIRST_PART_FORMAT = "%Y-%m-%dT%H-%M-%S"
# might be useful to grep something from old logs, but 30 days was way too much
LOG_MAX_AGE_DAYS = 7
def _remove_old_logs() -> None:
for path in Path(dirs.user_log_dir).glob("*.txt"):
# support '<log dir>/<first_part>_<number>.txt' and '<log dir>/<firstpart>.txt'
first_part = path.stem.split("_")[0]
try:
log_date = datetime.strptime(first_part, FILENAME_FIRST_PART_FORMAT)
except ValueError:
log.info(f"{path.parent} contains a file with an unexpected name: {path.name}")
continue
how_old = datetime.now() - log_date
if how_old > timedelta(days=LOG_MAX_AGE_DAYS):
log.info(f"{path} is more than {LOG_MAX_AGE_DAYS} days old, removing")
path.unlink()
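# Illustrative example (hypothetical filenames): with FILENAME_FIRST_PART_FORMAT,
# "2024-01-02T03-04-05.txt" and "2024-01-02T03-04-05_1.txt" both parse (the part
# before the first "_" is the timestamp) and are removed once older than
# LOG_MAX_AGE_DAYS, while a name like "notes.txt" just triggers the info message above.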
def _run_command(command: str) -> None:
try:
output = subprocess.check_output(shlex.split(command), stderr=subprocess.STDOUT).decode(
"utf-8", errors="replace"
)
log.info(f"output from '{command}':\n{output}")
except FileNotFoundError as e:
log.info(f"cannot run '{command}': {e}")
except (subprocess.CalledProcessError, OSError):
log.warning(f"unexpected error when running '{command}'", exc_info=True)
def _open_log_file() -> TextIO:
timestamp = datetime.now().strftime(FILENAME_FIRST_PART_FORMAT)
filenames = (
f"{timestamp}.txt" if i == 0 else f"{timestamp}_{i}.txt" for i in itertools.count()
)
for filename in filenames:
try:
return (Path(dirs.user_log_dir) / filename).open("x", encoding="utf-8")
except FileExistsError:
continue
assert False # makes mypy happy
class _FilterThatDoesntHideWarnings(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
return record.levelno >= logging.WARNING or super().filter(record)
# verbose_logger can be:
# - empty string (print everything)
# - logger name (print only messages from that logger)
# - None (only print errors)
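# Rough sketch of what the three modes mean in practice (assuming default levels, and
# "porcupine.plugins.xyz" being just an example logger name): setup(None) shows only
# WARNING and above on the terminal, setup("") shows every DEBUG message, and
# setup("porcupine.plugins.xyz") shows DEBUG messages from that logger plus
# warnings/errors from everything else (that is what _FilterThatDoesntHideWarnings
# is for). The log file always receives DEBUG.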
def setup(verbose_logger: str | None) -> None:
handlers: list[logging.Handler] = []
log_file = _open_log_file()
print(f"log file: {log_file.name}")
file_handler = logging.StreamHandler(log_file)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(
logging.Formatter("[%(asctime)s] %(name)s %(levelname)s: %(message)s")
)
handlers.append(file_handler)
if sys.stderr is not None:
# not running in pythonw.exe, can also show something in terminal
print_handler = logging.StreamHandler(sys.stderr)
if verbose_logger is None:
print_handler.setLevel(logging.WARNING)
else:
print_handler.setLevel(logging.DEBUG)
print_handler.addFilter(_FilterThatDoesntHideWarnings(verbose_logger))
print_handler.setFormatter(logging.Formatter("%(name)s %(levelname)s: %(message)s"))
handlers.append(print_handler)
# don't know why level must be specified here
logging.basicConfig(level=logging.DEBUG, handlers=handlers)
porcupine_path = cast(Any, porcupine).__path__[0]
log.debug(f"starting Porcupine {porcupine.__version__} from '{porcupine_path}'")
log.debug(f"PID: {os.getpid()}")
log.debug("running on Python %d.%d.%d from '%s'", *sys.version_info[:3], sys.executable)
log.debug(f"sys.platform is {sys.platform!r}")
if sys.platform != "win32":
# lsb_release is a python script on ubuntu so running it takes
# about 0.12 seconds on this system, i really want porcupine to
# start as fast as possible
_run_command("uname -a")
threading.Thread(target=_run_command, args=["lsb_release -a"]).start()
# don't fail to run if old logs can't be deleted for some reason
try:
_remove_old_logs()
except OSError:
log.exception("unexpected problem with removing old log files")
|
apd.py
|
#!/usr/bin/python
import sys
import time
import argparse
from datetime import datetime
import socket
import multiprocessing
THREADS_NUMBER = 8
args = {}
startupMessages = []
services = {
22: 'SSH',
80: 'HTTP',
443: 'HTTPS'
} #TODO: This is an example list, read this from a file later
def splitChunks(a, n):
k, m = divmod(len(a), n)
return (a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n))
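# Illustrative example (not used by the scanner): splitChunks yields n nearly equal
# consecutive slices, giving the first len(a) % n chunks one extra element, e.g.
#     >>> list(splitChunks(range(1, 11), 3))
#     [range(1, 5), range(5, 8), range(8, 11)]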
def argparser():
global args
global startupMessages
# TODO: Full options like --timeout, --file
parser = argparse.ArgumentParser(description='APD port scanner')
parser.add_argument('TargetAdress', type=str, help='Target address')
parser.add_argument('-T', type=int, help='Thread count (default: 8)', default=8)
parser.add_argument('-t', type=float, help='Connection timeout (default: 1.0s)', default=1)
parser.add_argument('-f', type=str, help='Output to filename', default='')
parser.add_argument('-pS', type=int, help='Beginning of the port range to scan (default: 1)', default=1)
parser.add_argument('-pE', type=int, help='Upper end of the port range to scan (default: 65535)', default=65535)
parser.add_argument('-v', action=argparse.BooleanOptionalAction, help='Show verbose information')
parser.add_argument('-vv', action=argparse.BooleanOptionalAction, help='Show debug information')
parser.add_argument('--show-progress', default=False, action='store_true', help='Show progress')
parser.add_argument('--no-logo', default=False, action='store_true', help='Disable ASCII-art at startup')
args = parser.parse_args()
startupMessages.append('[I] Connection timeout: {}s'.format(str(args.t)))
if args.T != 8:
startupMessages.append('[I] Thread count: {}'.format(str(args.T)))
if args.f != '':
startupMessages.append('[I] Outputting results to file: {}'.format(args.f))
if(args.v or args.vv) and args.show_progress:
startupMessages.append("[!] Verbose / debug options (-v or -vv) and --show-progress aren't supported together. Progress is disabled.")
args.show_progress = False
if(args.pS < 1) or (args.pE > 65535):
print("[-] Port range has to be within 1-65535. Cannot continue - exiting...")
sys.exit()
elif (args.pS > args.pE):
print("[-} Beginning of the port range has to be lower or equal to upper end of the port range - exiting...")
sys.exit()
elif (args.pS != 1) and (args.pE != 65535): #DEFAULT VALUES / ALL PORT NUMBERS
startupMessages.append('[I] Port range to scan: {}-{}'.format(str(args.pS),str(args.pE)))
def verbprint(text):
if args.v or args.vv:
print(text)
def debugprint(text):
if args.vv:
print(text)
def printProgress(finishedPort, rangeBeginning, rangeEnding):
if args.show_progress:
currentIndex = finishedPort - rangeBeginning
length = rangeEnding - rangeBeginning+1
print("Progress: {}%".format(str(int(float((currentIndex / length))*100))), end='\r') #TODO: This is one of the worst ways to calculate percentage
def scan(addr, portRange, queue, isFirst):
global args
openports_thread = [] # LOCAL SCOPE
rangeBeginning = portRange[0]
rangeEnding = portRange[-1]
try:
for port in portRange:
debugprint("[D] Initializing socket")
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # TCP socket
debugprint("[D] Setting socket timeout")
# Set timeout to user option in args
soc.settimeout(args.t)
debugprint("[D] Connecting to port {}".format(str(port)))
result = soc.connect_ex((addr,port))
if result == 0:
openports_thread.append(port)
if port in services:
print("[*] Port {} is open - {} ".format(str(port), services[port])) #Whitespace because of the progress message length
# TODO: Change showing progress to better way
# SHOW PROGRESS
if isFirst:
printProgress(port, rangeBeginning, rangeEnding)
else:
print("[*] Port {} is open ".format(str(port))) # Whitespace because of progress message length
# SHOW PROGRESS
if isFirst:
# SHOW PROGRESS
printProgress(port, rangeBeginning, rangeEnding)
else:
if isFirst:
printProgress(port, rangeBeginning, rangeEnding)
verbprint("[!] Port {} is closed - {}".format(str(port), str(result)))
queue.put(openports_thread)
except socket.gaierror: #TODO: Fix multithread error (repeated 8 times)
print("[-] Cannot resolve hostname: {}".format(str(addr)))
sys.exit()
except KeyboardInterrupt:
pass
def main():
openports = []
global startupMessages
argparser()
if not args.no_logo:
print(" __\n ____ _____ ____/ /\n / __ `/ __ \/ __ / \n/ /_/ / /_/ / /_/ / \n\__,_/ .___/\__,_/ \n /_/ ")
print("\n APD starting..."))
# PRINT ALL WARNINGS
try:
if startupMessages:
for message in startupMessages:
print(message)
# 1s delay allowing user to read message even if verbose or debug mode is enabled
time.sleep(1)
allPortsRanges = splitChunks(range(args.pS, args.pE+1), args.T)
print("[*] Starting TCP scan on {} at {}".format(args.TargetAdress, str(datetime.now())))
jobs = []
queue = multiprocessing.Queue()
isFirst = True
for portRange in allPortsRanges:
if list(portRange) == []: # Avoid division by zero error in show_progress
if(args.T != 8): #TODO: Fix: hardcoded value to prevent from warning with default value
print("[!] Ignoring user thread count: cannot start more threads than ports count")
break
if isFirst:
jobs.append(multiprocessing.Process(target=scan, args=(args.TargetAdress, portRange, queue, True)))
isFirst = False
else:
jobs.append(multiprocessing.Process(target=scan, args=(args.TargetAdress, portRange, queue, False)))
jobs[-1].start()
for job in jobs:
job.join() # If job is already finished, .join() will return instantly
openports = openports + list(queue.get()) # TODO: I think this is not the most optimal solution (we are calling queue.get() here 8 times)
# Scan finished
print("[+] Port scan on {} finished at {}".format(args.TargetAdress, str(datetime.now())))
# Create a string with open ports
text_openports = " ".join(str(openport) for openport in openports)
print("[+] All open ports: {}".format(text_openports))
# Save to file
if args.f != '':
try:
output_file = open(args.f, "w")
output_file.write(args.TargetAdress + ': ' + text_openports)
print("[+] Output written to file: {}".format(args.f))
except Exception as e:
print("[-] Cannot write output to file {}: {}".format(args.f, e))
except KeyboardInterrupt:
print("\r[-] Cancelling scan: keyboard interrupt")
sys.exit()
if __name__ == "__main__":
main()
|
a3c.py
|
import gym
import multiprocessing
import threading
import numpy as np
import os
import shutil
import matplotlib.pyplot as plt
import tensorflow as tf
#PARAMETERS
OUTPUT_GRAPH = True # save logs
RENDER=True # render one worker
LOG_DIR = './log' # save location for logs
N_WORKERS = multiprocessing.cpu_count() # number of workers
MAX_EP_STEP = 200 # maximum number of steps per episode
MAX_GLOBAL_EP = 2000 # total number of episodes
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 10 # sets how often the global net is updated
GAMMA = 0.90 # discount factor
ENTROPY_BETA = 0.01 # entropy factor
LR_A = 0.0001 # learning rate for actor
LR_C = 0.001 # learning rate for critic
# set environment
GAME = 'Pendulum-v0'
env = gym.make(GAME)
env.reset()
#if RENDER: # uncomment if rendering does not work
# env.render()
N_S = env.observation_space.shape[0] # number of states
N_A = env.action_space.shape[0] # number of actions
A_BOUND = [env.action_space.low, env.action_space.high] # action bounds
# Network for the Actor Critic
class ACNet(object):
def __init__(self, scope, sess, globalAC=None):
self.sess=sess
self.actor_optimizer = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA') # optimizer for the actor
self.critic_optimizer = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC') # optimizer for the critic
if scope == GLOBAL_NET_SCOPE: # get global network
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S') # state
self.a_params, self.c_params = self._build_net(scope)[-2:] # parameters of actor and critic net
else: # local net, calculate losses
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S') # state
self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A') # action
self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget') # v_target value
mu, sigma, self.v, self.a_params, self.c_params = self._build_net(scope) # get mu and sigma of estimated action from neural net
td = tf.subtract(self.v_target, self.v, name='TD_error')
with tf.name_scope('c_loss'):
self.c_loss = tf.reduce_mean(tf.square(td))
with tf.name_scope('wrap_a_out'):
mu, sigma = mu * A_BOUND[1], sigma + 1e-4
normal_dist = tf.contrib.distributions.Normal(mu, sigma)
with tf.name_scope('a_loss'):
log_prob = normal_dist.log_prob(self.a_his)
exp_v = log_prob * td
entropy = normal_dist.entropy() # encourage exploration
self.exp_v = ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
with tf.name_scope('choose_a'): # use local params to choose action
self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), A_BOUND[0], A_BOUND[1]) # sample a action from distribution
with tf.name_scope('local_grad'):
self.a_grads = tf.gradients(self.a_loss, self.a_params) #calculate gradients for the network weights
self.c_grads = tf.gradients(self.c_loss, self.c_params)
with tf.name_scope('sync'): # update local and global network weights
with tf.name_scope('pull'):
self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
with tf.name_scope('push'):
self.update_a_op = self.actor_optimizer.apply_gradients(zip(self.a_grads, globalAC.a_params))
self.update_c_op = self.critic_optimizer.apply_gradients(zip(self.c_grads, globalAC.c_params))
def _build_net(self, scope): # neural network structure of the actor and critic
w_init = tf.random_normal_initializer(0., .1)
with tf.variable_scope('actor'):
l_a = tf.layers.dense(self.s, 200, tf.nn.relu6, kernel_initializer=w_init, name='la')
mu = tf.layers.dense(l_a, N_A, tf.nn.tanh, kernel_initializer=w_init, name='mu') # estimated action value
sigma = tf.layers.dense(l_a, N_A, tf.nn.softplus, kernel_initializer=w_init, name='sigma') # estimated variance
with tf.variable_scope('critic'):
l_c = tf.layers.dense(self.s, 100, tf.nn.relu6, kernel_initializer=w_init, name='lc')
v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # estimated value for state
a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
return mu, sigma, v, a_params, c_params
def update_global(self, feed_dict): # run by a local
self.sess.run([self.update_a_op, self.update_c_op], feed_dict) # local grads applies to global net
def pull_global(self): # run by a local
self.sess.run([self.pull_a_params_op, self.pull_c_params_op])
def choose_action(self, s): # run by a local
s = s[np.newaxis, :]
return self.sess.run(self.A, {self.s: s})[0]
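# Worked example (assumed numbers) for the n-step targets computed in Worker.work():
# with GAMMA = 0.9, buffered rewards [1, 1, 1] and bootstrap value v_s_ = 0, the
# reversed pass v_s_ = r + GAMMA * v_s_ yields v_target = [2.71, 1.9, 1.0], i.e.
# each entry is r_t + GAMMA * v_target[t+1].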
# worker class that inits its own environment, trains on it and uploads weights to the global net
class Worker(object):
def __init__(self, name, globalAC, sess):
self.env = gym.make(GAME).unwrapped # make environment for each worker
self.name = name
self.AC = ACNet(name, sess, globalAC) # create ACNet for each worker
self.sess=sess
def work(self):
global global_rewards, global_episodes
total_step = 1
buffer_s, buffer_a, buffer_r = [], [], []
while not coord.should_stop() and global_episodes < MAX_GLOBAL_EP:
s = self.env.reset()
ep_r = 0
for ep_t in range(MAX_EP_STEP):
if self.name == 'W_0' and RENDER:
self.env.render()
a = self.AC.choose_action(s) # estimate stochastic action based on policy
s_, r, done, info = self.env.step(a) # make step in environment
done = True if ep_t == MAX_EP_STEP - 1 else False
ep_r += r
# save actions, states and rewards in buffer
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append((r+8)/8) # normalize reward
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
if done:
v_s_ = 0 # terminal
else:
v_s_ = self.sess.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
buffer_v_target = []
for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + GAMMA * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
feed_dict = {
self.AC.s: buffer_s,
self.AC.a_his: buffer_a,
self.AC.v_target: buffer_v_target,
}
self.AC.update_global(feed_dict) # actual training step, update global ACNet
buffer_s, buffer_a, buffer_r = [], [], []
self.AC.pull_global() # get global parameters to local ACNet
s = s_
total_step += 1
if done:
if len(global_rewards) < 5: # record running episode reward
global_rewards.append(ep_r)
else:
global_rewards.append(ep_r)
global_rewards[-1] =(np.mean(global_rewards[-5:])) # smoothing
print(
self.name,
"Ep:", global_episodes,
"| Ep_r: %i" % global_rewards[-1],
)
global_episodes += 1
break
if __name__ == "__main__":
global_rewards = []
global_episodes = 0
sess = tf.Session()
with tf.device("/cpu:0"):
global_ac = ACNet(GLOBAL_NET_SCOPE,sess) # we only need its params
workers = []
# Create workers
for i in range(N_WORKERS):
i_name = 'W_%i' % i # worker name
workers.append(Worker(i_name, global_ac,sess))
coord = tf.train.Coordinator()
sess.run(tf.global_variables_initializer())
if OUTPUT_GRAPH: # write log file
if os.path.exists(LOG_DIR):
shutil.rmtree(LOG_DIR)
tf.summary.FileWriter(LOG_DIR, sess.graph)
worker_threads = []
for worker in workers: #start workers
job = lambda: worker.work()
t = threading.Thread(target=job)
t.start()
worker_threads.append(t)
coord.join(worker_threads) # wait for termination of workers
plt.plot(np.arange(len(global_rewards)), global_rewards) # plot rewards
plt.xlabel('step')
plt.ylabel('total moving reward')
plt.show()
|
station_scan.py
|
from multiprocessing import Process
class Station_Scan():
def __init__(self, settings, channels):
self.config = settings
self.channels = channels
self.chanscan = Process(target=self.runscan)
def scan(self):
print("Channel Scan Requested by Client.")
try:
self.chanscan.start()
except AssertionError:
print("Channel Scan Already In Progress!")
def runscan(self):
self.channels.get_channels(forceupdate=True)
print("Requested Channel Scan Complete.")
def scanning(self):
try:
self.chanscan.join(timeout=0)
return self.chanscan.is_alive()
except AssertionError:
return False
|
file_stream.py
|
import base64
import binascii
import collections
import itertools
import logging
import os
import sys
import random
import requests
import threading
import time
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Tuple
import wandb
from wandb import util
from wandb import env
import six
from six.moves import queue
from ..lib import file_stream_utils
logger = logging.getLogger(__name__)
Chunk = collections.namedtuple("Chunk", ("filename", "data"))
if TYPE_CHECKING:
from typing import Any, List, Dict
class DefaultFilePolicy(object):
def __init__(self, start_chunk_id=0):
self._chunk_id = start_chunk_id
def process_chunks(self, chunks):
chunk_id = self._chunk_id
self._chunk_id += len(chunks)
return {"offset": chunk_id, "content": [c.data for c in chunks]}
class JsonlFilePolicy(DefaultFilePolicy):
def process_chunks(self, chunks):
chunk_id = self._chunk_id
# TODO: chunk_id is getting reset on each request...
self._chunk_id += len(chunks)
chunk_data = []
for chunk in chunks:
if len(chunk.data) > util.MAX_LINE_BYTES:
msg = "Metric data exceeds maximum size of {} ({})".format(
util.to_human_size(util.MAX_LINE_BYTES),
util.to_human_size(len(chunk.data)),
)
wandb.termerror(msg, repeat=False)
util.sentry_message(msg)
else:
chunk_data.append(chunk.data)
return {
"offset": chunk_id,
"content": chunk_data,
}
class SummaryFilePolicy(DefaultFilePolicy):
def process_chunks(self, chunks):
data = chunks[-1].data
if len(data) > util.MAX_LINE_BYTES:
msg = "Summary data exceeds maximum size of {}. Dropping it.".format(
util.to_human_size(util.MAX_LINE_BYTES)
)
wandb.termerror(msg, repeat=False)
util.sentry_message(msg)
return False
return {"offset": 0, "content": [data]}
class StreamCRState:
"""There are two streams: stdout and stderr.
We create two instances for each stream.
An instance holds state about:
found_cr: if a carriage return has been found in this stream.
cr: most recent offset (line number) where we found \r.
We update this offset with every progress bar update.
last_normal: most recent offset without a \r in this stream.
i.e the most recent "normal" line.
"""
found_cr: bool
cr: Optional[int]
last_normal: Optional[int]
def __init__(self):
self.found_cr = False
self.cr = None
self.last_normal = None
class CRDedupeFilePolicy(DefaultFilePolicy):
"""File stream policy that removes characters that would be erased by
carriage returns.
This is what a terminal does. We use it for console output to reduce the
amount of data we need to send over the network (eg. for progress bars),
while preserving the output's appearance in the web app.
CR stands for "carriage return", for the character \r. It tells the terminal
to move the cursor back to the start of the current line. Progress bars
(like tqdm) use \r repeatedly to overwrite a line with newer updates.
This gives the illusion of the progress bar filling up in real-time.
"""
def __init__(self, start_chunk_id=0):
super(CRDedupeFilePolicy, self).__init__(start_chunk_id=start_chunk_id)
self._prev_chunk = None
self.global_offset = 0
# cr refers to carriage return \r
self.stderr = StreamCRState()
self.stdout = StreamCRState()
def get_consecutive_offsets(self, console: Dict) -> List[Any]:
"""
Args:
console: Dict[int, str] which maps offsets (line numbers) to lines of text.
It represents a mini version of our console dashboard on the UI.
Returns:
A list of intervals (we compress consecutive line numbers into an interval).
Example:
>>> console = {2: "", 3: "", 4: "", 5: "", 10: "", 11: "", 20: ""}
>>> get_consecutive_offsets(console)
[(2, 5), (10, 11), (20, 20)]
"""
offsets = sorted(list(console.keys()))
intervals: List = []
for i, num in enumerate(offsets):
if i == 0:
intervals.append([num, num])
continue
largest = intervals[-1][1]
if num == largest + 1:
intervals[-1][1] = num
else:
intervals.append([num, num])
return intervals
def split_chunk(self, chunk: Chunk) -> Tuple[str, str]:
"""
Args:
chunk: object with two fields: filename (str) & data (str)
`chunk.data` is a str containing the lines we want. It usually contains \n or \r or both.
`chunk.data` has two possible formats (for the two streams - stdout and stderr):
- "2020-08-25T20:38:36.895321 this is my line of text\nsecond line\n"
- "ERROR 2020-08-25T20:38:36.895321 this is my line of text\nsecond line\nthird\n"
Here's another example with a carriage return \r.
- "ERROR 2020-08-25T20:38:36.895321 \r progress bar\n"
Returns:
A 2-tuple of strings.
First str is prefix, either "ERROR {timestamp} " or "{timestamp} ".
Second str is the rest of the string.
Example:
>>> chunk = Chunk(filename="output.log", data="ERROR 2020-08-25T20:38 this is my line of text\n")
>>> split_chunk(chunk)
("ERROR 2020-08-25T20:38 ", "this is my line of text\n")
"""
prefix = ""
token, rest = chunk.data.split(" ", 1)
if token == "ERROR":
prefix += token + " "
token, rest = rest.split(" ", 1)
prefix += token + " "
return prefix, rest
def process_chunks(self, chunks: List) -> List[Dict]:
"""
Args:
chunks: List of Chunk objects. See description of chunk above in `split_chunk(...)`.
Returns:
List[Dict]. Each dict in the list contains two keys: an `offset` which holds the line number
and `content` which maps to a list of consecutive lines starting from that offset.
`offset` here means global line number in our console on the UI.
Example:
>>> chunks = [
Chunk("output.log", "ERROR 2020-08-25T20:38 this is my line of text\nboom\n"),
Chunk("output.log", "2020-08-25T20:38 this is test\n"),
]
>>> process_chunks(chunks)
[
{"offset": 0, "content": [
"ERROR 2020-08-25T20:38 this is my line of text\n",
"ERROR 2020-08-25T20:38 boom\n",
"2020-08-25T20:38 this is test\n"
]
}
]
"""
# Dict[int->str], each offset (line number) mapped to a line.
# Represents a mini-version of our console pane on the UI.
console = {}
sep = os.linesep
for c in chunks:
prefix, logs_str = self.split_chunk(c)
logs = logs_str.split(sep)
for line in logs:
stream = self.stderr if prefix.startswith("ERROR ") else self.stdout
if line.startswith("\r"):
# line starting with \r will always overwrite a previous offset.
offset = stream.cr if stream.found_cr else stream.last_normal or 0
stream.cr = offset
stream.found_cr = True
console[offset] = prefix + line[1:] + "\n"
# Usually logs_str = "\r progress bar\n" for progress bar updates.
# If instead logs_str = "\r progress bar\n text\n text\n",
# treat this as the end of a progress bar and reset accordingly.
if (
logs_str.count(sep) > 1
and logs_str.replace(sep, "").count("\r") == 1
):
stream.found_cr = False
elif line:
console[self.global_offset] = prefix + line + "\n"
stream.last_normal = self.global_offset
self.global_offset += 1
intervals = self.get_consecutive_offsets(console)
ret = []
for (a, b) in intervals:
ret.append({"offset": a, "content": [console[i] for i in range(a, b + 1)]})
return ret
class BinaryFilePolicy(DefaultFilePolicy):
    def __init__(self, start_chunk_id=0):
        super(BinaryFilePolicy, self).__init__(start_chunk_id=start_chunk_id)
        # process_chunks below keeps a running byte offset, so it must be initialized here
        self._offset = 0
    def process_chunks(self, chunks):
data = b"".join([c.data for c in chunks])
enc = base64.b64encode(data).decode("ascii")
self._offset += len(data)
return {"offset": self._offset, "content": enc, "encoding": "base64"}
class FileStreamApi(object):
"""Pushes chunks of files to our streaming endpoint.
This class is used as a singleton. It has a thread that serializes access to
the streaming endpoint and performs rate-limiting and batching.
TODO: Differentiate between binary/text encoding.
"""
Finish = collections.namedtuple("Finish", ("exitcode"))
Preempting = collections.namedtuple("Preempting", ())
PushSuccess = collections.namedtuple("PushSuccess", ("artifact_id", "save_name"))
HTTP_TIMEOUT = env.get_http_timeout(10)
MAX_ITEMS_PER_PUSH = 10000
def __init__(self, api, run_id, start_time, settings=None):
if settings is None:
settings = dict()
# NOTE: exc_info is set in thread_except_body context and readable by calling threads
self._exc_info = None
self._settings = settings
self._api = api
self._run_id = run_id
self._start_time = start_time
self._client = requests.Session()
self._client.auth = ("api", api.api_key)
self._client.timeout = self.HTTP_TIMEOUT
self._client.headers.update(
{
"User-Agent": api.user_agent,
"X-WANDB-USERNAME": env.get_username(),
"X-WANDB-USER-EMAIL": env.get_user_email(),
}
)
self._file_policies = {}
self._dropped_chunks = 0
self._queue = queue.Queue()
self._thread = threading.Thread(target=self._thread_except_body)
# It seems we need to make this a daemon thread to get sync.py's atexit handler to run, which
# cleans this thread up.
self._thread.name = "FileStreamThread"
self._thread.daemon = True
self._init_endpoint()
def _init_endpoint(self):
settings = self._api.settings()
settings.update(self._settings)
self._endpoint = "{base}/files/{entity}/{project}/{run}/file_stream".format(
base=settings["base_url"],
entity=settings["entity"],
project=settings["project"],
run=self._run_id,
)
def start(self):
self._init_endpoint()
self._thread.start()
def set_default_file_policy(self, filename, file_policy):
"""Set an upload policy for a file unless one has already been set."""
if filename not in self._file_policies:
self._file_policies[filename] = file_policy
def set_file_policy(self, filename, file_policy):
self._file_policies[filename] = file_policy
@property
def heartbeat_seconds(self):
# Defaults to 30
return self._api.dynamic_settings["heartbeat_seconds"]
def rate_limit_seconds(self):
run_time = time.time() - self._start_time
if run_time < 60:
return max(1, self.heartbeat_seconds / 15)
elif run_time < 300:
return max(2.5, self.heartbeat_seconds / 3)
else:
return max(5, self.heartbeat_seconds)
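    # Worked example (assuming the default heartbeat of 30 s): during the first minute
    # of a run this returns max(1, 2) = 2 s, between 1 and 5 minutes max(2.5, 10) = 10 s,
    # and afterwards max(5, 30) = 30 s between posts.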
def _read_queue(self):
# called from the push thread (_thread_body), this does an initial read
# that'll block for up to rate_limit_seconds. Then it tries to read
# as much out of the queue as it can. We do this because the http post
# to the server happens within _thread_body, and can take longer than
        # our rate limit. So the next time we get a chance to read the queue we want
        # to read all the stuff that queued up since last time.
#
# If we have more than MAX_ITEMS_PER_PUSH in the queue then the push thread
# will get behind and data will buffer up in the queue.
return util.read_many_from_queue(
self._queue, self.MAX_ITEMS_PER_PUSH, self.rate_limit_seconds()
)
def _thread_body(self):
posted_data_time = time.time()
posted_anything_time = time.time()
ready_chunks = []
uploaded = set()
finished = None
while finished is None:
items = self._read_queue()
for item in items:
if isinstance(item, self.Finish):
finished = item
elif isinstance(item, self.Preempting):
request_with_retry(
self._client.post,
self._endpoint,
json={
"complete": False,
"preempting": True,
"dropped": self._dropped_chunks,
"uploaded": list(uploaded),
},
)
uploaded = set()
elif isinstance(item, self.PushSuccess):
uploaded.add(item.save_name)
else:
# item is Chunk
ready_chunks.append(item)
cur_time = time.time()
if ready_chunks and (
finished or cur_time - posted_data_time > self.rate_limit_seconds()
):
posted_data_time = cur_time
posted_anything_time = cur_time
self._send(ready_chunks)
ready_chunks = []
if cur_time - posted_anything_time > self.heartbeat_seconds:
posted_anything_time = cur_time
self._handle_response(
request_with_retry(
self._client.post,
self._endpoint,
json={
"complete": False,
"failed": False,
"dropped": self._dropped_chunks,
"uploaded": list(uploaded),
},
)
)
uploaded = set()
# post the final close message. (item is self.Finish instance now)
request_with_retry(
self._client.post,
self._endpoint,
json={
"complete": True,
"exitcode": int(finished.exitcode),
"dropped": self._dropped_chunks,
"uploaded": list(uploaded),
},
)
def _thread_except_body(self):
# TODO: Consolidate with internal_util.ExceptionThread
try:
self._thread_body()
except Exception as e:
exc_info = sys.exc_info()
self._exc_info = exc_info
logger.exception("generic exception in filestream thread")
util.sentry_exc(exc_info, delay=True)
raise e
def _handle_response(self, response):
"""Logs dropped chunks and updates dynamic settings"""
if isinstance(response, Exception):
wandb.termerror(
"Dropped streaming file chunk (see wandb/debug-internal.log)"
)
logging.exception("dropped chunk %s" % response)
self._dropped_chunks += 1
else:
            parsed: Optional[dict] = None
try:
parsed = response.json()
except Exception:
pass
if isinstance(parsed, dict):
limits = parsed.get("limits")
if isinstance(limits, dict):
self._api.dynamic_settings.update(limits)
def _send(self, chunks):
# create files dict. dict of <filename: chunks> pairs where chunks is a list of
# [chunk_id, chunk_data] tuples (as lists since this will be json).
files = {}
# Groupby needs group keys to be consecutive, so sort first.
chunks.sort(key=lambda c: c.filename)
for filename, file_chunks in itertools.groupby(chunks, lambda c: c.filename):
file_chunks = list(file_chunks) # groupby returns iterator
# Specific file policies are set by internal/sender.py
self.set_default_file_policy(filename, DefaultFilePolicy())
files[filename] = self._file_policies[filename].process_chunks(file_chunks)
if not files[filename]:
del files[filename]
for fs in file_stream_utils.split_files(files, max_bytes=util.MAX_LINE_BYTES):
self._handle_response(
request_with_retry(
self._client.post,
self._endpoint,
json={"files": fs, "dropped": self._dropped_chunks},
retry_callback=self._api.retry_callback,
)
)
def stream_file(self, path):
name = path.split("/")[-1]
with open(path) as f:
self._send([Chunk(name, line) for line in f])
def enqueue_preempting(self):
self._queue.put(self.Preempting())
def push(self, filename, data):
"""Push a chunk of a file to the streaming endpoint.
Arguments:
filename: Name of file that this is a chunk of.
chunk_id: TODO: change to 'offset'
chunk: File data.
"""
self._queue.put(Chunk(filename, data))
def push_success(self, artifact_id, save_name):
"""Notification that a file upload has been successfully completed
Arguments:
artifact_id: ID of artifact
save_name: saved name of the uploaded file
"""
self._queue.put(self.PushSuccess(artifact_id, save_name))
def finish(self, exitcode):
"""Cleans up.
Anything pushed after finish will be dropped.
Arguments:
exitcode: The exitcode of the watched process.
"""
self._queue.put(self.Finish(exitcode))
# TODO(jhr): join on a thread which exited with an exception is a noop, clean up this path
self._thread.join()
if self._exc_info:
logger.error("FileStream exception", exc_info=self._exc_info)
# reraising the original exception, will get recaught in internal.py for the sender thread
six.reraise(*self._exc_info)
MAX_SLEEP_SECONDS = 60 * 5
def request_with_retry(func, *args, **kwargs):
"""Perform a requests http call, retrying with exponential backoff.
Arguments:
func: An http-requesting function to call, like requests.post
max_retries: Maximum retries before giving up. By default we retry 30 times in ~2 hours before dropping the chunk
*args: passed through to func
**kwargs: passed through to func
"""
max_retries = kwargs.pop("max_retries", 30)
retry_callback = kwargs.pop("retry_callback", None)
sleep = 2
retry_count = 0
while True:
try:
response = func(*args, **kwargs)
response.raise_for_status()
return response
except (
requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout,
) as e:
if isinstance(e, requests.exceptions.HTTPError):
# Non-retriable HTTP errors.
#
# We retry 500s just to be cautious, and because the back end
# returns them when there are infrastructure issues. If retrying
# some request winds up being problematic, we'll change the
# back end to indicate that it shouldn't be retried.
if e.response is not None and e.response.status_code in {
400,
403,
404,
409,
}:
return e
if retry_count == max_retries:
return e
retry_count += 1
delay = sleep + random.random() * 0.25 * sleep
if isinstance(e, requests.exceptions.HTTPError) and (
e.response is not None and e.response.status_code == 429
):
err_str = "Filestream rate limit exceeded, retrying in {} seconds".format(
delay
)
if retry_callback:
retry_callback(e.response.status_code, err_str)
logger.info(err_str)
logger.warning(
"requests_with_retry encountered retryable exception: %s. func: %s, args: %s, kwargs: %s",
e,
func,
args,
kwargs,
)
time.sleep(delay)
sleep *= 2
if sleep > MAX_SLEEP_SECONDS:
sleep = MAX_SLEEP_SECONDS
except requests.exceptions.RequestException as e:
error_message = "unknown error"
try:
error_message = response.json()["error"] # XXX clean this up
except Exception:
pass
logger.error("requests_with_retry error: {}".format(error_message))
logger.exception(
"requests_with_retry encountered unretryable exception: %s", e
)
return e
|
TServer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import sys
import os
import threading
if sys.version_info[0] >= 3:
import queue
Queue = queue
else:
import Queue
import warnings
from thrift.Thrift import TProcessor, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol.THeaderProtocol import THeaderProtocolFactory
class TConnectionContext:
def getPeerName(self):
"""Gets the address of the client.
Returns:
The equivalent value of socket.getpeername() on the client socket
"""
raise NotImplementedError
class TRpcConnectionContext(TConnectionContext):
"""Connection context class for thrift RPC calls"""
def __init__(self, client_socket, iprot=None, oprot=None):
"""Initializer.
Arguments:
client_socket: the TSocket to the client
"""
self._client_socket = client_socket
self.iprot = iprot
self.oprot = oprot
def setProtocols(self, iprot, oprot):
self.iprot = iprot
self.oprot = oprot
def getPeerName(self):
"""Gets the address of the client.
Returns:
            Same value as socket.getpeername() for the TSocket
"""
return self._client_socket.getPeerName()
def getSockName(self):
"""Gets the address of the server.
Returns:
Same value as socket.getsockname() for the TSocket
"""
return self._client_socket.getsockname()
class TServerEventHandler:
"""Event handler base class.
Override selected methods on this class to implement custom event handling
"""
def preServe(self, address):
"""Called before the server begins.
Arguments:
address: the address that the server is listening on
"""
pass
def newConnection(self, context):
"""Called when a client has connected and is about to begin processing.
Arguments:
context: instance of TRpcConnectionContext
"""
pass
def clientBegin(self, iprot, oprot):
"""Deprecated: Called when a new connection is made to the server.
For all servers other than TNonblockingServer, this function is called
whenever newConnection is called and vice versa. This is the old-style
for event handling and is not supported for TNonblockingServer. New
code should always use the newConnection method.
"""
pass
def connectionDestroyed(self, context):
"""Called when a client has finished request-handling.
Arguments:
context: instance of TRpcConnectionContext
"""
pass
class TServer:
"""Base interface for a server, which must have a serve method."""
""" constructors for all servers:
1) (processor, serverTransport)
2) (processor, serverTransport, transportFactory, protocolFactory)
3) (processor, serverTransport,
inputTransportFactory, outputTransportFactory,
inputProtocolFactory, outputProtocolFactory)
Optionally, the handler can be passed instead of the processor,
and a processor will be created automatically:
4) (handler, serverTransport)
    5) (handler, serverTransport, transportFactory, protocolFactory)
6) (handler, serverTransport,
inputTransportFactory, outputTransportFactory,
inputProtocolFactory, outputProtocolFactory)
The attribute serverEventHandler (default: None) receives
callbacks for various events in the server lifecycle. It should
be set to an instance of TServerEventHandler.
"""
def __init__(self, *args):
if (len(args) == 2):
self.__initArgs__(args[0], args[1],
TTransport.TTransportFactoryBase(),
TTransport.TTransportFactoryBase(),
TBinaryProtocol.TBinaryProtocolFactory(),
TBinaryProtocol.TBinaryProtocolFactory())
elif (len(args) == 4):
self.__initArgs__(args[0], args[1], args[2], args[2], args[3],
args[3])
elif (len(args) == 6):
self.__initArgs__(args[0], args[1], args[2], args[3], args[4],
args[5])
def __initArgs__(self, processor, serverTransport,
inputTransportFactory, outputTransportFactory,
inputProtocolFactory, outputProtocolFactory):
self.processor = self._getProcessor(processor)
self.serverTransport = serverTransport
self.inputTransportFactory = inputTransportFactory
self.outputTransportFactory = outputTransportFactory
self.inputProtocolFactory = inputProtocolFactory
self.outputProtocolFactory = outputProtocolFactory
self.serverEventHandler = TServerEventHandler()
def _getProcessor(self, processor):
""" Check if a processor is really a processor, or if it is a handler
auto create a processor for it """
if isinstance(processor, TProcessor):
return processor
elif hasattr(processor, "_processor_type"):
handler = processor
return handler._processor_type(handler)
else:
raise TApplicationException(
message="Could not detect processor type")
def setServerEventHandler(self, handler):
self.serverEventHandler = handler
def _clientBegin(self, context, iprot, oprot):
self.serverEventHandler.newConnection(context)
self.serverEventHandler.clientBegin(iprot, oprot)
def handle(self, client):
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
if isinstance(self.inputProtocolFactory, THeaderProtocolFactory):
oprot = iprot
else:
oprot = self.outputProtocolFactory.getProtocol(otrans)
context = TRpcConnectionContext(client, iprot, oprot)
self._clientBegin(context, iprot, oprot)
try:
while True:
self.processor.process(iprot, oprot, context)
except TTransport.TTransportException:
pass
except Exception as x:
logging.exception(x)
self.serverEventHandler.connectionDestroyed(context)
itrans.close()
otrans.close()
def serve(self):
pass
class TSimpleServer(TServer):
"""Simple single-threaded server that just pumps around one transport."""
def __init__(self, *args):
warnings.warn("TSimpleServer is deprecated. Please use one of "
"Nonblocking, Twisted, or Gevent server instead.",
DeprecationWarning)
TServer.__init__(self, *args)
def serve(self):
self.serverTransport.listen()
for name in self.serverTransport.getSocketNames():
self.serverEventHandler.preServe(name)
while True:
client = self.serverTransport.accept()
self.handle(client)
class TThreadedServer(TServer):
"""Threaded server that spawns a new thread per each connection."""
def __init__(self, *args, **kwargs):
TServer.__init__(self, *args)
self.daemon = kwargs.get("daemon", False)
def serve(self):
self.serverTransport.listen()
for name in self.serverTransport.getSocketNames():
self.serverEventHandler.preServe(name)
while True:
try:
client = self.serverTransport.accept()
t = threading.Thread(target=self.handle, args=(client,))
t.daemon = self.daemon
t.start()
except KeyboardInterrupt:
raise
except Exception as x:
logging.exception(x)
|
client.py
|
from sys import maxsize
import threading
import grpc
import protobuffer_pb2 as game
import protobuffer_pb2_grpc as rpc
from engine_revised import Engine
from queue import Queue
address = 'localhost'
port = 50051
class Client():
def __init__(self, id:str, engine: Engine):
self.id = id
self.engine = engine
self.gotten_data = Queue(maxsize=0)
with open('server.crt', 'rb') as f:
trusted_certs = f.read()
credentials = grpc.ssl_channel_credentials(root_certificates=trusted_certs)
channel = grpc.secure_channel(address+':'+str(port), credentials)
self.conn = rpc.GameServerStub(channel)
#New listening thread for getting messages
self.id_message = game.Id()
self.id_message.id = self.id
threading.Thread(target=self.__listen_for_messages, daemon=True).start()
def __listen_for_messages(self):
for data in self.conn.GameStream(self.id_message):
#print(data)
self.gotten_data.put(data)
'''
print('- - - - - - - - - - -')
all_items = self.engine.get_items_on_screen(self.id, data)
self.engine.render_field(all_items)
'''
def send_action(self, action):
if action != '':
n = game.Action()
n.id = self.id
n.direction = action
print(f'Sending action: {action}')
self.conn.GameAction(n)
def get_high_scores(self):
return self.conn.GameScores(game.Nothing())
if __name__ == '__main__':
from sys import argv
client = None
if len(argv) == 2:
client = Client(argv[1], Engine)
else:
client = Client('Thomas', Engine)
for score in client.get_high_scores().scores:
print(score.id, score.score)
|
miniterm.py
|
#!/home/sincgrid-ubuntu/Desktop/jupyterNotebook/PhySyncEnv/bin/python
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)
try:
raw_input
except NameError:
# pylint: disable=redefined-builtin,invalid-name
raw_input = input # in python3 it's "raw"
unichr = chr
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+{:c}'.format(ord('@') + ascii_code)
else:
return repr(character)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
"""OS abstraction for console (input/output codec, no echo)"""
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
"""Set console to read single characters, no echo"""
def cleanup(self):
"""Restore default console settings"""
def getkey(self):
"""Read a single key from the console"""
return None
def write_bytes(self, byte_string):
"""Write bytes (already encoded)"""
self.byte_output.write(byte_string)
self.byte_output.flush()
def write(self, text):
"""Write string"""
self.output.write(text)
self.output.flush()
def cancel(self):
"""Cancel getkey operation"""
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
# switch terminal temporary to normal mode (e.g. to get user input)
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
if os.name == 'nt': # noqa
import msvcrt
import ctypes
class Out(object):
"""file-like wrapper that uses os.write"""
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
def getkey(self):
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
                elif z in (unichr(0), unichr(0x0e)): # function keys, ignore
msvcrt.getwch()
else:
return z
def cancel(self):
# CancelIo, CancelSynchronousIo do not seem to work when using
# getwch, so instead, send a key to the window with the console
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
elif os.name == 'posix':
import atexit
import termios
import fcntl
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self.fd = sys.stdin.fileno()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
if sys.version_info < (3, 0):
self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
else:
self.enc_stdin = sys.stdin
def setup(self):
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
def cancel(self):
fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
else:
raise NotImplementedError(
'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
"""do-nothing: forward all data unchanged"""
def rx(self, text):
"""text received from serial port"""
return text
def tx(self, text):
"""text to be sent to serial port"""
return text
def echo(self, text):
"""text to be sent but displayed on console"""
return text
class CRLF(Transform):
"""ENTER sends CR+LF"""
def tx(self, text):
return text.replace('\n', '\r\n')
class CR(Transform):
"""ENTER sends CR"""
def rx(self, text):
return text.replace('\r', '\n')
def tx(self, text):
return text.replace('\n', '\r')
class LF(Transform):
"""ENTER sends LF"""
class NoTerminal(Transform):
"""remove typical terminal control codes from input"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32) if unichr(x) not in '\r\n\b\t')
REPLACEMENT_MAP.update(
{
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
def rx(self, text):
return text.translate(self.REPLACEMENT_MAP)
echo = rx
class NoControls(NoTerminal):
"""Remove all control codes, incl. CR+LF"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32))
REPLACEMENT_MAP.update(
{
0x20: 0x2423, # visual space
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
class Printable(Transform):
"""Show decimal code for all non-ASCII characters and replace most control codes"""
def rx(self, text):
r = []
for c in text:
if ' ' <= c < '\x7f' or c in '\r\n\b\t':
r.append(c)
elif c < ' ':
r.append(unichr(0x2400 + ord(c)))
else:
r.extend(unichr(0x2080 + ord(d) - 48) for d in '{:d}'.format(ord(c)))
r.append(' ')
return ''.join(r)
echo = rx
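    # Illustrative examples (assumed inputs): Printable().rx('\x07') yields the control
    # picture u'\u2407' for BEL, while a non-ASCII character such as '\xe9' (code 233)
    # is shown as its decimal code in subscript digits, u'\u2082\u2083\u2083 '.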
class Colorize(Transform):
"""Apply different colors for received and echo"""
def __init__(self):
# XXX make it configurable, use colorama?
self.input_color = '\x1b[37m'
self.echo_color = '\x1b[31m'
def rx(self, text):
return self.input_color + text
def echo(self, text):
return self.echo_color + text
class DebugIO(Transform):
"""Print what is sent and received"""
def rx(self, text):
sys.stderr.write(' [RX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
def tx(self, text):
sys.stderr.write(' [TX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character
EOL_TRANSFORMATIONS = {
'crlf': CRLF,
'cr': CR,
'lf': LF,
}
TRANSFORMATIONS = {
'direct': Transform, # no transformation
'default': NoTerminal,
'nocontrol': NoControls,
'printable': Printable,
'colorize': Colorize,
'debug': DebugIO,
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
"""\
Show a list of ports and ask the user for a choice. To make selection
easier on systems with long device names, also allow the input of an
index.
"""
sys.stderr.write('\n--- Available ports:\n')
ports = []
for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
sys.stderr.write('--- {:2}: {:20} {!r}\n'.format(n, port, desc))
ports.append(port)
while True:
port = raw_input('--- Enter port index or full name: ')
try:
index = int(port) - 1
if not 0 <= index < len(ports):
sys.stderr.write('--- Invalid index!\n')
continue
except ValueError:
pass
else:
port = ports[index]
return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
self.console = Console()
self.serial = serial_instance
self.echo = echo
self.raw = False
self.input_encoding = 'UTF-8'
self.output_encoding = 'UTF-8'
self.eol = eol
self.filters = filters
self.update_transformations()
self.exit_character = 0x1d # GS/CTRL+]
self.menu_character = 0x14 # Menu: CTRL+T
self.alive = None
self._reader_alive = None
self.receiver_thread = None
self.rx_decoder = None
self.tx_decoder = None
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def start(self):
"""start worker threads"""
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer, name='tx')
self.transmitter_thread.daemon = True
self.transmitter_thread.start()
self.console.setup()
def stop(self):
"""set flag to stop worker threads"""
self.alive = False
def join(self, transmit_only=False):
"""wait for worker threads to terminate"""
self.transmitter_thread.join()
if not transmit_only:
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def close(self):
self.serial.close()
def update_transformations(self):
"""take list of transformation classes and instantiate them for rx and tx"""
transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f]
for f in self.filters]
self.tx_transformations = [t() for t in transformations]
self.rx_transformations = list(reversed(self.tx_transformations))
def set_rx_encoding(self, encoding, errors='replace'):
"""set encoding for received data"""
self.input_encoding = encoding
self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
def set_tx_encoding(self, encoding, errors='replace'):
"""set encoding for transmitted data"""
self.output_encoding = encoding
self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
def dump_port_settings(self):
"""Write current settings to sys.stderr"""
sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
p=self.serial))
sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
('active' if self.serial.rts else 'inactive'),
('active' if self.serial.dtr else 'inactive'),
('active' if self.serial.break_condition else 'inactive')))
try:
sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
('active' if self.serial.cts else 'inactive'),
('active' if self.serial.dsr else 'inactive'),
('active' if self.serial.ri else 'inactive'),
('active' if self.serial.cd else 'inactive')))
except serial.SerialException:
            # on RFC 2217 ports, this can happen if no modem state notification
            # has been received yet. Ignore this error.
pass
sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
# read all that is there or wait for one byte
data = self.serial.read(self.serial.in_waiting or 1)
if data:
if self.raw:
self.console.write_bytes(data)
else:
text = self.rx_decoder.decode(data)
for transformation in self.rx_transformations:
text = transformation.rx(text)
self.console.write(text)
except serial.SerialException:
self.alive = False
self.console.cancel()
raise # XXX handle instead of re-raise?
def writer(self):
"""\
Loop and copy console->serial until self.exit_character character is
found. When self.menu_character is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if not self.alive:
break
if menu_active:
self.handle_menu_key(c)
menu_active = False
elif c == self.menu_character:
menu_active = True # next char will be for menu
elif c == self.exit_character:
self.stop() # exit app
break
else:
#~ if self.raw:
text = c
for transformation in self.tx_transformations:
text = transformation.tx(text)
self.serial.write(self.tx_encoder.encode(text))
if self.echo:
echo_text = c
for transformation in self.tx_transformations:
echo_text = transformation.echo(echo_text)
self.console.write(echo_text)
except:
self.alive = False
raise
def handle_menu_key(self, c):
"""Implement a simple menu / settings"""
if c == self.menu_character or c == self.exit_character:
# Menu/exit character again -> send itself
self.serial.write(self.tx_encoder.encode(c))
if self.echo:
self.console.write(c)
elif c == '\x15': # CTRL+U -> upload file
self.upload_file()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(self.get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.serial.rts = not self.serial.rts
sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.serial.dtr = not self.serial.dtr
sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.serial.break_condition = not self.serial.break_condition
sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
elif c == '\x06': # CTRL+F -> edit filters
self.change_filter()
elif c == '\x0c': # CTRL+L -> EOL mode
modes = list(EOL_TRANSFORMATIONS) # keys
eol = modes.index(self.eol) + 1
if eol >= len(modes):
eol = 0
self.eol = modes[eol]
sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
self.update_transformations()
elif c == '\x01': # CTRL+A -> set encoding
self.change_encoding()
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
#~ elif c == '\x01': # CTRL+A -> cycle escape mode
#~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
elif c in 'pP': # P -> change port
self.change_port()
elif c in 'sS': # S -> suspend / open port temporarily
self.suspend_port()
elif c in 'bB': # B -> change baudrate
self.change_baudrate()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
        elif c == '7':  # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
        elif c in 'sS':  # S -> change to space parity (note: unreachable, 'sS' is already consumed by the suspend_port branch above)
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
            sys.stderr.write('--- unknown menu character {} ---\n'.format(key_description(c)))
def upload_file(self):
"""Ask user for filenname and send its contents"""
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
with self.console:
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
with open(filename, 'rb') as f:
sys.stderr.write('--- Sending file {} ---\n'.format(filename))
while True:
block = f.read(1024)
if not block:
break
self.serial.write(block)
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
except IOError as e:
sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
def change_filter(self):
"""change the i/o transformations"""
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write('--- unknown filter: {}\n'.format(repr(f)))
break
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def change_encoding(self):
"""change encoding on the serial port"""
sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
def change_baudrate(self):
"""change the baudrate"""
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
def change_port(self):
"""Have a conversation with the user to change the serial port"""
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
def suspend_port(self):
"""\
open port temporarily, allow reconnect, exit and port change to get
out of the loop
"""
# reader thread needs to be shut down
self._stop_reader()
self.serial.close()
sys.stderr.write('\n--- Port closed: {} ---\n'.format(self.serial.port))
do_change_port = False
while not self.serial.is_open:
sys.stderr.write('--- Quit: {exit} | p: port change | any other key to reconnect ---\n'.format(
exit=key_description(self.exit_character)))
k = self.console.getkey()
if k == self.exit_character:
self.stop() # exit app
break
elif k in 'pP':
do_change_port = True
break
try:
self.serial.open()
except Exception as e:
sys.stderr.write('--- ERROR opening port: {} ---\n'.format(e))
if do_change_port:
self.change_port()
else:
# and restart the reader thread
self._start_reader()
sys.stderr.write('--- Port opened: {} ---\n'.format(self.serial.port))
def get_help_text(self):
"""return the help text"""
# help text, starts with blank line!
return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
exit=key_description(self.exit_character),
menu=key_description(self.menu_character),
rts=key_description('\x12'),
dtr=key_description('\x04'),
brk=key_description('\x02'),
echo=key_description('\x05'),
info=key_description('\x09'),
upload=key_description('\x15'),
repr=key_description('\x01'),
filter=key_description('\x06'),
eol=key_description('\x0c'))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from another script,
# e.g. to create a miniterm-my-device.py
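# Hypothetical example (the file name, port and values below are placeholders);
# it assumes this module is importable, e.g. as serial.tools.miniterm in pySerial:
#
#     # miniterm-my-device.py
#     from serial.tools.miniterm import main
#     main(default_port='/dev/ttyUSB0', default_baudrate=115200,
#          default_rts=0, default_dtr=0)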
def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None):
"""Command line tool, entry point"""
import argparse
parser = argparse.ArgumentParser(
description="Miniterm - A simple terminal program for the serial port.")
parser.add_argument(
"port",
nargs='?',
help="serial port name ('-' to show port list)",
default=default_port)
parser.add_argument(
"baudrate",
nargs='?',
type=int,
help="set baud rate, default: %(default)s",
default=default_baudrate)
group = parser.add_argument_group("port settings")
group.add_argument(
"--parity",
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help="set parity, one of {N E O S M}, default: N",
default='N')
group.add_argument(
"--rtscts",
action="store_true",
help="enable RTS/CTS flow control (default off)",
default=False)
group.add_argument(
"--xonxoff",
action="store_true",
help="enable software flow control (default off)",
default=False)
group.add_argument(
"--rts",
type=int,
help="set initial RTS line state (possible values: 0, 1)",
default=default_rts)
group.add_argument(
"--dtr",
type=int,
help="set initial DTR line state (possible values: 0, 1)",
default=default_dtr)
group.add_argument(
"--ask",
action="store_true",
help="ask again for port when open fails",
default=False)
group = parser.add_argument_group("data handling")
group.add_argument(
"-e", "--echo",
action="store_true",
help="enable local echo (default off)",
default=False)
group.add_argument(
"--encoding",
dest="serial_port_encoding",
metavar="CODEC",
help="set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s",
default='UTF-8')
group.add_argument(
"-f", "--filter",
action="append",
metavar="NAME",
help="add text transformation",
default=[])
group.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="end of line mode",
default='CRLF')
group.add_argument(
"--raw",
action="store_true",
help="Do no apply any encodings/transformations",
default=False)
group = parser.add_argument_group("hotkeys")
group.add_argument(
"--exit-char",
type=int,
metavar='NUM',
help="Unicode of special character that is used to exit the application, default: %(default)s",
default=0x1d) # GS/CTRL+]
group.add_argument(
"--menu-char",
type=int,
metavar='NUM',
help="Unicode code of special character that is used to control miniterm (menu), default: %(default)s",
default=0x14) # Menu: CTRL+T
group = parser.add_argument_group("diagnostics")
group.add_argument(
"-q", "--quiet",
action="store_true",
help="suppress non-error messages",
default=False)
group.add_argument(
"--develop",
action="store_true",
help="show Python traceback on error",
default=False)
args = parser.parse_args()
if args.menu_char == args.exit_char:
        parser.error('--exit-char cannot be the same as --menu-char')
if args.filter:
if 'help' in args.filter:
sys.stderr.write('Available filters:\n')
sys.stderr.write('\n'.join(
'{:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n')
sys.exit(1)
filters = args.filter
else:
filters = ['default']
while True:
# no port given on command line -> ask user now
if args.port is None or args.port == '-':
try:
args.port = ask_for_port()
except KeyboardInterrupt:
sys.stderr.write('\n')
parser.error('user aborted and port is not given')
else:
if not args.port:
parser.error('port is not given')
try:
serial_instance = serial.serial_for_url(
args.port,
args.baudrate,
parity=args.parity,
rtscts=args.rtscts,
xonxoff=args.xonxoff,
do_not_open=True)
if not hasattr(serial_instance, 'cancel_read'):
# enable timeout for alive flag polling if cancel_read is not available
serial_instance.timeout = 1
if args.dtr is not None:
if not args.quiet:
sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
serial_instance.dtr = args.dtr
if args.rts is not None:
if not args.quiet:
sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
serial_instance.rts = args.rts
serial_instance.open()
except serial.SerialException as e:
sys.stderr.write('could not open port {}: {}\n'.format(repr(args.port), e))
if args.develop:
raise
if not args.ask:
sys.exit(1)
else:
args.port = '-'
else:
break
miniterm = Miniterm(
serial_instance,
echo=args.echo,
eol=args.eol.lower(),
filters=filters)
miniterm.exit_character = unichr(args.exit_char)
miniterm.menu_character = unichr(args.menu_char)
miniterm.raw = args.raw
miniterm.set_rx_encoding(args.serial_port_encoding)
miniterm.set_tx_encoding(args.serial_port_encoding)
if not args.quiet:
sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
p=miniterm.serial))
sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
key_description(miniterm.exit_character),
key_description(miniterm.menu_character),
key_description(miniterm.menu_character),
key_description('\x08')))
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not args.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
miniterm.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()